// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mempool.c
 *
 * memory buffer pool support. Such pools are mostly used
 * for guaranteed, deadlock-free memory allocations during
 * extreme VM load.
 *
 * started by Ingo Molnar, Copyright (C) 2001
 * debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include "slab.h"

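/*
 * When slab debugging is enabled, elements sitting in the reserve are
 * poisoned: every byte is set to POISON_FREE (0x6b) except the last one,
 * which gets POISON_END (0xa5). check_element() verifies that the pattern
 * is still intact when an element is handed back out (catching writes to
 * parked elements) and then fills the element with POISON_INUSE (0x5a).
 */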
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
                         size_t byte)
{
        const int nr = pool->curr_nr;
        const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
        const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
        int i;

        pr_err("BUG: mempool element poison mismatch\n");
        pr_err("Mempool %p size %zu\n", pool, size);
        pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
        for (i = start; i < end; i++)
                pr_cont("%x ", *(u8 *)(element + i));
        pr_cont("%s\n", end < size ? "..." : "");
        dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
        u8 *obj = element;
        size_t i;

        for (i = 0; i < size; i++) {
                u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

                if (obj[i] != exp) {
                        poison_error(pool, element, size, i);
                        return;
                }
        }
        memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
        /* Mempools backed by slab allocator */
        if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
                __check_element(pool, element, ksize(element));
        } else if (pool->free == mempool_free_pages) {
                /* Mempools backed by page allocator */
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);

                __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
                kunmap_atomic(addr);
        }
}

static void __poison_element(void *element, size_t size)
{
        u8 *obj = element;

        memset(obj, POISON_FREE, size - 1);
        obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
        /* Mempools backed by slab allocator */
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
                __poison_element(element, ksize(element));
        } else if (pool->alloc == mempool_alloc_pages) {
                /* Mempools backed by page allocator */
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);

                __poison_element(addr, 1UL << (PAGE_SHIFT + order));
                kunmap_atomic(addr);
        }
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_slab_free_mempool(element);
        else if (pool->alloc == mempool_alloc_pages)
                kasan_poison_pages(element, (unsigned long)pool->pool_data,
                                   false);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_unpoison_range(element, __ksize(element));
        else if (pool->alloc == mempool_alloc_pages)
                kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
                                     false);
}

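/*
 * The reserve is a simple stack of pointers in pool->elements:
 * add_element() pushes a free element and applies debug/KASAN poisoning,
 * remove_element() pops the most recently added element and unpoisons it.
 * Callers serialize access via pool->lock, or own the pool exclusively
 * during init/exit.
 */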
static __always_inline void add_element(mempool_t *pool, void *element)
{
        BUG_ON(pool->curr_nr >= pool->min_nr);
        poison_element(pool, element);
        kasan_poison_element(pool, element);
        pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
        void *element = pool->elements[--pool->curr_nr];

        BUG_ON(pool->curr_nr < 0);
        kasan_unpoison_element(pool, element);
        check_element(pool, element);
        return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool: pointer to the memory pool which was initialized with
 *        mempool_init().
 *
 * Free all reserved elements in @pool along with the internal element
 * array that held them; unlike mempool_destroy(), @pool itself is not
 * freed. This function only sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
        while (pool->curr_nr) {
                void *element = remove_element(pool);

                pool->free(element, pool->pool_data);
        }
        kfree(pool->elements);
        pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);

/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
        if (unlikely(!pool))
                return;

        mempool_exit(pool);
        kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                      mempool_free_t *free_fn, void *pool_data,
                      gfp_t gfp_mask, int node_id)
{
        spin_lock_init(&pool->lock);
        pool->min_nr = min_nr;
        pool->pool_data = pool_data;
        pool->alloc = alloc_fn;
        pool->free = free_fn;
        init_waitqueue_head(&pool->wait);

        pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
                                            gfp_mask, node_id);
        if (!pool->elements)
                return -ENOMEM;

        /*
         * First pre-allocate the guaranteed number of buffers.
         */
        while (pool->curr_nr < pool->min_nr) {
                void *element;

                element = pool->alloc(gfp_mask, pool->pool_data);
                if (unlikely(!element)) {
                        mempool_exit(pool);
                        return -ENOMEM;
                }
                add_element(pool, element);
        }

        return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool: pointer to the memory pool that should be initialized
 * @min_nr: the minimum number of elements guaranteed to be
 *          allocated for this pool.
 * @alloc_fn: user-defined element-allocation function.
 * @free_fn: user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded in
 * another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                 mempool_free_t *free_fn, void *pool_data)
{
        return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
                                 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);

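/*
 * Usage sketch (illustrative only, not part of this file): a pool embedded
 * in a driver-private structure, paired with mempool_exit() on teardown.
 * The structure, cache, element type and element count are hypothetical.
 *
 *	struct foo_dev {
 *		struct kmem_cache *cache;
 *		mempool_t pool;
 *	};
 *
 *	static int foo_dev_setup(struct foo_dev *dev)
 *	{
 *		dev->cache = kmem_cache_create("foo", sizeof(struct foo_elem),
 *					       0, 0, NULL);
 *		if (!dev->cache)
 *			return -ENOMEM;
 *		return mempool_init(&dev->pool, 4, mempool_alloc_slab,
 *				    mempool_free_slab, dev->cache);
 *	}
 *
 *	static void foo_dev_teardown(struct foo_dev *dev)
 *	{
 *		mempool_exit(&dev->pool);
 *		kmem_cache_destroy(dev->cache);
 *	}
 */
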
/**
 * mempool_create - create a memory pool
 * @min_nr: the minimum number of elements guaranteed to be
 *          allocated for this pool.
 * @alloc_fn: user-defined element-allocation function.
 * @free_fn: user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both alloc_fn()
 * and free_fn() may also sleep, as long as mempool_alloc() is never
 * called from IRQ context.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
                          mempool_free_t *free_fn, void *pool_data)
{
        return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
                                   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);

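/*
 * Usage sketch (illustrative only): a heap-allocated pool backed by an
 * existing slab cache; "foo_cache" is hypothetical. <linux/mempool.h>
 * also provides mempool_create_slab_pool() as shorthand for this pattern.
 *
 *	mempool_t *pool;
 *
 *	pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
 *			      foo_cache);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	mempool_destroy(pool);
 */
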
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
                               mempool_free_t *free_fn, void *pool_data,
                               gfp_t gfp_mask, int node_id)
{
        mempool_t *pool;

        pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
        if (!pool)
                return NULL;

        if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
                              gfp_mask, node_id)) {
                kfree(pool);
                return NULL;
        }

        return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note: the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
        void *element;
        void **new_elements;
        unsigned long flags;

        BUG_ON(new_min_nr <= 0);
        might_sleep();

        spin_lock_irqsave(&pool->lock, flags);
        if (new_min_nr <= pool->min_nr) {
                while (new_min_nr < pool->curr_nr) {
                        element = remove_element(pool);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);
                        spin_lock_irqsave(&pool->lock, flags);
                }
                pool->min_nr = new_min_nr;
                goto out_unlock;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        /* Grow the pool */
        new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
                                     GFP_KERNEL);
        if (!new_elements)
                return -ENOMEM;

        spin_lock_irqsave(&pool->lock, flags);
        if (unlikely(new_min_nr <= pool->min_nr)) {
                /* Raced, other resize will do our work */
                spin_unlock_irqrestore(&pool->lock, flags);
                kfree(new_elements);
                goto out;
        }
        memcpy(new_elements, pool->elements,
               pool->curr_nr * sizeof(*new_elements));
        kfree(pool->elements);
        pool->elements = new_elements;
        pool->min_nr = new_min_nr;

        while (pool->curr_nr < pool->min_nr) {
                spin_unlock_irqrestore(&pool->lock, flags);
                element = pool->alloc(GFP_KERNEL, pool->pool_data);
                if (!element)
                        goto out;
                spin_lock_irqsave(&pool->lock, flags);
                if (pool->curr_nr < pool->min_nr) {
                        add_element(pool, element);
                } else {
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data); /* Raced */
                        goto out;
                }
        }
out_unlock:
        spin_unlock_irqrestore(&pool->lock, flags);
out:
        return 0;
}
EXPORT_SYMBOL(mempool_resize);

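/*
 * Usage sketch (illustrative only): growing the reserve when the expected
 * number of concurrent users increases; "pool" and "old_min_nr" are
 * hypothetical. The new elements may also trickle in via mempool_free().
 *
 *	if (mempool_resize(pool, 2 * old_min_nr))
 *		pr_warn("foo: could not grow mempool reserve\n");
 */
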
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
        void *element;
        unsigned long flags;
        wait_queue_entry_t wait;
        gfp_t gfp_temp;

        VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
        might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

        gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
        gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
        gfp_mask |= __GFP_NOWARN;       /* failures are OK */

        gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

        element = pool->alloc(gfp_temp, pool->pool_data);
        if (likely(element != NULL))
                return element;

        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
                element = remove_element(pool);
                spin_unlock_irqrestore(&pool->lock, flags);
                /* paired with rmb in mempool_free(), read comment there */
                smp_wmb();
                /*
                 * Update the allocation stack trace as this is more useful
                 * for debugging.
                 */
                kmemleak_update_trace(element);
                return element;
        }

        /*
         * We use gfp mask w/o direct reclaim or IO for the first round. If
         * alloc failed with that and @pool was empty, retry immediately.
         */
        if (gfp_temp != gfp_mask) {
                spin_unlock_irqrestore(&pool->lock, flags);
                gfp_temp = gfp_mask;
                goto repeat_alloc;
        }

        /* We must not sleep if !__GFP_DIRECT_RECLAIM */
        if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
                spin_unlock_irqrestore(&pool->lock, flags);
                return NULL;
        }

        /* Let's wait for someone else to return an element to @pool */
        init_wait(&wait);
        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

        spin_unlock_irqrestore(&pool->lock, flags);

        /*
         * FIXME: this should be io_schedule(). The timeout is there as a
         * workaround for some DM problems in 2.6.18.
         */
        io_schedule_timeout(5*HZ);

        finish_wait(&pool->wait, &wait);
        goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);

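/*
 * Usage sketch (illustrative only): allocating on an I/O submission path.
 * With a mask that allows direct reclaim (e.g. GFP_NOIO) the call cannot
 * fail, so the NULL check below is only needed for masks like GFP_NOWAIT.
 * "pool" and "struct foo_elem" are hypothetical.
 *
 *	struct foo_elem *elem = mempool_alloc(pool, GFP_NOIO);
 *
 *	if (!elem)
 *		return -ENOMEM;
 */
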
/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
        unsigned long flags;

        if (unlikely(element == NULL))
                return;

        /*
         * Paired with the wmb in mempool_alloc(). The preceding read is
         * for @element and the following @pool->curr_nr. This ensures
         * that the visible value of @pool->curr_nr is from after the
         * allocation of @element. This is necessary for fringe cases
         * where @element was passed to this task without going through
         * barriers.
         *
         * For example, assume @p is %NULL at the beginning and one task
         * performs "p = mempool_alloc(...);" while another task is doing
         * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
         * may end up using curr_nr value which is from before allocation
         * of @p without the following rmb.
         */
        smp_rmb();

        /*
         * For correctness, we need a test which is guaranteed to trigger
         * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
         * without locking achieves that and refilling as soon as possible
         * is desirable.
         *
         * Because curr_nr visible here is always a value after the
         * allocation of @element, any task which decremented curr_nr below
         * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
         * incremented to min_nr afterwards. If curr_nr gets incremented
         * to min_nr after the allocation of @element, the elements
         * allocated after that are subject to the same guarantee.
         *
         * Waiters happen iff curr_nr is 0 and the above guarantee also
         * ensures that there will be frees which return elements to the
         * pool waking up the waiters.
         */
        if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
                spin_lock_irqsave(&pool->lock, flags);
                if (likely(pool->curr_nr < pool->min_nr)) {
                        add_element(pool, element);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        wake_up(&pool->wait);
                        return;
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }
        pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

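/*
 * Usage sketch (illustrative only): returning an element when the work it
 * backed completes; "pool", "struct foo_elem" and foo_end_io() are
 * hypothetical. The first frees after the reserve was dipped into refill
 * the pool and wake any waiters.
 *
 *	static void foo_end_io(struct foo_elem *elem)
 *	{
 *		mempool_free(elem, pool);
 *	}
 */
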
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
        struct kmem_cache *mem = pool_data;

        VM_BUG_ON(mem->ctor);
        return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
        struct kmem_cache *mem = pool_data;

        kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
        size_t size = (size_t)pool_data;

        return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
        kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);

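/*
 * Usage sketch (illustrative only): a pool of fixed-size kmalloc buffers,
 * with the buffer size smuggled through pool_data. <linux/mempool.h>
 * provides mempool_create_kmalloc_pool() as shorthand for this pattern.
 *
 *	mempool_t *pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *					 (void *)(unsigned long)512);
 */
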
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
        int order = (int)(long)pool_data;

        return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
        int order = (int)(long)pool_data;

        __free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
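
/*
 * Usage sketch (illustrative only): a pool of order-1 (two-page) blocks,
 * with the page order smuggled through pool_data. <linux/mempool.h>
 * provides mempool_create_page_pool() as shorthand for this pattern.
 *
 *	mempool_t *pool = mempool_create(4, mempool_alloc_pages,
 *					 mempool_free_pages, (void *)1L);
 */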
|