/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not been reused in the
 * meantime for some other kind of object (which our subsystem's lock might
 * corrupt).
 *
 * Take rcu_read_lock() before reading the address, then rcu_read_unlock()
 * after taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
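
/*
 * For example (struct foo and foo_cachep are hypothetical, purely for
 * illustration):
 *
 *	struct foo {
 *		spinlock_t lock;
 *		int refcount;
 *	} ____cacheline_aligned_in_smp;
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_PANIC);
 *
 * foo_cachep then hands out cacheline-aligned struct foo objects, and the
 * kernel panics at cache creation time if the cache cannot be set up.
 */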

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
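
/*
 * For example (struct foo and foo_cachep are hypothetical, purely for
 * illustration): only the 'name' field may be copied to/from userspace;
 * the rest of the object stays off limits to hardened usercopy checks:
 *
 *	struct foo {
 *		int internal_state;
 *		char name[32];
 *	};
 *
 *	foo_cachep = KMEM_CACHE_USERCOPY(foo, 0, name);
 */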

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte-sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte-sized index which can represent 2^8 entries, the size of the
 * object should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on using the byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#else
	KMALLOC_CGROUP,
#endif
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}
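
/*
 * For example, with CONFIG_ZONE_DMA and CONFIG_MEMCG_KMEM both enabled,
 * the tests above resolve to:
 *
 *	kmalloc_type(GFP_KERNEL)                     == KMALLOC_NORMAL
 *	kmalloc_type(GFP_DMA)                        == KMALLOC_DMA
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE) == KMALLOC_RECLAIM
 *	kmalloc_type(GFP_KERNEL | __GFP_ACCOUNT)     == KMALLOC_CGROUP
 */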

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#define kmalloc_index(s) __kmalloc_index(s, true)
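
/*
 * E.g. kmalloc_index(100) is 7 (the 128-byte cache), and
 * kmalloc_index(192) is 2 whenever KMALLOC_MIN_SIZE <= 64.
 */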
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
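
/*
 * A minimal bulk-allocation sketch ('my_cachep' is an assumed cache
 * created elsewhere; error handling reduced to the essentials):
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(my_cachep, GFP_KERNEL, 16, objs) != 16)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(my_cachep, 16, objs);
 */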

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									 __malloc;
#else
static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
				    __assume_slab_alignment __alloc_size(3);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment
								__alloc_size(4);
#else
static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
						gfp_t gfpflags, int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
								    gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
							 int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
									 __alloc_size(1);

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
				 __assume_page_alignment __alloc_size(1);
#else
static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
								  unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. When @size is a power of two, the alignment is also guaranteed
 * to be at least @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to satisfy the allocation, but fail
 *	eventually.
 */
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
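
/*
 * Typical usage (struct foo is hypothetical, purely for illustration):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *
 * Because sizeof(*f) is a compile-time constant, the cache lookup above
 * constant-folds to a single kmem_cache_alloc_trace() call (!CONFIG_SLOB).
 */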

static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}
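
/*
 * E.g. an array of 'n' u64 counters, with the n * sizeof(u64)
 * multiplication checked for overflow (illustrative only):
 *
 *	u64 *ctrs = kmalloc_array(n, sizeof(*ctrs), GFP_KERNEL);
 */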

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
								     size_t new_n,
								     size_t new_size,
								     gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
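
/*
 * A sketch of the intended use (names are illustrative): a generic helper
 * forwards its own return address via _RET_IP_, so leak reports blame the
 * helper's caller rather than the helper itself:
 *
 *	void *my_dup(const void *src, size_t len, gfp_t gfp)
 *	{
 *		void *p = kmalloc_track_caller(len, gfp);
 *
 *		if (p)
 *			memcpy(p, src, len);
 *		return p;
 *	}
 */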

static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
							   int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
				    _RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		      __alloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */