LCOV - code coverage report
Current view: top level - mm - slab_common.c (source / functions)
Test: coverage.info          Lines:     148 / 327   (45.3 %)
Date: 2022-12-09 01:23:36    Functions:  16 /  39   (41.0 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Slab allocator functions that are independent of the allocator strategy
       4             :  *
       5             :  * (C) 2012 Christoph Lameter <cl@linux.com>
       6             :  */
       7             : #include <linux/slab.h>
       8             : 
       9             : #include <linux/mm.h>
      10             : #include <linux/poison.h>
      11             : #include <linux/interrupt.h>
      12             : #include <linux/memory.h>
      13             : #include <linux/cache.h>
      14             : #include <linux/compiler.h>
      15             : #include <linux/kfence.h>
      16             : #include <linux/module.h>
      17             : #include <linux/cpu.h>
      18             : #include <linux/uaccess.h>
      19             : #include <linux/seq_file.h>
      20             : #include <linux/proc_fs.h>
      21             : #include <linux/debugfs.h>
      22             : #include <linux/kasan.h>
      23             : #include <asm/cacheflush.h>
      24             : #include <asm/tlbflush.h>
      25             : #include <asm/page.h>
      26             : #include <linux/memcontrol.h>
      27             : 
      28             : #define CREATE_TRACE_POINTS
      29             : #include <trace/events/kmem.h>
      30             : 
      31             : #include "internal.h"
      32             : 
      33             : #include "slab.h"
      34             : 
      35             : enum slab_state slab_state;
      36             : LIST_HEAD(slab_caches);
      37             : DEFINE_MUTEX(slab_mutex);
      38             : struct kmem_cache *kmem_cache;
      39             : 
      40             : static LIST_HEAD(slab_caches_to_rcu_destroy);
      41             : static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
      42             : static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
      43             :                     slab_caches_to_rcu_destroy_workfn);
      44             : 
      45             : /*
      46             :  * Set of flags that will prevent slab merging
      47             :  */
      48             : #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
      49             :                 SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
      50             :                 SLAB_FAILSLAB | kasan_never_merge())
      51             : 
      52             : #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
      53             :                          SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
      54             : 
      55             : /*
      56             :  * Merge control. If this is set then no merging of slab caches will occur.
      57             :  */
      58             : static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
      59             : 
      60           0 : static int __init setup_slab_nomerge(char *str)
      61             : {
      62           0 :         slab_nomerge = true;
      63           0 :         return 1;
      64             : }
      65             : 
      66           0 : static int __init setup_slab_merge(char *str)
      67             : {
      68           0 :         slab_nomerge = false;
      69           0 :         return 1;
      70             : }
      71             : 
      72             : #ifdef CONFIG_SLUB
      73             : __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
      74             : __setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
      75             : #endif
      76             : 
      77             : __setup("slab_nomerge", setup_slab_nomerge);
      78             : __setup("slab_merge", setup_slab_merge);
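
/*
 * [Editor's sketch, not part of slab_common.c] The handlers above wire the
 * merge control to the kernel command line: booting with "slab_nomerge"
 * (or "slub_nomerge" under CONFIG_SLUB) disables merging, while
 * "slab_merge" re-enables it. The same __setup() pattern for a
 * hypothetical boot flag would look like this; "slab_demo" and
 * slab_demo_enabled are invented names for illustration only.
 */
static bool slab_demo_enabled __initdata;

static int __init setup_slab_demo(char *str)
{
        slab_demo_enabled = true;       /* "slab_demo" was on the command line */
        return 1;                       /* parameter handled */
}
__setup("slab_demo", setup_slab_demo);
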
      79             : 
      80             : /*
      81             :  * Determine the size of a slab object
      82             :  */
      83           0 : unsigned int kmem_cache_size(struct kmem_cache *s)
      84             : {
      85           0 :         return s->object_size;
      86             : }
      87             : EXPORT_SYMBOL(kmem_cache_size);
      88             : 
      89             : #ifdef CONFIG_DEBUG_VM
      90             : static int kmem_cache_sanity_check(const char *name, unsigned int size)
      91             : {
      92             :         if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
      93             :                 pr_err("kmem_cache_create(%s) integrity check failed\n", name);
      94             :                 return -EINVAL;
      95             :         }
      96             : 
      97             :         WARN_ON(strchr(name, ' '));     /* It confuses parsers */
      98             :         return 0;
      99             : }
     100             : #else
     101             : static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
     102             : {
     103             :         return 0;
     104             : }
     105             : #endif
     106             : 
     107           0 : void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
     108             : {
     109             :         size_t i;
     110             : 
     111           0 :         for (i = 0; i < nr; i++) {
     112           0 :                 if (s)
     113           0 :                         kmem_cache_free(s, p[i]);
     114             :                 else
     115           0 :                         kfree(p[i]);
     116             :         }
     117           0 : }
     118             : 
     119           0 : int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
     120             :                                                                 void **p)
     121             : {
     122             :         size_t i;
     123             : 
     124           0 :         for (i = 0; i < nr; i++) {
     125           0 :                 void *x = p[i] = kmem_cache_alloc(s, flags);
     126           0 :                 if (!x) {
     127           0 :                         __kmem_cache_free_bulk(s, i, p);
     128           0 :                         return 0;
     129             :                 }
     130             :         }
     131           0 :         return i;
     132             : }
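
/*
 * [Editor's sketch, not part of slab_common.c] The two loops above are the
 * generic fallbacks behind the public bulk API. A hedged usage sketch of
 * kmem_cache_alloc_bulk()/kmem_cache_free_bulk(); the cache pointer and
 * function name are hypothetical.
 */
static int demo_bulk_usage(struct kmem_cache *demo_cache)
{
        void *objs[16];

        /* Returns the number allocated; 0 means the whole batch failed. */
        if (!kmem_cache_alloc_bulk(demo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
                return -ENOMEM;

        /* ... use the objects ... */

        kmem_cache_free_bulk(demo_cache, ARRAY_SIZE(objs), objs);
        return 0;
}
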
     133             : 
     134             : /*
     135             :  * Figure out what the alignment of the objects will be given a set of
     136             :  * flags, a user specified alignment and the size of the objects.
     137             :  */
     138             : static unsigned int calculate_alignment(slab_flags_t flags,
     139             :                 unsigned int align, unsigned int size)
     140             : {
     141             :         /*
     142             :          * If the user wants hardware cache aligned objects then follow that
     143             :          * suggestion if the object is sufficiently large.
     144             :          *
     145             :          * The hardware cache alignment cannot override the specified
      146             :  * alignment though. If that is greater, then use it.
     147             :          */
     148         114 :         if (flags & SLAB_HWCACHE_ALIGN) {
     149             :                 unsigned int ralign;
     150             : 
     151          37 :                 ralign = cache_line_size();
     152          38 :                 while (size <= ralign / 2)
     153             :                         ralign /= 2;
     154          37 :                 align = max(align, ralign);
     155             :         }
     156             : 
     157         114 :         if (align < ARCH_SLAB_MINALIGN)
     158          29 :                 align = ARCH_SLAB_MINALIGN;
     159             : 
     160         114 :         return ALIGN(align, sizeof(void *));
     161             : }
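
/*
 * [Editor's worked example, not part of slab_common.c] Assuming a 64-byte
 * cache_line_size() and a small ARCH_SLAB_MINALIGN (both are
 * architecture-dependent):
 *
 *   calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 24)
 *     ralign = 64; 24 <= 32 -> ralign = 32; 24 <= 16 fails -> stop
 *     align  = max(0, 32)   = 32
 *     32 is already a multiple of sizeof(void *), so the result is 32:
 *     a 24-byte object gets 32-byte alignment rather than a full cache
 *     line, because it is not "sufficiently large".
 */
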
     162             : 
     163             : /*
     164             :  * Find a mergeable slab cache
     165             :  */
     166          67 : int slab_unmergeable(struct kmem_cache *s)
     167             : {
     168        2080 :         if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
     169             :                 return 1;
     170             : 
     171        2008 :         if (s->ctor)
     172             :                 return 1;
     173             : 
     174        1879 :         if (s->usersize)
     175             :                 return 1;
     176             : 
     177             :         /*
     178             :          * We may have set a slab to be unmergeable during bootstrap.
     179             :          */
     180         530 :         if (s->refcount < 0)
     181             :                 return 1;
     182             : 
     183          24 :         return 0;
     184             : }
     185             : 
     186          54 : struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
     187             :                 slab_flags_t flags, const char *name, void (*ctor)(void *))
     188             : {
     189             :         struct kmem_cache *s;
     190             : 
     191          54 :         if (slab_nomerge)
     192             :                 return NULL;
     193             : 
     194          54 :         if (ctor)
     195             :                 return NULL;
     196             : 
     197          47 :         size = ALIGN(size, sizeof(void *));
     198          47 :         align = calculate_alignment(flags, align, size);
     199          47 :         size = ALIGN(size, align);
     200          47 :         flags = kmem_cache_flags(size, flags, name);
     201             : 
     202          47 :         if (flags & SLAB_NEVER_MERGE)
     203             :                 return NULL;
     204             : 
     205        2037 :         list_for_each_entry_reverse(s, &slab_caches, list) {
     206        2013 :                 if (slab_unmergeable(s))
     207        1599 :                         continue;
     208             : 
     209         414 :                 if (size > s->size)
     210         186 :                         continue;
     211             : 
     212         228 :                 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
     213          13 :                         continue;
     214             :                 /*
     215             :                  * Check if alignment is compatible.
     216             :                  * Courtesy of Adrian Drzewiecki
     217             :                  */
     218         215 :                 if ((s->size & ~(align - 1)) != s->size)
     219          15 :                         continue;
     220             : 
     221         200 :                 if (s->size - size >= sizeof(void *))
     222         179 :                         continue;
     223             : 
     224             :                 if (IS_ENABLED(CONFIG_SLAB) && align &&
     225             :                         (align > s->align || s->align % align))
     226             :                         continue;
     227             : 
     228             :                 return s;
     229             :         }
     230             :         return NULL;
     231             : }
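
/*
 * [Editor's sketch, not part of slab_common.c] Illustration of the merge
 * criteria above with two hypothetical caches. With merging enabled,
 * "demo_b" will usually alias the cache created for "demo_a": both round
 * up to the same object size, neither has a constructor or usercopy
 * region, and their SLAB_MERGE_SAME flags (none here) match.
 */
static int __init demo_merge_example(void)
{
        struct kmem_cache *a = kmem_cache_create("demo_a", 52, 0, 0, NULL);
        struct kmem_cache *b = kmem_cache_create("demo_b", 56, 0, 0, NULL);

        return (a && b) ? 0 : -ENOMEM;
}
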
     232             : 
     233          39 : static struct kmem_cache *create_cache(const char *name,
     234             :                 unsigned int object_size, unsigned int align,
     235             :                 slab_flags_t flags, unsigned int useroffset,
     236             :                 unsigned int usersize, void (*ctor)(void *),
     237             :                 struct kmem_cache *root_cache)
     238             : {
     239             :         struct kmem_cache *s;
     240             :         int err;
     241             : 
     242          39 :         if (WARN_ON(useroffset + usersize > object_size))
     243           0 :                 useroffset = usersize = 0;
     244             : 
     245          39 :         err = -ENOMEM;
     246          78 :         s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
     247          39 :         if (!s)
     248             :                 goto out;
     249             : 
     250          39 :         s->name = name;
     251          39 :         s->size = s->object_size = object_size;
     252          39 :         s->align = align;
     253          39 :         s->ctor = ctor;
     254          39 :         s->useroffset = useroffset;
     255          39 :         s->usersize = usersize;
     256             : 
     257          39 :         err = __kmem_cache_create(s, flags);
     258          39 :         if (err)
     259             :                 goto out_free_cache;
     260             : 
     261          39 :         s->refcount = 1;
     262          39 :         list_add(&s->list, &slab_caches);
     263             : out:
     264          39 :         if (err)
     265           0 :                 return ERR_PTR(err);
     266             :         return s;
     267             : 
     268             : out_free_cache:
     269           0 :         kmem_cache_free(kmem_cache, s);
     270             :         goto out;
     271             : }
     272             : 
     273             : /**
     274             :  * kmem_cache_create_usercopy - Create a cache with a region suitable
     275             :  * for copying to userspace
     276             :  * @name: A string which is used in /proc/slabinfo to identify this cache.
     277             :  * @size: The size of objects to be created in this cache.
     278             :  * @align: The required alignment for the objects.
     279             :  * @flags: SLAB flags
     280             :  * @useroffset: Usercopy region offset
     281             :  * @usersize: Usercopy region size
     282             :  * @ctor: A constructor for the objects.
     283             :  *
      284             :  * Cannot be called within an interrupt, but can be interrupted.
     285             :  * The @ctor is run when new pages are allocated by the cache.
     286             :  *
     287             :  * The flags are
     288             :  *
     289             :  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
     290             :  * to catch references to uninitialised memory.
     291             :  *
     292             :  * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
     293             :  * for buffer overruns.
     294             :  *
     295             :  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
     296             :  * cacheline.  This can be beneficial if you're counting cycles as closely
     297             :  * as davem.
     298             :  *
     299             :  * Return: a pointer to the cache on success, NULL on failure.
     300             :  */
     301             : struct kmem_cache *
     302          60 : kmem_cache_create_usercopy(const char *name,
     303             :                   unsigned int size, unsigned int align,
     304             :                   slab_flags_t flags,
     305             :                   unsigned int useroffset, unsigned int usersize,
     306             :                   void (*ctor)(void *))
     307             : {
     308          60 :         struct kmem_cache *s = NULL;
     309             :         const char *cache_name;
     310             :         int err;
     311             : 
     312             : #ifdef CONFIG_SLUB_DEBUG
     313             :         /*
     314             :          * If no slub_debug was enabled globally, the static key is not yet
     315             :          * enabled by setup_slub_debug(). Enable it if the cache is being
     316             :          * created with any of the debugging flags passed explicitly.
     317             :          */
     318          60 :         if (flags & SLAB_DEBUG_FLAGS)
     319           0 :                 static_branch_enable(&slub_debug_enabled);
     320             : #endif
     321             : 
     322          60 :         mutex_lock(&slab_mutex);
     323             : 
     324          60 :         err = kmem_cache_sanity_check(name, size);
     325             :         if (err) {
     326             :                 goto out_unlock;
     327             :         }
     328             : 
     329             :         /* Refuse requests with allocator specific flags */
     330          60 :         if (flags & ~SLAB_FLAGS_PERMITTED) {
     331             :                 err = -EINVAL;
     332             :                 goto out_unlock;
     333             :         }
     334             : 
     335             :         /*
      336             :          * Some allocators will constrain the set of valid flags to a subset
     337             :          * of all flags. We expect them to define CACHE_CREATE_MASK in this
     338             :          * case, and we'll just provide them with a sanitized version of the
     339             :          * passed flags.
     340             :          */
     341          60 :         flags &= CACHE_CREATE_MASK;
     342             : 
      343             :         /* Fail closed on bad usersize or useroffset values. */
     344         120 :         if (WARN_ON(!usersize && useroffset) ||
     345          60 :             WARN_ON(size < usersize || size - usersize < useroffset))
     346             :                 usersize = useroffset = 0;
     347             : 
     348          60 :         if (!usersize)
     349          54 :                 s = __kmem_cache_alias(name, size, align, flags, ctor);
     350          60 :         if (s)
     351             :                 goto out_unlock;
     352             : 
     353          39 :         cache_name = kstrdup_const(name, GFP_KERNEL);
     354          39 :         if (!cache_name) {
     355             :                 err = -ENOMEM;
     356             :                 goto out_unlock;
     357             :         }
     358             : 
     359          39 :         s = create_cache(cache_name, size,
     360             :                          calculate_alignment(flags, align, size),
     361             :                          flags, useroffset, usersize, ctor, NULL);
     362          39 :         if (IS_ERR(s)) {
     363           0 :                 err = PTR_ERR(s);
     364           0 :                 kfree_const(cache_name);
     365             :         }
     366             : 
     367             : out_unlock:
     368          60 :         mutex_unlock(&slab_mutex);
     369             : 
     370          60 :         if (err) {
     371           0 :                 if (flags & SLAB_PANIC)
     372           0 :                         panic("%s: Failed to create slab '%s'. Error %d\n",
     373             :                                 __func__, name, err);
     374             :                 else {
     375           0 :                         pr_warn("%s(%s) failed with error %d\n",
     376             :                                 __func__, name, err);
     377           0 :                         dump_stack();
     378             :                 }
     379           0 :                 return NULL;
     380             :         }
     381             :         return s;
     382             : }
     383             : EXPORT_SYMBOL(kmem_cache_create_usercopy);
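
/*
 * [Editor's sketch, not part of slab_common.c] A hedged example of
 * restricting the usercopy region to one field; struct demo_obj,
 * demo_cachep and demo_cache_init() are hypothetical.
 */
struct demo_obj {
        spinlock_t lock;                /* never exposed to user space */
        char payload[128];              /* the only user-copyable part */
};

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
        demo_cachep = kmem_cache_create_usercopy("demo_obj",
                        sizeof(struct demo_obj), 0, SLAB_HWCACHE_ALIGN,
                        offsetof(struct demo_obj, payload),
                        sizeof_field(struct demo_obj, payload),
                        NULL);
        return demo_cachep ? 0 : -ENOMEM;
}
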
     384             : 
     385             : /**
     386             :  * kmem_cache_create - Create a cache.
     387             :  * @name: A string which is used in /proc/slabinfo to identify this cache.
     388             :  * @size: The size of objects to be created in this cache.
     389             :  * @align: The required alignment for the objects.
     390             :  * @flags: SLAB flags
     391             :  * @ctor: A constructor for the objects.
     392             :  *
      393             :  * Cannot be called within an interrupt, but can be interrupted.
     394             :  * The @ctor is run when new pages are allocated by the cache.
     395             :  *
     396             :  * The flags are
     397             :  *
     398             :  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
     399             :  * to catch references to uninitialised memory.
     400             :  *
     401             :  * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
     402             :  * for buffer overruns.
     403             :  *
     404             :  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
     405             :  * cacheline.  This can be beneficial if you're counting cycles as closely
     406             :  * as davem.
     407             :  *
     408             :  * Return: a pointer to the cache on success, NULL on failure.
     409             :  */
     410             : struct kmem_cache *
     411          54 : kmem_cache_create(const char *name, unsigned int size, unsigned int align,
     412             :                 slab_flags_t flags, void (*ctor)(void *))
     413             : {
     414          54 :         return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
     415             :                                           ctor);
     416             : }
     417             : EXPORT_SYMBOL(kmem_cache_create);
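
/*
 * [Editor's sketch, not part of slab_common.c] The usual life cycle built
 * on the helper above; struct widget and the function names are
 * hypothetical.
 */
struct widget {
        int id;
};

static struct kmem_cache *widget_cachep;

static int __init widget_cache_init(void)
{
        widget_cachep = kmem_cache_create("widget", sizeof(struct widget),
                                          0, SLAB_HWCACHE_ALIGN, NULL);
        return widget_cachep ? 0 : -ENOMEM;
}

static void widget_example(void)
{
        struct widget *w = kmem_cache_alloc(widget_cachep, GFP_KERNEL);

        if (w)
                kmem_cache_free(widget_cachep, w);
}
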
     418             : 
     419           0 : static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
     420             : {
     421           0 :         LIST_HEAD(to_destroy);
     422             :         struct kmem_cache *s, *s2;
     423             : 
     424             :         /*
     425             :          * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
     426             :          * @slab_caches_to_rcu_destroy list.  The slab pages are freed
      427             :  * through RCU and the associated kmem_caches are dereferenced
     428             :          * while freeing the pages, so the kmem_caches should be freed only
     429             :          * after the pending RCU operations are finished.  As rcu_barrier()
     430             :          * is a pretty slow operation, we batch all pending destructions
     431             :          * asynchronously.
     432             :          */
     433           0 :         mutex_lock(&slab_mutex);
     434           0 :         list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
     435           0 :         mutex_unlock(&slab_mutex);
     436             : 
     437           0 :         if (list_empty(&to_destroy))
     438           0 :                 return;
     439             : 
     440           0 :         rcu_barrier();
     441             : 
     442           0 :         list_for_each_entry_safe(s, s2, &to_destroy, list) {
     443           0 :                 debugfs_slab_release(s);
     444           0 :                 kfence_shutdown_cache(s);
     445             : #ifdef SLAB_SUPPORTS_SYSFS
     446           0 :                 sysfs_slab_release(s);
     447             : #else
     448             :                 slab_kmem_cache_release(s);
     449             : #endif
     450             :         }
     451             : }
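
/*
 * [Editor's sketch, not part of slab_common.c] The deferred-destruction
 * path above is only taken for caches created with SLAB_TYPESAFE_BY_RCU,
 * whose slab pages must survive an RCU grace period. A hedged example of
 * such a cache; the name and variable are hypothetical.
 */
static struct kmem_cache *rcu_demo_cachep;

static int __init rcu_demo_init(void)
{
        rcu_demo_cachep = kmem_cache_create("rcu_demo", 64, 0,
                                            SLAB_TYPESAFE_BY_RCU, NULL);
        return rcu_demo_cachep ? 0 : -ENOMEM;
}
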
     452             : 
     453           0 : static int shutdown_cache(struct kmem_cache *s)
     454             : {
     455             :         /* free asan quarantined objects */
     456           0 :         kasan_cache_shutdown(s);
     457             : 
     458           0 :         if (__kmem_cache_shutdown(s) != 0)
     459             :                 return -EBUSY;
     460             : 
     461           0 :         list_del(&s->list);
     462             : 
     463           0 :         if (s->flags & SLAB_TYPESAFE_BY_RCU) {
     464             : #ifdef SLAB_SUPPORTS_SYSFS
     465           0 :                 sysfs_slab_unlink(s);
     466             : #endif
     467           0 :                 list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
     468             :                 schedule_work(&slab_caches_to_rcu_destroy_work);
     469             :         } else {
     470           0 :                 kfence_shutdown_cache(s);
     471           0 :                 debugfs_slab_release(s);
     472             : #ifdef SLAB_SUPPORTS_SYSFS
     473           0 :                 sysfs_slab_unlink(s);
     474           0 :                 sysfs_slab_release(s);
     475             : #else
     476             :                 slab_kmem_cache_release(s);
     477             : #endif
     478             :         }
     479             : 
     480             :         return 0;
     481             : }
     482             : 
     483           0 : void slab_kmem_cache_release(struct kmem_cache *s)
     484             : {
     485           0 :         __kmem_cache_release(s);
     486           0 :         kfree_const(s->name);
     487           0 :         kmem_cache_free(kmem_cache, s);
     488           0 : }
     489             : 
     490           0 : void kmem_cache_destroy(struct kmem_cache *s)
     491             : {
     492           0 :         if (unlikely(!s) || !kasan_check_byte(s))
     493             :                 return;
     494             : 
     495             :         cpus_read_lock();
     496           0 :         mutex_lock(&slab_mutex);
     497             : 
     498           0 :         s->refcount--;
     499           0 :         if (s->refcount)
     500             :                 goto out_unlock;
     501             : 
     502           0 :         WARN(shutdown_cache(s),
     503             :              "%s %s: Slab cache still has objects when called from %pS",
     504             :              __func__, s->name, (void *)_RET_IP_);
     505             : out_unlock:
     506           0 :         mutex_unlock(&slab_mutex);
     507             :         cpus_read_unlock();
     508             : }
     509             : EXPORT_SYMBOL(kmem_cache_destroy);
     510             : 
     511             : /**
     512             :  * kmem_cache_shrink - Shrink a cache.
     513             :  * @cachep: The cache to shrink.
     514             :  *
     515             :  * Releases as many slabs as possible for a cache.
     516             :  * To help debugging, a zero exit status indicates all slabs were released.
     517             :  *
     518             :  * Return: %0 if all slabs were released, non-zero otherwise
     519             :  */
     520           0 : int kmem_cache_shrink(struct kmem_cache *cachep)
     521             : {
     522             :         int ret;
     523             : 
     524             : 
     525           0 :         kasan_cache_shrink(cachep);
     526           0 :         ret = __kmem_cache_shrink(cachep);
     527             : 
     528           0 :         return ret;
     529             : }
     530             : EXPORT_SYMBOL(kmem_cache_shrink);
     531             : 
     532          22 : bool slab_is_available(void)
     533             : {
     534          22 :         return slab_state >= UP;
     535             : }
     536             : 
     537             : #ifdef CONFIG_PRINTK
     538             : /**
     539             :  * kmem_valid_obj - does the pointer reference a valid slab object?
     540             :  * @object: pointer to query.
     541             :  *
     542             :  * Return: %true if the pointer is to a not-yet-freed object from
     543             :  * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
     544             :  * is to an already-freed object, and %false otherwise.
     545             :  */
     546           0 : bool kmem_valid_obj(void *object)
     547             : {
     548             :         struct folio *folio;
     549             : 
     550             :         /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
     551           0 :         if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
     552             :                 return false;
     553           0 :         folio = virt_to_folio(object);
     554           0 :         return folio_test_slab(folio);
     555             : }
     556             : EXPORT_SYMBOL_GPL(kmem_valid_obj);
     557             : 
     558             : static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
     559             : {
     560           0 :         if (__kfence_obj_info(kpp, object, slab))
     561             :                 return;
     562           0 :         __kmem_obj_info(kpp, object, slab);
     563             : }
     564             : 
     565             : /**
     566             :  * kmem_dump_obj - Print available slab provenance information
     567             :  * @object: slab object for which to find provenance information.
     568             :  *
     569             :  * This function uses pr_cont(), so that the caller is expected to have
     570             :  * printed out whatever preamble is appropriate.  The provenance information
     571             :  * depends on the type of object and on how much debugging is enabled.
     572             :  * For a slab-cache object, the fact that it is a slab object is printed,
     573             :  * and, if available, the slab name, return address, and stack trace from
     574             :  * the allocation and last free path of that object.
     575             :  *
     576             :  * This function will splat if passed a pointer to a non-slab object.
     577             :  * If you are not sure what type of object you have, you should instead
     578             :  * use mem_dump_obj().
     579             :  */
     580           0 : void kmem_dump_obj(void *object)
     581             : {
     582           0 :         char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
     583             :         int i;
     584             :         struct slab *slab;
     585             :         unsigned long ptroffset;
     586           0 :         struct kmem_obj_info kp = { };
     587             : 
     588           0 :         if (WARN_ON_ONCE(!virt_addr_valid(object)))
     589           0 :                 return;
     590           0 :         slab = virt_to_slab(object);
     591           0 :         if (WARN_ON_ONCE(!slab)) {
     592           0 :                 pr_cont(" non-slab memory.\n");
     593           0 :                 return;
     594             :         }
     595           0 :         kmem_obj_info(&kp, object, slab);
     596           0 :         if (kp.kp_slab_cache)
     597           0 :                 pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
     598             :         else
     599           0 :                 pr_cont(" slab%s", cp);
     600           0 :         if (is_kfence_address(object))
     601             :                 pr_cont(" (kfence)");
     602           0 :         if (kp.kp_objp)
     603           0 :                 pr_cont(" start %px", kp.kp_objp);
     604           0 :         if (kp.kp_data_offset)
     605           0 :                 pr_cont(" data offset %lu", kp.kp_data_offset);
     606           0 :         if (kp.kp_objp) {
     607           0 :                 ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
     608           0 :                 pr_cont(" pointer offset %lu", ptroffset);
     609             :         }
     610           0 :         if (kp.kp_slab_cache && kp.kp_slab_cache->usersize)
     611           0 :                 pr_cont(" size %u", kp.kp_slab_cache->usersize);
     612           0 :         if (kp.kp_ret)
     613           0 :                 pr_cont(" allocated at %pS\n", kp.kp_ret);
     614             :         else
     615           0 :                 pr_cont("\n");
     616           0 :         for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
     617           0 :                 if (!kp.kp_stack[i])
     618             :                         break;
     619           0 :                 pr_info("    %pS\n", kp.kp_stack[i]);
     620             :         }
     621             : 
     622           0 :         if (kp.kp_free_stack[0])
     623           0 :                 pr_cont(" Free path:\n");
     624             : 
     625           0 :         for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
     626           0 :                 if (!kp.kp_free_stack[i])
     627             :                         break;
     628           0 :                 pr_info("    %pS\n", kp.kp_free_stack[i]);
     629             :         }
     630             : 
     631             : }
     632             : EXPORT_SYMBOL_GPL(kmem_dump_obj);
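
/*
 * [Editor's sketch, not part of slab_common.c] The intended debugging
 * pattern: guard kmem_dump_obj() with kmem_valid_obj() when the pointer's
 * origin is uncertain (callers with no idea at all should use
 * mem_dump_obj() instead). The function name is hypothetical. The preamble
 * deliberately omits the trailing newline so that the pr_cont() output
 * above continues on the same line.
 */
static void demo_report_pointer(void *p)
{
        if (kmem_valid_obj(p)) {
                pr_info("suspect pointer %px:", p);
                kmem_dump_obj(p);
        } else {
                pr_info("suspect pointer %px: not a slab object\n", p);
        }
}
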
     633             : #endif
     634             : 
     635             : #ifndef CONFIG_SLOB
     636             : /* Create a cache during boot when no slab services are available yet */
     637          28 : void __init create_boot_cache(struct kmem_cache *s, const char *name,
     638             :                 unsigned int size, slab_flags_t flags,
     639             :                 unsigned int useroffset, unsigned int usersize)
     640             : {
     641             :         int err;
     642          28 :         unsigned int align = ARCH_KMALLOC_MINALIGN;
     643             : 
     644          28 :         s->name = name;
     645          28 :         s->size = s->object_size = size;
     646             : 
     647             :         /*
     648             :          * For power of two sizes, guarantee natural alignment for kmalloc
     649             :          * caches, regardless of SL*B debugging options.
     650             :          */
     651          56 :         if (is_power_of_2(size))
     652          22 :                 align = max(align, size);
     653          28 :         s->align = calculate_alignment(flags, align, size);
     654             : 
     655          28 :         s->useroffset = useroffset;
     656          28 :         s->usersize = usersize;
     657             : 
     658          28 :         err = __kmem_cache_create(s, flags);
     659             : 
     660          28 :         if (err)
     661           0 :                 panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
     662             :                                         name, size, err);
     663             : 
     664          28 :         s->refcount = -1;    /* Exempt from merging for now */
     665          28 : }
     666             : 
     667          26 : struct kmem_cache *__init create_kmalloc_cache(const char *name,
     668             :                 unsigned int size, slab_flags_t flags,
     669             :                 unsigned int useroffset, unsigned int usersize)
     670             : {
     671          52 :         struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
     672             : 
     673          26 :         if (!s)
     674           0 :                 panic("Out of memory when creating slab %s\n", name);
     675             : 
     676          26 :         create_boot_cache(s, name, size, flags, useroffset, usersize);
     677             :         kasan_cache_create_kmalloc(s);
     678          52 :         list_add(&s->list, &slab_caches);
     679          26 :         s->refcount = 1;
     680          26 :         return s;
     681             : }
     682             : 
     683             : struct kmem_cache *
     684             : kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
     685             : { /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
     686             : EXPORT_SYMBOL(kmalloc_caches);
     687             : 
     688             : /*
     689             :  * Conversion table for small slabs sizes / 8 to the index in the
     690             :  * kmalloc array. This is necessary for slabs < 192 since we have non power
     691             :  * of two cache sizes there. The size of larger slabs can be determined using
     692             :  * fls.
     693             :  */
     694             : static u8 size_index[24] __ro_after_init = {
     695             :         3,      /* 8 */
     696             :         4,      /* 16 */
     697             :         5,      /* 24 */
     698             :         5,      /* 32 */
     699             :         6,      /* 40 */
     700             :         6,      /* 48 */
     701             :         6,      /* 56 */
     702             :         6,      /* 64 */
     703             :         1,      /* 72 */
     704             :         1,      /* 80 */
     705             :         1,      /* 88 */
     706             :         1,      /* 96 */
     707             :         7,      /* 104 */
     708             :         7,      /* 112 */
     709             :         7,      /* 120 */
     710             :         7,      /* 128 */
     711             :         2,      /* 136 */
     712             :         2,      /* 144 */
     713             :         2,      /* 152 */
     714             :         2,      /* 160 */
     715             :         2,      /* 168 */
     716             :         2,      /* 176 */
     717             :         2,      /* 184 */
     718             :         2       /* 192 */
     719             : };
     720             : 
     721             : static inline unsigned int size_index_elem(unsigned int bytes)
     722             : {
     723        3327 :         return (bytes - 1) / 8;
     724             : }
     725             : 
     726             : /*
     727             :  * Find the kmem_cache structure that serves a given size of
     728             :  * allocation
     729             :  */
     730        3398 : struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
     731             : {
     732             :         unsigned int index;
     733             : 
     734        3398 :         if (size <= 192) {
     735        3327 :                 if (!size)
     736             :                         return ZERO_SIZE_PTR;
     737             : 
     738        6654 :                 index = size_index[size_index_elem(size)];
     739             :         } else {
     740          71 :                 if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
     741             :                         return NULL;
     742         142 :                 index = fls(size - 1);
     743             :         }
     744             : 
     745        3398 :         return kmalloc_caches[kmalloc_type(flags)][index];
     746             : }
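
/*
 * [Editor's worked example, not part of slab_common.c] Two lookups through
 * the function above, assuming the default KMALLOC_MIN_SIZE:
 *
 *   kmalloc_slab(100, GFP_KERNEL)
 *     100 <= 192 -> size_index[(100 - 1) / 8] = size_index[12] = 7
 *     -> the kmalloc-128 cache (requests of 97..128 bytes share it)
 *
 *   kmalloc_slab(300, GFP_KERNEL)
 *     300 > 192  -> index = fls(299) = 9 -> the kmalloc-512 cache
 */
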
     747             : 
     748             : #ifdef CONFIG_ZONE_DMA
     749             : #define KMALLOC_DMA_NAME(sz)    .name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
     750             : #else
     751             : #define KMALLOC_DMA_NAME(sz)
     752             : #endif
     753             : 
     754             : #ifdef CONFIG_MEMCG_KMEM
     755             : #define KMALLOC_CGROUP_NAME(sz) .name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
     756             : #else
     757             : #define KMALLOC_CGROUP_NAME(sz)
     758             : #endif
     759             : 
     760             : #define INIT_KMALLOC_INFO(__size, __short_size)                 \
     761             : {                                                               \
     762             :         .name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,    \
     763             :         .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,        \
     764             :         KMALLOC_CGROUP_NAME(__short_size)                       \
     765             :         KMALLOC_DMA_NAME(__short_size)                          \
     766             :         .size = __size,                                         \
     767             : }
     768             : 
     769             : /*
     770             :  * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
     771             :  * kmalloc_index() supports up to 2^25=32MB, so the final entry of the table is
     772             :  * kmalloc-32M.
     773             :  */
     774             : const struct kmalloc_info_struct kmalloc_info[] __initconst = {
     775             :         INIT_KMALLOC_INFO(0, 0),
     776             :         INIT_KMALLOC_INFO(96, 96),
     777             :         INIT_KMALLOC_INFO(192, 192),
     778             :         INIT_KMALLOC_INFO(8, 8),
     779             :         INIT_KMALLOC_INFO(16, 16),
     780             :         INIT_KMALLOC_INFO(32, 32),
     781             :         INIT_KMALLOC_INFO(64, 64),
     782             :         INIT_KMALLOC_INFO(128, 128),
     783             :         INIT_KMALLOC_INFO(256, 256),
     784             :         INIT_KMALLOC_INFO(512, 512),
     785             :         INIT_KMALLOC_INFO(1024, 1k),
     786             :         INIT_KMALLOC_INFO(2048, 2k),
     787             :         INIT_KMALLOC_INFO(4096, 4k),
     788             :         INIT_KMALLOC_INFO(8192, 8k),
     789             :         INIT_KMALLOC_INFO(16384, 16k),
     790             :         INIT_KMALLOC_INFO(32768, 32k),
     791             :         INIT_KMALLOC_INFO(65536, 64k),
     792             :         INIT_KMALLOC_INFO(131072, 128k),
     793             :         INIT_KMALLOC_INFO(262144, 256k),
     794             :         INIT_KMALLOC_INFO(524288, 512k),
     795             :         INIT_KMALLOC_INFO(1048576, 1M),
     796             :         INIT_KMALLOC_INFO(2097152, 2M),
     797             :         INIT_KMALLOC_INFO(4194304, 4M),
     798             :         INIT_KMALLOC_INFO(8388608, 8M),
     799             :         INIT_KMALLOC_INFO(16777216, 16M),
     800             :         INIT_KMALLOC_INFO(33554432, 32M)
     801             : };
     802             : 
     803             : /*
     804             :  * Patch up the size_index table if we have strange large alignment
     805             :  * requirements for the kmalloc array. This is only the case for
     806             :  * MIPS it seems. The standard arches will not generate any code here.
     807             :  *
     808             :  * Largest permitted alignment is 256 bytes due to the way we
     809             :  * handle the index determination for the smaller caches.
     810             :  *
     811             :  * Make sure that nothing crazy happens if someone starts tinkering
     812             :  * around with ARCH_KMALLOC_MINALIGN
     813             :  */
     814           1 : void __init setup_kmalloc_cache_index_table(void)
     815             : {
     816             :         unsigned int i;
     817             : 
     818           1 :         BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
     819             :                 !is_power_of_2(KMALLOC_MIN_SIZE));
     820             : 
     821           1 :         for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
     822             :                 unsigned int elem = size_index_elem(i);
     823             : 
     824             :                 if (elem >= ARRAY_SIZE(size_index))
     825             :                         break;
     826             :                 size_index[elem] = KMALLOC_SHIFT_LOW;
     827             :         }
     828             : 
     829             :         if (KMALLOC_MIN_SIZE >= 64) {
     830             :                 /*
     831             :                  * The 96 byte sized cache is not used if the alignment
      832             :  * is 64 bytes.
     833             :                  */
     834             :                 for (i = 64 + 8; i <= 96; i += 8)
     835             :                         size_index[size_index_elem(i)] = 7;
     836             : 
     837             :         }
     838             : 
     839             :         if (KMALLOC_MIN_SIZE >= 128) {
     840             :                 /*
     841             :                  * The 192 byte sized cache is not used if the alignment
     842             :                  * is 128 byte. Redirect kmalloc to use the 256 byte cache
     843             :                  * instead.
     844             :                  */
     845             :                 for (i = 128 + 8; i <= 192; i += 8)
     846             :                         size_index[size_index_elem(i)] = 8;
     847             :         }
     848           1 : }
     849             : 
     850             : static void __init
     851          26 : new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
     852             : {
     853          26 :         if (type == KMALLOC_RECLAIM) {
     854          13 :                 flags |= SLAB_RECLAIM_ACCOUNT;
     855             :         } else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
     856             :                 if (mem_cgroup_kmem_disabled()) {
     857             :                         kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
     858             :                         return;
     859             :                 }
     860             :                 flags |= SLAB_ACCOUNT;
     861             :         }
     862             : 
     863          26 :         kmalloc_caches[type][idx] = create_kmalloc_cache(
     864             :                                         kmalloc_info[idx].name[type],
     865             :                                         kmalloc_info[idx].size, flags, 0,
     866             :                                         kmalloc_info[idx].size);
     867             : 
     868             :         /*
     869             :          * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
     870             :          * KMALLOC_NORMAL caches.
     871             :          */
     872             :         if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
     873             :                 kmalloc_caches[type][idx]->refcount = -1;
     874             : }
     875             : 
     876             : /*
     877             :  * Create the kmalloc array. Some of the regular kmalloc arrays
     878             :  * may already have been created because they were needed to
     879             :  * enable allocations for slab creation.
     880             :  */
     881           1 : void __init create_kmalloc_caches(slab_flags_t flags)
     882             : {
     883             :         int i;
     884             :         enum kmalloc_cache_type type;
     885             : 
     886             :         /*
     887             :          * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM defined
     888             :          */
     889           3 :         for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
     890          22 :                 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
     891          22 :                         if (!kmalloc_caches[type][i])
     892          22 :                                 new_kmalloc_cache(i, type, flags);
     893             : 
     894             :                         /*
      895             :                          * Caches that are not of a power-of-two size.
      896             :                          * These have to be created immediately after the
      897             :                          * earlier power-of-two caches.
     898             :                          */
     899          24 :                         if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
     900           2 :                                         !kmalloc_caches[type][1])
     901           2 :                                 new_kmalloc_cache(1, type, flags);
     902          24 :                         if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
     903           2 :                                         !kmalloc_caches[type][2])
     904           2 :                                 new_kmalloc_cache(2, type, flags);
     905             :                 }
     906             :         }
     907             : 
     908             :         /* Kmalloc array is now usable */
     909           1 :         slab_state = UP;
     910             : 
     911             : #ifdef CONFIG_ZONE_DMA
     912             :         for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
     913             :                 struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
     914             : 
     915             :                 if (s) {
     916             :                         kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
     917             :                                 kmalloc_info[i].name[KMALLOC_DMA],
     918             :                                 kmalloc_info[i].size,
     919             :                                 SLAB_CACHE_DMA | flags, 0,
     920             :                                 kmalloc_info[i].size);
     921             :                 }
     922             :         }
     923             : #endif
     924           1 : }
     925             : #endif /* !CONFIG_SLOB */
     926             : 
     927           0 : gfp_t kmalloc_fix_flags(gfp_t flags)
     928             : {
     929           0 :         gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
     930             : 
     931           0 :         flags &= ~GFP_SLAB_BUG_MASK;
     932           0 :         pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
     933             :                         invalid_mask, &invalid_mask, flags, &flags);
     934           0 :         dump_stack();
     935             : 
     936           0 :         return flags;
     937             : }
     938             : 
     939             : /*
     940             :  * To avoid unnecessary overhead, we pass through large allocation requests
     941             :  * directly to the page allocator. We use __GFP_COMP, because we will need to
     942             :  * know the allocation order to free the pages properly in kfree.
     943             :  */
     944           8 : void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
     945             : {
     946           8 :         void *ret = NULL;
     947             :         struct page *page;
     948             : 
     949           8 :         if (unlikely(flags & GFP_SLAB_BUG_MASK))
     950           0 :                 flags = kmalloc_fix_flags(flags);
     951             : 
     952           8 :         flags |= __GFP_COMP;
     953           8 :         page = alloc_pages(flags, order);
     954           8 :         if (likely(page)) {
     955           8 :                 ret = page_address(page);
     956           8 :                 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
     957           8 :                                       PAGE_SIZE << order);
     958             :         }
     959           8 :         ret = kasan_kmalloc_large(ret, size, flags);
     960             :         /* As ret might get tagged, call kmemleak hook after KASAN. */
     961           8 :         kmemleak_alloc(ret, size, 1, flags);
     962           8 :         return ret;
     963             : }
     964             : EXPORT_SYMBOL(kmalloc_order);
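
/*
 * [Editor's sketch, not part of slab_common.c] On SLUB configurations
 * KMALLOC_MAX_CACHE_SIZE is two pages, so a request like the one below
 * bypasses the kmalloc caches and reaches kmalloc_order() via the large
 * allocation path; kfree() still works because __GFP_COMP records the
 * allocation order in the compound page. The function name is
 * hypothetical.
 */
static void *demo_large_alloc(void)
{
        return kmalloc(64 * 1024, GFP_KERNEL); /* served by alloc_pages() */
}
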
     965             : 
     966             : #ifdef CONFIG_TRACING
     967             : void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
     968             : {
     969             :         void *ret = kmalloc_order(size, flags, order);
     970             :         trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
     971             :         return ret;
     972             : }
     973             : EXPORT_SYMBOL(kmalloc_order_trace);
     974             : #endif
     975             : 
     976             : #ifdef CONFIG_SLAB_FREELIST_RANDOM
     977             : /* Randomize a generic freelist */
     978             : static void freelist_randomize(struct rnd_state *state, unsigned int *list,
     979             :                                unsigned int count)
     980             : {
     981             :         unsigned int rand;
     982             :         unsigned int i;
     983             : 
     984             :         for (i = 0; i < count; i++)
     985             :                 list[i] = i;
     986             : 
     987             :         /* Fisher-Yates shuffle */
     988             :         for (i = count - 1; i > 0; i--) {
     989             :                 rand = prandom_u32_state(state);
     990             :                 rand %= (i + 1);
     991             :                 swap(list[i], list[rand]);
     992             :         }
     993             : }
     994             : 
     995             : /* Create a random sequence per cache */
     996             : int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
     997             :                                     gfp_t gfp)
     998             : {
     999             :         struct rnd_state state;
    1000             : 
    1001             :         if (count < 2 || cachep->random_seq)
    1002             :                 return 0;
    1003             : 
    1004             :         cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
    1005             :         if (!cachep->random_seq)
    1006             :                 return -ENOMEM;
    1007             : 
    1008             :         /* Get best entropy at this stage of boot */
    1009             :         prandom_seed_state(&state, get_random_long());
    1010             : 
    1011             :         freelist_randomize(&state, cachep->random_seq, count);
    1012             :         return 0;
    1013             : }
    1014             : 
    1015             : /* Destroy the per-cache random freelist sequence */
    1016             : void cache_random_seq_destroy(struct kmem_cache *cachep)
    1017             : {
    1018             :         kfree(cachep->random_seq);
    1019             :         cachep->random_seq = NULL;
    1020             : }
    1021             : #endif /* CONFIG_SLAB_FREELIST_RANDOM */
    1022             : 
    1023             : #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
    1024             : #ifdef CONFIG_SLAB
    1025             : #define SLABINFO_RIGHTS (0600)
    1026             : #else
    1027             : #define SLABINFO_RIGHTS (0400)
    1028             : #endif
    1029             : 
    1030           0 : static void print_slabinfo_header(struct seq_file *m)
    1031             : {
    1032             :         /*
    1033             :          * Output format version, so at least we can change it
    1034             :          * without _too_ many complaints.
    1035             :          */
    1036             : #ifdef CONFIG_DEBUG_SLAB
    1037             :         seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
    1038             : #else
    1039           0 :         seq_puts(m, "slabinfo - version: 2.1\n");
    1040             : #endif
    1041           0 :         seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
    1042           0 :         seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
    1043           0 :         seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
    1044             : #ifdef CONFIG_DEBUG_SLAB
    1045             :         seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
    1046             :         seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
    1047             : #endif
    1048           0 :         seq_putc(m, '\n');
    1049           0 : }
    1050             : 
    1051           0 : static void *slab_start(struct seq_file *m, loff_t *pos)
    1052             : {
    1053           0 :         mutex_lock(&slab_mutex);
    1054           0 :         return seq_list_start(&slab_caches, *pos);
    1055             : }
    1056             : 
    1057           0 : static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
    1058             : {
    1059           0 :         return seq_list_next(p, &slab_caches, pos);
    1060             : }
    1061             : 
    1062           0 : static void slab_stop(struct seq_file *m, void *p)
    1063             : {
    1064           0 :         mutex_unlock(&slab_mutex);
    1065           0 : }
    1066             : 
    1067           0 : static void cache_show(struct kmem_cache *s, struct seq_file *m)
    1068             : {
    1069             :         struct slabinfo sinfo;
    1070             : 
    1071           0 :         memset(&sinfo, 0, sizeof(sinfo));
    1072           0 :         get_slabinfo(s, &sinfo);
    1073             : 
    1074           0 :         seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
    1075             :                    s->name, sinfo.active_objs, sinfo.num_objs, s->size,
    1076           0 :                    sinfo.objects_per_slab, (1 << sinfo.cache_order));
    1077             : 
    1078           0 :         seq_printf(m, " : tunables %4u %4u %4u",
    1079             :                    sinfo.limit, sinfo.batchcount, sinfo.shared);
    1080           0 :         seq_printf(m, " : slabdata %6lu %6lu %6lu",
    1081             :                    sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
    1082           0 :         slabinfo_show_stats(m, s);
    1083           0 :         seq_putc(m, '\n');
    1084           0 : }
    1085             : 
    1086           0 : static int slab_show(struct seq_file *m, void *p)
    1087             : {
    1088           0 :         struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
    1089             : 
    1090           0 :         if (p == slab_caches.next)
    1091           0 :                 print_slabinfo_header(m);
    1092           0 :         cache_show(s, m);
    1093           0 :         return 0;
    1094             : }
    1095             : 
    1096           0 : void dump_unreclaimable_slab(void)
    1097             : {
    1098             :         struct kmem_cache *s;
    1099             :         struct slabinfo sinfo;
    1100             : 
     1101             :         /*
     1102             :          * Acquiring slab_mutex here is risky since we don't want to
     1103             :          * sleep in the OOM path. But without holding the mutex,
     1104             :          * traversing the list risks a crash.
     1105             :          * Use mutex_trylock to protect the list traversal, and dump
     1106             :          * nothing if the mutex cannot be acquired.
     1107             :          */
    1108           0 :         if (!mutex_trylock(&slab_mutex)) {
    1109           0 :                 pr_warn("excessive unreclaimable slab but cannot dump stats\n");
    1110           0 :                 return;
    1111             :         }
    1112             : 
    1113           0 :         pr_info("Unreclaimable slab info:\n");
    1114           0 :         pr_info("Name                      Used          Total\n");
    1115             : 
    1116           0 :         list_for_each_entry(s, &slab_caches, list) {
    1117           0 :                 if (s->flags & SLAB_RECLAIM_ACCOUNT)
    1118           0 :                         continue;
    1119             : 
    1120           0 :                 get_slabinfo(s, &sinfo);
    1121             : 
    1122           0 :                 if (sinfo.num_objs > 0)
    1123           0 :                         pr_info("%-17s %10luKB %10luKB\n", s->name,
    1124             :                                 (sinfo.active_objs * s->size) / 1024,
    1125             :                                 (sinfo.num_objs * s->size) / 1024);
    1126             :         }
    1127           0 :         mutex_unlock(&slab_mutex);
    1128             : }
    1129             : 
    1130             : /*
    1131             :  * slabinfo_op - iterator that generates /proc/slabinfo
    1132             :  *
    1133             :  * Output layout:
    1134             :  * cache-name
    1135             :  * num-active-objs
    1136             :  * total-objs
    1137             :  * object size
    1138             :  * num-active-slabs
    1139             :  * total-slabs
    1140             :  * num-pages-per-slab
    1141             :  * + further values on SMP and with statistics enabled
    1142             :  */
    1143             : static const struct seq_operations slabinfo_op = {
    1144             :         .start = slab_start,
    1145             :         .next = slab_next,
    1146             :         .stop = slab_stop,
    1147             :         .show = slab_show,
    1148             : };
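
As a rough illustration of the layout documented above, a userspace sketch that reads /proc/slabinfo and prints the first few columns emitted by cache_show(). It assumes the version 2.1 header format printed by print_slabinfo_header() and requires the read permission granted by SLABINFO_RIGHTS (typically root); all names below are illustrative.

#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *fp = fopen("/proc/slabinfo", "r");
        char line[512];
        char name[64];
        unsigned long active_objs, num_objs;
        unsigned int objsize;

        if (!fp) {
                perror("fopen /proc/slabinfo");
                return 1;
        }

        while (fgets(line, sizeof(line), fp)) {
                /* Skip the version line and the "# name ..." header. */
                if (line[0] == '#' || strncmp(line, "slabinfo", 8) == 0)
                        continue;

                /* First four columns: name, active_objs, num_objs, objsize. */
                if (sscanf(line, "%63s %lu %lu %u",
                           name, &active_objs, &num_objs, &objsize) == 4)
                        printf("%-20s %10lu / %10lu objects (%u bytes each)\n",
                               name, active_objs, num_objs, objsize);
        }

        fclose(fp);
        return 0;
}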
    1149             : 
    1150           0 : static int slabinfo_open(struct inode *inode, struct file *file)
    1151             : {
    1152           0 :         return seq_open(file, &slabinfo_op);
    1153             : }
    1154             : 
    1155             : static const struct proc_ops slabinfo_proc_ops = {
    1156             :         .proc_flags     = PROC_ENTRY_PERMANENT,
    1157             :         .proc_open      = slabinfo_open,
    1158             :         .proc_read      = seq_read,
    1159             :         .proc_write     = slabinfo_write,
    1160             :         .proc_lseek     = seq_lseek,
    1161             :         .proc_release   = seq_release,
    1162             : };
    1163             : 
    1164           1 : static int __init slab_proc_init(void)
    1165             : {
    1166           1 :         proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
    1167           1 :         return 0;
    1168             : }
    1169             : module_init(slab_proc_init);
    1170             : 
    1171             : #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
    1172             : 
    1173             : static __always_inline void *__do_krealloc(const void *p, size_t new_size,
    1174             :                                            gfp_t flags)
    1175             : {
    1176             :         void *ret;
    1177             :         size_t ks;
    1178             : 
    1179             :         /* Don't use instrumented ksize to allow precise KASAN poisoning. */
    1180         228 :         if (likely(!ZERO_OR_NULL_PTR(p))) {
    1181         228 :                 if (!kasan_check_byte(p))
    1182             :                         return NULL;
    1183         228 :                 ks = kfence_ksize(p) ?: __ksize(p);
    1184             :         } else
    1185             :                 ks = 0;
    1186             : 
    1187             :         /* If the object still fits, repoison it precisely. */
    1188         228 :         if (ks >= new_size) {
    1189             :                 p = kasan_krealloc((void *)p, new_size, flags);
    1190             :                 return (void *)p;
    1191             :         }
    1192             : 
    1193          93 :         ret = kmalloc_track_caller(new_size, flags);
    1194          93 :         if (ret && p) {
    1195             :                 /* Disable KASAN checks as the object's redzone is accessed. */
    1196             :                 kasan_disable_current();
    1197          93 :                 memcpy(ret, kasan_reset_tag(p), ks);
    1198             :                 kasan_enable_current();
    1199             :         }
    1200             : 
    1201             :         return ret;
    1202             : }
    1203             : 
    1204             : /**
    1205             :  * krealloc - reallocate memory. The contents will remain unchanged.
    1206             :  * @p: object to reallocate memory for.
    1207             :  * @new_size: how many bytes of memory are required.
    1208             :  * @flags: the type of memory to allocate.
    1209             :  *
    1210             :  * The contents of the object pointed to are preserved up to the
    1211             :  * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
    1212             :  * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
    1213             :  * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
    1214             :  *
    1215             :  * Return: pointer to the allocated memory or %NULL in case of error
    1216             :  */
    1217         228 : void *krealloc(const void *p, size_t new_size, gfp_t flags)
    1218             : {
    1219             :         void *ret;
    1220             : 
    1221         228 :         if (unlikely(!new_size)) {
    1222           0 :                 kfree(p);
    1223           0 :                 return ZERO_SIZE_PTR;
    1224             :         }
    1225             : 
    1226         228 :         ret = __do_krealloc(p, new_size, flags);
    1227         228 :         if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
    1228          93 :                 kfree(p);
    1229             : 
    1230             :         return ret;
    1231             : }
    1232             : EXPORT_SYMBOL(krealloc);
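
A minimal usage sketch of the semantics documented above, in kernel style and assuming <linux/slab.h> is included; struct grow_ctx and grow_buffer() are hypothetical names, not part of this file.

struct grow_ctx {
        void *buf;
        size_t len;
};

/* Hypothetical helper: grow ctx->buf while preserving its contents. */
static int grow_buffer(struct grow_ctx *ctx, size_t new_len)
{
        void *tmp;

        /*
         * On failure krealloc() returns NULL and leaves the old
         * allocation intact, so assign through a temporary rather
         * than overwriting ctx->buf directly.
         */
        tmp = krealloc(ctx->buf, new_len, GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        ctx->buf = tmp;
        ctx->len = new_len;
        return 0;
}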
    1233             : 
    1234             : /**
    1235             :  * kfree_sensitive - Clear sensitive information in memory before freeing
    1236             :  * @p: object to free memory of
    1237             :  *
     1238             :  * The memory pointed to by @p is zeroed before it is freed.
    1239             :  * If @p is %NULL, kfree_sensitive() does nothing.
    1240             :  *
    1241             :  * Note: this function zeroes the whole allocated buffer which can be a good
    1242             :  * deal bigger than the requested buffer size passed to kmalloc(). So be
     1243             :  * careful when using this function in performance-sensitive code.
    1244             :  */
    1245           0 : void kfree_sensitive(const void *p)
    1246             : {
    1247             :         size_t ks;
    1248           0 :         void *mem = (void *)p;
    1249             : 
    1250           0 :         ks = ksize(mem);
    1251           0 :         if (ks)
    1252             :                 memzero_explicit(mem, ks);
    1253           0 :         kfree(mem);
    1254           0 : }
    1255             : EXPORT_SYMBOL(kfree_sensitive);
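
A brief sketch of the intended use, again in kernel style with hypothetical names (the session-key handling is just an example):

#define DEMO_KEY_LEN 32

static int demo_use_key(void)
{
        u8 *key;

        key = kmalloc(DEMO_KEY_LEN, GFP_KERNEL);
        if (!key)
                return -ENOMEM;

        /* ... derive and use the key ... */

        /*
         * Zero the whole underlying allocation (which may be larger
         * than DEMO_KEY_LEN, see the note above) before freeing it.
         */
        kfree_sensitive(key);
        return 0;
}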
    1256             : 
    1257             : /**
    1258             :  * ksize - get the actual amount of memory allocated for a given object
    1259             :  * @objp: Pointer to the object
    1260             :  *
    1261             :  * kmalloc may internally round up allocations and return more memory
    1262             :  * than requested. ksize() can be used to determine the actual amount of
    1263             :  * memory allocated. The caller may use this additional memory, even though
    1264             :  * a smaller amount of memory was initially specified with the kmalloc call.
    1265             :  * The caller must guarantee that objp points to a valid object previously
    1266             :  * allocated with either kmalloc() or kmem_cache_alloc(). The object
    1267             :  * must not be freed during the duration of the call.
    1268             :  *
    1269             :  * Return: size of the actual memory used by @objp in bytes
    1270             :  */
    1271           0 : size_t ksize(const void *objp)
    1272             : {
    1273             :         size_t size;
    1274             : 
    1275             :         /*
    1276             :          * We need to first check that the pointer to the object is valid, and
    1277             :          * only then unpoison the memory. The report printed from ksize() is
     1278             :          * more useful than when it's printed later, when the behaviour could
    1279             :          * be undefined due to a potential use-after-free or double-free.
    1280             :          *
    1281             :          * We use kasan_check_byte(), which is supported for the hardware
    1282             :          * tag-based KASAN mode, unlike kasan_check_read/write().
    1283             :          *
     1284             :          * If the pointed-to memory is invalid, we return 0 to avoid users of
    1285             :          * ksize() writing to and potentially corrupting the memory region.
    1286             :          *
    1287             :          * We want to perform the check before __ksize(), to avoid potentially
    1288             :          * crashing in __ksize() due to accessing invalid metadata.
    1289             :          */
    1290           0 :         if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
    1291             :                 return 0;
    1292             : 
    1293           0 :         size = kfence_ksize(objp) ?: __ksize(objp);
    1294             :         /*
     1295             :          * We assume that ksize callers could use the whole allocated area,
    1296             :          * so we need to unpoison this area.
    1297             :          */
    1298           0 :         kasan_unpoison_range(objp, size);
    1299           0 :         return size;
    1300             : }
    1301             : EXPORT_SYMBOL(ksize);
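
A sketch of how a caller might use the slack reported by ksize(), per the documentation above; append_in_place() and its parameters are hypothetical.

/*
 * Hypothetical helper: append len bytes at offset used, but only if the
 * allocation's actual size (as reported by ksize()) already has room.
 */
static bool append_in_place(void *buf, size_t used, const void *src, size_t len)
{
        size_t cap = ksize(buf);

        if (!cap || used + len > cap)
                return false;   /* caller should fall back to krealloc() */

        memcpy((char *)buf + used, src, len);
        return true;
}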
    1302             : 
    1303             : /* Tracepoints definitions. */
    1304             : EXPORT_TRACEPOINT_SYMBOL(kmalloc);
    1305             : EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
    1306             : EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
    1307             : EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
    1308             : EXPORT_TRACEPOINT_SYMBOL(kfree);
    1309             : EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
    1310             : 
    1311       18327 : int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
    1312             : {
    1313       18327 :         if (__should_failslab(s, gfpflags))
    1314             :                 return -ENOMEM;
    1315             :         return 0;
    1316             : }
    1317             : ALLOW_ERROR_INJECTION(should_failslab, ERRNO);

Generated by: LCOV version 1.14