LCOV - code coverage report
Current view: top level - mm - slub.c (source / functions) Hit Total Coverage
Test: coverage.info Lines: 606 1568 38.6 %
Date: 2022-12-09 01:23:36 Functions: 43 137 31.4 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * SLUB: A slab allocator that limits cache line use instead of queuing
       4             :  * objects in per cpu and per node lists.
       5             :  *
       6             :  * The allocator synchronizes using per slab locks or atomic operations
       7             :  * and only uses a centralized lock to manage a pool of partial slabs.
       8             :  *
       9             :  * (C) 2007 SGI, Christoph Lameter
      10             :  * (C) 2011 Linux Foundation, Christoph Lameter
      11             :  */
      12             : 
      13             : #include <linux/mm.h>
      14             : #include <linux/swap.h> /* struct reclaim_state */
      15             : #include <linux/module.h>
      16             : #include <linux/bit_spinlock.h>
      17             : #include <linux/interrupt.h>
      18             : #include <linux/swab.h>
      19             : #include <linux/bitops.h>
      20             : #include <linux/slab.h>
      21             : #include "slab.h"
      22             : #include <linux/proc_fs.h>
      23             : #include <linux/seq_file.h>
      24             : #include <linux/kasan.h>
      25             : #include <linux/cpu.h>
      26             : #include <linux/cpuset.h>
      27             : #include <linux/mempolicy.h>
      28             : #include <linux/ctype.h>
      29             : #include <linux/debugobjects.h>
      30             : #include <linux/kallsyms.h>
      31             : #include <linux/kfence.h>
      32             : #include <linux/memory.h>
      33             : #include <linux/math64.h>
      34             : #include <linux/fault-inject.h>
      35             : #include <linux/stacktrace.h>
      36             : #include <linux/prefetch.h>
      37             : #include <linux/memcontrol.h>
      38             : #include <linux/random.h>
      39             : #include <kunit/test.h>
      40             : 
      41             : #include <linux/debugfs.h>
      42             : #include <trace/events/kmem.h>
      43             : 
      44             : #include "internal.h"
      45             : 
      46             : /*
      47             :  * Lock order:
      48             :  *   1. slab_mutex (Global Mutex)
      49             :  *   2. node->list_lock (Spinlock)
      50             :  *   3. kmem_cache->cpu_slab->lock (Local lock)
      51             :  *   4. slab_lock(slab) (Only on some arches or for debugging)
      52             :  *   5. object_map_lock (Only for debugging)
      53             :  *
      54             :  *   slab_mutex
      55             :  *
      56             :  *   The role of the slab_mutex is to protect the list of all the slabs
      57             :  *   and to synchronize major metadata changes to slab cache structures.
      58             :  *   Also synchronizes memory hotplug callbacks.
      59             :  *
      60             :  *   slab_lock
      61             :  *
      62             :  *   The slab_lock is a wrapper around the page lock, thus it is a bit
      63             :  *   spinlock.
      64             :  *
      65             :  *   The slab_lock is only used for debugging and on arches that do not
      66             :  *   have the ability to do a cmpxchg_double. It only protects:
      67             :  *      A. slab->freelist    -> List of free objects in a slab
      68             :  *      B. slab->inuse               -> Number of objects in use
      69             :  *      C. slab->objects     -> Number of objects in slab
      70             :  *      D. slab->frozen              -> frozen state
      71             :  *
      72             :  *   Frozen slabs
      73             :  *
      74             :  *   If a slab is frozen then it is exempt from list management. It is not
      75             :  *   on any list except per cpu partial list. The processor that froze the
      76             :  *   slab is the one who can perform list operations on the slab. Other
      77             :  *   processors may put objects onto the freelist but the processor that
      78             :  *   froze the slab is the only one that can retrieve the objects from the
      79             :  *   slab's freelist.
      80             :  *
      81             :  *   list_lock
      82             :  *
      83             :  *   The list_lock protects the partial and full list on each node and
      84             :  *   the partial slab counter. If taken then no new slabs may be added or
      85             :  *   removed from the lists nor make the number of partial slabs be modified.
      86             :  *   (Note that the total number of slabs is an atomic value that may be
      87             :  *   modified without taking the list lock).
      88             :  *
      89             :  *   The list_lock is a centralized lock and thus we avoid taking it as
      90             :  *   much as possible. As long as SLUB does not have to handle partial
      91             :  *   slabs, operations can continue without any centralized lock. F.e.
      92             :  *   allocating a long series of objects that fill up slabs does not require
      93             :  *   the list lock.
      94             :  *
      95             :  *   cpu_slab->lock local lock
      96             :  *
      97             :  *   This locks protect slowpath manipulation of all kmem_cache_cpu fields
      98             :  *   except the stat counters. This is a percpu structure manipulated only by
      99             :  *   the local cpu, so the lock protects against being preempted or interrupted
     100             :  *   by an irq. Fast path operations rely on lockless operations instead.
     101             :  *   On PREEMPT_RT, the local lock does not actually disable irqs (and thus
     102             :  *   prevent the lockless operations), so fastpath operations also need to take
     103             :  *   the lock and are no longer lockless.
     104             :  *
     105             :  *   lockless fastpaths
     106             :  *
     107             :  *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
     108             :  *   are fully lockless when satisfied from the percpu slab (and when
     109             :  *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
     110             :  *   They also don't disable preemption or migration or irqs. They rely on
     111             :  *   the transaction id (tid) field to detect being preempted or moved to
     112             :  *   another cpu.
     113             :  *
     114             :  *   irq, preemption, migration considerations
     115             :  *
     116             :  *   Interrupts are disabled as part of list_lock or local_lock operations, or
     117             :  *   around the slab_lock operation, in order to make the slab allocator safe
     118             :  *   to use in the context of an irq.
     119             :  *
     120             :  *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
     121             :  *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
     122             :  *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
     123             :  *   doesn't have to be revalidated in each section protected by the local lock.
     124             :  *
     125             :  * SLUB assigns one slab for allocation to each processor.
     126             :  * Allocations only occur from these slabs called cpu slabs.
     127             :  *
     128             :  * Slabs with free elements are kept on a partial list and during regular
     129             :  * operations no list for full slabs is used. If an object in a full slab is
     130             :  * freed then the slab will show up again on the partial lists.
     131             :  * We track full slabs for debugging purposes though because otherwise we
     132             :  * cannot scan all objects.
     133             :  *
     134             :  * Slabs are freed when they become empty. Teardown and setup is
     135             :  * minimal so we rely on the page allocators per cpu caches for
     136             :  * fast frees and allocs.
     137             :  *
     138             :  * slab->frozen              The slab is frozen and exempt from list processing.
     139             :  *                      This means that the slab is dedicated to a purpose
     140             :  *                      such as satisfying allocations for a specific
     141             :  *                      processor. Objects may be freed in the slab while
     142             :  *                      it is frozen but slab_free will then skip the usual
     143             :  *                      list operations. It is up to the processor holding
     144             :  *                      the slab to integrate the slab into the slab lists
     145             :  *                      when the slab is no longer needed.
     146             :  *
     147             :  *                      One use of this flag is to mark slabs that are
     148             :  *                      used for allocations. Then such a slab becomes a cpu
     149             :  *                      slab. The cpu slab may be equipped with an additional
     150             :  *                      freelist that allows lockless access to
     151             :  *                      free objects in addition to the regular freelist
     152             :  *                      that requires the slab lock.
     153             :  *
     154             :  * SLAB_DEBUG_FLAGS     Slab requires special handling due to debug
     155             :  *                      options set. This moves slab handling out of
     156             :  *                      the fast path and disables lockless freelists.
     157             :  */
     158             : 
     159             : /*
     160             :  * We could simply use migrate_disable()/enable() but as long as it's a
     161             :  * function call even on !PREEMPT_RT, use inline preempt_disable() there.
     162             :  */
     163             : #ifndef CONFIG_PREEMPT_RT
     164             : #define slub_get_cpu_ptr(var)   get_cpu_ptr(var)
     165             : #define slub_put_cpu_ptr(var)   put_cpu_ptr(var)
     166             : #else
     167             : #define slub_get_cpu_ptr(var)           \
     168             : ({                                      \
     169             :         migrate_disable();              \
     170             :         this_cpu_ptr(var);              \
     171             : })
     172             : #define slub_put_cpu_ptr(var)           \
     173             : do {                                    \
     174             :         (void)(var);                    \
     175             :         migrate_enable();               \
     176             : } while (0)
     177             : #endif
     178             : 
     179             : #ifdef CONFIG_SLUB_DEBUG
     180             : #ifdef CONFIG_SLUB_DEBUG_ON
     181             : DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
     182             : #else
     183             : DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
     184             : #endif
     185             : #endif          /* CONFIG_SLUB_DEBUG */
     186             : 
     187             : static inline bool kmem_cache_debug(struct kmem_cache *s)
     188             : {
     189        3554 :         return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
     190             : }
     191             : 
     192           0 : void *fixup_red_left(struct kmem_cache *s, void *p)
     193             : {
     194         908 :         if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
     195           0 :                 p += s->red_left_pad;
     196             : 
     197           0 :         return p;
     198             : }
     199             : 
     200             : static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
     201             : {
     202             : #ifdef CONFIG_SLUB_CPU_PARTIAL
     203             :         return !kmem_cache_debug(s);
     204             : #else
     205             :         return false;
     206             : #endif
     207             : }
     208             : 
     209             : /*
     210             :  * Issues still to be resolved:
     211             :  *
     212             :  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
     213             :  *
     214             :  * - Variable sizing of the per node arrays
     215             :  */
     216             : 
     217             : /* Enable to log cmpxchg failures */
     218             : #undef SLUB_DEBUG_CMPXCHG
     219             : 
     220             : /*
     221             :  * Minimum number of partial slabs. These will be left on the partial
     222             :  * lists even if they are empty. kmem_cache_shrink may reclaim them.
     223             :  */
     224             : #define MIN_PARTIAL 5
     225             : 
     226             : /*
     227             :  * Maximum number of desirable partial slabs.
     228             :  * The existence of more partial slabs makes kmem_cache_shrink
     229             :  * sort the partial list by the number of objects in use.
     230             :  */
     231             : #define MAX_PARTIAL 10
     232             : 
     233             : #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
     234             :                                 SLAB_POISON | SLAB_STORE_USER)
     235             : 
     236             : /*
     237             :  * These debug flags cannot use CMPXCHG because there might be consistency
     238             :  * issues when checking or reading debug information
     239             :  */
     240             : #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
     241             :                                 SLAB_TRACE)
     242             : 
     243             : 
     244             : /*
     245             :  * Debugging flags that require metadata to be stored in the slab.  These get
     246             :  * disabled when slub_debug=O is used and a cache's min order increases with
     247             :  * metadata.
     248             :  */
     249             : #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
     250             : 
     251             : #define OO_SHIFT        16
     252             : #define OO_MASK         ((1 << OO_SHIFT) - 1)
     253             : #define MAX_OBJS_PER_PAGE       32767 /* since slab.objects is u15 */
     254             : 
     255             : /* Internal SLUB flags */
     256             : /* Poison object */
     257             : #define __OBJECT_POISON         ((slab_flags_t __force)0x80000000U)
     258             : /* Use cmpxchg_double */
     259             : #define __CMPXCHG_DOUBLE        ((slab_flags_t __force)0x40000000U)
     260             : 
     261             : /*
     262             :  * Tracking user of a slab.
     263             :  */
     264             : #define TRACK_ADDRS_COUNT 16
     265             : struct track {
     266             :         unsigned long addr;     /* Called from address */
     267             : #ifdef CONFIG_STACKTRACE
     268             :         unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
     269             : #endif
     270             :         int cpu;                /* Was running on cpu */
     271             :         int pid;                /* Pid context */
     272             :         unsigned long when;     /* When did the operation occur */
     273             : };
     274             : 
     275             : enum track_item { TRACK_ALLOC, TRACK_FREE };
     276             : 
     277             : #ifdef CONFIG_SYSFS
     278             : static int sysfs_slab_add(struct kmem_cache *);
     279             : static int sysfs_slab_alias(struct kmem_cache *, const char *);
     280             : #else
     281             : static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
     282             : static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
     283             :                                                         { return 0; }
     284             : #endif
     285             : 
     286             : #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
     287             : static void debugfs_slab_add(struct kmem_cache *);
     288             : #else
     289             : static inline void debugfs_slab_add(struct kmem_cache *s) { }
     290             : #endif
     291             : 
     292             : static inline void stat(const struct kmem_cache *s, enum stat_item si)
     293             : {
     294             : #ifdef CONFIG_SLUB_STATS
     295             :         /*
     296             :          * The rmw is racy on a preemptible kernel but this is acceptable, so
     297             :          * avoid this_cpu_add()'s irq-disable overhead.
     298             :          */
     299             :         raw_cpu_inc(s->cpu_slab->stat[si]);
     300             : #endif
     301             : }
     302             : 
     303             : /*
     304             :  * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
     305             :  * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
     306             :  * differ during memory hotplug/hotremove operations.
     307             :  * Protected by slab_mutex.
     308             :  */
     309             : static nodemask_t slab_nodes;
     310             : 
     311             : /********************************************************************
     312             :  *                      Core slab cache functions
     313             :  *******************************************************************/
     314             : 
     315             : /*
     316             :  * Returns freelist pointer (ptr). With hardening, this is obfuscated
     317             :  * with an XOR of the address where the pointer is held and a per-cache
     318             :  * random number.
     319             :  */
     320             : static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
     321             :                                  unsigned long ptr_addr)
     322             : {
     323             : #ifdef CONFIG_SLAB_FREELIST_HARDENED
     324             :         /*
     325             :          * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
     326             :          * Normally, this doesn't cause any issues, as both set_freepointer()
     327             :          * and get_freepointer() are called with a pointer with the same tag.
     328             :          * However, there are some issues with CONFIG_SLUB_DEBUG code. For
     329             :          * example, when __free_slub() iterates over objects in a cache, it
     330             :          * passes untagged pointers to check_object(). check_object() in turns
     331             :          * calls get_freepointer() with an untagged pointer, which causes the
     332             :          * freepointer to be restored incorrectly.
     333             :          */
     334             :         return (void *)((unsigned long)ptr ^ s->random ^
     335             :                         swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
     336             : #else
     337             :         return ptr;
     338             : #endif
     339             : }
     340             : 
     341             : /* Returns the freelist pointer recorded at location ptr_addr. */
     342             : static inline void *freelist_dereference(const struct kmem_cache *s,
     343             :                                          void *ptr_addr)
     344             : {
     345       23721 :         return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
     346             :                             (unsigned long)ptr_addr);
     347             : }
     348             : 
     349             : static inline void *get_freepointer(struct kmem_cache *s, void *object)
     350             : {
     351       23721 :         object = kasan_reset_tag(object);
     352       47442 :         return freelist_dereference(s, object + s->offset);
     353             : }
     354             : 
     355             : static void prefetch_freepointer(const struct kmem_cache *s, void *object)
     356             : {
     357       17828 :         prefetchw(object + s->offset);
     358             : }
     359             : 
     360             : static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
     361             : {
     362             :         unsigned long freepointer_addr;
     363             :         void *p;
     364             : 
     365             :         if (!debug_pagealloc_enabled_static())
     366       35656 :                 return get_freepointer(s, object);
     367             : 
     368             :         object = kasan_reset_tag(object);
     369             :         freepointer_addr = (unsigned long)object + s->offset;
     370             :         copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
     371             :         return freelist_ptr(s, p, freepointer_addr);
     372             : }
     373             : 
     374             : static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
     375             : {
     376       24810 :         unsigned long freeptr_addr = (unsigned long)object + s->offset;
     377             : 
     378             : #ifdef CONFIG_SLAB_FREELIST_HARDENED
     379             :         BUG_ON(object == fp); /* naive detection of double free or corruption */
     380             : #endif
     381             : 
     382       24810 :         freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
     383       24810 :         *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
     384             : }
     385             : 
     386             : /* Loop over all objects in a slab */
     387             : #define for_each_object(__p, __s, __addr, __objects) \
     388             :         for (__p = fixup_red_left(__s, __addr); \
     389             :                 __p < (__addr) + (__objects) * (__s)->size; \
     390             :                 __p += (__s)->size)
     391             : 
     392             : static inline unsigned int order_objects(unsigned int order, unsigned int size)
     393             : {
     394         269 :         return ((unsigned int)PAGE_SIZE << order) / size;
     395             : }
     396             : 
     397             : static inline struct kmem_cache_order_objects oo_make(unsigned int order,
     398             :                 unsigned int size)
     399             : {
     400         134 :         struct kmem_cache_order_objects x = {
     401         268 :                 (order << OO_SHIFT) + order_objects(order, size)
     402             :         };
     403             : 
     404             :         return x;
     405             : }
     406             : 
     407             : static inline unsigned int oo_order(struct kmem_cache_order_objects x)
     408             : {
     409        1318 :         return x.x >> OO_SHIFT;
     410             : }
     411             : 
     412             : static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
     413             : {
     414          67 :         return x.x & OO_MASK;
     415             : }
     416             : 
     417             : #ifdef CONFIG_SLUB_CPU_PARTIAL
     418             : static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
     419             : {
     420             :         unsigned int nr_slabs;
     421             : 
     422             :         s->cpu_partial = nr_objects;
     423             : 
     424             :         /*
     425             :          * We take the number of objects but actually limit the number of
     426             :          * slabs on the per cpu partial list, in order to limit excessive
     427             :          * growth of the list. For simplicity we assume that the slabs will
     428             :          * be half-full.
     429             :          */
     430             :         nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
     431             :         s->cpu_partial_slabs = nr_slabs;
     432             : }
     433             : #else
     434             : static inline void
     435             : slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
     436             : {
     437             : }
     438             : #endif /* CONFIG_SLUB_CPU_PARTIAL */
     439             : 
     440             : /*
     441             :  * Per slab locking using the pagelock
     442             :  */
     443             : static __always_inline void __slab_lock(struct slab *slab)
     444             : {
     445        1789 :         struct page *page = slab_page(slab);
     446             : 
     447             :         VM_BUG_ON_PAGE(PageTail(page), page);
     448        1789 :         bit_spin_lock(PG_locked, &page->flags);
     449             : }
     450             : 
     451             : static __always_inline void __slab_unlock(struct slab *slab)
     452             : {
     453        1789 :         struct page *page = slab_page(slab);
     454             : 
     455             :         VM_BUG_ON_PAGE(PageTail(page), page);
     456        1789 :         __bit_spin_unlock(PG_locked, &page->flags);
     457             : }
     458             : 
     459             : static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
     460             : {
     461             :         if (IS_ENABLED(CONFIG_PREEMPT_RT))
     462             :                 local_irq_save(*flags);
     463         509 :         __slab_lock(slab);
     464             : }
     465             : 
     466             : static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
     467             : {
     468         509 :         __slab_unlock(slab);
     469             :         if (IS_ENABLED(CONFIG_PREEMPT_RT))
     470             :                 local_irq_restore(*flags);
     471             : }
     472             : 
     473             : /*
     474             :  * Interrupts must be disabled (for the fallback code to work right), typically
     475             :  * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
     476             :  * so we disable interrupts as part of slab_[un]lock().
     477             :  */
     478             : static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
     479             :                 void *freelist_old, unsigned long counters_old,
     480             :                 void *freelist_new, unsigned long counters_new,
     481             :                 const char *n)
     482             : {
     483             :         if (!IS_ENABLED(CONFIG_PREEMPT_RT))
     484             :                 lockdep_assert_irqs_disabled();
     485             : #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
     486             :     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
     487             :         if (s->flags & __CMPXCHG_DOUBLE) {
     488             :                 if (cmpxchg_double(&slab->freelist, &slab->counters,
     489             :                                    freelist_old, counters_old,
     490             :                                    freelist_new, counters_new))
     491             :                         return true;
     492             :         } else
     493             : #endif
     494             :         {
     495             :                 /* init to 0 to prevent spurious warnings */
     496         509 :                 unsigned long flags = 0;
     497             : 
     498         509 :                 slab_lock(slab, &flags);
     499        1018 :                 if (slab->freelist == freelist_old &&
     500         509 :                                         slab->counters == counters_old) {
     501         509 :                         slab->freelist = freelist_new;
     502         509 :                         slab->counters = counters_new;
     503         509 :                         slab_unlock(slab, &flags);
     504         509 :                         return true;
     505             :                 }
     506           0 :                 slab_unlock(slab, &flags);
     507             :         }
     508             : 
     509             :         cpu_relax();
     510           0 :         stat(s, CMPXCHG_DOUBLE_FAIL);
     511             : 
     512             : #ifdef SLUB_DEBUG_CMPXCHG
     513             :         pr_info("%s %s: cmpxchg double redo ", n, s->name);
     514             : #endif
     515             : 
     516             :         return false;
     517             : }
     518             : 
     519        1280 : static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
     520             :                 void *freelist_old, unsigned long counters_old,
     521             :                 void *freelist_new, unsigned long counters_new,
     522             :                 const char *n)
     523             : {
     524             : #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
     525             :     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
     526             :         if (s->flags & __CMPXCHG_DOUBLE) {
     527             :                 if (cmpxchg_double(&slab->freelist, &slab->counters,
     528             :                                    freelist_old, counters_old,
     529             :                                    freelist_new, counters_new))
     530             :                         return true;
     531             :         } else
     532             : #endif
     533             :         {
     534             :                 unsigned long flags;
     535             : 
     536        1280 :                 local_irq_save(flags);
     537        1280 :                 __slab_lock(slab);
     538        2560 :                 if (slab->freelist == freelist_old &&
     539        1280 :                                         slab->counters == counters_old) {
     540        1280 :                         slab->freelist = freelist_new;
     541        1280 :                         slab->counters = counters_new;
     542        1280 :                         __slab_unlock(slab);
     543        2560 :                         local_irq_restore(flags);
     544             :                         return true;
     545             :                 }
     546           0 :                 __slab_unlock(slab);
     547           0 :                 local_irq_restore(flags);
     548             :         }
     549             : 
     550             :         cpu_relax();
     551           0 :         stat(s, CMPXCHG_DOUBLE_FAIL);
     552             : 
     553             : #ifdef SLUB_DEBUG_CMPXCHG
     554             :         pr_info("%s %s: cmpxchg double redo ", n, s->name);
     555             : #endif
     556             : 
     557             :         return false;
     558             : }
     559             : 
     560             : #ifdef CONFIG_SLUB_DEBUG
     561             : static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
     562             : static DEFINE_RAW_SPINLOCK(object_map_lock);
     563             : 
     564           0 : static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
     565             :                        struct slab *slab)
     566             : {
     567           0 :         void *addr = slab_address(slab);
     568             :         void *p;
     569             : 
     570           0 :         bitmap_zero(obj_map, slab->objects);
     571             : 
     572           0 :         for (p = slab->freelist; p; p = get_freepointer(s, p))
     573           0 :                 set_bit(__obj_to_index(s, addr, p), obj_map);
     574           0 : }
     575             : 
     576             : #if IS_ENABLED(CONFIG_KUNIT)
     577           0 : static bool slab_add_kunit_errors(void)
     578             : {
     579             :         struct kunit_resource *resource;
     580             : 
     581           0 :         if (likely(!current->kunit_test))
     582             :                 return false;
     583             : 
     584           0 :         resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
     585           0 :         if (!resource)
     586             :                 return false;
     587             : 
     588           0 :         (*(int *)resource->data)++;
     589           0 :         kunit_put_resource(resource);
     590           0 :         return true;
     591             : }
     592             : #else
     593             : static inline bool slab_add_kunit_errors(void) { return false; }
     594             : #endif
     595             : 
     596             : /*
     597             :  * Determine a map of objects in use in a slab.
     598             :  *
     599             :  * Node listlock must be held to guarantee that the slab does
     600             :  * not vanish from under us.
     601             :  */
     602             : static unsigned long *get_map(struct kmem_cache *s, struct slab *slab)
     603             :         __acquires(&object_map_lock)
     604             : {
     605             :         VM_BUG_ON(!irqs_disabled());
     606             : 
     607           0 :         raw_spin_lock(&object_map_lock);
     608             : 
     609           0 :         __fill_map(object_map, s, slab);
     610             : 
     611             :         return object_map;
     612             : }
     613             : 
     614             : static void put_map(unsigned long *map) __releases(&object_map_lock)
     615             : {
     616             :         VM_BUG_ON(map != object_map);
     617           0 :         raw_spin_unlock(&object_map_lock);
     618             : }
     619             : 
     620             : static inline unsigned int size_from_object(struct kmem_cache *s)
     621             : {
     622           0 :         if (s->flags & SLAB_RED_ZONE)
     623           0 :                 return s->size - s->red_left_pad;
     624             : 
     625             :         return s->size;
     626             : }
     627             : 
     628             : static inline void *restore_red_left(struct kmem_cache *s, void *p)
     629             : {
     630           0 :         if (s->flags & SLAB_RED_ZONE)
     631           0 :                 p -= s->red_left_pad;
     632             : 
     633             :         return p;
     634             : }
     635             : 
     636             : /*
     637             :  * Debug settings:
     638             :  */
     639             : #if defined(CONFIG_SLUB_DEBUG_ON)
     640             : static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
     641             : #else
     642             : static slab_flags_t slub_debug;
     643             : #endif
     644             : 
     645             : static char *slub_debug_string;
     646             : static int disable_higher_order_debug;
     647             : 
     648             : /*
     649             :  * slub is about to manipulate internal object metadata.  This memory lies
     650             :  * outside the range of the allocated object, so accessing it would normally
     651             :  * be reported by kasan as a bounds error.  metadata_access_enable() is used
     652             :  * to tell kasan that these accesses are OK.
     653             :  */
     654             : static inline void metadata_access_enable(void)
     655             : {
     656             :         kasan_disable_current();
     657             : }
     658             : 
     659             : static inline void metadata_access_disable(void)
     660             : {
     661             :         kasan_enable_current();
     662             : }
     663             : 
     664             : /*
     665             :  * Object debugging
     666             :  */
     667             : 
     668             : /* Verify that a pointer has an address that is valid within a slab page */
     669           0 : static inline int check_valid_pointer(struct kmem_cache *s,
     670             :                                 struct slab *slab, void *object)
     671             : {
     672             :         void *base;
     673             : 
     674           0 :         if (!object)
     675             :                 return 1;
     676             : 
     677           0 :         base = slab_address(slab);
     678           0 :         object = kasan_reset_tag(object);
     679           0 :         object = restore_red_left(s, object);
     680           0 :         if (object < base || object >= base + slab->objects * s->size ||
     681           0 :                 (object - base) % s->size) {
     682             :                 return 0;
     683             :         }
     684             : 
     685           0 :         return 1;
     686             : }
     687             : 
     688             : static void print_section(char *level, char *text, u8 *addr,
     689             :                           unsigned int length)
     690             : {
     691             :         metadata_access_enable();
     692           0 :         print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
     693           0 :                         16, 1, kasan_reset_tag((void *)addr), length, 1);
     694             :         metadata_access_disable();
     695             : }
     696             : 
     697             : /*
     698             :  * See comment in calculate_sizes().
     699             :  */
     700             : static inline bool freeptr_outside_object(struct kmem_cache *s)
     701             : {
     702             :         return s->offset >= s->inuse;
     703             : }
     704             : 
     705             : /*
     706             :  * Return offset of the end of info block which is inuse + free pointer if
     707             :  * not overlapping with object.
     708             :  */
     709             : static inline unsigned int get_info_end(struct kmem_cache *s)
     710             : {
     711           0 :         if (freeptr_outside_object(s))
     712           0 :                 return s->inuse + sizeof(void *);
     713             :         else
     714             :                 return s->inuse;
     715             : }
     716             : 
     717             : static struct track *get_track(struct kmem_cache *s, void *object,
     718             :         enum track_item alloc)
     719             : {
     720             :         struct track *p;
     721             : 
     722           0 :         p = object + get_info_end(s);
     723             : 
     724           0 :         return kasan_reset_tag(p + alloc);
     725             : }
     726             : 
     727           0 : static void set_track(struct kmem_cache *s, void *object,
     728             :                         enum track_item alloc, unsigned long addr)
     729             : {
     730           0 :         struct track *p = get_track(s, object, alloc);
     731             : 
     732           0 :         if (addr) {
     733             : #ifdef CONFIG_STACKTRACE
     734             :                 unsigned int nr_entries;
     735             : 
     736             :                 metadata_access_enable();
     737           0 :                 nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
     738             :                                               TRACK_ADDRS_COUNT, 3);
     739             :                 metadata_access_disable();
     740             : 
     741           0 :                 if (nr_entries < TRACK_ADDRS_COUNT)
     742           0 :                         p->addrs[nr_entries] = 0;
     743             : #endif
     744           0 :                 p->addr = addr;
     745           0 :                 p->cpu = smp_processor_id();
     746           0 :                 p->pid = current->pid;
     747           0 :                 p->when = jiffies;
     748             :         } else {
     749           0 :                 memset(p, 0, sizeof(struct track));
     750             :         }
     751           0 : }
     752             : 
     753           1 : static void init_tracking(struct kmem_cache *s, void *object)
     754             : {
     755           1 :         if (!(s->flags & SLAB_STORE_USER))
     756             :                 return;
     757             : 
     758           0 :         set_track(s, object, TRACK_FREE, 0UL);
     759           0 :         set_track(s, object, TRACK_ALLOC, 0UL);
     760             : }
     761             : 
     762           0 : static void print_track(const char *s, struct track *t, unsigned long pr_time)
     763             : {
     764           0 :         if (!t->addr)
     765             :                 return;
     766             : 
     767           0 :         pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
     768             :                s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
     769             : #ifdef CONFIG_STACKTRACE
     770             :         {
     771             :                 int i;
     772           0 :                 for (i = 0; i < TRACK_ADDRS_COUNT; i++)
     773           0 :                         if (t->addrs[i])
     774           0 :                                 pr_err("\t%pS\n", (void *)t->addrs[i]);
     775             :                         else
     776             :                                 break;
     777             :         }
     778             : #endif
     779             : }
     780             : 
     781           0 : void print_tracking(struct kmem_cache *s, void *object)
     782             : {
     783           0 :         unsigned long pr_time = jiffies;
     784           0 :         if (!(s->flags & SLAB_STORE_USER))
     785             :                 return;
     786             : 
     787           0 :         print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
     788           0 :         print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
     789             : }
     790             : 
     791             : static void print_slab_info(const struct slab *slab)
     792             : {
     793           0 :         struct folio *folio = (struct folio *)slab_folio(slab);
     794             : 
     795           0 :         pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
     796             :                slab, slab->objects, slab->inuse, slab->freelist,
     797             :                folio_flags(folio, 0));
     798             : }
     799             : 
     800           0 : static void slab_bug(struct kmem_cache *s, char *fmt, ...)
     801             : {
     802             :         struct va_format vaf;
     803             :         va_list args;
     804             : 
     805           0 :         va_start(args, fmt);
     806           0 :         vaf.fmt = fmt;
     807           0 :         vaf.va = &args;
     808           0 :         pr_err("=============================================================================\n");
     809           0 :         pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
     810           0 :         pr_err("-----------------------------------------------------------------------------\n\n");
     811           0 :         va_end(args);
     812           0 : }
     813             : 
     814             : __printf(2, 3)
     815           0 : static void slab_fix(struct kmem_cache *s, char *fmt, ...)
     816             : {
     817             :         struct va_format vaf;
     818             :         va_list args;
     819             : 
     820           0 :         if (slab_add_kunit_errors())
     821           0 :                 return;
     822             : 
     823           0 :         va_start(args, fmt);
     824           0 :         vaf.fmt = fmt;
     825           0 :         vaf.va = &args;
     826           0 :         pr_err("FIX %s: %pV\n", s->name, &vaf);
     827           0 :         va_end(args);
     828             : }
     829             : 
     830           0 : static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
     831             : {
     832             :         unsigned int off;       /* Offset of last byte */
     833           0 :         u8 *addr = slab_address(slab);
     834             : 
     835           0 :         print_tracking(s, p);
     836             : 
     837           0 :         print_slab_info(slab);
     838             : 
     839           0 :         pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
     840             :                p, p - addr, get_freepointer(s, p));
     841             : 
     842           0 :         if (s->flags & SLAB_RED_ZONE)
     843           0 :                 print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
     844             :                               s->red_left_pad);
     845           0 :         else if (p > addr + 16)
     846           0 :                 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
     847             : 
     848           0 :         print_section(KERN_ERR,         "Object   ", p,
     849           0 :                       min_t(unsigned int, s->object_size, PAGE_SIZE));
     850           0 :         if (s->flags & SLAB_RED_ZONE)
     851           0 :                 print_section(KERN_ERR, "Redzone  ", p + s->object_size,
     852           0 :                         s->inuse - s->object_size);
     853             : 
     854           0 :         off = get_info_end(s);
     855             : 
     856           0 :         if (s->flags & SLAB_STORE_USER)
     857           0 :                 off += 2 * sizeof(struct track);
     858             : 
     859           0 :         off += kasan_metadata_size(s);
     860             : 
     861           0 :         if (off != size_from_object(s))
     862             :                 /* Beginning of the filler is the free pointer */
     863           0 :                 print_section(KERN_ERR, "Padding  ", p + off,
     864           0 :                               size_from_object(s) - off);
     865             : 
     866           0 :         dump_stack();
     867           0 : }
     868             : 
     869           0 : static void object_err(struct kmem_cache *s, struct slab *slab,
     870             :                         u8 *object, char *reason)
     871             : {
     872           0 :         if (slab_add_kunit_errors())
     873             :                 return;
     874             : 
     875           0 :         slab_bug(s, "%s", reason);
     876           0 :         print_trailer(s, slab, object);
     877           0 :         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
     878             : }
     879             : 
     880          82 : static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
     881             :                                void **freelist, void *nextfree)
     882             : {
     883          82 :         if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
     884           0 :             !check_valid_pointer(s, slab, nextfree) && freelist) {
     885           0 :                 object_err(s, slab, *freelist, "Freechain corrupt");
     886           0 :                 *freelist = NULL;
     887           0 :                 slab_fix(s, "Isolate corrupted freechain");
     888           0 :                 return true;
     889             :         }
     890             : 
     891             :         return false;
     892             : }
     893             : 
     894           0 : static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
     895             :                         const char *fmt, ...)
     896             : {
     897             :         va_list args;
     898             :         char buf[100];
     899             : 
     900           0 :         if (slab_add_kunit_errors())
     901           0 :                 return;
     902             : 
     903           0 :         va_start(args, fmt);
     904           0 :         vsnprintf(buf, sizeof(buf), fmt, args);
     905           0 :         va_end(args);
     906           0 :         slab_bug(s, "%s", buf);
     907           0 :         print_slab_info(slab);
     908           0 :         dump_stack();
     909           0 :         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
     910             : }
     911             : 
     912           1 : static void init_object(struct kmem_cache *s, void *object, u8 val)
     913             : {
     914           1 :         u8 *p = kasan_reset_tag(object);
     915             : 
     916           1 :         if (s->flags & SLAB_RED_ZONE)
     917           0 :                 memset(p - s->red_left_pad, val, s->red_left_pad);
     918             : 
     919           1 :         if (s->flags & __OBJECT_POISON) {
     920           0 :                 memset(p, POISON_FREE, s->object_size - 1);
     921           0 :                 p[s->object_size - 1] = POISON_END;
     922             :         }
     923             : 
     924           1 :         if (s->flags & SLAB_RED_ZONE)
     925           0 :                 memset(p + s->object_size, val, s->inuse - s->object_size);
     926           1 : }
     927             : 
     928           0 : static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
     929             :                                                 void *from, void *to)
     930             : {
     931           0 :         slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
     932           0 :         memset(from, data, to - from);
     933           0 : }
     934             : 
     935           0 : static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
     936             :                         u8 *object, char *what,
     937             :                         u8 *start, unsigned int value, unsigned int bytes)
     938             : {
     939             :         u8 *fault;
     940             :         u8 *end;
     941           0 :         u8 *addr = slab_address(slab);
     942             : 
     943             :         metadata_access_enable();
     944           0 :         fault = memchr_inv(kasan_reset_tag(start), value, bytes);
     945             :         metadata_access_disable();
     946           0 :         if (!fault)
     947             :                 return 1;
     948             : 
     949           0 :         end = start + bytes;
     950           0 :         while (end > fault && end[-1] == value)
     951           0 :                 end--;
     952             : 
     953           0 :         if (slab_add_kunit_errors())
     954             :                 goto skip_bug_print;
     955             : 
     956           0 :         slab_bug(s, "%s overwritten", what);
     957           0 :         pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
     958             :                                         fault, end - 1, fault - addr,
     959             :                                         fault[0], value);
     960           0 :         print_trailer(s, slab, object);
     961           0 :         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
     962             : 
     963             : skip_bug_print:
     964           0 :         restore_bytes(s, what, value, fault, end);
     965           0 :         return 0;
     966             : }
     967             : 
     968             : /*
     969             :  * Object layout:
     970             :  *
     971             :  * object address
     972             :  *      Bytes of the object to be managed.
     973             :  *      If the freepointer may overlay the object then the free
     974             :  *      pointer is at the middle of the object.
     975             :  *
     976             :  *      Poisoning uses 0x6b (POISON_FREE) and the last byte is
     977             :  *      0xa5 (POISON_END)
     978             :  *
     979             :  * object + s->object_size
     980             :  *      Padding to reach word boundary. This is also used for Redzoning.
     981             :  *      Padding is extended by another word if Redzoning is enabled and
     982             :  *      object_size == inuse.
     983             :  *
     984             :  *      We fill with 0xbb (RED_INACTIVE) for inactive objects and with
     985             :  *      0xcc (RED_ACTIVE) for objects in use.
     986             :  *
     987             :  * object + s->inuse
     988             :  *      Meta data starts here.
     989             :  *
     990             :  *      A. Free pointer (if we cannot overwrite object on free)
     991             :  *      B. Tracking data for SLAB_STORE_USER
     992             :  *      C. Padding to reach required alignment boundary or at minimum
     993             :  *              one word if debugging is on to be able to detect writes
     994             :  *              before the word boundary.
     995             :  *
     996             :  *      Padding is done using 0x5a (POISON_INUSE)
     997             :  *
     998             :  * object + s->size
     999             :  *      Nothing is used beyond s->size.
    1000             :  *
    1001             :  * If slabcaches are merged then the object_size and inuse boundaries are mostly
    1002             :  * ignored. And therefore no slab options that rely on these boundaries
    1003             :  * may be used with merged slabcaches.
    1004             :  */
    1005             : 
    1006           0 : static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
    1007             : {
    1008           0 :         unsigned long off = get_info_end(s);    /* The end of info */
    1009             : 
    1010           0 :         if (s->flags & SLAB_STORE_USER)
    1011             :                 /* We also have user information there */
    1012           0 :                 off += 2 * sizeof(struct track);
    1013             : 
    1014           0 :         off += kasan_metadata_size(s);
    1015             : 
    1016           0 :         if (size_from_object(s) == off)
    1017             :                 return 1;
    1018             : 
    1019           0 :         return check_bytes_and_report(s, slab, p, "Object padding",
    1020           0 :                         p + off, POISON_INUSE, size_from_object(s) - off);
    1021             : }
    1022             : 
    1023             : /* Check the pad bytes at the end of a slab page */
    1024           0 : static int slab_pad_check(struct kmem_cache *s, struct slab *slab)
    1025             : {
    1026             :         u8 *start;
    1027             :         u8 *fault;
    1028             :         u8 *end;
    1029             :         u8 *pad;
    1030             :         int length;
    1031             :         int remainder;
    1032             : 
    1033           0 :         if (!(s->flags & SLAB_POISON))
    1034             :                 return 1;
    1035             : 
    1036           0 :         start = slab_address(slab);
    1037           0 :         length = slab_size(slab);
    1038           0 :         end = start + length;
    1039           0 :         remainder = length % s->size;
    1040           0 :         if (!remainder)
    1041             :                 return 1;
    1042             : 
    1043           0 :         pad = end - remainder;
    1044             :         metadata_access_enable();
    1045           0 :         fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
    1046             :         metadata_access_disable();
    1047           0 :         if (!fault)
    1048             :                 return 1;
    1049           0 :         while (end > fault && end[-1] == POISON_INUSE)
    1050           0 :                 end--;
    1051             : 
    1052           0 :         slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
    1053             :                         fault, end - 1, fault - start);
    1054           0 :         print_section(KERN_ERR, "Padding ", pad, remainder);
    1055             : 
    1056           0 :         restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
    1057           0 :         return 0;
    1058             : }
    1059             : 
    1060           0 : static int check_object(struct kmem_cache *s, struct slab *slab,
    1061             :                                         void *object, u8 val)
    1062             : {
    1063           0 :         u8 *p = object;
    1064           0 :         u8 *endobject = object + s->object_size;
    1065             : 
    1066           0 :         if (s->flags & SLAB_RED_ZONE) {
    1067           0 :                 if (!check_bytes_and_report(s, slab, object, "Left Redzone",
    1068           0 :                         object - s->red_left_pad, val, s->red_left_pad))
    1069             :                         return 0;
    1070             : 
    1071           0 :                 if (!check_bytes_and_report(s, slab, object, "Right Redzone",
    1072           0 :                         endobject, val, s->inuse - s->object_size))
    1073             :                         return 0;
    1074             :         } else {
    1075           0 :                 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
    1076           0 :                         check_bytes_and_report(s, slab, p, "Alignment padding",
    1077             :                                 endobject, POISON_INUSE,
    1078             :                                 s->inuse - s->object_size);
    1079             :                 }
    1080             :         }
    1081             : 
    1082           0 :         if (s->flags & SLAB_POISON) {
    1083           0 :                 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
    1084           0 :                         (!check_bytes_and_report(s, slab, p, "Poison", p,
    1085           0 :                                         POISON_FREE, s->object_size - 1) ||
    1086           0 :                          !check_bytes_and_report(s, slab, p, "End Poison",
    1087           0 :                                 p + s->object_size - 1, POISON_END, 1)))
    1088             :                         return 0;
    1089             :                 /*
    1090             :                  * check_pad_bytes cleans up on its own.
    1091             :                  */
    1092           0 :                 check_pad_bytes(s, slab, p);
    1093             :         }
    1094             : 
    1095           0 :         if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
    1096             :                 /*
    1097             :                  * Object and freepointer overlap. Cannot check
    1098             :                  * freepointer while object is allocated.
    1099             :                  */
    1100             :                 return 1;
    1101             : 
    1102             :         /* Check free pointer validity */
    1103           0 :         if (!check_valid_pointer(s, slab, get_freepointer(s, p))) {
    1104           0 :                 object_err(s, slab, p, "Freepointer corrupt");
    1105             :                 /*
    1106             :                  * No choice but to zap it and thus lose the remainder
    1107             :                  * of the free objects in this slab. May cause
    1108             :                  * another error because the object count is now wrong.
    1109             :                  */
    1110           0 :                 set_freepointer(s, p, NULL);
    1111           0 :                 return 0;
    1112             :         }
    1113             :         return 1;
    1114             : }
    1115             : 
    1116           0 : static int check_slab(struct kmem_cache *s, struct slab *slab)
    1117             : {
    1118             :         int maxobj;
    1119             : 
    1120           0 :         if (!folio_test_slab(slab_folio(slab))) {
    1121           0 :                 slab_err(s, slab, "Not a valid slab page");
    1122           0 :                 return 0;
    1123             :         }
    1124             : 
    1125           0 :         maxobj = order_objects(slab_order(slab), s->size);
    1126           0 :         if (slab->objects > maxobj) {
    1127           0 :                 slab_err(s, slab, "objects %u > max %u",
    1128             :                         slab->objects, maxobj);
    1129           0 :                 return 0;
    1130             :         }
    1131           0 :         if (slab->inuse > slab->objects) {
    1132           0 :                 slab_err(s, slab, "inuse %u > max %u",
    1133             :                         slab->inuse, slab->objects);
    1134           0 :                 return 0;
    1135             :         }
    1136             :         /* Slab_pad_check fixes things up after itself */
    1137           0 :         slab_pad_check(s, slab);
    1138           0 :         return 1;
    1139             : }
    1140             : 
    1141             : /*
    1142             :  * Determine if a certain object in a slab is on the freelist. Must hold the
    1143             :  * slab lock to guarantee that the chains are in a consistent state.
    1144             :  */
    1145           0 : static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
    1146             : {
    1147           0 :         int nr = 0;
    1148             :         void *fp;
    1149           0 :         void *object = NULL;
    1150             :         int max_objects;
    1151             : 
    1152           0 :         fp = slab->freelist;
    1153           0 :         while (fp && nr <= slab->objects) {
    1154           0 :                 if (fp == search)
    1155             :                         return 1;
    1156           0 :                 if (!check_valid_pointer(s, slab, fp)) {
    1157           0 :                         if (object) {
    1158           0 :                                 object_err(s, slab, object,
    1159             :                                         "Freechain corrupt");
    1160           0 :                                 set_freepointer(s, object, NULL);
    1161             :                         } else {
    1162           0 :                                 slab_err(s, slab, "Freepointer corrupt");
    1163           0 :                                 slab->freelist = NULL;
    1164           0 :                                 slab->inuse = slab->objects;
    1165           0 :                                 slab_fix(s, "Freelist cleared");
    1166           0 :                                 return 0;
    1167             :                         }
    1168             :                         break;
    1169             :                 }
    1170           0 :                 object = fp;
    1171           0 :                 fp = get_freepointer(s, object);
    1172           0 :                 nr++;
    1173             :         }
    1174             : 
    1175           0 :         max_objects = order_objects(slab_order(slab), s->size);
    1176           0 :         if (max_objects > MAX_OBJS_PER_PAGE)
    1177           0 :                 max_objects = MAX_OBJS_PER_PAGE;
    1178             : 
    1179           0 :         if (slab->objects != max_objects) {
    1180           0 :                 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
    1181             :                          slab->objects, max_objects);
    1182           0 :                 slab->objects = max_objects;
    1183           0 :                 slab_fix(s, "Number of objects adjusted");
    1184             :         }
    1185           0 :         if (slab->inuse != slab->objects - nr) {
    1186           0 :                 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
    1187             :                          slab->inuse, slab->objects - nr);
    1188           0 :                 slab->inuse = slab->objects - nr;
    1189           0 :                 slab_fix(s, "Object count adjusted");
    1190             :         }
    1191           0 :         return search == NULL;
    1192             : }
    1193             : 
    1194           0 : static void trace(struct kmem_cache *s, struct slab *slab, void *object,
    1195             :                                                                 int alloc)
    1196             : {
    1197           0 :         if (s->flags & SLAB_TRACE) {
    1198           0 :                 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
    1199             :                         s->name,
    1200             :                         alloc ? "alloc" : "free",
    1201             :                         object, slab->inuse,
    1202             :                         slab->freelist);
    1203             : 
    1204           0 :                 if (!alloc)
    1205           0 :                         print_section(KERN_INFO, "Object ", (void *)object,
    1206             :                                         s->object_size);
    1207             : 
    1208           0 :                 dump_stack();
    1209             :         }
    1210           0 : }
    1211             : 
    1212             : /*
    1213             :  * Tracking of fully allocated slabs for debugging purposes.
    1214             :  */
    1215             : static void add_full(struct kmem_cache *s,
    1216             :         struct kmem_cache_node *n, struct slab *slab)
    1217             : {
    1218           0 :         if (!(s->flags & SLAB_STORE_USER))
    1219             :                 return;
    1220             : 
    1221             :         lockdep_assert_held(&n->list_lock);
    1222           0 :         list_add(&slab->slab_list, &n->full);
    1223             : }
    1224             : 
    1225             : static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
    1226             : {
    1227          49 :         if (!(s->flags & SLAB_STORE_USER))
    1228             :                 return;
    1229             : 
    1230             :         lockdep_assert_held(&n->list_lock);
    1231           0 :         list_del(&slab->slab_list);
    1232             : }
    1233             : 
    1234             : /* Tracking of the number of slabs for debugging purposes */
    1235             : static inline unsigned long slabs_node(struct kmem_cache *s, int node)
    1236             : {
    1237           0 :         struct kmem_cache_node *n = get_node(s, node);
    1238             : 
    1239           0 :         return atomic_long_read(&n->nr_slabs);
    1240             : }
    1241             : 
    1242             : static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
    1243             : {
    1244           0 :         return atomic_long_read(&n->nr_slabs);
    1245             : }
    1246             : 
    1247             : static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
    1248             : {
    1249         455 :         struct kmem_cache_node *n = get_node(s, node);
    1250             : 
    1251             :         /*
    1252             :          * May be called early in order to allocate a slab for the
    1253             :          * kmem_cache_node structure. Solve the chicken-egg
    1254             :          * dilemma by deferring the increment of the count during
    1255             :          * bootstrap (see early_kmem_cache_node_alloc).
    1256             :          */
    1257         455 :         if (likely(n)) {
    1258         908 :                 atomic_long_inc(&n->nr_slabs);
    1259         454 :                 atomic_long_add(objects, &n->total_objects);
    1260             :         }
    1261             : }
    1262             : static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
    1263             : {
    1264           3 :         struct kmem_cache_node *n = get_node(s, node);
    1265             : 
    1266           6 :         atomic_long_dec(&n->nr_slabs);
    1267           6 :         atomic_long_sub(objects, &n->total_objects);
    1268             : }
    1269             : 
    1270             : /* Object debug checks for alloc/free paths */
    1271       14186 : static void setup_object_debug(struct kmem_cache *s, struct slab *slab,
    1272             :                                                                 void *object)
    1273             : {
    1274       28372 :         if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
    1275             :                 return;
    1276             : 
    1277           0 :         init_object(s, object, SLUB_RED_INACTIVE);
    1278           0 :         init_tracking(s, object);
    1279             : }
    1280             : 
    1281             : static
    1282         454 : void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
    1283             : {
    1284         908 :         if (!kmem_cache_debug_flags(s, SLAB_POISON))
    1285             :                 return;
    1286             : 
    1287           0 :         metadata_access_enable();
    1288           0 :         memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
    1289             :         metadata_access_disable();
    1290             : }
    1291             : 
    1292           0 : static inline int alloc_consistency_checks(struct kmem_cache *s,
    1293             :                                         struct slab *slab, void *object)
    1294             : {
    1295           0 :         if (!check_slab(s, slab))
    1296             :                 return 0;
    1297             : 
    1298           0 :         if (!check_valid_pointer(s, slab, object)) {
    1299           0 :                 object_err(s, slab, object, "Freelist Pointer check fails");
    1300           0 :                 return 0;
    1301             :         }
    1302             : 
    1303           0 :         if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
    1304             :                 return 0;
    1305             : 
    1306           0 :         return 1;
    1307             : }
    1308             : 
    1309           0 : static noinline int alloc_debug_processing(struct kmem_cache *s,
    1310             :                                         struct slab *slab,
    1311             :                                         void *object, unsigned long addr)
    1312             : {
    1313           0 :         if (s->flags & SLAB_CONSISTENCY_CHECKS) {
    1314           0 :                 if (!alloc_consistency_checks(s, slab, object))
    1315             :                         goto bad;
    1316             :         }
    1317             : 
    1318             :         /* Success perform special debug activities for allocs */
    1319           0 :         if (s->flags & SLAB_STORE_USER)
    1320           0 :                 set_track(s, object, TRACK_ALLOC, addr);
    1321           0 :         trace(s, slab, object, 1);
    1322           0 :         init_object(s, object, SLUB_RED_ACTIVE);
    1323           0 :         return 1;
    1324             : 
    1325             : bad:
    1326           0 :         if (folio_test_slab(slab_folio(slab))) {
    1327             :                 /*
    1328             :                  * If this is a slab page then lets do the best we can
    1329             :                  * to avoid issues in the future. Marking all objects
    1330             :                  * as used avoids touching the remaining objects.
    1331             :                  */
    1332           0 :                 slab_fix(s, "Marking all objects used");
    1333           0 :                 slab->inuse = slab->objects;
    1334           0 :                 slab->freelist = NULL;
    1335             :         }
    1336             :         return 0;
    1337             : }
    1338             : 
    1339           0 : static inline int free_consistency_checks(struct kmem_cache *s,
    1340             :                 struct slab *slab, void *object, unsigned long addr)
    1341             : {
    1342           0 :         if (!check_valid_pointer(s, slab, object)) {
    1343           0 :                 slab_err(s, slab, "Invalid object pointer 0x%p", object);
    1344             :                 return 0;
    1345             :         }
    1346             : 
    1347           0 :         if (on_freelist(s, slab, object)) {
    1348           0 :                 object_err(s, slab, object, "Object already free");
    1349             :                 return 0;
    1350             :         }
    1351             : 
    1352           0 :         if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
    1353             :                 return 0;
    1354             : 
    1355           0 :         if (unlikely(s != slab->slab_cache)) {
    1356           0 :                 if (!folio_test_slab(slab_folio(slab))) {
    1357           0 :                         slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
    1358             :                                  object);
    1359           0 :                 } else if (!slab->slab_cache) {
    1360           0 :                         pr_err("SLUB <none>: no slab for object 0x%p.\n",
    1361             :                                object);
    1362           0 :                         dump_stack();
    1363             :                 } else
    1364           0 :                         object_err(s, slab, object,
    1365             :                                         "page slab pointer corrupt.");
    1366             :                 return 0;
    1367             :         }
    1368             :         return 1;
    1369             : }
    1370             : 
    1371             : /* Supports checking bulk free of a constructed freelist */
    1372           0 : static noinline int free_debug_processing(
    1373             :         struct kmem_cache *s, struct slab *slab,
    1374             :         void *head, void *tail, int bulk_cnt,
    1375             :         unsigned long addr)
    1376             : {
    1377           0 :         struct kmem_cache_node *n = get_node(s, slab_nid(slab));
    1378           0 :         void *object = head;
    1379           0 :         int cnt = 0;
    1380             :         unsigned long flags, flags2;
    1381           0 :         int ret = 0;
    1382             : 
    1383           0 :         spin_lock_irqsave(&n->list_lock, flags);
    1384           0 :         slab_lock(slab, &flags2);
    1385             : 
    1386           0 :         if (s->flags & SLAB_CONSISTENCY_CHECKS) {
    1387           0 :                 if (!check_slab(s, slab))
    1388             :                         goto out;
    1389             :         }
    1390             : 
    1391             : next_object:
    1392           0 :         cnt++;
    1393             : 
    1394           0 :         if (s->flags & SLAB_CONSISTENCY_CHECKS) {
    1395           0 :                 if (!free_consistency_checks(s, slab, object, addr))
    1396             :                         goto out;
    1397             :         }
    1398             : 
    1399           0 :         if (s->flags & SLAB_STORE_USER)
    1400           0 :                 set_track(s, object, TRACK_FREE, addr);
    1401           0 :         trace(s, slab, object, 0);
    1402             :         /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
    1403           0 :         init_object(s, object, SLUB_RED_INACTIVE);
    1404             : 
    1405             :         /* Reached end of constructed freelist yet? */
    1406           0 :         if (object != tail) {
    1407           0 :                 object = get_freepointer(s, object);
    1408           0 :                 goto next_object;
    1409             :         }
    1410             :         ret = 1;
    1411             : 
    1412             : out:
    1413           0 :         if (cnt != bulk_cnt)
    1414           0 :                 slab_err(s, slab, "Bulk freelist count(%d) invalid(%d)\n",
    1415             :                          bulk_cnt, cnt);
    1416             : 
    1417           0 :         slab_unlock(slab, &flags2);
    1418           0 :         spin_unlock_irqrestore(&n->list_lock, flags);
    1419           0 :         if (!ret)
    1420           0 :                 slab_fix(s, "Object at 0x%p not freed", object);
    1421           0 :         return ret;
    1422             : }
    1423             : 
    1424             : /*
    1425             :  * Parse a block of slub_debug options. Blocks are delimited by ';'
    1426             :  *
    1427             :  * @str:    start of block
    1428             :  * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
    1429             :  * @slabs:  return start of list of slabs, or NULL when there's no list
    1430             :  * @init:   assume this is initial parsing and not per-kmem-create parsing
    1431             :  *
    1432             :  * returns the start of next block if there's any, or NULL
    1433             :  */
    1434             : static char *
    1435           0 : parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
    1436             : {
    1437           0 :         bool higher_order_disable = false;
    1438             : 
    1439             :         /* Skip any completely empty blocks */
    1440           0 :         while (*str && *str == ';')
    1441           0 :                 str++;
    1442             : 
    1443           0 :         if (*str == ',') {
    1444             :                 /*
    1445             :                  * No options but restriction on slabs. This means full
    1446             :                  * debugging for slabs matching a pattern.
    1447             :                  */
    1448           0 :                 *flags = DEBUG_DEFAULT_FLAGS;
    1449           0 :                 goto check_slabs;
    1450             :         }
    1451           0 :         *flags = 0;
    1452             : 
    1453             :         /* Determine which debug features should be switched on */
    1454           0 :         for (; *str && *str != ',' && *str != ';'; str++) {
    1455           0 :                 switch (tolower(*str)) {
    1456             :                 case '-':
    1457           0 :                         *flags = 0;
    1458           0 :                         break;
    1459             :                 case 'f':
    1460           0 :                         *flags |= SLAB_CONSISTENCY_CHECKS;
    1461           0 :                         break;
    1462             :                 case 'z':
    1463           0 :                         *flags |= SLAB_RED_ZONE;
    1464           0 :                         break;
    1465             :                 case 'p':
    1466           0 :                         *flags |= SLAB_POISON;
    1467           0 :                         break;
    1468             :                 case 'u':
    1469           0 :                         *flags |= SLAB_STORE_USER;
    1470           0 :                         break;
    1471             :                 case 't':
    1472           0 :                         *flags |= SLAB_TRACE;
    1473           0 :                         break;
    1474             :                 case 'a':
    1475             :                         *flags |= SLAB_FAILSLAB;
    1476           0 :                         break;
    1477             :                 case 'o':
    1478             :                         /*
    1479             :                          * Avoid enabling debugging on caches if its minimum
    1480             :                          * order would increase as a result.
    1481             :                          */
    1482             :                         higher_order_disable = true;
    1483             :                         break;
    1484             :                 default:
    1485           0 :                         if (init)
    1486           0 :                                 pr_err("slub_debug option '%c' unknown. skipped\n", *str);
    1487             :                 }
    1488             :         }
    1489             : check_slabs:
    1490           0 :         if (*str == ',')
    1491           0 :                 *slabs = ++str;
    1492             :         else
    1493           0 :                 *slabs = NULL;
    1494             : 
    1495             :         /* Skip over the slab list */
    1496           0 :         while (*str && *str != ';')
    1497           0 :                 str++;
    1498             : 
    1499             :         /* Skip any completely empty blocks */
    1500           0 :         while (*str && *str == ';')
    1501           0 :                 str++;
    1502             : 
    1503           0 :         if (init && higher_order_disable)
    1504           0 :                 disable_higher_order_debug = 1;
    1505             : 
    1506           0 :         if (*str)
    1507             :                 return str;
    1508             :         else
    1509           0 :                 return NULL;
    1510             : }
    1511             : 
    1512           0 : static int __init setup_slub_debug(char *str)
    1513             : {
    1514             :         slab_flags_t flags;
    1515             :         slab_flags_t global_flags;
    1516             :         char *saved_str;
    1517             :         char *slab_list;
    1518           0 :         bool global_slub_debug_changed = false;
    1519           0 :         bool slab_list_specified = false;
    1520             : 
    1521           0 :         global_flags = DEBUG_DEFAULT_FLAGS;
    1522           0 :         if (*str++ != '=' || !*str)
    1523             :                 /*
    1524             :                  * No options specified. Switch on full debugging.
    1525             :                  */
    1526             :                 goto out;
    1527             : 
    1528             :         saved_str = str;
    1529           0 :         while (str) {
    1530           0 :                 str = parse_slub_debug_flags(str, &flags, &slab_list, true);
    1531             : 
    1532           0 :                 if (!slab_list) {
    1533           0 :                         global_flags = flags;
    1534           0 :                         global_slub_debug_changed = true;
    1535             :                 } else {
    1536             :                         slab_list_specified = true;
    1537             :                 }
    1538             :         }
    1539             : 
    1540             :         /*
    1541             :          * For backwards compatibility, a single list of flags with list of
    1542             :          * slabs means debugging is only changed for those slabs, so the global
    1543             :          * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
    1544             :          * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
    1545             :          * long as there is no option specifying flags without a slab list.
    1546             :          */
    1547           0 :         if (slab_list_specified) {
    1548           0 :                 if (!global_slub_debug_changed)
    1549           0 :                         global_flags = slub_debug;
    1550           0 :                 slub_debug_string = saved_str;
    1551             :         }
    1552             : out:
    1553           0 :         slub_debug = global_flags;
    1554           0 :         if (slub_debug != 0 || slub_debug_string)
    1555           0 :                 static_branch_enable(&slub_debug_enabled);
    1556             :         else
    1557           0 :                 static_branch_disable(&slub_debug_enabled);
    1558           0 :         if ((static_branch_unlikely(&init_on_alloc) ||
    1559           0 :              static_branch_unlikely(&init_on_free)) &&
    1560           0 :             (slub_debug & SLAB_POISON))
    1561           0 :                 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
    1562           0 :         return 1;
    1563             : }
    1564             : 
    1565             : __setup("slub_debug", setup_slub_debug);
    1566             : 
    1567             : /*
    1568             :  * kmem_cache_flags - apply debugging options to the cache
    1569             :  * @object_size:        the size of an object without meta data
    1570             :  * @flags:              flags to set
    1571             :  * @name:               name of the cache
    1572             :  *
    1573             :  * Debug option(s) are applied to @flags. In addition to the debug
    1574             :  * option(s), if a slab name (or multiple) is specified i.e.
    1575             :  * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
    1576             :  * then only the select slabs will receive the debug option(s).
    1577             :  */
    1578         114 : slab_flags_t kmem_cache_flags(unsigned int object_size,
    1579             :         slab_flags_t flags, const char *name)
    1580             : {
    1581             :         char *iter;
    1582             :         size_t len;
    1583             :         char *next_block;
    1584             :         slab_flags_t block_flags;
    1585         114 :         slab_flags_t slub_debug_local = slub_debug;
    1586             : 
    1587             :         /*
    1588             :          * If the slab cache is for debugging (e.g. kmemleak) then
    1589             :          * don't store user (stack trace) information by default,
    1590             :          * but let the user enable it via the command line below.
    1591             :          */
    1592         114 :         if (flags & SLAB_NOLEAKTRACE)
    1593           0 :                 slub_debug_local &= ~SLAB_STORE_USER;
    1594             : 
    1595         114 :         len = strlen(name);
    1596         114 :         next_block = slub_debug_string;
    1597             :         /* Go through all blocks of debug options, see if any matches our slab's name */
    1598         228 :         while (next_block) {
    1599           0 :                 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
    1600           0 :                 if (!iter)
    1601           0 :                         continue;
    1602             :                 /* Found a block that has a slab list, search it */
    1603           0 :                 while (*iter) {
    1604             :                         char *end, *glob;
    1605             :                         size_t cmplen;
    1606             : 
    1607           0 :                         end = strchrnul(iter, ',');
    1608           0 :                         if (next_block && next_block < end)
    1609           0 :                                 end = next_block - 1;
    1610             : 
    1611           0 :                         glob = strnchr(iter, end - iter, '*');
    1612           0 :                         if (glob)
    1613           0 :                                 cmplen = glob - iter;
    1614             :                         else
    1615           0 :                                 cmplen = max_t(size_t, len, (end - iter));
    1616             : 
    1617           0 :                         if (!strncmp(name, iter, cmplen)) {
    1618           0 :                                 flags |= block_flags;
    1619           0 :                                 return flags;
    1620             :                         }
    1621             : 
    1622           0 :                         if (!*end || *end == ';')
    1623             :                                 break;
    1624           0 :                         iter = end + 1;
    1625             :                 }
    1626             :         }
    1627             : 
    1628         114 :         return flags | slub_debug_local;
    1629             : }
    1630             : #else /* !CONFIG_SLUB_DEBUG */
    1631             : static inline void setup_object_debug(struct kmem_cache *s,
    1632             :                         struct slab *slab, void *object) {}
    1633             : static inline
    1634             : void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
    1635             : 
    1636             : static inline int alloc_debug_processing(struct kmem_cache *s,
    1637             :         struct slab *slab, void *object, unsigned long addr) { return 0; }
    1638             : 
    1639             : static inline int free_debug_processing(
    1640             :         struct kmem_cache *s, struct slab *slab,
    1641             :         void *head, void *tail, int bulk_cnt,
    1642             :         unsigned long addr) { return 0; }
    1643             : 
    1644             : static inline int slab_pad_check(struct kmem_cache *s, struct slab *slab)
    1645             :                         { return 1; }
    1646             : static inline int check_object(struct kmem_cache *s, struct slab *slab,
    1647             :                         void *object, u8 val) { return 1; }
    1648             : static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
    1649             :                                         struct slab *slab) {}
    1650             : static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
    1651             :                                         struct slab *slab) {}
    1652             : slab_flags_t kmem_cache_flags(unsigned int object_size,
    1653             :         slab_flags_t flags, const char *name)
    1654             : {
    1655             :         return flags;
    1656             : }
    1657             : #define slub_debug 0
    1658             : 
    1659             : #define disable_higher_order_debug 0
    1660             : 
    1661             : static inline unsigned long slabs_node(struct kmem_cache *s, int node)
    1662             :                                                         { return 0; }
    1663             : static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
    1664             :                                                         { return 0; }
    1665             : static inline void inc_slabs_node(struct kmem_cache *s, int node,
    1666             :                                                         int objects) {}
    1667             : static inline void dec_slabs_node(struct kmem_cache *s, int node,
    1668             :                                                         int objects) {}
    1669             : 
    1670             : static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
    1671             :                                void **freelist, void *nextfree)
    1672             : {
    1673             :         return false;
    1674             : }
    1675             : #endif /* CONFIG_SLUB_DEBUG */
    1676             : 
    1677             : /*
    1678             :  * Hooks for other subsystems that check memory allocations. In a typical
    1679             :  * production configuration these hooks all should produce no code at all.
    1680             :  */
    1681             : static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
    1682             : {
    1683             :         ptr = kasan_kmalloc_large(ptr, size, flags);
    1684             :         /* As ptr might get tagged, call kmemleak hook after KASAN. */
    1685             :         kmemleak_alloc(ptr, size, 1, flags);
    1686             :         return ptr;
    1687             : }
    1688             : 
    1689             : static __always_inline void kfree_hook(void *x)
    1690             : {
    1691           8 :         kmemleak_free(x);
    1692           8 :         kasan_kfree_large(x);
    1693             : }
    1694             : 
    1695             : static __always_inline bool slab_free_hook(struct kmem_cache *s,
    1696             :                                                 void *x, bool init)
    1697             : {
    1698        5311 :         kmemleak_free_recursive(x, s->flags);
    1699             : 
    1700        5311 :         debug_check_no_locks_freed(x, s->object_size);
    1701             : 
    1702             :         if (!(s->flags & SLAB_DEBUG_OBJECTS))
    1703        5311 :                 debug_check_no_obj_freed(x, s->object_size);
    1704             : 
    1705             :         /* Use KCSAN to help debug racy use-after-free. */
    1706             :         if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
    1707             :                 __kcsan_check_access(x, s->object_size,
    1708             :                                      KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
    1709             : 
    1710             :         /*
    1711             :          * As memory initialization might be integrated into KASAN,
    1712             :          * kasan_slab_free and initialization memset's must be
    1713             :          * kept together to avoid discrepancies in behavior.
    1714             :          *
    1715             :          * The initialization memset's clear the object and the metadata,
    1716             :          * but don't touch the SLAB redzone.
    1717             :          */
    1718        5311 :         if (init) {
    1719             :                 int rsize;
    1720             : 
    1721             :                 if (!kasan_has_integrated_init())
    1722           0 :                         memset(kasan_reset_tag(x), 0, s->object_size);
    1723           0 :                 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
    1724           0 :                 memset((char *)kasan_reset_tag(x) + s->inuse, 0,
    1725           0 :                        s->size - s->inuse - rsize);
    1726             :         }
    1727             :         /* KASAN might put x into memory quarantine, delaying its reuse. */
    1728        5311 :         return kasan_slab_free(s, x, init);
    1729             : }
    1730             : 
    1731        5311 : static inline bool slab_free_freelist_hook(struct kmem_cache *s,
    1732             :                                            void **head, void **tail,
    1733             :                                            int *cnt)
    1734             : {
    1735             : 
    1736             :         void *object;
    1737        5311 :         void *next = *head;
    1738        5311 :         void *old_tail = *tail ? *tail : *head;
    1739             : 
    1740        5311 :         if (is_kfence_address(next)) {
    1741             :                 slab_free_hook(s, next, false);
    1742             :                 return true;
    1743             :         }
    1744             : 
    1745             :         /* Head and tail of the reconstructed freelist */
    1746        5311 :         *head = NULL;
    1747        5311 :         *tail = NULL;
    1748             : 
    1749             :         do {
    1750        5311 :                 object = next;
    1751       10622 :                 next = get_freepointer(s, object);
    1752             : 
    1753             :                 /* If object's reuse doesn't have to be delayed */
    1754       15933 :                 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) {
    1755             :                         /* Move object to the new freelist */
    1756       10622 :                         set_freepointer(s, object, *head);
    1757        5311 :                         *head = object;
    1758        5311 :                         if (!*tail)
    1759        5311 :                                 *tail = object;
    1760             :                 } else {
    1761             :                         /*
    1762             :                          * Adjust the reconstructed freelist depth
    1763             :                          * accordingly if object's reuse is delayed.
    1764             :                          */
    1765             :                         --(*cnt);
    1766             :                 }
    1767        5311 :         } while (object != old_tail);
    1768             : 
    1769        5311 :         if (*head == *tail)
    1770        5311 :                 *tail = NULL;
    1771             : 
    1772        5311 :         return *head != NULL;
    1773             : }
    1774             : 
    1775             : static void *setup_object(struct kmem_cache *s, struct slab *slab,
    1776             :                                 void *object)
    1777             : {
    1778       14186 :         setup_object_debug(s, slab, object);
    1779       14186 :         object = kasan_init_slab_obj(s, object);
    1780       14186 :         if (unlikely(s->ctor)) {
    1781         287 :                 kasan_unpoison_object_data(s, object);
    1782         287 :                 s->ctor(object);
    1783         287 :                 kasan_poison_object_data(s, object);
    1784             :         }
    1785             :         return object;
    1786             : }
    1787             : 
    1788             : /*
    1789             :  * Slab allocation and freeing
    1790             :  */
    1791         454 : static inline struct slab *alloc_slab_page(gfp_t flags, int node,
    1792             :                 struct kmem_cache_order_objects oo)
    1793             : {
    1794             :         struct folio *folio;
    1795             :         struct slab *slab;
    1796         454 :         unsigned int order = oo_order(oo);
    1797             : 
    1798         454 :         if (node == NUMA_NO_NODE)
    1799         453 :                 folio = (struct folio *)alloc_pages(flags, order);
    1800             :         else
    1801           1 :                 folio = (struct folio *)__alloc_pages_node(node, flags, order);
    1802             : 
    1803         454 :         if (!folio)
    1804             :                 return NULL;
    1805             : 
    1806         454 :         slab = folio_slab(folio);
    1807         454 :         __folio_set_slab(folio);
    1808         908 :         if (page_is_pfmemalloc(folio_page(folio, 0)))
    1809             :                 slab_set_pfmemalloc(slab);
    1810             : 
    1811             :         return slab;
    1812             : }
    1813             : 
    1814             : #ifdef CONFIG_SLAB_FREELIST_RANDOM
    1815             : /* Pre-initialize the random sequence cache */
    1816             : static int init_cache_random_seq(struct kmem_cache *s)
    1817             : {
    1818             :         unsigned int count = oo_objects(s->oo);
    1819             :         int err;
    1820             : 
    1821             :         /* Bailout if already initialised */
    1822             :         if (s->random_seq)
    1823             :                 return 0;
    1824             : 
    1825             :         err = cache_random_seq_create(s, count, GFP_KERNEL);
    1826             :         if (err) {
    1827             :                 pr_err("SLUB: Unable to initialize free list for %s\n",
    1828             :                         s->name);
    1829             :                 return err;
    1830             :         }
    1831             : 
    1832             :         /* Transform to an offset on the set of pages */
    1833             :         if (s->random_seq) {
    1834             :                 unsigned int i;
    1835             : 
    1836             :                 for (i = 0; i < count; i++)
    1837             :                         s->random_seq[i] *= s->size;
    1838             :         }
    1839             :         return 0;
    1840             : }
    1841             : 
    1842             : /* Initialize each random sequence freelist per cache */
    1843             : static void __init init_freelist_randomization(void)
    1844             : {
    1845             :         struct kmem_cache *s;
    1846             : 
    1847             :         mutex_lock(&slab_mutex);
    1848             : 
    1849             :         list_for_each_entry(s, &slab_caches, list)
    1850             :                 init_cache_random_seq(s);
    1851             : 
    1852             :         mutex_unlock(&slab_mutex);
    1853             : }
    1854             : 
    1855             : /* Get the next entry on the pre-computed freelist randomized */
    1856             : static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab,
    1857             :                                 unsigned long *pos, void *start,
    1858             :                                 unsigned long page_limit,
    1859             :                                 unsigned long freelist_count)
    1860             : {
    1861             :         unsigned int idx;
    1862             : 
    1863             :         /*
    1864             :          * If the target page allocation failed, the number of objects on the
    1865             :          * page might be smaller than the usual size defined by the cache.
    1866             :          */
    1867             :         do {
    1868             :                 idx = s->random_seq[*pos];
    1869             :                 *pos += 1;
    1870             :                 if (*pos >= freelist_count)
    1871             :                         *pos = 0;
    1872             :         } while (unlikely(idx >= page_limit));
    1873             : 
    1874             :         return (char *)start + idx;
    1875             : }
    1876             : 
    1877             : /* Shuffle the single linked freelist based on a random pre-computed sequence */
    1878             : static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
    1879             : {
    1880             :         void *start;
    1881             :         void *cur;
    1882             :         void *next;
    1883             :         unsigned long idx, pos, page_limit, freelist_count;
    1884             : 
    1885             :         if (slab->objects < 2 || !s->random_seq)
    1886             :                 return false;
    1887             : 
    1888             :         freelist_count = oo_objects(s->oo);
    1889             :         pos = get_random_int() % freelist_count;
    1890             : 
    1891             :         page_limit = slab->objects * s->size;
    1892             :         start = fixup_red_left(s, slab_address(slab));
    1893             : 
    1894             :         /* First entry is used as the base of the freelist */
    1895             :         cur = next_freelist_entry(s, slab, &pos, start, page_limit,
    1896             :                                 freelist_count);
    1897             :         cur = setup_object(s, slab, cur);
    1898             :         slab->freelist = cur;
    1899             : 
    1900             :         for (idx = 1; idx < slab->objects; idx++) {
    1901             :                 next = next_freelist_entry(s, slab, &pos, start, page_limit,
    1902             :                         freelist_count);
    1903             :                 next = setup_object(s, slab, next);
    1904             :                 set_freepointer(s, cur, next);
    1905             :                 cur = next;
    1906             :         }
    1907             :         set_freepointer(s, cur, NULL);
    1908             : 
    1909             :         return true;
    1910             : }
    1911             : #else
    1912             : static inline int init_cache_random_seq(struct kmem_cache *s)
    1913             : {
    1914             :         return 0;
    1915             : }
    1916             : static inline void init_freelist_randomization(void) { }
    1917             : static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
    1918             : {
    1919             :         return false;
    1920             : }
    1921             : #endif /* CONFIG_SLAB_FREELIST_RANDOM */
    1922             : 
    1923         454 : static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
    1924             : {
    1925             :         struct slab *slab;
    1926         454 :         struct kmem_cache_order_objects oo = s->oo;
    1927             :         gfp_t alloc_gfp;
    1928             :         void *start, *p, *next;
    1929             :         int idx;
    1930             :         bool shuffle;
    1931             : 
    1932         454 :         flags &= gfp_allowed_mask;
    1933             : 
    1934         454 :         flags |= s->allocflags;
    1935             : 
    1936             :         /*
    1937             :          * Let the initial higher-order allocation fail under memory pressure
    1938             :          * so we fall-back to the minimum order allocation.
    1939             :          */
    1940         454 :         alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
    1941         864 :         if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
    1942          76 :                 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
    1943             : 
    1944         454 :         slab = alloc_slab_page(alloc_gfp, node, oo);
    1945         454 :         if (unlikely(!slab)) {
    1946           0 :                 oo = s->min;
    1947           0 :                 alloc_gfp = flags;
    1948             :                 /*
    1949             :                  * Allocation may have failed due to fragmentation.
    1950             :                  * Try a lower order alloc if possible
    1951             :                  */
    1952           0 :                 slab = alloc_slab_page(alloc_gfp, node, oo);
    1953           0 :                 if (unlikely(!slab))
    1954             :                         goto out;
    1955             :                 stat(s, ORDER_FALLBACK);
    1956             :         }
    1957             : 
    1958         454 :         slab->objects = oo_objects(oo);
    1959             : 
    1960         908 :         account_slab(slab, oo_order(oo), s, flags);
    1961             : 
    1962         454 :         slab->slab_cache = s;
    1963             : 
    1964         454 :         kasan_poison_slab(slab);
    1965             : 
    1966         454 :         start = slab_address(slab);
    1967             : 
    1968         454 :         setup_slab_debug(s, slab, start);
    1969             : 
    1970         454 :         shuffle = shuffle_freelist(s, slab);
    1971             : 
    1972             :         if (!shuffle) {
    1973         454 :                 start = fixup_red_left(s, start);
    1974         908 :                 start = setup_object(s, slab, start);
    1975         454 :                 slab->freelist = start;
    1976       14186 :                 for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
    1977       13732 :                         next = p + s->size;
    1978       27464 :                         next = setup_object(s, slab, next);
    1979       27464 :                         set_freepointer(s, p, next);
    1980       13732 :                         p = next;
    1981             :                 }
    1982         454 :                 set_freepointer(s, p, NULL);
    1983             :         }
    1984             : 
    1985         454 :         slab->inuse = slab->objects;
    1986         454 :         slab->frozen = 1;
    1987             : 
    1988             : out:
    1989         454 :         if (!slab)
    1990             :                 return NULL;
    1991             : 
    1992         908 :         inc_slabs_node(s, slab_nid(slab), slab->objects);
    1993             : 
    1994             :         return slab;
    1995             : }
    1996             : 
    1997         454 : static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
    1998             : {
    1999         454 :         if (unlikely(flags & GFP_SLAB_BUG_MASK))
    2000           0 :                 flags = kmalloc_fix_flags(flags);
    2001             : 
    2002         454 :         WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
    2003             : 
    2004         454 :         return allocate_slab(s,
    2005             :                 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
    2006             : }
    2007             : 
    2008           3 : static void __free_slab(struct kmem_cache *s, struct slab *slab)
    2009             : {
    2010           3 :         struct folio *folio = slab_folio(slab);
    2011           3 :         int order = folio_order(folio);
    2012           3 :         int pages = 1 << order;
    2013             : 
    2014           6 :         if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
    2015             :                 void *p;
    2016             : 
    2017           0 :                 slab_pad_check(s, slab);
    2018           0 :                 for_each_object(p, s, slab_address(slab), slab->objects)
    2019           0 :                         check_object(s, slab, p, SLUB_RED_INACTIVE);
    2020             :         }
    2021             : 
    2022           3 :         __slab_clear_pfmemalloc(slab);
    2023           3 :         __folio_clear_slab(folio);
    2024           3 :         folio->mapping = NULL;
    2025           3 :         if (current->reclaim_state)
    2026           0 :                 current->reclaim_state->reclaimed_slab += pages;
    2027           3 :         unaccount_slab(slab, order, s);
    2028           3 :         __free_pages(folio_page(folio, 0), order);
    2029           3 : }
    2030             : 
    2031           0 : static void rcu_free_slab(struct rcu_head *h)
    2032             : {
    2033           0 :         struct slab *slab = container_of(h, struct slab, rcu_head);
    2034             : 
    2035           0 :         __free_slab(slab->slab_cache, slab);
    2036           0 : }
    2037             : 
    2038           3 : static void free_slab(struct kmem_cache *s, struct slab *slab)
    2039             : {
    2040           3 :         if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
    2041           0 :                 call_rcu(&slab->rcu_head, rcu_free_slab);
    2042             :         } else
    2043           3 :                 __free_slab(s, slab);
    2044           3 : }
    2045             : 
    2046             : static void discard_slab(struct kmem_cache *s, struct slab *slab)
    2047             : {
    2048           9 :         dec_slabs_node(s, slab_nid(slab), slab->objects);
    2049           3 :         free_slab(s, slab);
    2050             : }
    2051             : 
    2052             : /*
    2053             :  * Management of partially allocated slabs.
    2054             :  */
    2055             : static inline void
    2056             : __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
    2057             : {
    2058          52 :         n->nr_partial++;
    2059           2 :         if (tail == DEACTIVATE_TO_TAIL)
    2060          49 :                 list_add_tail(&slab->slab_list, &n->partial);
    2061             :         else
    2062           3 :                 list_add(&slab->slab_list, &n->partial);
    2063             : }
    2064             : 
    2065             : static inline void add_partial(struct kmem_cache_node *n,
    2066             :                                 struct slab *slab, int tail)
    2067             : {
    2068             :         lockdep_assert_held(&n->list_lock);
    2069           2 :         __add_partial(n, slab, tail);
    2070             : }
    2071             : 
    2072             : static inline void remove_partial(struct kmem_cache_node *n,
    2073             :                                         struct slab *slab)
    2074             : {
    2075             :         lockdep_assert_held(&n->list_lock);
    2076          98 :         list_del(&slab->slab_list);
    2077          49 :         n->nr_partial--;
    2078             : }
    2079             : 
    2080             : /*
    2081             :  * Remove slab from the partial list, freeze it and
    2082             :  * return the pointer to the freelist.
    2083             :  *
    2084             :  * Returns a list of objects or NULL if it fails.
    2085             :  */
    2086          46 : static inline void *acquire_slab(struct kmem_cache *s,
    2087             :                 struct kmem_cache_node *n, struct slab *slab,
    2088             :                 int mode)
    2089             : {
    2090             :         void *freelist;
    2091             :         unsigned long counters;
    2092             :         struct slab new;
    2093             : 
    2094             :         lockdep_assert_held(&n->list_lock);
    2095             : 
    2096             :         /*
    2097             :          * Zap the freelist and set the frozen bit.
    2098             :          * The old freelist is the list of objects for the
    2099             :          * per cpu allocation list.
    2100             :          */
    2101          46 :         freelist = slab->freelist;
    2102          46 :         counters = slab->counters;
    2103          46 :         new.counters = counters;
    2104          46 :         if (mode) {
    2105          46 :                 new.inuse = slab->objects;
    2106          46 :                 new.freelist = NULL;
    2107             :         } else {
    2108             :                 new.freelist = freelist;
    2109             :         }
    2110             : 
    2111             :         VM_BUG_ON(new.frozen);
    2112          46 :         new.frozen = 1;
    2113             : 
    2114          92 :         if (!__cmpxchg_double_slab(s, slab,
    2115             :                         freelist, counters,
    2116             :                         new.freelist, new.counters,
    2117             :                         "acquire_slab"))
    2118             :                 return NULL;
    2119             : 
    2120          92 :         remove_partial(n, slab);
    2121          46 :         WARN_ON(!freelist);
    2122             :         return freelist;
    2123             : }
    2124             : 
    2125             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    2126             : static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
    2127             : #else
    2128             : static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
    2129             :                                    int drain) { }
    2130             : #endif
    2131             : static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
    2132             : 
    2133             : /*
    2134             :  * Try to allocate a partial slab from a specific node.
    2135             :  */
    2136         499 : static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
    2137             :                               struct slab **ret_slab, gfp_t gfpflags)
    2138             : {
    2139             :         struct slab *slab, *slab2;
    2140         499 :         void *object = NULL;
    2141             :         unsigned long flags;
    2142         499 :         unsigned int partial_slabs = 0;
    2143             : 
    2144             :         /*
    2145             :          * Racy check. If we mistakenly see no partial slabs then we
    2146             :          * just allocate an empty slab. If we mistakenly try to get a
    2147             :          * partial slab and there is none available then get_partial()
    2148             :          * will return NULL.
    2149             :          */
    2150         499 :         if (!n || !n->nr_partial)
    2151             :                 return NULL;
    2152             : 
    2153          46 :         spin_lock_irqsave(&n->list_lock, flags);
    2154          92 :         list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
    2155             :                 void *t;
    2156             : 
    2157          46 :                 if (!pfmemalloc_match(slab, gfpflags))
    2158           0 :                         continue;
    2159             : 
    2160          46 :                 t = acquire_slab(s, n, slab, object == NULL);
    2161          46 :                 if (!t)
    2162             :                         break;
    2163             : 
    2164             :                 if (!object) {
    2165          46 :                         *ret_slab = slab;
    2166          46 :                         stat(s, ALLOC_FROM_PARTIAL);
    2167          46 :                         object = t;
    2168             :                 } else {
    2169             :                         put_cpu_partial(s, slab, 0);
    2170             :                         stat(s, CPU_PARTIAL_NODE);
    2171             :                         partial_slabs++;
    2172             :                 }
    2173             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    2174             :                 if (!kmem_cache_has_cpu_partial(s)
    2175             :                         || partial_slabs > s->cpu_partial_slabs / 2)
    2176             :                         break;
    2177             : #else
    2178             :                 break;
    2179             : #endif
    2180             : 
    2181             :         }
    2182          92 :         spin_unlock_irqrestore(&n->list_lock, flags);
    2183             :         return object;
    2184             : }
    2185             : 
    2186             : /*
    2187             :  * Get a slab from somewhere. Search in increasing NUMA distances.
    2188             :  */
    2189             : static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
    2190             :                              struct slab **ret_slab)
    2191             : {
    2192             : #ifdef CONFIG_NUMA
    2193             :         struct zonelist *zonelist;
    2194             :         struct zoneref *z;
    2195             :         struct zone *zone;
    2196             :         enum zone_type highest_zoneidx = gfp_zone(flags);
    2197             :         void *object;
    2198             :         unsigned int cpuset_mems_cookie;
    2199             : 
    2200             :         /*
    2201             :          * The defrag ratio allows a configuration of the tradeoffs between
    2202             :          * inter node defragmentation and node local allocations. A lower
    2203             :          * defrag_ratio increases the tendency to do local allocations
    2204             :          * instead of attempting to obtain partial slabs from other nodes.
    2205             :          *
    2206             :          * If the defrag_ratio is set to 0 then kmalloc() always
    2207             :          * returns node local objects. If the ratio is higher then kmalloc()
    2208             :          * may return off node objects because partial slabs are obtained
    2209             :          * from other nodes and filled up.
    2210             :          *
    2211             :          * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
    2212             :          * (which makes defrag_ratio = 1000) then every (well almost)
    2213             :          * allocation will first attempt to defrag slab caches on other nodes.
    2214             :          * This means scanning over all nodes to look for partial slabs which
    2215             :          * may be expensive if we do it every time we are trying to find a slab
    2216             :          * with available objects.
    2217             :          */
    2218             :         if (!s->remote_node_defrag_ratio ||
    2219             :                         get_cycles() % 1024 > s->remote_node_defrag_ratio)
    2220             :                 return NULL;
    2221             : 
    2222             :         do {
    2223             :                 cpuset_mems_cookie = read_mems_allowed_begin();
    2224             :                 zonelist = node_zonelist(mempolicy_slab_node(), flags);
    2225             :                 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
    2226             :                         struct kmem_cache_node *n;
    2227             : 
    2228             :                         n = get_node(s, zone_to_nid(zone));
    2229             : 
    2230             :                         if (n && cpuset_zone_allowed(zone, flags) &&
    2231             :                                         n->nr_partial > s->min_partial) {
    2232             :                                 object = get_partial_node(s, n, ret_slab, flags);
    2233             :                                 if (object) {
    2234             :                                         /*
    2235             :                                          * Don't check read_mems_allowed_retry()
    2236             :                                          * here - if mems_allowed was updated in
    2237             :                                          * parallel, that was a harmless race
    2238             :                                          * between allocation and the cpuset
    2239             :                                          * update
    2240             :                                          */
    2241             :                                         return object;
    2242             :                                 }
    2243             :                         }
    2244             :                 }
    2245             :         } while (read_mems_allowed_retry(cpuset_mems_cookie));
    2246             : #endif  /* CONFIG_NUMA */
    2247             :         return NULL;
    2248             : }
    2249             : 
    2250             : /*
    2251             :  * Get a partial slab, lock it and return it.
    2252             :  */
    2253             : static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
    2254             :                          struct slab **ret_slab)
    2255             : {
    2256             :         void *object;
    2257         499 :         int searchnode = node;
    2258             : 
    2259         499 :         if (node == NUMA_NO_NODE)
    2260         499 :                 searchnode = numa_mem_id();
    2261             : 
    2262         499 :         object = get_partial_node(s, get_node(s, searchnode), ret_slab, flags);
    2263         499 :         if (object || node != NUMA_NO_NODE)
    2264             :                 return object;
    2265             : 
    2266         453 :         return get_any_partial(s, flags, ret_slab);
    2267             : }
    2268             : 
    2269             : #ifdef CONFIG_PREEMPTION
    2270             : /*
    2271             :  * Calculate the next globally unique transaction for disambiguation
    2272             :  * during cmpxchg. The transactions start with the cpu number and are then
    2273             :  * incremented by CONFIG_NR_CPUS.
    2274             :  */
    2275             : #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
    2276             : #else
    2277             : /*
    2278             :  * No preemption supported therefore also no need to check for
    2279             :  * different cpus.
    2280             :  */
    2281             : #define TID_STEP 1
    2282             : #endif
    2283             : 
    2284             : static inline unsigned long next_tid(unsigned long tid)
    2285             : {
    2286       22362 :         return tid + TID_STEP;
    2287             : }
    2288             : 
    2289             : #ifdef SLUB_DEBUG_CMPXCHG
    2290             : static inline unsigned int tid_to_cpu(unsigned long tid)
    2291             : {
    2292             :         return tid % TID_STEP;
    2293             : }
    2294             : 
    2295             : static inline unsigned long tid_to_event(unsigned long tid)
    2296             : {
    2297             :         return tid / TID_STEP;
    2298             : }
    2299             : #endif
    2300             : 
    2301             : static inline unsigned int init_tid(int cpu)
    2302             : {
    2303          67 :         return cpu;
    2304             : }
    2305             : 
    2306             : static inline void note_cmpxchg_failure(const char *n,
    2307             :                 const struct kmem_cache *s, unsigned long tid)
    2308             : {
    2309             : #ifdef SLUB_DEBUG_CMPXCHG
    2310             :         unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
    2311             : 
    2312             :         pr_info("%s %s: cmpxchg redo ", n, s->name);
    2313             : 
    2314             : #ifdef CONFIG_PREEMPTION
    2315             :         if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
    2316             :                 pr_warn("due to cpu change %d -> %d\n",
    2317             :                         tid_to_cpu(tid), tid_to_cpu(actual_tid));
    2318             :         else
    2319             : #endif
    2320             :         if (tid_to_event(tid) != tid_to_event(actual_tid))
    2321             :                 pr_warn("due to cpu running other code. Event %ld->%ld\n",
    2322             :                         tid_to_event(tid), tid_to_event(actual_tid));
    2323             :         else
    2324             :                 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
    2325             :                         actual_tid, tid, next_tid(tid));
    2326             : #endif
    2327             :         stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
    2328             : }
    2329             : 
    2330             : static void init_kmem_cache_cpus(struct kmem_cache *s)
    2331             : {
    2332             :         int cpu;
    2333             :         struct kmem_cache_cpu *c;
    2334             : 
    2335          67 :         for_each_possible_cpu(cpu) {
    2336          67 :                 c = per_cpu_ptr(s->cpu_slab, cpu);
    2337          67 :                 local_lock_init(&c->lock);
    2338          67 :                 c->tid = init_tid(cpu);
    2339             :         }
    2340             : }
    2341             : 
    2342             : /*
    2343             :  * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
    2344             :  * unfreezes the slabs and puts it on the proper list.
    2345             :  * Assumes the slab has been already safely taken away from kmem_cache_cpu
    2346             :  * by the caller.
    2347             :  */
    2348           2 : static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
    2349             :                             void *freelist)
    2350             : {
    2351             :         enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };
    2352           6 :         struct kmem_cache_node *n = get_node(s, slab_nid(slab));
    2353           2 :         int free_delta = 0;
    2354           2 :         enum slab_modes mode = M_NONE;
    2355             :         void *nextfree, *freelist_iter, *freelist_tail;
    2356           2 :         int tail = DEACTIVATE_TO_HEAD;
    2357           2 :         unsigned long flags = 0;
    2358             :         struct slab new;
    2359             :         struct slab old;
    2360             : 
    2361           2 :         if (slab->freelist) {
    2362           0 :                 stat(s, DEACTIVATE_REMOTE_FREES);
    2363           0 :                 tail = DEACTIVATE_TO_TAIL;
    2364             :         }
    2365             : 
    2366             :         /*
    2367             :          * Stage one: Count the objects on cpu's freelist as free_delta and
    2368             :          * remember the last object in freelist_tail for later splicing.
    2369             :          */
    2370           2 :         freelist_tail = NULL;
    2371           2 :         freelist_iter = freelist;
    2372          86 :         while (freelist_iter) {
    2373         164 :                 nextfree = get_freepointer(s, freelist_iter);
    2374             : 
    2375             :                 /*
    2376             :                  * If 'nextfree' is invalid, it is possible that the object at
    2377             :                  * 'freelist_iter' is already corrupted.  So isolate all objects
    2378             :                  * starting at 'freelist_iter' by skipping them.
    2379             :                  */
    2380          82 :                 if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
    2381             :                         break;
    2382             : 
    2383          82 :                 freelist_tail = freelist_iter;
    2384          82 :                 free_delta++;
    2385             : 
    2386          82 :                 freelist_iter = nextfree;
    2387             :         }
    2388             : 
    2389             :         /*
    2390             :          * Stage two: Unfreeze the slab while splicing the per-cpu
    2391             :          * freelist to the head of slab's freelist.
    2392             :          *
    2393             :          * Ensure that the slab is unfrozen while the list presence
    2394             :          * reflects the actual number of objects during unfreeze.
    2395             :          *
    2396             :          * We first perform cmpxchg holding lock and insert to list
    2397             :          * when it succeed. If there is mismatch then the slab is not
    2398             :          * unfrozen and number of objects in the slab may have changed.
    2399             :          * Then release lock and retry cmpxchg again.
    2400             :          */
    2401             : redo:
    2402             : 
    2403           2 :         old.freelist = READ_ONCE(slab->freelist);
    2404           2 :         old.counters = READ_ONCE(slab->counters);
    2405             :         VM_BUG_ON(!old.frozen);
    2406             : 
    2407             :         /* Determine target state of the slab */
    2408           2 :         new.counters = old.counters;
    2409           2 :         if (freelist_tail) {
    2410           2 :                 new.inuse -= free_delta;
    2411           4 :                 set_freepointer(s, freelist_tail, old.freelist);
    2412           2 :                 new.freelist = freelist;
    2413             :         } else
    2414             :                 new.freelist = old.freelist;
    2415             : 
    2416           2 :         new.frozen = 0;
    2417             : 
    2418           2 :         if (!new.inuse && n->nr_partial >= s->min_partial) {
    2419             :                 mode = M_FREE;
    2420           2 :         } else if (new.freelist) {
    2421           2 :                 mode = M_PARTIAL;
    2422             :                 /*
    2423             :                  * Taking the spinlock removes the possibility that
    2424             :                  * acquire_slab() will see a slab that is frozen
    2425             :                  */
    2426           2 :                 spin_lock_irqsave(&n->list_lock, flags);
    2427           0 :         } else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
    2428           0 :                 mode = M_FULL;
    2429             :                 /*
    2430             :                  * This also ensures that the scanning of full
    2431             :                  * slabs from diagnostic functions will not see
    2432             :                  * any frozen slabs.
    2433             :                  */
    2434           0 :                 spin_lock_irqsave(&n->list_lock, flags);
    2435             :         } else {
    2436             :                 mode = M_FULL_NOLIST;
    2437             :         }
    2438             : 
    2439             : 
    2440           2 :         if (!cmpxchg_double_slab(s, slab,
    2441             :                                 old.freelist, old.counters,
    2442             :                                 new.freelist, new.counters,
    2443             :                                 "unfreezing slab")) {
    2444           0 :                 if (mode == M_PARTIAL || mode == M_FULL)
    2445           0 :                         spin_unlock_irqrestore(&n->list_lock, flags);
    2446             :                 goto redo;
    2447             :         }
    2448             : 
    2449             : 
    2450           2 :         if (mode == M_PARTIAL) {
    2451           2 :                 add_partial(n, slab, tail);
    2452           4 :                 spin_unlock_irqrestore(&n->list_lock, flags);
    2453           2 :                 stat(s, tail);
    2454           0 :         } else if (mode == M_FREE) {
    2455           0 :                 stat(s, DEACTIVATE_EMPTY);
    2456             :                 discard_slab(s, slab);
    2457             :                 stat(s, FREE_SLAB);
    2458           0 :         } else if (mode == M_FULL) {
    2459           0 :                 add_full(s, n, slab);
    2460           0 :                 spin_unlock_irqrestore(&n->list_lock, flags);
    2461             :                 stat(s, DEACTIVATE_FULL);
    2462             :         } else if (mode == M_FULL_NOLIST) {
    2463             :                 stat(s, DEACTIVATE_FULL);
    2464             :         }
    2465           2 : }
    2466             : 
    2467             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    2468             : static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
    2469             : {
    2470             :         struct kmem_cache_node *n = NULL, *n2 = NULL;
    2471             :         struct slab *slab, *slab_to_discard = NULL;
    2472             :         unsigned long flags = 0;
    2473             : 
    2474             :         while (partial_slab) {
    2475             :                 struct slab new;
    2476             :                 struct slab old;
    2477             : 
    2478             :                 slab = partial_slab;
    2479             :                 partial_slab = slab->next;
    2480             : 
    2481             :                 n2 = get_node(s, slab_nid(slab));
    2482             :                 if (n != n2) {
    2483             :                         if (n)
    2484             :                                 spin_unlock_irqrestore(&n->list_lock, flags);
    2485             : 
    2486             :                         n = n2;
    2487             :                         spin_lock_irqsave(&n->list_lock, flags);
    2488             :                 }
    2489             : 
    2490             :                 do {
    2491             : 
    2492             :                         old.freelist = slab->freelist;
    2493             :                         old.counters = slab->counters;
    2494             :                         VM_BUG_ON(!old.frozen);
    2495             : 
    2496             :                         new.counters = old.counters;
    2497             :                         new.freelist = old.freelist;
    2498             : 
    2499             :                         new.frozen = 0;
    2500             : 
    2501             :                 } while (!__cmpxchg_double_slab(s, slab,
    2502             :                                 old.freelist, old.counters,
    2503             :                                 new.freelist, new.counters,
    2504             :                                 "unfreezing slab"));
    2505             : 
    2506             :                 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
    2507             :                         slab->next = slab_to_discard;
    2508             :                         slab_to_discard = slab;
    2509             :                 } else {
    2510             :                         add_partial(n, slab, DEACTIVATE_TO_TAIL);
    2511             :                         stat(s, FREE_ADD_PARTIAL);
    2512             :                 }
    2513             :         }
    2514             : 
    2515             :         if (n)
    2516             :                 spin_unlock_irqrestore(&n->list_lock, flags);
    2517             : 
    2518             :         while (slab_to_discard) {
    2519             :                 slab = slab_to_discard;
    2520             :                 slab_to_discard = slab_to_discard->next;
    2521             : 
    2522             :                 stat(s, DEACTIVATE_EMPTY);
    2523             :                 discard_slab(s, slab);
    2524             :                 stat(s, FREE_SLAB);
    2525             :         }
    2526             : }
    2527             : 
    2528             : /*
    2529             :  * Unfreeze all the cpu partial slabs.
    2530             :  */
    2531             : static void unfreeze_partials(struct kmem_cache *s)
    2532             : {
    2533             :         struct slab *partial_slab;
    2534             :         unsigned long flags;
    2535             : 
    2536             :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    2537             :         partial_slab = this_cpu_read(s->cpu_slab->partial);
    2538             :         this_cpu_write(s->cpu_slab->partial, NULL);
    2539             :         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2540             : 
    2541             :         if (partial_slab)
    2542             :                 __unfreeze_partials(s, partial_slab);
    2543             : }
    2544             : 
    2545             : static void unfreeze_partials_cpu(struct kmem_cache *s,
    2546             :                                   struct kmem_cache_cpu *c)
    2547             : {
    2548             :         struct slab *partial_slab;
    2549             : 
    2550             :         partial_slab = slub_percpu_partial(c);
    2551             :         c->partial = NULL;
    2552             : 
    2553             :         if (partial_slab)
    2554             :                 __unfreeze_partials(s, partial_slab);
    2555             : }
    2556             : 
    2557             : /*
    2558             :  * Put a slab that was just frozen (in __slab_free|get_partial_node) into a
    2559             :  * partial slab slot if available.
    2560             :  *
    2561             :  * If we did not find a slot then simply move all the partials to the
    2562             :  * per node partial list.
    2563             :  */
    2564             : static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
    2565             : {
    2566             :         struct slab *oldslab;
    2567             :         struct slab *slab_to_unfreeze = NULL;
    2568             :         unsigned long flags;
    2569             :         int slabs = 0;
    2570             : 
    2571             :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    2572             : 
    2573             :         oldslab = this_cpu_read(s->cpu_slab->partial);
    2574             : 
    2575             :         if (oldslab) {
    2576             :                 if (drain && oldslab->slabs >= s->cpu_partial_slabs) {
    2577             :                         /*
    2578             :                          * Partial array is full. Move the existing set to the
    2579             :                          * per node partial list. Postpone the actual unfreezing
    2580             :                          * outside of the critical section.
    2581             :                          */
    2582             :                         slab_to_unfreeze = oldslab;
    2583             :                         oldslab = NULL;
    2584             :                 } else {
    2585             :                         slabs = oldslab->slabs;
    2586             :                 }
    2587             :         }
    2588             : 
    2589             :         slabs++;
    2590             : 
    2591             :         slab->slabs = slabs;
    2592             :         slab->next = oldslab;
    2593             : 
    2594             :         this_cpu_write(s->cpu_slab->partial, slab);
    2595             : 
    2596             :         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2597             : 
    2598             :         if (slab_to_unfreeze) {
    2599             :                 __unfreeze_partials(s, slab_to_unfreeze);
    2600             :                 stat(s, CPU_PARTIAL_DRAIN);
    2601             :         }
    2602             : }
    2603             : 
    2604             : #else   /* CONFIG_SLUB_CPU_PARTIAL */
    2605             : 
    2606             : static inline void unfreeze_partials(struct kmem_cache *s) { }
    2607             : static inline void unfreeze_partials_cpu(struct kmem_cache *s,
    2608             :                                   struct kmem_cache_cpu *c) { }
    2609             : 
    2610             : #endif  /* CONFIG_SLUB_CPU_PARTIAL */
    2611             : 
    2612           0 : static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
    2613             : {
    2614             :         unsigned long flags;
    2615             :         struct slab *slab;
    2616             :         void *freelist;
    2617             : 
    2618           0 :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    2619             : 
    2620           0 :         slab = c->slab;
    2621           0 :         freelist = c->freelist;
    2622             : 
    2623           0 :         c->slab = NULL;
    2624           0 :         c->freelist = NULL;
    2625           0 :         c->tid = next_tid(c->tid);
    2626             : 
    2627           0 :         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2628             : 
    2629           0 :         if (slab) {
    2630           0 :                 deactivate_slab(s, slab, freelist);
    2631           0 :                 stat(s, CPUSLAB_FLUSH);
    2632             :         }
    2633           0 : }
    2634             : 
    2635           2 : static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
    2636             : {
    2637           2 :         struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
    2638           2 :         void *freelist = c->freelist;
    2639           2 :         struct slab *slab = c->slab;
    2640             : 
    2641           2 :         c->slab = NULL;
    2642           2 :         c->freelist = NULL;
    2643           4 :         c->tid = next_tid(c->tid);
    2644             : 
    2645           2 :         if (slab) {
    2646           2 :                 deactivate_slab(s, slab, freelist);
    2647           2 :                 stat(s, CPUSLAB_FLUSH);
    2648             :         }
    2649             : 
    2650           2 :         unfreeze_partials_cpu(s, c);
    2651           2 : }
    2652             : 
    2653             : struct slub_flush_work {
    2654             :         struct work_struct work;
    2655             :         struct kmem_cache *s;
    2656             :         bool skip;
    2657             : };
    2658             : 
    2659             : /*
    2660             :  * Flush cpu slab.
    2661             :  *
    2662             :  * Called from CPU work handler with migration disabled.
    2663             :  */
    2664           0 : static void flush_cpu_slab(struct work_struct *w)
    2665             : {
    2666             :         struct kmem_cache *s;
    2667             :         struct kmem_cache_cpu *c;
    2668             :         struct slub_flush_work *sfw;
    2669             : 
    2670           0 :         sfw = container_of(w, struct slub_flush_work, work);
    2671             : 
    2672           0 :         s = sfw->s;
    2673           0 :         c = this_cpu_ptr(s->cpu_slab);
    2674             : 
    2675           0 :         if (c->slab)
    2676           0 :                 flush_slab(s, c);
    2677             : 
    2678           0 :         unfreeze_partials(s);
    2679           0 : }
    2680             : 
    2681             : static bool has_cpu_slab(int cpu, struct kmem_cache *s)
    2682             : {
    2683           0 :         struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
    2684             : 
    2685           0 :         return c->slab || slub_percpu_partial(c);
    2686             : }
    2687             : 
    2688             : static DEFINE_MUTEX(flush_lock);
    2689             : static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
    2690             : 
    2691           0 : static void flush_all_cpus_locked(struct kmem_cache *s)
    2692             : {
    2693             :         struct slub_flush_work *sfw;
    2694             :         unsigned int cpu;
    2695             : 
    2696             :         lockdep_assert_cpus_held();
    2697           0 :         mutex_lock(&flush_lock);
    2698             : 
    2699           0 :         for_each_online_cpu(cpu) {
    2700           0 :                 sfw = &per_cpu(slub_flush, cpu);
    2701           0 :                 if (!has_cpu_slab(cpu, s)) {
    2702           0 :                         sfw->skip = true;
    2703           0 :                         continue;
    2704             :                 }
    2705           0 :                 INIT_WORK(&sfw->work, flush_cpu_slab);
    2706           0 :                 sfw->skip = false;
    2707           0 :                 sfw->s = s;
    2708           0 :                 schedule_work_on(cpu, &sfw->work);
    2709             :         }
    2710             : 
    2711           0 :         for_each_online_cpu(cpu) {
    2712           0 :                 sfw = &per_cpu(slub_flush, cpu);
    2713           0 :                 if (sfw->skip)
    2714           0 :                         continue;
    2715           0 :                 flush_work(&sfw->work);
    2716             :         }
    2717             : 
    2718           0 :         mutex_unlock(&flush_lock);
    2719           0 : }
    2720             : 
    2721             : static void flush_all(struct kmem_cache *s)
    2722             : {
    2723             :         cpus_read_lock();
    2724           0 :         flush_all_cpus_locked(s);
    2725             :         cpus_read_unlock();
    2726             : }
    2727             : 
    2728             : /*
    2729             :  * Use the cpu notifier to insure that the cpu slabs are flushed when
    2730             :  * necessary.
    2731             :  */
    2732           0 : static int slub_cpu_dead(unsigned int cpu)
    2733             : {
    2734             :         struct kmem_cache *s;
    2735             : 
    2736           0 :         mutex_lock(&slab_mutex);
    2737           0 :         list_for_each_entry(s, &slab_caches, list)
    2738           0 :                 __flush_cpu_slab(s, cpu);
    2739           0 :         mutex_unlock(&slab_mutex);
    2740           0 :         return 0;
    2741             : }
    2742             : 
    2743             : /*
    2744             :  * Check if the objects in a per cpu structure fit numa
    2745             :  * locality expectations.
    2746             :  */
    2747             : static inline int node_match(struct slab *slab, int node)
    2748             : {
    2749             : #ifdef CONFIG_NUMA
    2750             :         if (node != NUMA_NO_NODE && slab_nid(slab) != node)
    2751             :                 return 0;
    2752             : #endif
    2753             :         return 1;
    2754             : }
    2755             : 
    2756             : #ifdef CONFIG_SLUB_DEBUG
    2757           0 : static int count_free(struct slab *slab)
    2758             : {
    2759           0 :         return slab->objects - slab->inuse;
    2760             : }
    2761             : 
    2762             : static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
    2763             : {
    2764           0 :         return atomic_long_read(&n->total_objects);
    2765             : }
    2766             : #endif /* CONFIG_SLUB_DEBUG */
    2767             : 
    2768             : #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
    2769           0 : static unsigned long count_partial(struct kmem_cache_node *n,
    2770             :                                         int (*get_count)(struct slab *))
    2771             : {
    2772             :         unsigned long flags;
    2773           0 :         unsigned long x = 0;
    2774             :         struct slab *slab;
    2775             : 
    2776           0 :         spin_lock_irqsave(&n->list_lock, flags);
    2777           0 :         list_for_each_entry(slab, &n->partial, slab_list)
    2778           0 :                 x += get_count(slab);
    2779           0 :         spin_unlock_irqrestore(&n->list_lock, flags);
    2780           0 :         return x;
    2781             : }
    2782             : #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
    2783             : 
    2784             : static noinline void
    2785           0 : slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
    2786             : {
    2787             : #ifdef CONFIG_SLUB_DEBUG
    2788             :         static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
    2789             :                                       DEFAULT_RATELIMIT_BURST);
    2790             :         int node;
    2791             :         struct kmem_cache_node *n;
    2792             : 
    2793           0 :         if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
    2794             :                 return;
    2795             : 
    2796           0 :         pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
    2797             :                 nid, gfpflags, &gfpflags);
    2798           0 :         pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
    2799             :                 s->name, s->object_size, s->size, oo_order(s->oo),
    2800             :                 oo_order(s->min));
    2801             : 
    2802           0 :         if (oo_order(s->min) > get_order(s->object_size))
    2803           0 :                 pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
    2804             :                         s->name);
    2805             : 
    2806           0 :         for_each_kmem_cache_node(s, node, n) {
    2807             :                 unsigned long nr_slabs;
    2808             :                 unsigned long nr_objs;
    2809             :                 unsigned long nr_free;
    2810             : 
    2811           0 :                 nr_free  = count_partial(n, count_free);
    2812           0 :                 nr_slabs = node_nr_slabs(n);
    2813           0 :                 nr_objs  = node_nr_objs(n);
    2814             : 
    2815           0 :                 pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
    2816             :                         node, nr_slabs, nr_objs, nr_free);
    2817             :         }
    2818             : #endif
    2819             : }
    2820             : 
    2821             : static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
    2822             : {
    2823        1008 :         if (unlikely(slab_test_pfmemalloc(slab)))
    2824           0 :                 return gfp_pfmemalloc_allowed(gfpflags);
    2825             : 
    2826             :         return true;
    2827             : }
    2828             : 
    2829             : /*
    2830             :  * Check the slab->freelist and either transfer the freelist to the
    2831             :  * per cpu freelist or deactivate the slab.
    2832             :  *
    2833             :  * The slab is still frozen if the return value is not NULL.
    2834             :  *
    2835             :  * If this function returns NULL then the slab has been unfrozen.
    2836             :  */
    2837             : static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
    2838             : {
    2839             :         struct slab new;
    2840             :         unsigned long counters;
    2841             :         void *freelist;
    2842             : 
    2843             :         lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
    2844             : 
    2845             :         do {
    2846         463 :                 freelist = slab->freelist;
    2847         463 :                 counters = slab->counters;
    2848             : 
    2849         463 :                 new.counters = counters;
    2850             :                 VM_BUG_ON(!new.frozen);
    2851             : 
    2852         463 :                 new.inuse = slab->objects;
    2853         463 :                 new.frozen = freelist != NULL;
    2854             : 
    2855         926 :         } while (!__cmpxchg_double_slab(s, slab,
    2856             :                 freelist, counters,
    2857             :                 NULL, new.counters,
    2858         463 :                 "get_freelist"));
    2859             : 
    2860             :         return freelist;
    2861             : }
    2862             : 
    2863             : /*
    2864             :  * Slow path. The lockless freelist is empty or we need to perform
    2865             :  * debugging duties.
    2866             :  *
    2867             :  * Processing is still very fast if new objects have been freed to the
    2868             :  * regular freelist. In that case we simply take over the regular freelist
    2869             :  * as the lockless freelist and zap the regular freelist.
    2870             :  *
    2871             :  * If that is not working then we fall back to the partial lists. We take the
    2872             :  * first element of the freelist as the object to allocate now and move the
    2873             :  * rest of the freelist to the lockless freelist.
    2874             :  *
    2875             :  * And if we were unable to get a new slab from the partial slab lists then
    2876             :  * we need to allocate a new slab. This is the slowest path since it involves
    2877             :  * a call to the page allocator and the setup of a new slab.
    2878             :  *
    2879             :  * Version of __slab_alloc to use when we know that preemption is
    2880             :  * already disabled (which is the case for bulk allocation).
    2881             :  */
    2882         499 : static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
    2883             :                           unsigned long addr, struct kmem_cache_cpu *c)
    2884             : {
    2885             :         void *freelist;
    2886             :         struct slab *slab;
    2887             :         unsigned long flags;
    2888             : 
    2889         499 :         stat(s, ALLOC_SLOWPATH);
    2890             : 
    2891             : reread_slab:
    2892             : 
    2893         499 :         slab = READ_ONCE(c->slab);
    2894         499 :         if (!slab) {
    2895             :                 /*
    2896             :                  * if the node is not online or has no normal memory, just
    2897             :                  * ignore the node constraint
    2898             :                  */
    2899          36 :                 if (unlikely(node != NUMA_NO_NODE &&
    2900             :                              !node_isset(node, slab_nodes)))
    2901           0 :                         node = NUMA_NO_NODE;
    2902             :                 goto new_slab;
    2903             :         }
    2904             : redo:
    2905             : 
    2906         463 :         if (unlikely(!node_match(slab, node))) {
    2907             :                 /*
    2908             :                  * same as above but node_match() being false already
    2909             :                  * implies node != NUMA_NO_NODE
    2910             :                  */
    2911             :                 if (!node_isset(node, slab_nodes)) {
    2912             :                         node = NUMA_NO_NODE;
    2913             :                         goto redo;
    2914             :                 } else {
    2915             :                         stat(s, ALLOC_NODE_MISMATCH);
    2916             :                         goto deactivate_slab;
    2917             :                 }
    2918             :         }
    2919             : 
    2920             :         /*
    2921             :          * By rights, we should be searching for a slab page that was
    2922             :          * PFMEMALLOC but right now, we are losing the pfmemalloc
    2923             :          * information when the page leaves the per-cpu allocator
    2924             :          */
    2925         926 :         if (unlikely(!pfmemalloc_match(slab, gfpflags)))
    2926             :                 goto deactivate_slab;
    2927             : 
    2928             :         /* must check again c->slab in case we got preempted and it changed */
    2929         463 :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    2930         463 :         if (unlikely(slab != c->slab)) {
    2931           0 :                 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2932             :                 goto reread_slab;
    2933             :         }
    2934         463 :         freelist = c->freelist;
    2935         463 :         if (freelist)
    2936             :                 goto load_freelist;
    2937             : 
    2938         463 :         freelist = get_freelist(s, slab);
    2939             : 
    2940         463 :         if (!freelist) {
    2941         463 :                 c->slab = NULL;
    2942         463 :                 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2943             :                 stat(s, DEACTIVATE_BYPASS);
    2944             :                 goto new_slab;
    2945             :         }
    2946             : 
    2947             :         stat(s, ALLOC_REFILL);
    2948             : 
    2949             : load_freelist:
    2950             : 
    2951         499 :         lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
    2952             : 
    2953             :         /*
    2954             :          * freelist is pointing to the list of objects to be used.
    2955             :          * slab is pointing to the slab from which the objects are obtained.
    2956             :          * That slab must be frozen for per cpu allocations to work.
    2957             :          */
    2958             :         VM_BUG_ON(!c->slab->frozen);
    2959         998 :         c->freelist = get_freepointer(s, freelist);
    2960         998 :         c->tid = next_tid(c->tid);
    2961         998 :         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2962         499 :         return freelist;
    2963             : 
    2964             : deactivate_slab:
    2965             : 
    2966           0 :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    2967           0 :         if (slab != c->slab) {
    2968           0 :                 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2969             :                 goto reread_slab;
    2970             :         }
    2971           0 :         freelist = c->freelist;
    2972           0 :         c->slab = NULL;
    2973           0 :         c->freelist = NULL;
    2974           0 :         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2975           0 :         deactivate_slab(s, slab, freelist);
    2976             : 
    2977             : new_slab:
    2978             : 
    2979             :         if (slub_percpu_partial(c)) {
    2980             :                 local_lock_irqsave(&s->cpu_slab->lock, flags);
    2981             :                 if (unlikely(c->slab)) {
    2982             :                         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2983             :                         goto reread_slab;
    2984             :                 }
    2985             :                 if (unlikely(!slub_percpu_partial(c))) {
    2986             :                         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2987             :                         /* we were preempted and partial list got empty */
    2988             :                         goto new_objects;
    2989             :                 }
    2990             : 
    2991             :                 slab = c->slab = slub_percpu_partial(c);
    2992             :                 slub_set_percpu_partial(c, slab);
    2993             :                 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2994             :                 stat(s, CPU_PARTIAL_ALLOC);
    2995             :                 goto redo;
    2996             :         }
    2997             : 
    2998             : new_objects:
    2999             : 
    3000         499 :         freelist = get_partial(s, gfpflags, node, &slab);
    3001         499 :         if (freelist)
    3002             :                 goto check_new_slab;
    3003             : 
    3004         453 :         slub_put_cpu_ptr(s->cpu_slab);
    3005         453 :         slab = new_slab(s, gfpflags, node);
    3006         453 :         c = slub_get_cpu_ptr(s->cpu_slab);
    3007             : 
    3008         453 :         if (unlikely(!slab)) {
    3009           0 :                 slab_out_of_memory(s, gfpflags, node);
    3010           0 :                 return NULL;
    3011             :         }
    3012             : 
    3013             :         /*
    3014             :          * No other reference to the slab yet so we can
    3015             :          * muck around with it freely without cmpxchg
    3016             :          */
    3017         453 :         freelist = slab->freelist;
    3018         453 :         slab->freelist = NULL;
    3019             : 
    3020         453 :         stat(s, ALLOC_SLAB);
    3021             : 
    3022             : check_new_slab:
    3023             : 
    3024         499 :         if (kmem_cache_debug(s)) {
    3025           0 :                 if (!alloc_debug_processing(s, slab, freelist, addr)) {
    3026             :                         /* Slab failed checks. Next slab needed */
    3027             :                         goto new_slab;
    3028             :                 } else {
    3029             :                         /*
    3030             :                          * For debug case, we don't load freelist so that all
    3031             :                          * allocations go through alloc_debug_processing()
    3032             :                          */
    3033             :                         goto return_single;
    3034             :                 }
    3035             :         }
    3036             : 
    3037         998 :         if (unlikely(!pfmemalloc_match(slab, gfpflags)))
    3038             :                 /*
    3039             :                  * For !pfmemalloc_match() case we don't load freelist so that
    3040             :                  * we don't make further mismatched allocations easier.
    3041             :                  */
    3042             :                 goto return_single;
    3043             : 
    3044             : retry_load_slab:
    3045             : 
    3046         499 :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    3047         499 :         if (unlikely(c->slab)) {
    3048           0 :                 void *flush_freelist = c->freelist;
    3049           0 :                 struct slab *flush_slab = c->slab;
    3050             : 
    3051           0 :                 c->slab = NULL;
    3052           0 :                 c->freelist = NULL;
    3053           0 :                 c->tid = next_tid(c->tid);
    3054             : 
    3055           0 :                 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    3056             : 
    3057           0 :                 deactivate_slab(s, flush_slab, flush_freelist);
    3058             : 
    3059           0 :                 stat(s, CPUSLAB_FLUSH);
    3060             : 
    3061             :                 goto retry_load_slab;
    3062             :         }
    3063         499 :         c->slab = slab;
    3064             : 
    3065         499 :         goto load_freelist;
    3066             : 
    3067             : return_single:
    3068             : 
    3069           0 :         deactivate_slab(s, slab, get_freepointer(s, freelist));
    3070           0 :         return freelist;
    3071             : }
    3072             : 
    3073             : /*
    3074             :  * A wrapper for ___slab_alloc() for contexts where preemption is not yet
    3075             :  * disabled. Compensates for possible cpu changes by refetching the per cpu area
    3076             :  * pointer.
    3077             :  */
    3078             : static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
    3079             :                           unsigned long addr, struct kmem_cache_cpu *c)
    3080             : {
    3081             :         void *p;
    3082             : 
    3083             : #ifdef CONFIG_PREEMPT_COUNT
    3084             :         /*
    3085             :          * We may have been preempted and rescheduled on a different
    3086             :          * cpu before disabling preemption. Need to reload cpu area
    3087             :          * pointer.
    3088             :          */
    3089             :         c = slub_get_cpu_ptr(s->cpu_slab);
    3090             : #endif
    3091             : 
    3092         499 :         p = ___slab_alloc(s, gfpflags, node, addr, c);
    3093             : #ifdef CONFIG_PREEMPT_COUNT
    3094             :         slub_put_cpu_ptr(s->cpu_slab);
    3095             : #endif
    3096             :         return p;
    3097             : }
    3098             : 
    3099             : /*
    3100             :  * If the object has been wiped upon free, make sure it's fully initialized by
    3101             :  * zeroing out freelist pointer.
    3102             :  */
    3103             : static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
    3104             :                                                    void *obj)
    3105             : {
    3106       18327 :         if (unlikely(slab_want_init_on_free(s)) && obj)
    3107           0 :                 memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
    3108             :                         0, sizeof(void *));
    3109             : }
    3110             : 
    3111             : /*
    3112             :  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
    3113             :  * have the fastpath folded into their functions. So no function call
    3114             :  * overhead for requests that can be satisfied on the fastpath.
    3115             :  *
    3116             :  * The fastpath works by first checking if the lockless freelist can be used.
    3117             :  * If not then __slab_alloc is called for slow processing.
    3118             :  *
    3119             :  * Otherwise we can simply pick the next object from the lockless free list.
    3120             :  */
    3121             : static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
    3122             :                 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
    3123             : {
    3124             :         void *object;
    3125             :         struct kmem_cache_cpu *c;
    3126             :         struct slab *slab;
    3127             :         unsigned long tid;
    3128       18327 :         struct obj_cgroup *objcg = NULL;
    3129       18327 :         bool init = false;
    3130             : 
    3131       36654 :         s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
    3132       18327 :         if (!s)
    3133             :                 return NULL;
    3134             : 
    3135       18327 :         object = kfence_alloc(s, orig_size, gfpflags);
    3136             :         if (unlikely(object))
    3137             :                 goto out;
    3138             : 
    3139             : redo:
    3140             :         /*
    3141             :          * Must read kmem_cache cpu data via this cpu ptr. Preemption is
    3142             :          * enabled. We may switch back and forth between cpus while
    3143             :          * reading from one cpu area. That does not matter as long
    3144             :          * as we end up on the original cpu again when doing the cmpxchg.
    3145             :          *
    3146             :          * We must guarantee that tid and kmem_cache_cpu are retrieved on the
    3147             :          * same cpu. We read first the kmem_cache_cpu pointer and use it to read
    3148             :          * the tid. If we are preempted and switched to another cpu between the
    3149             :          * two reads, it's OK as the two are still associated with the same cpu
    3150             :          * and cmpxchg later will validate the cpu.
    3151             :          */
    3152       18327 :         c = raw_cpu_ptr(s->cpu_slab);
    3153       18327 :         tid = READ_ONCE(c->tid);
    3154             : 
    3155             :         /*
    3156             :          * Irqless object alloc/free algorithm used here depends on sequence
    3157             :          * of fetching cpu_slab's data. tid should be fetched before anything
    3158             :          * on c to guarantee that object and slab associated with previous tid
    3159             :          * won't be used with current tid. If we fetch tid first, object and
    3160             :          * slab could be one associated with next tid and our alloc/free
    3161             :          * request will be failed. In this case, we will retry. So, no problem.
    3162             :          */
    3163       18327 :         barrier();
    3164             : 
    3165             :         /*
    3166             :          * The transaction ids are globally unique per cpu and per operation on
    3167             :          * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
    3168             :          * occurs on the right processor and that there was no operation on the
    3169             :          * linked list in between.
    3170             :          */
    3171             : 
    3172       18327 :         object = c->freelist;
    3173       18327 :         slab = c->slab;
    3174             :         /*
    3175             :          * We cannot use the lockless fastpath on PREEMPT_RT because if a
    3176             :          * slowpath has taken the local_lock_irqsave(), it is not protected
    3177             :          * against a fast path operation in an irq handler. So we need to take
    3178             :          * the slow path which uses local_lock. It is still relatively fast if
    3179             :          * there is a suitable cpu freelist.
    3180             :          */
    3181       18327 :         if (IS_ENABLED(CONFIG_PREEMPT_RT) ||
    3182       36155 :             unlikely(!object || !slab || !node_match(slab, node))) {
    3183         499 :                 object = __slab_alloc(s, gfpflags, node, addr, c);
    3184             :         } else {
    3185       17828 :                 void *next_object = get_freepointer_safe(s, object);
    3186             : 
    3187             :                 /*
    3188             :                  * The cmpxchg will only match if there was no additional
    3189             :                  * operation and if we are on the right processor.
    3190             :                  *
    3191             :                  * The cmpxchg does the following atomically (without lock
    3192             :                  * semantics!)
    3193             :                  * 1. Relocate first pointer to the current per cpu area.
    3194             :                  * 2. Verify that tid and freelist have not been changed
    3195             :                  * 3. If they were not changed replace tid and freelist
    3196             :                  *
    3197             :                  * Since this is without lock semantics the protection is only
    3198             :                  * against code executing on this cpu *not* from access by
    3199             :                  * other cpus.
    3200             :                  */
    3201       71312 :                 if (unlikely(!this_cpu_cmpxchg_double(
    3202             :                                 s->cpu_slab->freelist, s->cpu_slab->tid,
    3203             :                                 object, tid,
    3204             :                                 next_object, next_tid(tid)))) {
    3205             : 
    3206             :                         note_cmpxchg_failure("slab_alloc", s, tid);
    3207             :                         goto redo;
    3208             :                 }
    3209       17828 :                 prefetch_freepointer(s, next_object);
    3210             :                 stat(s, ALLOC_FASTPATH);
    3211             :         }
    3212             : 
    3213       36654 :         maybe_wipe_obj_freeptr(s, object);
    3214       36654 :         init = slab_want_init_on_alloc(gfpflags, s);
    3215             : 
    3216             : out:
    3217       18327 :         slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init);
    3218             : 
    3219       18327 :         return object;
    3220             : }
    3221             : 
    3222             : static __always_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
    3223             :                 gfp_t gfpflags, unsigned long addr, size_t orig_size)
    3224             : {
    3225       18327 :         return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size);
    3226             : }
    3227             : 
    3228             : static __always_inline
    3229             : void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
    3230             :                              gfp_t gfpflags)
    3231             : {
    3232       29858 :         void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
    3233             : 
    3234       14929 :         trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
    3235       14929 :                                 s->size, gfpflags);
    3236             : 
    3237             :         return ret;
    3238             : }
    3239             : 
    3240       14895 : void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
    3241             : {
    3242       14895 :         return __kmem_cache_alloc_lru(s, NULL, gfpflags);
    3243             : }
    3244             : EXPORT_SYMBOL(kmem_cache_alloc);
    3245             : 
    3246          34 : void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
    3247             :                            gfp_t gfpflags)
    3248             : {
    3249          34 :         return __kmem_cache_alloc_lru(s, lru, gfpflags);
    3250             : }
    3251             : EXPORT_SYMBOL(kmem_cache_alloc_lru);
    3252             : 
    3253             : #ifdef CONFIG_TRACING
    3254             : void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
    3255             : {
    3256             :         void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
    3257             :         trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
    3258             :         ret = kasan_kmalloc(s, ret, size, gfpflags);
    3259             :         return ret;
    3260             : }
    3261             : EXPORT_SYMBOL(kmem_cache_alloc_trace);
    3262             : #endif
    3263             : 
    3264             : #ifdef CONFIG_NUMA
    3265             : void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
    3266             : {
    3267             :         void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
    3268             : 
    3269             :         trace_kmem_cache_alloc_node(_RET_IP_, ret,
    3270             :                                     s->object_size, s->size, gfpflags, node);
    3271             : 
    3272             :         return ret;
    3273             : }
    3274             : EXPORT_SYMBOL(kmem_cache_alloc_node);
    3275             : 
    3276             : #ifdef CONFIG_TRACING
    3277             : void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
    3278             :                                     gfp_t gfpflags,
    3279             :                                     int node, size_t size)
    3280             : {
    3281             :         void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
    3282             : 
    3283             :         trace_kmalloc_node(_RET_IP_, ret,
    3284             :                            size, s->size, gfpflags, node);
    3285             : 
    3286             :         ret = kasan_kmalloc(s, ret, size, gfpflags);
    3287             :         return ret;
    3288             : }
    3289             : EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
    3290             : #endif
    3291             : #endif  /* CONFIG_NUMA */
    3292             : 
    3293             : /*
    3294             :  * Slow path handling. This may still be called frequently since objects
    3295             :  * have a longer lifetime than the cpu slabs in most processing loads.
    3296             :  *
    3297             :  * So we still attempt to reduce cache line usage. Just take the slab
    3298             :  * lock and free the item. If there is no additional partial slab
    3299             :  * handling required then we can return immediately.
    3300             :  */
    3301        1278 : static void __slab_free(struct kmem_cache *s, struct slab *slab,
    3302             :                         void *head, void *tail, int cnt,
    3303             :                         unsigned long addr)
    3304             : 
    3305             : {
    3306             :         void *prior;
    3307             :         int was_frozen;
    3308             :         struct slab new;
    3309             :         unsigned long counters;
    3310        1278 :         struct kmem_cache_node *n = NULL;
    3311             :         unsigned long flags;
    3312             : 
    3313        1278 :         stat(s, FREE_SLOWPATH);
    3314             : 
    3315        1278 :         if (kfence_free(head))
    3316        1275 :                 return;
    3317             : 
    3318        1278 :         if (kmem_cache_debug(s) &&
    3319           0 :             !free_debug_processing(s, slab, head, tail, cnt, addr))
    3320             :                 return;
    3321             : 
    3322             :         do {
    3323        1278 :                 if (unlikely(n)) {
    3324           0 :                         spin_unlock_irqrestore(&n->list_lock, flags);
    3325           0 :                         n = NULL;
    3326             :                 }
    3327        1278 :                 prior = slab->freelist;
    3328        1278 :                 counters = slab->counters;
    3329        2556 :                 set_freepointer(s, tail, prior);
    3330        1278 :                 new.counters = counters;
    3331        1278 :                 was_frozen = new.frozen;
    3332        1278 :                 new.inuse -= cnt;
    3333        1278 :                 if ((!new.inuse || !prior) && !was_frozen) {
    3334             : 
    3335          91 :                         if (kmem_cache_has_cpu_partial(s) && !prior) {
    3336             : 
    3337             :                                 /*
    3338             :                                  * Slab was on no list before and will be
    3339             :                                  * partially empty
    3340             :                                  * We can defer the list move and instead
    3341             :                                  * freeze it.
    3342             :                                  */
    3343             :                                 new.frozen = 1;
    3344             : 
    3345             :                         } else { /* Needs to be taken off a list */
    3346             : 
    3347         273 :                                 n = get_node(s, slab_nid(slab));
    3348             :                                 /*
    3349             :                                  * Speculatively acquire the list_lock.
    3350             :                                  * If the cmpxchg does not succeed then we may
    3351             :                                  * drop the list_lock without any processing.
    3352             :                                  *
    3353             :                                  * Otherwise the list_lock will synchronize with
    3354             :                                  * other processors updating the list of slabs.
    3355             :                                  */
    3356          91 :                                 spin_lock_irqsave(&n->list_lock, flags);
    3357             : 
    3358             :                         }
    3359             :                 }
    3360             : 
    3361        1278 :         } while (!cmpxchg_double_slab(s, slab,
    3362             :                 prior, counters,
    3363             :                 head, new.counters,
    3364        1278 :                 "__slab_free"));
    3365             : 
    3366        1278 :         if (likely(!n)) {
    3367             : 
    3368             :                 if (likely(was_frozen)) {
    3369             :                         /*
    3370             :                          * The list lock was not taken therefore no list
    3371             :                          * activity can be necessary.
    3372             :                          */
    3373             :                         stat(s, FREE_FROZEN);
    3374             :                 } else if (new.frozen) {
    3375             :                         /*
    3376             :                          * If we just froze the slab then put it onto the
    3377             :                          * per cpu partial list.
    3378             :                          */
    3379             :                         put_cpu_partial(s, slab, 1);
    3380             :                         stat(s, CPU_PARTIAL_FREE);
    3381             :                 }
    3382             : 
    3383             :                 return;
    3384             :         }
    3385             : 
    3386          91 :         if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
    3387             :                 goto slab_empty;
    3388             : 
    3389             :         /*
    3390             :          * Objects left in the slab. If it was not on the partial list before
    3391             :          * then add it.
    3392             :          */
    3393          88 :         if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
    3394          98 :                 remove_full(s, n, slab);
    3395             :                 add_partial(n, slab, DEACTIVATE_TO_TAIL);
    3396             :                 stat(s, FREE_ADD_PARTIAL);
    3397             :         }
    3398          88 :         spin_unlock_irqrestore(&n->list_lock, flags);
    3399             :         return;
    3400             : 
    3401             : slab_empty:
    3402           3 :         if (prior) {
    3403             :                 /*
    3404             :                  * Slab on the partial list.
    3405             :                  */
    3406           3 :                 remove_partial(n, slab);
    3407             :                 stat(s, FREE_REMOVE_PARTIAL);
    3408             :         } else {
    3409             :                 /* Slab must be on the full list */
    3410           0 :                 remove_full(s, n, slab);
    3411             :         }
    3412             : 
    3413           6 :         spin_unlock_irqrestore(&n->list_lock, flags);
    3414           3 :         stat(s, FREE_SLAB);
    3415           3 :         discard_slab(s, slab);
    3416             : }
    3417             : 
    3418             : /*
    3419             :  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
    3420             :  * can perform fastpath freeing without additional function calls.
    3421             :  *
    3422             :  * The fastpath is only possible if we are freeing to the current cpu slab
    3423             :  * of this processor. This typically the case if we have just allocated
    3424             :  * the item before.
    3425             :  *
    3426             :  * If fastpath is not possible then fall back to __slab_free where we deal
    3427             :  * with all sorts of special processing.
    3428             :  *
    3429             :  * Bulk free of a freelist with several objects (all pointing to the
    3430             :  * same slab) possible by specifying head and tail ptr, plus objects
    3431             :  * count (cnt). Bulk free indicated by tail pointer being set.
    3432             :  */
    3433             : static __always_inline void do_slab_free(struct kmem_cache *s,
    3434             :                                 struct slab *slab, void *head, void *tail,
    3435             :                                 int cnt, unsigned long addr)
    3436             : {
    3437        5311 :         void *tail_obj = tail ? : head;
    3438             :         struct kmem_cache_cpu *c;
    3439             :         unsigned long tid;
    3440             : 
    3441             :         /* memcg_slab_free_hook() is already called for bulk free. */
    3442             :         if (!tail)
    3443             :                 memcg_slab_free_hook(s, &head, 1);
    3444             : redo:
    3445             :         /*
    3446             :          * Determine the currently cpus per cpu slab.
    3447             :          * The cpu may change afterward. However that does not matter since
    3448             :          * data is retrieved via this pointer. If we are on the same cpu
    3449             :          * during the cmpxchg then the free will succeed.
    3450             :          */
    3451        5311 :         c = raw_cpu_ptr(s->cpu_slab);
    3452        5311 :         tid = READ_ONCE(c->tid);
    3453             : 
    3454             :         /* Same with comment on barrier() in slab_alloc_node() */
    3455        5311 :         barrier();
    3456             : 
    3457        5311 :         if (likely(slab == c->slab)) {
    3458             : #ifndef CONFIG_PREEMPT_RT
    3459        4033 :                 void **freelist = READ_ONCE(c->freelist);
    3460             : 
    3461        8066 :                 set_freepointer(s, tail_obj, freelist);
    3462             : 
    3463       16132 :                 if (unlikely(!this_cpu_cmpxchg_double(
    3464             :                                 s->cpu_slab->freelist, s->cpu_slab->tid,
    3465             :                                 freelist, tid,
    3466             :                                 head, next_tid(tid)))) {
    3467             : 
    3468             :                         note_cmpxchg_failure("slab_free", s, tid);
    3469             :                         goto redo;
    3470             :                 }
    3471             : #else /* CONFIG_PREEMPT_RT */
    3472             :                 /*
    3473             :                  * We cannot use the lockless fastpath on PREEMPT_RT because if
    3474             :                  * a slowpath has taken the local_lock_irqsave(), it is not
    3475             :                  * protected against a fast path operation in an irq handler. So
    3476             :                  * we need to take the local_lock. We shouldn't simply defer to
    3477             :                  * __slab_free() as that wouldn't use the cpu freelist at all.
    3478             :                  */
    3479             :                 void **freelist;
    3480             : 
    3481             :                 local_lock(&s->cpu_slab->lock);
    3482             :                 c = this_cpu_ptr(s->cpu_slab);
    3483             :                 if (unlikely(slab != c->slab)) {
    3484             :                         local_unlock(&s->cpu_slab->lock);
    3485             :                         goto redo;
    3486             :                 }
    3487             :                 tid = c->tid;
    3488             :                 freelist = c->freelist;
    3489             : 
    3490             :                 set_freepointer(s, tail_obj, freelist);
    3491             :                 c->freelist = head;
    3492             :                 c->tid = next_tid(tid);
    3493             : 
    3494             :                 local_unlock(&s->cpu_slab->lock);
    3495             : #endif
    3496             :                 stat(s, FREE_FASTPATH);
    3497             :         } else
    3498        1278 :                 __slab_free(s, slab, head, tail_obj, cnt, addr);
    3499             : 
    3500             : }
    3501             : 
    3502             : static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
    3503             :                                       void *head, void *tail, int cnt,
    3504             :                                       unsigned long addr)
    3505             : {
    3506             :         /*
    3507             :          * With KASAN enabled slab_free_freelist_hook modifies the freelist
    3508             :          * to remove objects, whose reuse must be delayed.
    3509             :          */
    3510        5311 :         if (slab_free_freelist_hook(s, &head, &tail, &cnt))
    3511        5311 :                 do_slab_free(s, slab, head, tail, cnt, addr);
    3512             : }
    3513             : 
    3514             : #ifdef CONFIG_KASAN_GENERIC
    3515             : void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
    3516             : {
    3517             :         do_slab_free(cache, virt_to_slab(x), x, NULL, 1, addr);
    3518             : }
    3519             : #endif
    3520             : 
    3521        2969 : void kmem_cache_free(struct kmem_cache *s, void *x)
    3522             : {
    3523        2969 :         s = cache_from_obj(s, x);
    3524        2969 :         if (!s)
    3525             :                 return;
    3526        2969 :         trace_kmem_cache_free(_RET_IP_, x, s->name);
    3527        5938 :         slab_free(s, virt_to_slab(x), x, NULL, 1, _RET_IP_);
    3528             : }
    3529             : EXPORT_SYMBOL(kmem_cache_free);
    3530             : 
    3531             : struct detached_freelist {
    3532             :         struct slab *slab;
    3533             :         void *tail;
    3534             :         void *freelist;
    3535             :         int cnt;
    3536             :         struct kmem_cache *s;
    3537             : };
    3538             : 
    3539           8 : static inline void free_large_kmalloc(struct folio *folio, void *object)
    3540             : {
    3541           8 :         unsigned int order = folio_order(folio);
    3542             : 
    3543           8 :         if (WARN_ON_ONCE(order == 0))
    3544           0 :                 pr_warn_once("object pointer: 0x%p\n", object);
    3545             : 
    3546           8 :         kfree_hook(object);
    3547          16 :         mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
    3548           8 :                               -(PAGE_SIZE << order));
    3549           8 :         __free_pages(folio_page(folio, 0), order);
    3550           8 : }
    3551             : 
    3552             : /*
    3553             :  * This function progressively scans the array with free objects (with
    3554             :  * a limited look ahead) and extract objects belonging to the same
    3555             :  * slab.  It builds a detached freelist directly within the given
    3556             :  * slab/objects.  This can happen without any need for
    3557             :  * synchronization, because the objects are owned by running process.
    3558             :  * The freelist is build up as a single linked list in the objects.
    3559             :  * The idea is, that this detached freelist can then be bulk
    3560             :  * transferred to the real freelist(s), but only requiring a single
    3561             :  * synchronization primitive.  Look ahead in the array is limited due
    3562             :  * to performance reasons.
    3563             :  */
    3564             : static inline
    3565           0 : int build_detached_freelist(struct kmem_cache *s, size_t size,
    3566             :                             void **p, struct detached_freelist *df)
    3567             : {
    3568           0 :         size_t first_skipped_index = 0;
    3569           0 :         int lookahead = 3;
    3570             :         void *object;
    3571             :         struct folio *folio;
    3572             :         struct slab *slab;
    3573             : 
    3574             :         /* Always re-init detached_freelist */
    3575           0 :         df->slab = NULL;
    3576             : 
    3577             :         do {
    3578           0 :                 object = p[--size];
    3579             :                 /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
    3580           0 :         } while (!object && size);
    3581             : 
    3582           0 :         if (!object)
    3583             :                 return 0;
    3584             : 
    3585           0 :         folio = virt_to_folio(object);
    3586           0 :         if (!s) {
    3587             :                 /* Handle kalloc'ed objects */
    3588           0 :                 if (unlikely(!folio_test_slab(folio))) {
    3589           0 :                         free_large_kmalloc(folio, object);
    3590           0 :                         p[size] = NULL; /* mark object processed */
    3591           0 :                         return size;
    3592             :                 }
    3593             :                 /* Derive kmem_cache from object */
    3594           0 :                 slab = folio_slab(folio);
    3595           0 :                 df->s = slab->slab_cache;
    3596             :         } else {
    3597           0 :                 slab = folio_slab(folio);
    3598           0 :                 df->s = cache_from_obj(s, object); /* Support for memcg */
    3599             :         }
    3600             : 
    3601           0 :         if (is_kfence_address(object)) {
    3602             :                 slab_free_hook(df->s, object, false);
    3603             :                 __kfence_free(object);
    3604             :                 p[size] = NULL; /* mark object processed */
    3605             :                 return size;
    3606             :         }
    3607             : 
    3608             :         /* Start new detached freelist */
    3609           0 :         df->slab = slab;
    3610           0 :         set_freepointer(df->s, object, NULL);
    3611           0 :         df->tail = object;
    3612           0 :         df->freelist = object;
    3613           0 :         p[size] = NULL; /* mark object processed */
    3614           0 :         df->cnt = 1;
    3615             : 
    3616           0 :         while (size) {
    3617           0 :                 object = p[--size];
    3618           0 :                 if (!object)
    3619           0 :                         continue; /* Skip processed objects */
    3620             : 
    3621             :                 /* df->slab is always set at this point */
    3622           0 :                 if (df->slab == virt_to_slab(object)) {
    3623             :                         /* Opportunity build freelist */
    3624           0 :                         set_freepointer(df->s, object, df->freelist);
    3625           0 :                         df->freelist = object;
    3626           0 :                         df->cnt++;
    3627           0 :                         p[size] = NULL; /* mark object processed */
    3628             : 
    3629           0 :                         continue;
    3630             :                 }
    3631             : 
    3632             :                 /* Limit look ahead search */
    3633           0 :                 if (!--lookahead)
    3634             :                         break;
    3635             : 
    3636           0 :                 if (!first_skipped_index)
    3637           0 :                         first_skipped_index = size + 1;
    3638             :         }
    3639             : 
    3640           0 :         return first_skipped_index;
    3641             : }
    3642             : 
    3643             : /* Note that interrupts must be enabled when calling this function. */
    3644           0 : void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
    3645             : {
    3646           0 :         if (WARN_ON(!size))
    3647             :                 return;
    3648             : 
    3649             :         memcg_slab_free_hook(s, p, size);
    3650             :         do {
    3651             :                 struct detached_freelist df;
    3652             : 
    3653           0 :                 size = build_detached_freelist(s, size, p, &df);
    3654           0 :                 if (!df.slab)
    3655           0 :                         continue;
    3656             : 
    3657           0 :                 slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, _RET_IP_);
    3658           0 :         } while (likely(size));
    3659             : }
    3660             : EXPORT_SYMBOL(kmem_cache_free_bulk);
    3661             : 
    3662             : /* Note that interrupts must be enabled when calling this function. */
    3663           0 : int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
    3664             :                           void **p)
    3665             : {
    3666             :         struct kmem_cache_cpu *c;
    3667             :         int i;
    3668           0 :         struct obj_cgroup *objcg = NULL;
    3669             : 
    3670             :         /* memcg and kmem_cache debug support */
    3671           0 :         s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
    3672           0 :         if (unlikely(!s))
    3673             :                 return false;
    3674             :         /*
    3675             :          * Drain objects in the per cpu slab, while disabling local
    3676             :          * IRQs, which protects against PREEMPT and interrupts
    3677             :          * handlers invoking normal fastpath.
    3678             :          */
    3679           0 :         c = slub_get_cpu_ptr(s->cpu_slab);
    3680           0 :         local_lock_irq(&s->cpu_slab->lock);
    3681             : 
    3682           0 :         for (i = 0; i < size; i++) {
    3683           0 :                 void *object = kfence_alloc(s, s->object_size, flags);
    3684             : 
    3685             :                 if (unlikely(object)) {
    3686             :                         p[i] = object;
    3687             :                         continue;
    3688             :                 }
    3689             : 
    3690           0 :                 object = c->freelist;
    3691           0 :                 if (unlikely(!object)) {
    3692             :                         /*
    3693             :                          * We may have removed an object from c->freelist using
    3694             :                          * the fastpath in the previous iteration; in that case,
    3695             :                          * c->tid has not been bumped yet.
    3696             :                          * Since ___slab_alloc() may reenable interrupts while
    3697             :                          * allocating memory, we should bump c->tid now.
    3698             :                          */
    3699           0 :                         c->tid = next_tid(c->tid);
    3700             : 
    3701           0 :                         local_unlock_irq(&s->cpu_slab->lock);
    3702             : 
    3703             :                         /*
    3704             :                          * Invoking slow path likely have side-effect
    3705             :                          * of re-populating per CPU c->freelist
    3706             :                          */
    3707           0 :                         p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
    3708           0 :                                             _RET_IP_, c);
    3709           0 :                         if (unlikely(!p[i]))
    3710             :                                 goto error;
    3711             : 
    3712           0 :                         c = this_cpu_ptr(s->cpu_slab);
    3713           0 :                         maybe_wipe_obj_freeptr(s, p[i]);
    3714             : 
    3715           0 :                         local_lock_irq(&s->cpu_slab->lock);
    3716             : 
    3717           0 :                         continue; /* goto for-loop */
    3718             :                 }
    3719           0 :                 c->freelist = get_freepointer(s, object);
    3720           0 :                 p[i] = object;
    3721           0 :                 maybe_wipe_obj_freeptr(s, p[i]);
    3722             :         }
    3723           0 :         c->tid = next_tid(c->tid);
    3724           0 :         local_unlock_irq(&s->cpu_slab->lock);
    3725           0 :         slub_put_cpu_ptr(s->cpu_slab);
    3726             : 
    3727             :         /*
    3728             :          * memcg and kmem_cache debug support and memory initialization.
    3729             :          * Done outside of the IRQ disabled fastpath loop.
    3730             :          */
    3731           0 :         slab_post_alloc_hook(s, objcg, flags, size, p,
    3732           0 :                                 slab_want_init_on_alloc(flags, s));
    3733           0 :         return i;
    3734             : error:
    3735           0 :         slub_put_cpu_ptr(s->cpu_slab);
    3736           0 :         slab_post_alloc_hook(s, objcg, flags, i, p, false);
    3737           0 :         __kmem_cache_free_bulk(s, i, p);
    3738           0 :         return 0;
    3739             : }
    3740             : EXPORT_SYMBOL(kmem_cache_alloc_bulk);
    3741             : 
    3742             : 
    3743             : /*
    3744             :  * Object placement in a slab is made very easy because we always start at
    3745             :  * offset 0. If we tune the size of the object to the alignment then we can
    3746             :  * get the required alignment by putting one properly sized object after
    3747             :  * another.
    3748             :  *
    3749             :  * Notice that the allocation order determines the sizes of the per cpu
    3750             :  * caches. Each processor has always one slab available for allocations.
    3751             :  * Increasing the allocation order reduces the number of times that slabs
    3752             :  * must be moved on and off the partial lists and is therefore a factor in
    3753             :  * locking overhead.
    3754             :  */
    3755             : 
    3756             : /*
    3757             :  * Minimum / Maximum order of slab pages. This influences locking overhead
    3758             :  * and slab fragmentation. A higher order reduces the number of partial slabs
    3759             :  * and increases the number of allocations possible without having to
    3760             :  * take the list_lock.
    3761             :  */
    3762             : static unsigned int slub_min_order;
    3763             : static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
    3764             : static unsigned int slub_min_objects;
    3765             : 
    3766             : /*
    3767             :  * Calculate the order of allocation given an slab object size.
    3768             :  *
    3769             :  * The order of allocation has significant impact on performance and other
    3770             :  * system components. Generally order 0 allocations should be preferred since
    3771             :  * order 0 does not cause fragmentation in the page allocator. Larger objects
    3772             :  * be problematic to put into order 0 slabs because there may be too much
    3773             :  * unused space left. We go to a higher order if more than 1/16th of the slab
    3774             :  * would be wasted.
    3775             :  *
    3776             :  * In order to reach satisfactory performance we must ensure that a minimum
    3777             :  * number of objects is in one slab. Otherwise we may generate too much
    3778             :  * activity on the partial lists which requires taking the list_lock. This is
    3779             :  * less a concern for large slabs though which are rarely used.
    3780             :  *
    3781             :  * slub_max_order specifies the order where we begin to stop considering the
    3782             :  * number of objects in a slab as critical. If we reach slub_max_order then
    3783             :  * we try to keep the page order as low as possible. So we accept more waste
    3784             :  * of space in favor of a small page order.
    3785             :  *
    3786             :  * Higher order allocations also allow the placement of more objects in a
    3787             :  * slab and thereby reduce object handling overhead. If the user has
    3788             :  * requested a higher minimum order then we start with that one instead of
    3789             :  * the smallest order which will fit the object.
    3790             :  */
    3791          68 : static inline unsigned int calc_slab_order(unsigned int size,
    3792             :                 unsigned int min_objects, unsigned int max_order,
    3793             :                 unsigned int fract_leftover)
    3794             : {
    3795          68 :         unsigned int min_order = slub_min_order;
    3796             :         unsigned int order;
    3797             : 
    3798          68 :         if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
    3799           0 :                 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
    3800             : 
    3801         207 :         for (order = max(min_order, (unsigned int)get_order(min_objects * size));
    3802           3 :                         order <= max_order; order++) {
    3803             : 
    3804          70 :                 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
    3805             :                 unsigned int rem;
    3806             : 
    3807          70 :                 rem = slab_size % size;
    3808             : 
    3809          70 :                 if (rem <= slab_size / fract_leftover)
    3810             :                         break;
    3811             :         }
    3812             : 
    3813             :         return order;
    3814             : }
    3815             : 
    3816          67 : static inline int calculate_order(unsigned int size)
    3817             : {
    3818             :         unsigned int order;
    3819             :         unsigned int min_objects;
    3820             :         unsigned int max_objects;
    3821             :         unsigned int nr_cpus;
    3822             : 
    3823             :         /*
    3824             :          * Attempt to find best configuration for a slab. This
    3825             :          * works by first attempting to generate a layout with
    3826             :          * the best configuration and backing off gradually.
    3827             :          *
    3828             :          * First we increase the acceptable waste in a slab. Then
    3829             :          * we reduce the minimum objects required in a slab.
    3830             :          */
    3831          67 :         min_objects = slub_min_objects;
    3832          67 :         if (!min_objects) {
    3833             :                 /*
    3834             :                  * Some architectures will only update present cpus when
    3835             :                  * onlining them, so don't trust the number if it's just 1. But
    3836             :                  * we also don't want to use nr_cpu_ids always, as on some other
    3837             :                  * architectures, there can be many possible cpus, but never
    3838             :                  * onlined. Here we compromise between trying to avoid too high
    3839             :                  * order on systems that appear larger than they are, and too
    3840             :                  * low order on systems that appear smaller than they are.
    3841             :                  */
    3842          67 :                 nr_cpus = num_present_cpus();
    3843             :                 if (nr_cpus <= 1)
    3844          67 :                         nr_cpus = nr_cpu_ids;
    3845          67 :                 min_objects = 4 * (fls(nr_cpus) + 1);
    3846             :         }
    3847         134 :         max_objects = order_objects(slub_max_order, size);
    3848          67 :         min_objects = min(min_objects, max_objects);
    3849             : 
    3850         134 :         while (min_objects > 1) {
    3851             :                 unsigned int fraction;
    3852             : 
    3853             :                 fraction = 16;
    3854          68 :                 while (fraction >= 4) {
    3855          68 :                         order = calc_slab_order(size, min_objects,
    3856             :                                         slub_max_order, fraction);
    3857          68 :                         if (order <= slub_max_order)
    3858          67 :                                 return order;
    3859           1 :                         fraction /= 2;
    3860             :                 }
    3861           0 :                 min_objects--;
    3862             :         }
    3863             : 
    3864             :         /*
    3865             :          * We were unable to place multiple objects in a slab. Now
    3866             :          * lets see if we can place a single object there.
    3867             :          */
    3868           0 :         order = calc_slab_order(size, 1, slub_max_order, 1);
    3869           0 :         if (order <= slub_max_order)
    3870           0 :                 return order;
    3871             : 
    3872             :         /*
    3873             :          * Doh this slab cannot be placed using slub_max_order.
    3874             :          */
    3875           0 :         order = calc_slab_order(size, 1, MAX_ORDER, 1);
    3876           0 :         if (order < MAX_ORDER)
    3877           0 :                 return order;
    3878             :         return -ENOSYS;
    3879             : }
    3880             : 
    3881             : static void
    3882             : init_kmem_cache_node(struct kmem_cache_node *n)
    3883             : {
    3884          67 :         n->nr_partial = 0;
    3885          67 :         spin_lock_init(&n->list_lock);
    3886         134 :         INIT_LIST_HEAD(&n->partial);
    3887             : #ifdef CONFIG_SLUB_DEBUG
    3888         134 :         atomic_long_set(&n->nr_slabs, 0);
    3889         134 :         atomic_long_set(&n->total_objects, 0);
    3890         134 :         INIT_LIST_HEAD(&n->full);
    3891             : #endif
    3892             : }
    3893             : 
    3894          67 : static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
    3895             : {
    3896             :         BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
    3897             :                         KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
    3898             : 
    3899             :         /*
    3900             :          * Must align to double word boundary for the double cmpxchg
    3901             :          * instructions to work; see __pcpu_double_call_return_bool().
    3902             :          */
    3903          67 :         s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
    3904             :                                      2 * sizeof(void *));
    3905             : 
    3906          67 :         if (!s->cpu_slab)
    3907             :                 return 0;
    3908             : 
    3909             :         init_kmem_cache_cpus(s);
    3910             : 
    3911             :         return 1;
    3912             : }
    3913             : 
    3914             : static struct kmem_cache *kmem_cache_node;
    3915             : 
    3916             : /*
    3917             :  * No kmalloc_node yet so do it by hand. We know that this is the first
    3918             :  * slab on the node for this slabcache. There are no concurrent accesses
    3919             :  * possible.
    3920             :  *
    3921             :  * Note that this function only works on the kmem_cache_node
    3922             :  * when allocating for the kmem_cache_node. This is used for bootstrapping
    3923             :  * memory on a fresh node that has no slab structures yet.
    3924             :  */
    3925           1 : static void early_kmem_cache_node_alloc(int node)
    3926             : {
    3927             :         struct slab *slab;
    3928             :         struct kmem_cache_node *n;
    3929             : 
    3930           1 :         BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
    3931             : 
    3932           1 :         slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
    3933             : 
    3934           1 :         BUG_ON(!slab);
    3935           2 :         if (slab_nid(slab) != node) {
    3936           0 :                 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
    3937           0 :                 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
    3938             :         }
    3939             : 
    3940           1 :         n = slab->freelist;
    3941           1 :         BUG_ON(!n);
    3942             : #ifdef CONFIG_SLUB_DEBUG
    3943           1 :         init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
    3944           1 :         init_tracking(kmem_cache_node, n);
    3945             : #endif
    3946           1 :         n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
    3947           2 :         slab->freelist = get_freepointer(kmem_cache_node, n);
    3948           1 :         slab->inuse = 1;
    3949           1 :         slab->frozen = 0;
    3950           1 :         kmem_cache_node->node[node] = n;
    3951           1 :         init_kmem_cache_node(n);
    3952           2 :         inc_slabs_node(kmem_cache_node, node, slab->objects);
    3953             : 
    3954             :         /*
    3955             :          * No locks need to be taken here as it has just been
    3956             :          * initialized and there is no concurrent access.
    3957             :          */
    3958           1 :         __add_partial(n, slab, DEACTIVATE_TO_HEAD);
    3959           1 : }
    3960             : 
    3961           0 : static void free_kmem_cache_nodes(struct kmem_cache *s)
    3962             : {
    3963             :         int node;
    3964             :         struct kmem_cache_node *n;
    3965             : 
    3966           0 :         for_each_kmem_cache_node(s, node, n) {
    3967           0 :                 s->node[node] = NULL;
    3968           0 :                 kmem_cache_free(kmem_cache_node, n);
    3969             :         }
    3970           0 : }
    3971             : 
    3972           0 : void __kmem_cache_release(struct kmem_cache *s)
    3973             : {
    3974           0 :         cache_random_seq_destroy(s);
    3975           0 :         free_percpu(s->cpu_slab);
    3976           0 :         free_kmem_cache_nodes(s);
    3977           0 : }
    3978             : 
    3979          67 : static int init_kmem_cache_nodes(struct kmem_cache *s)
    3980             : {
    3981             :         int node;
    3982             : 
    3983         134 :         for_each_node_mask(node, slab_nodes) {
    3984             :                 struct kmem_cache_node *n;
    3985             : 
    3986          67 :                 if (slab_state == DOWN) {
    3987           1 :                         early_kmem_cache_node_alloc(node);
    3988           1 :                         continue;
    3989             :                 }
    3990         132 :                 n = kmem_cache_alloc_node(kmem_cache_node,
    3991             :                                                 GFP_KERNEL, node);
    3992             : 
    3993          66 :                 if (!n) {
    3994           0 :                         free_kmem_cache_nodes(s);
    3995           0 :                         return 0;
    3996             :                 }
    3997             : 
    3998          66 :                 init_kmem_cache_node(n);
    3999          66 :                 s->node[node] = n;
    4000             :         }
    4001             :         return 1;
    4002             : }
    4003             : 
    4004             : static void set_cpu_partial(struct kmem_cache *s)
    4005             : {
    4006             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    4007             :         unsigned int nr_objects;
    4008             : 
    4009             :         /*
    4010             :          * cpu_partial determined the maximum number of objects kept in the
    4011             :          * per cpu partial lists of a processor.
    4012             :          *
    4013             :          * Per cpu partial lists mainly contain slabs that just have one
    4014             :          * object freed. If they are used for allocation then they can be
    4015             :          * filled up again with minimal effort. The slab will never hit the
    4016             :          * per node partial lists and therefore no locking will be required.
    4017             :          *
    4018             :          * For backwards compatibility reasons, this is determined as number
    4019             :          * of objects, even though we now limit maximum number of pages, see
    4020             :          * slub_set_cpu_partial()
    4021             :          */
    4022             :         if (!kmem_cache_has_cpu_partial(s))
    4023             :                 nr_objects = 0;
    4024             :         else if (s->size >= PAGE_SIZE)
    4025             :                 nr_objects = 6;
    4026             :         else if (s->size >= 1024)
    4027             :                 nr_objects = 24;
    4028             :         else if (s->size >= 256)
    4029             :                 nr_objects = 52;
    4030             :         else
    4031             :                 nr_objects = 120;
    4032             : 
    4033             :         slub_set_cpu_partial(s, nr_objects);
    4034             : #endif
    4035             : }
    4036             : 
    4037             : /*
    4038             :  * calculate_sizes() determines the order and the distribution of data within
    4039             :  * a slab object.
    4040             :  */
    4041          67 : static int calculate_sizes(struct kmem_cache *s)
    4042             : {
    4043          67 :         slab_flags_t flags = s->flags;
    4044          67 :         unsigned int size = s->object_size;
    4045             :         unsigned int order;
    4046             : 
    4047             :         /*
    4048             :          * Round up object size to the next word boundary. We can only
    4049             :          * place the free pointer at word boundaries and this determines
    4050             :          * the possible location of the free pointer.
    4051             :          */
    4052          67 :         size = ALIGN(size, sizeof(void *));
    4053             : 
    4054             : #ifdef CONFIG_SLUB_DEBUG
    4055             :         /*
    4056             :          * Determine if we can poison the object itself. If the user of
    4057             :          * the slab may touch the object after free or before allocation
    4058             :          * then we should never poison the object itself.
    4059             :          */
    4060          67 :         if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
    4061           0 :                         !s->ctor)
    4062           0 :                 s->flags |= __OBJECT_POISON;
    4063             :         else
    4064          67 :                 s->flags &= ~__OBJECT_POISON;
    4065             : 
    4066             : 
    4067             :         /*
    4068             :          * If we are Redzoning then check if there is some space between the
    4069             :          * end of the object and the free pointer. If not then add an
    4070             :          * additional word to have some bytes to store Redzone information.
    4071             :          */
    4072          67 :         if ((flags & SLAB_RED_ZONE) && size == s->object_size)
    4073           0 :                 size += sizeof(void *);
    4074             : #endif
    4075             : 
    4076             :         /*
    4077             :          * With that we have determined the number of bytes in actual use
    4078             :          * by the object and redzoning.
    4079             :          */
    4080          67 :         s->inuse = size;
    4081             : 
    4082          67 :         if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
    4083          63 :             ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
    4084          63 :             s->ctor) {
    4085             :                 /*
    4086             :                  * Relocate free pointer after the object if it is not
    4087             :                  * permitted to overwrite the first word of the object on
    4088             :                  * kmem_cache_free.
    4089             :                  *
    4090             :                  * This is the case if we do RCU, have a constructor or
    4091             :                  * destructor, are poisoning the objects, or are
    4092             :                  * redzoning an object smaller than sizeof(void *).
    4093             :                  *
    4094             :                  * The assumption that s->offset >= s->inuse means free
    4095             :                  * pointer is outside of the object is used in the
    4096             :                  * freeptr_outside_object() function. If that is no
    4097             :                  * longer true, the function needs to be modified.
    4098             :                  */
    4099           9 :                 s->offset = size;
    4100           9 :                 size += sizeof(void *);
    4101             :         } else {
    4102             :                 /*
    4103             :                  * Store freelist pointer near middle of object to keep
    4104             :                  * it away from the edges of the object to avoid small
    4105             :                  * sized over/underflows from neighboring allocations.
    4106             :                  */
    4107          58 :                 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
    4108             :         }
    4109             : 
    4110             : #ifdef CONFIG_SLUB_DEBUG
    4111          67 :         if (flags & SLAB_STORE_USER)
    4112             :                 /*
    4113             :                  * Need to store information about allocs and frees after
    4114             :                  * the object.
    4115             :                  */
    4116           0 :                 size += 2 * sizeof(struct track);
    4117             : #endif
    4118             : 
    4119          67 :         kasan_cache_create(s, &size, &s->flags);
    4120             : #ifdef CONFIG_SLUB_DEBUG
    4121          67 :         if (flags & SLAB_RED_ZONE) {
    4122             :                 /*
    4123             :                  * Add some empty padding so that we can catch
    4124             :                  * overwrites from earlier objects rather than let
    4125             :                  * tracking information or the free pointer be
    4126             :                  * corrupted if a user writes before the start
    4127             :                  * of the object.
    4128             :                  */
    4129           0 :                 size += sizeof(void *);
    4130             : 
    4131             :                 s->red_left_pad = sizeof(void *);
    4132           0 :                 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
    4133           0 :                 size += s->red_left_pad;
    4134             :         }
    4135             : #endif
    4136             : 
    4137             :         /*
    4138             :          * SLUB stores one object immediately after another beginning from
    4139             :          * offset 0. In order to align the objects we have to simply size
    4140             :          * each object to conform to the alignment.
    4141             :          */
    4142          67 :         size = ALIGN(size, s->align);
    4143          67 :         s->size = size;
    4144          67 :         s->reciprocal_size = reciprocal_value(size);
    4145          67 :         order = calculate_order(size);
    4146             : 
    4147          67 :         if ((int)order < 0)
    4148             :                 return 0;
    4149             : 
    4150          67 :         s->allocflags = 0;
    4151          67 :         if (order)
    4152          25 :                 s->allocflags |= __GFP_COMP;
    4153             : 
    4154          67 :         if (s->flags & SLAB_CACHE_DMA)
    4155           0 :                 s->allocflags |= GFP_DMA;
    4156             : 
    4157          67 :         if (s->flags & SLAB_CACHE_DMA32)
    4158           0 :                 s->allocflags |= GFP_DMA32;
    4159             : 
    4160          67 :         if (s->flags & SLAB_RECLAIM_ACCOUNT)
    4161          19 :                 s->allocflags |= __GFP_RECLAIMABLE;
    4162             : 
    4163             :         /*
    4164             :          * Determine the number of objects per slab
    4165             :          */
    4166         134 :         s->oo = oo_make(order, size);
    4167         201 :         s->min = oo_make(get_order(size), size);
    4168          67 :         if (oo_objects(s->oo) > oo_objects(s->max))
    4169          67 :                 s->max = s->oo;
    4170             : 
    4171          67 :         return !!oo_objects(s->oo);
    4172             : }
    4173             : 
    4174          67 : static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
    4175             : {
    4176          67 :         s->flags = kmem_cache_flags(s->size, flags, s->name);
    4177             : #ifdef CONFIG_SLAB_FREELIST_HARDENED
    4178             :         s->random = get_random_long();
    4179             : #endif
    4180             : 
    4181          67 :         if (!calculate_sizes(s))
    4182             :                 goto error;
    4183          67 :         if (disable_higher_order_debug) {
    4184             :                 /*
    4185             :                  * Disable debugging flags that store metadata if the min slab
    4186             :                  * order increased.
    4187             :                  */
    4188           0 :                 if (get_order(s->size) > get_order(s->object_size)) {
    4189           0 :                         s->flags &= ~DEBUG_METADATA_FLAGS;
    4190           0 :                         s->offset = 0;
    4191           0 :                         if (!calculate_sizes(s))
    4192             :                                 goto error;
    4193             :                 }
    4194             :         }
    4195             : 
    4196             : #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    4197             :     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
    4198             :         if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
    4199             :                 /* Enable fast mode */
    4200             :                 s->flags |= __CMPXCHG_DOUBLE;
    4201             : #endif
    4202             : 
    4203             :         /*
    4204             :          * The larger the object size is, the more slabs we want on the partial
    4205             :          * list to avoid pounding the page allocator excessively.
    4206             :          */
    4207         134 :         s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
    4208          67 :         s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
    4209             : 
    4210          67 :         set_cpu_partial(s);
    4211             : 
    4212             : #ifdef CONFIG_NUMA
    4213             :         s->remote_node_defrag_ratio = 1000;
    4214             : #endif
    4215             : 
    4216             :         /* Initialize the pre-computed randomized freelist if slab is up */
    4217             :         if (slab_state >= UP) {
    4218             :                 if (init_cache_random_seq(s))
    4219             :                         goto error;
    4220             :         }
    4221             : 
    4222          67 :         if (!init_kmem_cache_nodes(s))
    4223             :                 goto error;
    4224             : 
    4225          67 :         if (alloc_kmem_cache_cpus(s))
    4226             :                 return 0;
    4227             : 
    4228             : error:
    4229           0 :         __kmem_cache_release(s);
    4230           0 :         return -EINVAL;
    4231             : }
    4232             : 
    4233           0 : static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
    4234             :                               const char *text)
    4235             : {
    4236             : #ifdef CONFIG_SLUB_DEBUG
    4237           0 :         void *addr = slab_address(slab);
    4238             :         unsigned long flags;
    4239             :         unsigned long *map;
    4240             :         void *p;
    4241             : 
    4242           0 :         slab_err(s, slab, text, s->name);
    4243           0 :         slab_lock(slab, &flags);
    4244             : 
    4245           0 :         map = get_map(s, slab);
    4246           0 :         for_each_object(p, s, addr, slab->objects) {
    4247             : 
    4248           0 :                 if (!test_bit(__obj_to_index(s, addr, p), map)) {
    4249           0 :                         pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
    4250           0 :                         print_tracking(s, p);
    4251             :                 }
    4252             :         }
    4253           0 :         put_map(map);
    4254           0 :         slab_unlock(slab, &flags);
    4255             : #endif
    4256           0 : }
    4257             : 
    4258             : /*
    4259             :  * Attempt to free all partial slabs on a node.
    4260             :  * This is called from __kmem_cache_shutdown(). We must take list_lock
    4261             :  * because sysfs file might still access partial list after the shutdowning.
    4262             :  */
    4263           0 : static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
    4264             : {
    4265           0 :         LIST_HEAD(discard);
    4266             :         struct slab *slab, *h;
    4267             : 
    4268           0 :         BUG_ON(irqs_disabled());
    4269           0 :         spin_lock_irq(&n->list_lock);
    4270           0 :         list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
    4271           0 :                 if (!slab->inuse) {
    4272           0 :                         remove_partial(n, slab);
    4273           0 :                         list_add(&slab->slab_list, &discard);
    4274             :                 } else {
    4275           0 :                         list_slab_objects(s, slab,
    4276             :                           "Objects remaining in %s on __kmem_cache_shutdown()");
    4277             :                 }
    4278             :         }
    4279           0 :         spin_unlock_irq(&n->list_lock);
    4280             : 
    4281           0 :         list_for_each_entry_safe(slab, h, &discard, slab_list)
    4282           0 :                 discard_slab(s, slab);
    4283           0 : }
    4284             : 
    4285           0 : bool __kmem_cache_empty(struct kmem_cache *s)
    4286             : {
    4287             :         int node;
    4288             :         struct kmem_cache_node *n;
    4289             : 
    4290           0 :         for_each_kmem_cache_node(s, node, n)
    4291           0 :                 if (n->nr_partial || slabs_node(s, node))
    4292             :                         return false;
    4293             :         return true;
    4294             : }
    4295             : 
    4296             : /*
    4297             :  * Release all resources used by a slab cache.
    4298             :  */
    4299           0 : int __kmem_cache_shutdown(struct kmem_cache *s)
    4300             : {
    4301             :         int node;
    4302             :         struct kmem_cache_node *n;
    4303             : 
    4304           0 :         flush_all_cpus_locked(s);
    4305             :         /* Attempt to free all objects */
    4306           0 :         for_each_kmem_cache_node(s, node, n) {
    4307           0 :                 free_partial(s, n);
    4308           0 :                 if (n->nr_partial || slabs_node(s, node))
    4309             :                         return 1;
    4310             :         }
    4311             :         return 0;
    4312             : }
    4313             : 
    4314             : #ifdef CONFIG_PRINTK
    4315           0 : void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
    4316             : {
    4317             :         void *base;
    4318             :         int __maybe_unused i;
    4319             :         unsigned int objnr;
    4320             :         void *objp;
    4321             :         void *objp0;
    4322           0 :         struct kmem_cache *s = slab->slab_cache;
    4323             :         struct track __maybe_unused *trackp;
    4324             : 
    4325           0 :         kpp->kp_ptr = object;
    4326           0 :         kpp->kp_slab = slab;
    4327           0 :         kpp->kp_slab_cache = s;
    4328           0 :         base = slab_address(slab);
    4329           0 :         objp0 = kasan_reset_tag(object);
    4330             : #ifdef CONFIG_SLUB_DEBUG
    4331           0 :         objp = restore_red_left(s, objp0);
    4332             : #else
    4333             :         objp = objp0;
    4334             : #endif
    4335           0 :         objnr = obj_to_index(s, slab, objp);
    4336           0 :         kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
    4337           0 :         objp = base + s->size * objnr;
    4338           0 :         kpp->kp_objp = objp;
    4339           0 :         if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
    4340           0 :                          || (objp - base) % s->size) ||
    4341           0 :             !(s->flags & SLAB_STORE_USER))
    4342             :                 return;
    4343             : #ifdef CONFIG_SLUB_DEBUG
    4344           0 :         objp = fixup_red_left(s, objp);
    4345           0 :         trackp = get_track(s, objp, TRACK_ALLOC);
    4346           0 :         kpp->kp_ret = (void *)trackp->addr;
    4347             : #ifdef CONFIG_STACKTRACE
    4348           0 :         for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
    4349           0 :                 kpp->kp_stack[i] = (void *)trackp->addrs[i];
    4350           0 :                 if (!kpp->kp_stack[i])
    4351             :                         break;
    4352             :         }
    4353             : 
    4354           0 :         trackp = get_track(s, objp, TRACK_FREE);
    4355           0 :         for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
    4356           0 :                 kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
    4357           0 :                 if (!kpp->kp_free_stack[i])
    4358             :                         break;
    4359             :         }
    4360             : #endif
    4361             : #endif
    4362             : }
    4363             : #endif
    4364             : 
    4365             : /********************************************************************
    4366             :  *              Kmalloc subsystem
    4367             :  *******************************************************************/
    4368             : 
    4369           0 : static int __init setup_slub_min_order(char *str)
    4370             : {
    4371           0 :         get_option(&str, (int *)&slub_min_order);
    4372             : 
    4373           0 :         return 1;
    4374             : }
    4375             : 
    4376             : __setup("slub_min_order=", setup_slub_min_order);
    4377             : 
    4378           0 : static int __init setup_slub_max_order(char *str)
    4379             : {
    4380           0 :         get_option(&str, (int *)&slub_max_order);
    4381           0 :         slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
    4382             : 
    4383           0 :         return 1;
    4384             : }
    4385             : 
    4386             : __setup("slub_max_order=", setup_slub_max_order);
    4387             : 
    4388           0 : static int __init setup_slub_min_objects(char *str)
    4389             : {
    4390           0 :         get_option(&str, (int *)&slub_min_objects);
    4391             : 
    4392           0 :         return 1;
    4393             : }
    4394             : 
    4395             : __setup("slub_min_objects=", setup_slub_min_objects);
    4396             : 
    4397        1011 : void *__kmalloc(size_t size, gfp_t flags)
    4398             : {
    4399             :         struct kmem_cache *s;
    4400             :         void *ret;
    4401             : 
    4402        1011 :         if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
    4403           8 :                 return kmalloc_large(size, flags);
    4404             : 
    4405        1003 :         s = kmalloc_slab(size, flags);
    4406             : 
    4407        1003 :         if (unlikely(ZERO_OR_NULL_PTR(s)))
    4408             :                 return s;
    4409             : 
    4410        2006 :         ret = slab_alloc(s, NULL, flags, _RET_IP_, size);
    4411             : 
    4412        1003 :         trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
    4413             : 
    4414        1003 :         ret = kasan_kmalloc(s, ret, size, flags);
    4415             : 
    4416        1003 :         return ret;
    4417             : }
    4418             : EXPORT_SYMBOL(__kmalloc);
    4419             : 
    4420             : #ifdef CONFIG_NUMA
    4421             : static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
    4422             : {
    4423             :         struct page *page;
    4424             :         void *ptr = NULL;
    4425             :         unsigned int order = get_order(size);
    4426             : 
    4427             :         flags |= __GFP_COMP;
    4428             :         page = alloc_pages_node(node, flags, order);
    4429             :         if (page) {
    4430             :                 ptr = page_address(page);
    4431             :                 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
    4432             :                                       PAGE_SIZE << order);
    4433             :         }
    4434             : 
    4435             :         return kmalloc_large_node_hook(ptr, size, flags);
    4436             : }
    4437             : 
    4438             : void *__kmalloc_node(size_t size, gfp_t flags, int node)
    4439             : {
    4440             :         struct kmem_cache *s;
    4441             :         void *ret;
    4442             : 
    4443             :         if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
    4444             :                 ret = kmalloc_large_node(size, flags, node);
    4445             : 
    4446             :                 trace_kmalloc_node(_RET_IP_, ret,
    4447             :                                    size, PAGE_SIZE << get_order(size),
    4448             :                                    flags, node);
    4449             : 
    4450             :                 return ret;
    4451             :         }
    4452             : 
    4453             :         s = kmalloc_slab(size, flags);
    4454             : 
    4455             :         if (unlikely(ZERO_OR_NULL_PTR(s)))
    4456             :                 return s;
    4457             : 
    4458             :         ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);
    4459             : 
    4460             :         trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
    4461             : 
    4462             :         ret = kasan_kmalloc(s, ret, size, flags);
    4463             : 
    4464             :         return ret;
    4465             : }
    4466             : EXPORT_SYMBOL(__kmalloc_node);
    4467             : #endif  /* CONFIG_NUMA */
    4468             : 
    4469             : #ifdef CONFIG_HARDENED_USERCOPY
    4470             : /*
    4471             :  * Rejects incorrectly sized objects and objects that are to be copied
    4472             :  * to/from userspace but do not fall entirely within the containing slab
    4473             :  * cache's usercopy region.
    4474             :  *
    4475             :  * Returns NULL if check passes, otherwise const char * to name of cache
    4476             :  * to indicate an error.
    4477             :  */
    4478             : void __check_heap_object(const void *ptr, unsigned long n,
    4479             :                          const struct slab *slab, bool to_user)
    4480             : {
    4481             :         struct kmem_cache *s;
    4482             :         unsigned int offset;
    4483             :         bool is_kfence = is_kfence_address(ptr);
    4484             : 
    4485             :         ptr = kasan_reset_tag(ptr);
    4486             : 
    4487             :         /* Find object and usable object size. */
    4488             :         s = slab->slab_cache;
    4489             : 
    4490             :         /* Reject impossible pointers. */
    4491             :         if (ptr < slab_address(slab))
    4492             :                 usercopy_abort("SLUB object not in SLUB page?!", NULL,
    4493             :                                to_user, 0, n);
    4494             : 
    4495             :         /* Find offset within object. */
    4496             :         if (is_kfence)
    4497             :                 offset = ptr - kfence_object_start(ptr);
    4498             :         else
    4499             :                 offset = (ptr - slab_address(slab)) % s->size;
    4500             : 
    4501             :         /* Adjust for redzone and reject if within the redzone. */
    4502             :         if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
    4503             :                 if (offset < s->red_left_pad)
    4504             :                         usercopy_abort("SLUB object in left red zone",
    4505             :                                        s->name, to_user, offset, n);
    4506             :                 offset -= s->red_left_pad;
    4507             :         }
    4508             : 
    4509             :         /* Allow address range falling entirely within usercopy region. */
    4510             :         if (offset >= s->useroffset &&
    4511             :             offset - s->useroffset <= s->usersize &&
    4512             :             n <= s->useroffset - offset + s->usersize)
    4513             :                 return;
    4514             : 
    4515             :         usercopy_abort("SLUB object", s->name, to_user, offset, n);
    4516             : }
    4517             : #endif /* CONFIG_HARDENED_USERCOPY */
    4518             : 
    4519         228 : size_t __ksize(const void *object)
    4520             : {
    4521             :         struct folio *folio;
    4522             : 
    4523         228 :         if (unlikely(object == ZERO_SIZE_PTR))
    4524             :                 return 0;
    4525             : 
    4526         228 :         folio = virt_to_folio(object);
    4527             : 
    4528         228 :         if (unlikely(!folio_test_slab(folio)))
    4529           0 :                 return folio_size(folio);
    4530             : 
    4531         228 :         return slab_ksize(folio_slab(folio)->slab_cache);
    4532             : }
    4533             : EXPORT_SYMBOL(__ksize);
    4534             : 
    4535        3807 : void kfree(const void *x)
    4536             : {
    4537             :         struct folio *folio;
    4538             :         struct slab *slab;
    4539        3807 :         void *object = (void *)x;
    4540             : 
    4541        3807 :         trace_kfree(_RET_IP_, x);
    4542             : 
    4543        3807 :         if (unlikely(ZERO_OR_NULL_PTR(x)))
    4544             :                 return;
    4545             : 
    4546        2350 :         folio = virt_to_folio(x);
    4547        2350 :         if (unlikely(!folio_test_slab(folio))) {
    4548           8 :                 free_large_kmalloc(folio, object);
    4549           8 :                 return;
    4550             :         }
    4551        2342 :         slab = folio_slab(folio);
    4552        2342 :         slab_free(slab->slab_cache, slab, object, NULL, 1, _RET_IP_);
    4553             : }
    4554             : EXPORT_SYMBOL(kfree);
    4555             : 
    4556             : #define SHRINK_PROMOTE_MAX 32
    4557             : 
    4558             : /*
    4559             :  * kmem_cache_shrink discards empty slabs and promotes the slabs filled
    4560             :  * up most to the head of the partial lists. New allocations will then
    4561             :  * fill those up and thus they can be removed from the partial lists.
    4562             :  *
    4563             :  * The slabs with the least items are placed last. This results in them
    4564             :  * being allocated from last increasing the chance that the last objects
    4565             :  * are freed in them.
    4566             :  */
    4567           0 : static int __kmem_cache_do_shrink(struct kmem_cache *s)
    4568             : {
    4569             :         int node;
    4570             :         int i;
    4571             :         struct kmem_cache_node *n;
    4572             :         struct slab *slab;
    4573             :         struct slab *t;
    4574             :         struct list_head discard;
    4575             :         struct list_head promote[SHRINK_PROMOTE_MAX];
    4576             :         unsigned long flags;
    4577           0 :         int ret = 0;
    4578             : 
    4579           0 :         for_each_kmem_cache_node(s, node, n) {
    4580           0 :                 INIT_LIST_HEAD(&discard);
    4581           0 :                 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
    4582           0 :                         INIT_LIST_HEAD(promote + i);
    4583             : 
    4584           0 :                 spin_lock_irqsave(&n->list_lock, flags);
    4585             : 
    4586             :                 /*
    4587             :                  * Build lists of slabs to discard or promote.
    4588             :                  *
    4589             :                  * Note that concurrent frees may occur while we hold the
    4590             :                  * list_lock. slab->inuse here is the upper limit.
    4591             :                  */
    4592           0 :                 list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
    4593           0 :                         int free = slab->objects - slab->inuse;
    4594             : 
    4595             :                         /* Do not reread slab->inuse */
    4596           0 :                         barrier();
    4597             : 
    4598             :                         /* We do not keep full slabs on the list */
    4599           0 :                         BUG_ON(free <= 0);
    4600             : 
    4601           0 :                         if (free == slab->objects) {
    4602           0 :                                 list_move(&slab->slab_list, &discard);
    4603           0 :                                 n->nr_partial--;
    4604           0 :                         } else if (free <= SHRINK_PROMOTE_MAX)
    4605           0 :                                 list_move(&slab->slab_list, promote + free - 1);
    4606             :                 }
    4607             : 
    4608             :                 /*
    4609             :                  * Promote the slabs filled up most to the head of the
    4610             :                  * partial list.
    4611             :                  */
    4612           0 :                 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
    4613           0 :                         list_splice(promote + i, &n->partial);
    4614             : 
    4615           0 :                 spin_unlock_irqrestore(&n->list_lock, flags);
    4616             : 
    4617             :                 /* Release empty slabs */
    4618           0 :                 list_for_each_entry_safe(slab, t, &discard, slab_list)
    4619           0 :                         discard_slab(s, slab);
    4620             : 
    4621           0 :                 if (slabs_node(s, node))
    4622           0 :                         ret = 1;
    4623             :         }
    4624             : 
    4625           0 :         return ret;
    4626             : }
    4627             : 
    4628           0 : int __kmem_cache_shrink(struct kmem_cache *s)
    4629             : {
    4630           0 :         flush_all(s);
    4631           0 :         return __kmem_cache_do_shrink(s);
    4632             : }
    4633             : 
    4634             : static int slab_mem_going_offline_callback(void *arg)
    4635             : {
    4636             :         struct kmem_cache *s;
    4637             : 
    4638             :         mutex_lock(&slab_mutex);
    4639             :         list_for_each_entry(s, &slab_caches, list) {
    4640             :                 flush_all_cpus_locked(s);
    4641             :                 __kmem_cache_do_shrink(s);
    4642             :         }
    4643             :         mutex_unlock(&slab_mutex);
    4644             : 
    4645             :         return 0;
    4646             : }
    4647             : 
    4648             : static void slab_mem_offline_callback(void *arg)
    4649             : {
    4650             :         struct memory_notify *marg = arg;
    4651             :         int offline_node;
    4652             : 
    4653             :         offline_node = marg->status_change_nid_normal;
    4654             : 
    4655             :         /*
    4656             :          * If the node still has available memory. we need kmem_cache_node
    4657             :          * for it yet.
    4658             :          */
    4659             :         if (offline_node < 0)
    4660             :                 return;
    4661             : 
    4662             :         mutex_lock(&slab_mutex);
    4663             :         node_clear(offline_node, slab_nodes);
    4664             :         /*
    4665             :          * We no longer free kmem_cache_node structures here, as it would be
    4666             :          * racy with all get_node() users, and infeasible to protect them with
    4667             :          * slab_mutex.
    4668             :          */
    4669             :         mutex_unlock(&slab_mutex);
    4670             : }
    4671             : 
    4672             : static int slab_mem_going_online_callback(void *arg)
    4673             : {
    4674             :         struct kmem_cache_node *n;
    4675             :         struct kmem_cache *s;
    4676             :         struct memory_notify *marg = arg;
    4677             :         int nid = marg->status_change_nid_normal;
    4678             :         int ret = 0;
    4679             : 
    4680             :         /*
    4681             :          * If the node's memory is already available, then kmem_cache_node is
    4682             :          * already created. Nothing to do.
    4683             :          */
    4684             :         if (nid < 0)
    4685             :                 return 0;
    4686             : 
    4687             :         /*
    4688             :          * We are bringing a node online. No memory is available yet. We must
    4689             :          * allocate a kmem_cache_node structure in order to bring the node
    4690             :          * online.
    4691             :          */
    4692             :         mutex_lock(&slab_mutex);
    4693             :         list_for_each_entry(s, &slab_caches, list) {
    4694             :                 /*
    4695             :                  * The structure may already exist if the node was previously
    4696             :                  * onlined and offlined.
    4697             :                  */
    4698             :                 if (get_node(s, nid))
    4699             :                         continue;
    4700             :                 /*
    4701             :                  * XXX: kmem_cache_alloc_node will fallback to other nodes
    4702             :                  *      since memory is not yet available from the node that
    4703             :                  *      is brought up.
    4704             :                  */
    4705             :                 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
    4706             :                 if (!n) {
    4707             :                         ret = -ENOMEM;
    4708             :                         goto out;
    4709             :                 }
    4710             :                 init_kmem_cache_node(n);
    4711             :                 s->node[nid] = n;
    4712             :         }
    4713             :         /*
    4714             :          * Any cache created after this point will also have kmem_cache_node
    4715             :          * initialized for the new node.
    4716             :          */
    4717             :         node_set(nid, slab_nodes);
    4718             : out:
    4719             :         mutex_unlock(&slab_mutex);
    4720             :         return ret;
    4721             : }
    4722             : 
    4723             : static int slab_memory_callback(struct notifier_block *self,
    4724             :                                 unsigned long action, void *arg)
    4725             : {
    4726             :         int ret = 0;
    4727             : 
    4728             :         switch (action) {
    4729             :         case MEM_GOING_ONLINE:
    4730             :                 ret = slab_mem_going_online_callback(arg);
    4731             :                 break;
    4732             :         case MEM_GOING_OFFLINE:
    4733             :                 ret = slab_mem_going_offline_callback(arg);
    4734             :                 break;
    4735             :         case MEM_OFFLINE:
    4736             :         case MEM_CANCEL_ONLINE:
    4737             :                 slab_mem_offline_callback(arg);
    4738             :                 break;
    4739             :         case MEM_ONLINE:
    4740             :         case MEM_CANCEL_OFFLINE:
    4741             :                 break;
    4742             :         }
    4743             :         if (ret)
    4744             :                 ret = notifier_from_errno(ret);
    4745             :         else
    4746             :                 ret = NOTIFY_OK;
    4747             :         return ret;
    4748             : }
    4749             : 
    4750             : static struct notifier_block slab_memory_callback_nb = {
    4751             :         .notifier_call = slab_memory_callback,
    4752             :         .priority = SLAB_CALLBACK_PRI,
    4753             : };
    4754             : 
    4755             : /********************************************************************
    4756             :  *                      Basic setup of slabs
    4757             :  *******************************************************************/
    4758             : 
    4759             : /*
    4760             :  * Used for early kmem_cache structures that were allocated using
    4761             :  * the page allocator. Allocate them properly then fix up the pointers
    4762             :  * that may be pointing to the wrong kmem_cache structure.
    4763             :  */
    4764             : 
    4765           2 : static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
    4766             : {
    4767             :         int node;
    4768           4 :         struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
    4769             :         struct kmem_cache_node *n;
    4770             : 
    4771           2 :         memcpy(s, static_cache, kmem_cache->object_size);
    4772             : 
    4773             :         /*
    4774             :          * This runs very early, and only the boot processor is supposed to be
    4775             :          * up.  Even if it weren't true, IRQs are not up so we couldn't fire
    4776             :          * IPIs around.
    4777             :          */
    4778           2 :         __flush_cpu_slab(s, smp_processor_id());
    4779           6 :         for_each_kmem_cache_node(s, node, n) {
    4780             :                 struct slab *p;
    4781             : 
    4782           4 :                 list_for_each_entry(p, &n->partial, slab_list)
    4783           2 :                         p->slab_cache = s;
    4784             : 
    4785             : #ifdef CONFIG_SLUB_DEBUG
    4786           2 :                 list_for_each_entry(p, &n->full, slab_list)
    4787           0 :                         p->slab_cache = s;
    4788             : #endif
    4789             :         }
    4790           4 :         list_add(&s->list, &slab_caches);
    4791           2 :         return s;
    4792             : }
    4793             : 
    4794           1 : void __init kmem_cache_init(void)
    4795             : {
    4796             :         static __initdata struct kmem_cache boot_kmem_cache,
    4797             :                 boot_kmem_cache_node;
    4798             :         int node;
    4799             : 
    4800             :         if (debug_guardpage_minorder())
    4801             :                 slub_max_order = 0;
    4802             : 
    4803             :         /* Print slub debugging pointers without hashing */
    4804           1 :         if (__slub_debug_enabled())
    4805           0 :                 no_hash_pointers_enable(NULL);
    4806             : 
    4807           1 :         kmem_cache_node = &boot_kmem_cache_node;
    4808           1 :         kmem_cache = &boot_kmem_cache;
    4809             : 
    4810             :         /*
    4811             :          * Initialize the nodemask for which we will allocate per node
    4812             :          * structures. Here we don't need taking slab_mutex yet.
    4813             :          */
    4814           3 :         for_each_node_state(node, N_NORMAL_MEMORY)
    4815             :                 node_set(node, slab_nodes);
    4816             : 
    4817           1 :         create_boot_cache(kmem_cache_node, "kmem_cache_node",
    4818             :                 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
    4819             : 
    4820             :         register_hotmemory_notifier(&slab_memory_callback_nb);
    4821             : 
    4822             :         /* Able to allocate the per node structures */
    4823           1 :         slab_state = PARTIAL;
    4824             : 
    4825           1 :         create_boot_cache(kmem_cache, "kmem_cache",
    4826             :                         offsetof(struct kmem_cache, node) +
    4827             :                                 nr_node_ids * sizeof(struct kmem_cache_node *),
    4828             :                        SLAB_HWCACHE_ALIGN, 0, 0);
    4829             : 
    4830           1 :         kmem_cache = bootstrap(&boot_kmem_cache);
    4831           1 :         kmem_cache_node = bootstrap(&boot_kmem_cache_node);
    4832             : 
    4833             :         /* Now we can use the kmem_cache to allocate kmalloc slabs */
    4834           1 :         setup_kmalloc_cache_index_table();
    4835           1 :         create_kmalloc_caches(0);
    4836             : 
    4837             :         /* Setup random freelists for each cache */
    4838           1 :         init_freelist_randomization();
    4839             : 
    4840           1 :         cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
    4841             :                                   slub_cpu_dead);
    4842             : 
    4843           1 :         pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
    4844             :                 cache_line_size(),
    4845             :                 slub_min_order, slub_max_order, slub_min_objects,
    4846             :                 nr_cpu_ids, nr_node_ids);
    4847           1 : }
    4848             : 
    4849           1 : void __init kmem_cache_init_late(void)
    4850             : {
    4851           1 : }
    4852             : 
    4853             : struct kmem_cache *
    4854          54 : __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
    4855             :                    slab_flags_t flags, void (*ctor)(void *))
    4856             : {
    4857             :         struct kmem_cache *s;
    4858             : 
    4859          54 :         s = find_mergeable(size, align, flags, name, ctor);
    4860          54 :         if (s) {
    4861          21 :                 s->refcount++;
    4862             : 
    4863             :                 /*
    4864             :                  * Adjust the object sizes so that we clear
    4865             :                  * the complete object on kzalloc.
    4866             :                  */
    4867          21 :                 s->object_size = max(s->object_size, size);
    4868          21 :                 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
    4869             : 
    4870          21 :                 if (sysfs_slab_alias(s, name)) {
    4871           0 :                         s->refcount--;
    4872           0 :                         s = NULL;
    4873             :                 }
    4874             :         }
    4875             : 
    4876          54 :         return s;
    4877             : }
    4878             : 
    4879          67 : int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
    4880             : {
    4881             :         int err;
    4882             : 
    4883          67 :         err = kmem_cache_open(s, flags);
    4884          67 :         if (err)
    4885             :                 return err;
    4886             : 
    4887             :         /* Mutex is not taken during early boot */
    4888          67 :         if (slab_state <= UP)
    4889             :                 return 0;
    4890             : 
    4891           3 :         err = sysfs_slab_add(s);
    4892           3 :         if (err) {
    4893           0 :                 __kmem_cache_release(s);
    4894           0 :                 return err;
    4895             :         }
    4896             : 
    4897             :         if (s->flags & SLAB_STORE_USER)
    4898             :                 debugfs_slab_add(s);
    4899             : 
    4900             :         return 0;
    4901             : }
    4902             : 
    4903        2395 : void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
    4904             : {
    4905             :         struct kmem_cache *s;
    4906             :         void *ret;
    4907             : 
    4908        2395 :         if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
    4909           0 :                 return kmalloc_large(size, gfpflags);
    4910             : 
    4911        2395 :         s = kmalloc_slab(size, gfpflags);
    4912             : 
    4913        2395 :         if (unlikely(ZERO_OR_NULL_PTR(s)))
    4914             :                 return s;
    4915             : 
    4916        2395 :         ret = slab_alloc(s, NULL, gfpflags, caller, size);
    4917             : 
    4918             :         /* Honor the call site pointer we received. */
    4919        2395 :         trace_kmalloc(caller, ret, size, s->size, gfpflags);
    4920             : 
    4921        2395 :         return ret;
    4922             : }
    4923             : EXPORT_SYMBOL(__kmalloc_track_caller);
    4924             : 
    4925             : #ifdef CONFIG_NUMA
    4926             : void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
    4927             :                                         int node, unsigned long caller)
    4928             : {
    4929             :         struct kmem_cache *s;
    4930             :         void *ret;
    4931             : 
    4932             :         if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
    4933             :                 ret = kmalloc_large_node(size, gfpflags, node);
    4934             : 
    4935             :                 trace_kmalloc_node(caller, ret,
    4936             :                                    size, PAGE_SIZE << get_order(size),
    4937             :                                    gfpflags, node);
    4938             : 
    4939             :                 return ret;
    4940             :         }
    4941             : 
    4942             :         s = kmalloc_slab(size, gfpflags);
    4943             : 
    4944             :         if (unlikely(ZERO_OR_NULL_PTR(s)))
    4945             :                 return s;
    4946             : 
    4947             :         ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);
    4948             : 
    4949             :         /* Honor the call site pointer we received. */
    4950             :         trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
    4951             : 
    4952             :         return ret;
    4953             : }
    4954             : EXPORT_SYMBOL(__kmalloc_node_track_caller);
    4955             : #endif
    4956             : 
    4957             : #ifdef CONFIG_SYSFS
    4958           0 : static int count_inuse(struct slab *slab)
    4959             : {
    4960           0 :         return slab->inuse;
    4961             : }
    4962             : 
    4963           0 : static int count_total(struct slab *slab)
    4964             : {
    4965           0 :         return slab->objects;
    4966             : }
    4967             : #endif
    4968             : 
    4969             : #ifdef CONFIG_SLUB_DEBUG
    4970           0 : static void validate_slab(struct kmem_cache *s, struct slab *slab,
    4971             :                           unsigned long *obj_map)
    4972             : {
    4973             :         void *p;
    4974           0 :         void *addr = slab_address(slab);
    4975             :         unsigned long flags;
    4976             : 
    4977           0 :         slab_lock(slab, &flags);
    4978             : 
    4979           0 :         if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
    4980             :                 goto unlock;
    4981             : 
    4982             :         /* Now we know that a valid freelist exists */
    4983           0 :         __fill_map(obj_map, s, slab);
    4984           0 :         for_each_object(p, s, addr, slab->objects) {
    4985           0 :                 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
    4986             :                          SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
    4987             : 
    4988           0 :                 if (!check_object(s, slab, p, val))
    4989             :                         break;
    4990             :         }
    4991             : unlock:
    4992           0 :         slab_unlock(slab, &flags);
    4993           0 : }
    4994             : 
    4995           0 : static int validate_slab_node(struct kmem_cache *s,
    4996             :                 struct kmem_cache_node *n, unsigned long *obj_map)
    4997             : {
    4998           0 :         unsigned long count = 0;
    4999             :         struct slab *slab;
    5000             :         unsigned long flags;
    5001             : 
    5002           0 :         spin_lock_irqsave(&n->list_lock, flags);
    5003             : 
    5004           0 :         list_for_each_entry(slab, &n->partial, slab_list) {
    5005           0 :                 validate_slab(s, slab, obj_map);
    5006           0 :                 count++;
    5007             :         }
    5008           0 :         if (count != n->nr_partial) {
    5009           0 :                 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
    5010             :                        s->name, count, n->nr_partial);
    5011           0 :                 slab_add_kunit_errors();
    5012             :         }
    5013             : 
    5014           0 :         if (!(s->flags & SLAB_STORE_USER))
    5015             :                 goto out;
    5016             : 
    5017           0 :         list_for_each_entry(slab, &n->full, slab_list) {
    5018           0 :                 validate_slab(s, slab, obj_map);
    5019           0 :                 count++;
    5020             :         }
    5021           0 :         if (count != atomic_long_read(&n->nr_slabs)) {
    5022           0 :                 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
    5023             :                        s->name, count, atomic_long_read(&n->nr_slabs));
    5024           0 :                 slab_add_kunit_errors();
    5025             :         }
    5026             : 
    5027             : out:
    5028           0 :         spin_unlock_irqrestore(&n->list_lock, flags);
    5029           0 :         return count;
    5030             : }
    5031             : 
    5032           0 : long validate_slab_cache(struct kmem_cache *s)
    5033             : {
    5034             :         int node;
    5035           0 :         unsigned long count = 0;
    5036             :         struct kmem_cache_node *n;
    5037             :         unsigned long *obj_map;
    5038             : 
    5039           0 :         obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
    5040           0 :         if (!obj_map)
    5041             :                 return -ENOMEM;
    5042             : 
    5043           0 :         flush_all(s);
    5044           0 :         for_each_kmem_cache_node(s, node, n)
    5045           0 :                 count += validate_slab_node(s, n, obj_map);
    5046             : 
    5047           0 :         bitmap_free(obj_map);
    5048             : 
    5049           0 :         return count;
    5050             : }
    5051             : EXPORT_SYMBOL(validate_slab_cache);
    5052             : 
    5053             : #ifdef CONFIG_DEBUG_FS
    5054             : /*
    5055             :  * Generate lists of code addresses where slabcache objects are allocated
    5056             :  * and freed.
    5057             :  */
    5058             : 
    5059             : struct location {
    5060             :         unsigned long count;
    5061             :         unsigned long addr;
    5062             :         long long sum_time;
    5063             :         long min_time;
    5064             :         long max_time;
    5065             :         long min_pid;
    5066             :         long max_pid;
    5067             :         DECLARE_BITMAP(cpus, NR_CPUS);
    5068             :         nodemask_t nodes;
    5069             : };
    5070             : 
    5071             : struct loc_track {
    5072             :         unsigned long max;
    5073             :         unsigned long count;
    5074             :         struct location *loc;
    5075             :         loff_t idx;
    5076             : };
    5077             : 
    5078             : static struct dentry *slab_debugfs_root;
    5079             : 
    5080             : static void free_loc_track(struct loc_track *t)
    5081             : {
    5082             :         if (t->max)
    5083             :                 free_pages((unsigned long)t->loc,
    5084             :                         get_order(sizeof(struct location) * t->max));
    5085             : }
    5086             : 
    5087             : static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
    5088             : {
    5089             :         struct location *l;
    5090             :         int order;
    5091             : 
    5092             :         order = get_order(sizeof(struct location) * max);
    5093             : 
    5094             :         l = (void *)__get_free_pages(flags, order);
    5095             :         if (!l)
    5096             :                 return 0;
    5097             : 
    5098             :         if (t->count) {
    5099             :                 memcpy(l, t->loc, sizeof(struct location) * t->count);
    5100             :                 free_loc_track(t);
    5101             :         }
    5102             :         t->max = max;
    5103             :         t->loc = l;
    5104             :         return 1;
    5105             : }
    5106             : 
    5107             : static int add_location(struct loc_track *t, struct kmem_cache *s,
    5108             :                                 const struct track *track)
    5109             : {
    5110             :         long start, end, pos;
    5111             :         struct location *l;
    5112             :         unsigned long caddr;
    5113             :         unsigned long age = jiffies - track->when;
    5114             : 
    5115             :         start = -1;
    5116             :         end = t->count;
    5117             : 
    5118             :         for ( ; ; ) {
    5119             :                 pos = start + (end - start + 1) / 2;
    5120             : 
    5121             :                 /*
    5122             :                  * There is nothing at "end". If we end up there
    5123             :                  * we need to add something to before end.
    5124             :                  */
    5125             :                 if (pos == end)
    5126             :                         break;
    5127             : 
    5128             :                 caddr = t->loc[pos].addr;
    5129             :                 if (track->addr == caddr) {
    5130             : 
    5131             :                         l = &t->loc[pos];
    5132             :                         l->count++;
    5133             :                         if (track->when) {
    5134             :                                 l->sum_time += age;
    5135             :                                 if (age < l->min_time)
    5136             :                                         l->min_time = age;
    5137             :                                 if (age > l->max_time)
    5138             :                                         l->max_time = age;
    5139             : 
    5140             :                                 if (track->pid < l->min_pid)
    5141             :                                         l->min_pid = track->pid;
    5142             :                                 if (track->pid > l->max_pid)
    5143             :                                         l->max_pid = track->pid;
    5144             : 
    5145             :                                 cpumask_set_cpu(track->cpu,
    5146             :                                                 to_cpumask(l->cpus));
    5147             :                         }
    5148             :                         node_set(page_to_nid(virt_to_page(track)), l->nodes);
    5149             :                         return 1;
    5150             :                 }
    5151             : 
    5152             :                 if (track->addr < caddr)
    5153             :                         end = pos;
    5154             :                 else
    5155             :                         start = pos;
    5156             :         }
    5157             : 
    5158             :         /*
    5159             :          * Not found. Insert new tracking element.
    5160             :          */
    5161             :         if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
    5162             :                 return 0;
    5163             : 
    5164             :         l = t->loc + pos;
    5165             :         if (pos < t->count)
    5166             :                 memmove(l + 1, l,
    5167             :                         (t->count - pos) * sizeof(struct location));
    5168             :         t->count++;
    5169             :         l->count = 1;
    5170             :         l->addr = track->addr;
    5171             :         l->sum_time = age;
    5172             :         l->min_time = age;
    5173             :         l->max_time = age;
    5174             :         l->min_pid = track->pid;
    5175             :         l->max_pid = track->pid;
    5176             :         cpumask_clear(to_cpumask(l->cpus));
    5177             :         cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
    5178             :         nodes_clear(l->nodes);
    5179             :         node_set(page_to_nid(virt_to_page(track)), l->nodes);
    5180             :         return 1;
    5181             : }
    5182             : 
    5183             : static void process_slab(struct loc_track *t, struct kmem_cache *s,
    5184             :                 struct slab *slab, enum track_item alloc,
    5185             :                 unsigned long *obj_map)
    5186             : {
    5187             :         void *addr = slab_address(slab);
    5188             :         void *p;
    5189             : 
    5190             :         __fill_map(obj_map, s, slab);
    5191             : 
    5192             :         for_each_object(p, s, addr, slab->objects)
    5193             :                 if (!test_bit(__obj_to_index(s, addr, p), obj_map))
    5194             :                         add_location(t, s, get_track(s, p, alloc));
    5195             : }
    5196             : #endif  /* CONFIG_DEBUG_FS   */
    5197             : #endif  /* CONFIG_SLUB_DEBUG */
    5198             : 
    5199             : #ifdef CONFIG_SYSFS
    5200             : enum slab_stat_type {
    5201             :         SL_ALL,                 /* All slabs */
    5202             :         SL_PARTIAL,             /* Only partially allocated slabs */
    5203             :         SL_CPU,                 /* Only slabs used for cpu caches */
    5204             :         SL_OBJECTS,             /* Determine allocated objects not slabs */
    5205             :         SL_TOTAL                /* Determine object capacity not slabs */
    5206             : };
    5207             : 
    5208             : #define SO_ALL          (1 << SL_ALL)
    5209             : #define SO_PARTIAL      (1 << SL_PARTIAL)
    5210             : #define SO_CPU          (1 << SL_CPU)
    5211             : #define SO_OBJECTS      (1 << SL_OBJECTS)
    5212             : #define SO_TOTAL        (1 << SL_TOTAL)
    5213             : 
    5214           0 : static ssize_t show_slab_objects(struct kmem_cache *s,
    5215             :                                  char *buf, unsigned long flags)
    5216             : {
    5217           0 :         unsigned long total = 0;
    5218             :         int node;
    5219             :         int x;
    5220             :         unsigned long *nodes;
    5221           0 :         int len = 0;
    5222             : 
    5223           0 :         nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
    5224           0 :         if (!nodes)
    5225             :                 return -ENOMEM;
    5226             : 
    5227           0 :         if (flags & SO_CPU) {
    5228             :                 int cpu;
    5229             : 
    5230           0 :                 for_each_possible_cpu(cpu) {
    5231           0 :                         struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
    5232             :                                                                cpu);
    5233             :                         int node;
    5234             :                         struct slab *slab;
    5235             : 
    5236           0 :                         slab = READ_ONCE(c->slab);
    5237           0 :                         if (!slab)
    5238           0 :                                 continue;
    5239             : 
    5240           0 :                         node = slab_nid(slab);
    5241           0 :                         if (flags & SO_TOTAL)
    5242           0 :                                 x = slab->objects;
    5243           0 :                         else if (flags & SO_OBJECTS)
    5244           0 :                                 x = slab->inuse;
    5245             :                         else
    5246             :                                 x = 1;
    5247             : 
    5248           0 :                         total += x;
    5249           0 :                         nodes[node] += x;
    5250             : 
    5251             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    5252             :                         slab = slub_percpu_partial_read_once(c);
    5253             :                         if (slab) {
    5254             :                                 node = slab_nid(slab);
    5255             :                                 if (flags & SO_TOTAL)
    5256             :                                         WARN_ON_ONCE(1);
    5257             :                                 else if (flags & SO_OBJECTS)
    5258             :                                         WARN_ON_ONCE(1);
    5259             :                                 else
    5260             :                                         x = slab->slabs;
    5261             :                                 total += x;
    5262             :                                 nodes[node] += x;
    5263             :                         }
    5264             : #endif
    5265             :                 }
    5266             :         }
    5267             : 
    5268             :         /*
    5269             :          * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
    5270             :          * already held which will conflict with an existing lock order:
    5271             :          *
    5272             :          * mem_hotplug_lock->slab_mutex->kernfs_mutex
    5273             :          *
    5274             :          * We don't really need mem_hotplug_lock (to hold off
    5275             :          * slab_mem_going_offline_callback) here because slab's memory hot
    5276             :          * unplug code doesn't destroy the kmem_cache->node[] data.
    5277             :          */
    5278             : 
    5279             : #ifdef CONFIG_SLUB_DEBUG
    5280           0 :         if (flags & SO_ALL) {
    5281             :                 struct kmem_cache_node *n;
    5282             : 
    5283           0 :                 for_each_kmem_cache_node(s, node, n) {
    5284             : 
    5285           0 :                         if (flags & SO_TOTAL)
    5286           0 :                                 x = atomic_long_read(&n->total_objects);
    5287           0 :                         else if (flags & SO_OBJECTS)
    5288           0 :                                 x = atomic_long_read(&n->total_objects) -
    5289           0 :                                         count_partial(n, count_free);
    5290             :                         else
    5291           0 :                                 x = atomic_long_read(&n->nr_slabs);
    5292           0 :                         total += x;
    5293           0 :                         nodes[node] += x;
    5294             :                 }
    5295             : 
    5296             :         } else
    5297             : #endif
    5298           0 :         if (flags & SO_PARTIAL) {
    5299             :                 struct kmem_cache_node *n;
    5300             : 
    5301           0 :                 for_each_kmem_cache_node(s, node, n) {
    5302           0 :                         if (flags & SO_TOTAL)
    5303           0 :                                 x = count_partial(n, count_total);
    5304           0 :                         else if (flags & SO_OBJECTS)
    5305           0 :                                 x = count_partial(n, count_inuse);
    5306             :                         else
    5307           0 :                                 x = n->nr_partial;
    5308           0 :                         total += x;
    5309           0 :                         nodes[node] += x;
    5310             :                 }
    5311             :         }
    5312             : 
    5313           0 :         len += sysfs_emit_at(buf, len, "%lu", total);
    5314             : #ifdef CONFIG_NUMA
    5315             :         for (node = 0; node < nr_node_ids; node++) {
    5316             :                 if (nodes[node])
    5317             :                         len += sysfs_emit_at(buf, len, " N%d=%lu",
    5318             :                                              node, nodes[node]);
    5319             :         }
    5320             : #endif
    5321           0 :         len += sysfs_emit_at(buf, len, "\n");
    5322           0 :         kfree(nodes);
    5323             : 
    5324           0 :         return len;
    5325             : }
    5326             : 
    5327             : #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
    5328             : #define to_slab(n) container_of(n, struct kmem_cache, kobj)
    5329             : 
    5330             : struct slab_attribute {
    5331             :         struct attribute attr;
    5332             :         ssize_t (*show)(struct kmem_cache *s, char *buf);
    5333             :         ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
    5334             : };
    5335             : 
    5336             : #define SLAB_ATTR_RO(_name) \
    5337             :         static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
    5338             : 
    5339             : #define SLAB_ATTR(_name) \
    5340             :         static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
    5341             : 
    5342           0 : static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
    5343             : {
    5344           0 :         return sysfs_emit(buf, "%u\n", s->size);
    5345             : }
    5346             : SLAB_ATTR_RO(slab_size);
    5347             : 
    5348           0 : static ssize_t align_show(struct kmem_cache *s, char *buf)
    5349             : {
    5350           0 :         return sysfs_emit(buf, "%u\n", s->align);
    5351             : }
    5352             : SLAB_ATTR_RO(align);
    5353             : 
    5354           0 : static ssize_t object_size_show(struct kmem_cache *s, char *buf)
    5355             : {
    5356           0 :         return sysfs_emit(buf, "%u\n", s->object_size);
    5357             : }
    5358             : SLAB_ATTR_RO(object_size);
    5359             : 
    5360           0 : static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
    5361             : {
    5362           0 :         return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
    5363             : }
    5364             : SLAB_ATTR_RO(objs_per_slab);
    5365             : 
    5366           0 : static ssize_t order_show(struct kmem_cache *s, char *buf)
    5367             : {
    5368           0 :         return sysfs_emit(buf, "%u\n", oo_order(s->oo));
    5369             : }
    5370             : SLAB_ATTR_RO(order);
    5371             : 
    5372           0 : static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
    5373             : {
    5374           0 :         return sysfs_emit(buf, "%lu\n", s->min_partial);
    5375             : }
    5376             : 
    5377           0 : static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
    5378             :                                  size_t length)
    5379             : {
    5380             :         unsigned long min;
    5381             :         int err;
    5382             : 
    5383           0 :         err = kstrtoul(buf, 10, &min);
    5384           0 :         if (err)
    5385           0 :                 return err;
    5386             : 
    5387           0 :         s->min_partial = min;
    5388           0 :         return length;
    5389             : }
    5390             : SLAB_ATTR(min_partial);
    5391             : 
    5392           0 : static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
    5393             : {
    5394           0 :         unsigned int nr_partial = 0;
    5395             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    5396             :         nr_partial = s->cpu_partial;
    5397             : #endif
    5398             : 
    5399           0 :         return sysfs_emit(buf, "%u\n", nr_partial);
    5400             : }
    5401             : 
    5402           0 : static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
    5403             :                                  size_t length)
    5404             : {
    5405             :         unsigned int objects;
    5406             :         int err;
    5407             : 
    5408           0 :         err = kstrtouint(buf, 10, &objects);
    5409           0 :         if (err)
    5410           0 :                 return err;
    5411           0 :         if (objects && !kmem_cache_has_cpu_partial(s))
    5412             :                 return -EINVAL;
    5413             : 
    5414           0 :         slub_set_cpu_partial(s, objects);
    5415           0 :         flush_all(s);
    5416           0 :         return length;
    5417             : }
    5418             : SLAB_ATTR(cpu_partial);
    5419             : 
    5420           0 : static ssize_t ctor_show(struct kmem_cache *s, char *buf)
    5421             : {
    5422           0 :         if (!s->ctor)
    5423             :                 return 0;
    5424           0 :         return sysfs_emit(buf, "%pS\n", s->ctor);
    5425             : }
    5426             : SLAB_ATTR_RO(ctor);
    5427             : 
    5428           0 : static ssize_t aliases_show(struct kmem_cache *s, char *buf)
    5429             : {
    5430           0 :         return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
    5431             : }
    5432             : SLAB_ATTR_RO(aliases);
    5433             : 
    5434           0 : static ssize_t partial_show(struct kmem_cache *s, char *buf)
    5435             : {
    5436           0 :         return show_slab_objects(s, buf, SO_PARTIAL);
    5437             : }
    5438             : SLAB_ATTR_RO(partial);
    5439             : 
    5440           0 : static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
    5441             : {
    5442           0 :         return show_slab_objects(s, buf, SO_CPU);
    5443             : }
    5444             : SLAB_ATTR_RO(cpu_slabs);
    5445             : 
    5446           0 : static ssize_t objects_show(struct kmem_cache *s, char *buf)
    5447             : {
    5448           0 :         return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
    5449             : }
    5450             : SLAB_ATTR_RO(objects);
    5451             : 
    5452           0 : static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
    5453             : {
    5454           0 :         return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
    5455             : }
    5456             : SLAB_ATTR_RO(objects_partial);
    5457             : 
    5458           0 : static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
    5459             : {
    5460           0 :         int objects = 0;
    5461           0 :         int slabs = 0;
    5462             :         int cpu __maybe_unused;
    5463           0 :         int len = 0;
    5464             : 
    5465             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    5466             :         for_each_online_cpu(cpu) {
    5467             :                 struct slab *slab;
    5468             : 
    5469             :                 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
    5470             : 
    5471             :                 if (slab)
    5472             :                         slabs += slab->slabs;
    5473             :         }
    5474             : #endif
    5475             : 
    5476             :         /* Approximate half-full slabs, see slub_set_cpu_partial() */
    5477           0 :         objects = (slabs * oo_objects(s->oo)) / 2;
    5478           0 :         len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
    5479             : 
    5480             : #if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP)
    5481             :         for_each_online_cpu(cpu) {
    5482             :                 struct slab *slab;
    5483             : 
    5484             :                 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
    5485             :                 if (slab) {
    5486             :                         slabs = READ_ONCE(slab->slabs);
    5487             :                         objects = (slabs * oo_objects(s->oo)) / 2;
    5488             :                         len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
    5489             :                                              cpu, objects, slabs);
    5490             :                 }
    5491             :         }
    5492             : #endif
    5493           0 :         len += sysfs_emit_at(buf, len, "\n");
    5494             : 
    5495           0 :         return len;
    5496             : }
    5497             : SLAB_ATTR_RO(slabs_cpu_partial);
    5498             : 
    5499           0 : static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
    5500             : {
    5501           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
    5502             : }
    5503             : SLAB_ATTR_RO(reclaim_account);
    5504             : 
    5505           0 : static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
    5506             : {
    5507           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
    5508             : }
    5509             : SLAB_ATTR_RO(hwcache_align);
    5510             : 
    5511             : #ifdef CONFIG_ZONE_DMA
    5512             : static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
    5513             : {
    5514             :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
    5515             : }
    5516             : SLAB_ATTR_RO(cache_dma);
    5517             : #endif
    5518             : 
    5519           0 : static ssize_t usersize_show(struct kmem_cache *s, char *buf)
    5520             : {
    5521           0 :         return sysfs_emit(buf, "%u\n", s->usersize);
    5522             : }
    5523             : SLAB_ATTR_RO(usersize);
    5524             : 
    5525           0 : static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
    5526             : {
    5527           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
    5528             : }
    5529             : SLAB_ATTR_RO(destroy_by_rcu);
    5530             : 
    5531             : #ifdef CONFIG_SLUB_DEBUG
    5532           0 : static ssize_t slabs_show(struct kmem_cache *s, char *buf)
    5533             : {
    5534           0 :         return show_slab_objects(s, buf, SO_ALL);
    5535             : }
    5536             : SLAB_ATTR_RO(slabs);
    5537             : 
    5538           0 : static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
    5539             : {
    5540           0 :         return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
    5541             : }
    5542             : SLAB_ATTR_RO(total_objects);
    5543             : 
    5544           0 : static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
    5545             : {
    5546           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
    5547             : }
    5548             : SLAB_ATTR_RO(sanity_checks);
    5549             : 
    5550           0 : static ssize_t trace_show(struct kmem_cache *s, char *buf)
    5551             : {
    5552           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
    5553             : }
    5554             : SLAB_ATTR_RO(trace);
    5555             : 
    5556           0 : static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
    5557             : {
    5558           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
    5559             : }
    5560             : 
    5561             : SLAB_ATTR_RO(red_zone);
    5562             : 
    5563           0 : static ssize_t poison_show(struct kmem_cache *s, char *buf)
    5564             : {
    5565           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
    5566             : }
    5567             : 
    5568             : SLAB_ATTR_RO(poison);
    5569             : 
    5570           0 : static ssize_t store_user_show(struct kmem_cache *s, char *buf)
    5571             : {
    5572           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
    5573             : }
    5574             : 
    5575             : SLAB_ATTR_RO(store_user);
    5576             : 
    5577           0 : static ssize_t validate_show(struct kmem_cache *s, char *buf)
    5578             : {
    5579           0 :         return 0;
    5580             : }
    5581             : 
    5582           0 : static ssize_t validate_store(struct kmem_cache *s,
    5583             :                         const char *buf, size_t length)
    5584             : {
    5585           0 :         int ret = -EINVAL;
    5586             : 
    5587           0 :         if (buf[0] == '1') {
    5588           0 :                 ret = validate_slab_cache(s);
    5589           0 :                 if (ret >= 0)
    5590           0 :                         ret = length;
    5591             :         }
    5592           0 :         return ret;
    5593             : }
    5594             : SLAB_ATTR(validate);
    5595             : 
    5596             : #endif /* CONFIG_SLUB_DEBUG */
    5597             : 
    5598             : #ifdef CONFIG_FAILSLAB
    5599             : static ssize_t failslab_show(struct kmem_cache *s, char *buf)
    5600             : {
    5601             :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
    5602             : }
    5603             : SLAB_ATTR_RO(failslab);
    5604             : #endif
    5605             : 
    5606           0 : static ssize_t shrink_show(struct kmem_cache *s, char *buf)
    5607             : {
    5608           0 :         return 0;
    5609             : }
    5610             : 
    5611           0 : static ssize_t shrink_store(struct kmem_cache *s,
    5612             :                         const char *buf, size_t length)
    5613             : {
    5614           0 :         if (buf[0] == '1')
    5615           0 :                 kmem_cache_shrink(s);
    5616             :         else
    5617             :                 return -EINVAL;
    5618           0 :         return length;
    5619             : }
    5620             : SLAB_ATTR(shrink);
    5621             : 
    5622             : #ifdef CONFIG_NUMA
    5623             : static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
    5624             : {
    5625             :         return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
    5626             : }
    5627             : 
    5628             : static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
    5629             :                                 const char *buf, size_t length)
    5630             : {
    5631             :         unsigned int ratio;
    5632             :         int err;
    5633             : 
    5634             :         err = kstrtouint(buf, 10, &ratio);
    5635             :         if (err)
    5636             :                 return err;
    5637             :         if (ratio > 100)
    5638             :                 return -ERANGE;
    5639             : 
    5640             :         s->remote_node_defrag_ratio = ratio * 10;
    5641             : 
    5642             :         return length;
    5643             : }
    5644             : SLAB_ATTR(remote_node_defrag_ratio);
    5645             : #endif
    5646             : 
    5647             : #ifdef CONFIG_SLUB_STATS
    5648             : static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
    5649             : {
    5650             :         unsigned long sum  = 0;
    5651             :         int cpu;
    5652             :         int len = 0;
    5653             :         int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
    5654             : 
    5655             :         if (!data)
    5656             :                 return -ENOMEM;
    5657             : 
    5658             :         for_each_online_cpu(cpu) {
    5659             :                 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
    5660             : 
    5661             :                 data[cpu] = x;
    5662             :                 sum += x;
    5663             :         }
    5664             : 
    5665             :         len += sysfs_emit_at(buf, len, "%lu", sum);
    5666             : 
    5667             : #ifdef CONFIG_SMP
    5668             :         for_each_online_cpu(cpu) {
    5669             :                 if (data[cpu])
    5670             :                         len += sysfs_emit_at(buf, len, " C%d=%u",
    5671             :                                              cpu, data[cpu]);
    5672             :         }
    5673             : #endif
    5674             :         kfree(data);
    5675             :         len += sysfs_emit_at(buf, len, "\n");
    5676             : 
    5677             :         return len;
    5678             : }
    5679             : 
    5680             : static void clear_stat(struct kmem_cache *s, enum stat_item si)
    5681             : {
    5682             :         int cpu;
    5683             : 
    5684             :         for_each_online_cpu(cpu)
    5685             :                 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
    5686             : }
    5687             : 
    5688             : #define STAT_ATTR(si, text)                                     \
    5689             : static ssize_t text##_show(struct kmem_cache *s, char *buf)     \
    5690             : {                                                               \
    5691             :         return show_stat(s, buf, si);                           \
    5692             : }                                                               \
    5693             : static ssize_t text##_store(struct kmem_cache *s,               \
    5694             :                                 const char *buf, size_t length) \
    5695             : {                                                               \
    5696             :         if (buf[0] != '0')                                      \
    5697             :                 return -EINVAL;                                 \
    5698             :         clear_stat(s, si);                                      \
    5699             :         return length;                                          \
    5700             : }                                                               \
    5701             : SLAB_ATTR(text);                                                \
    5702             : 
    5703             : STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
    5704             : STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
    5705             : STAT_ATTR(FREE_FASTPATH, free_fastpath);
    5706             : STAT_ATTR(FREE_SLOWPATH, free_slowpath);
    5707             : STAT_ATTR(FREE_FROZEN, free_frozen);
    5708             : STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
    5709             : STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
    5710             : STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
    5711             : STAT_ATTR(ALLOC_SLAB, alloc_slab);
    5712             : STAT_ATTR(ALLOC_REFILL, alloc_refill);
    5713             : STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
    5714             : STAT_ATTR(FREE_SLAB, free_slab);
    5715             : STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
    5716             : STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
    5717             : STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
    5718             : STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
    5719             : STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
    5720             : STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
    5721             : STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
    5722             : STAT_ATTR(ORDER_FALLBACK, order_fallback);
    5723             : STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
    5724             : STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
    5725             : STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
    5726             : STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
    5727             : STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
    5728             : STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
    5729             : #endif  /* CONFIG_SLUB_STATS */
    5730             : 
    5731             : static struct attribute *slab_attrs[] = {
    5732             :         &slab_size_attr.attr,
    5733             :         &object_size_attr.attr,
    5734             :         &objs_per_slab_attr.attr,
    5735             :         &order_attr.attr,
    5736             :         &min_partial_attr.attr,
    5737             :         &cpu_partial_attr.attr,
    5738             :         &objects_attr.attr,
    5739             :         &objects_partial_attr.attr,
    5740             :         &partial_attr.attr,
    5741             :         &cpu_slabs_attr.attr,
    5742             :         &ctor_attr.attr,
    5743             :         &aliases_attr.attr,
    5744             :         &align_attr.attr,
    5745             :         &hwcache_align_attr.attr,
    5746             :         &reclaim_account_attr.attr,
    5747             :         &destroy_by_rcu_attr.attr,
    5748             :         &shrink_attr.attr,
    5749             :         &slabs_cpu_partial_attr.attr,
    5750             : #ifdef CONFIG_SLUB_DEBUG
    5751             :         &total_objects_attr.attr,
    5752             :         &slabs_attr.attr,
    5753             :         &sanity_checks_attr.attr,
    5754             :         &trace_attr.attr,
    5755             :         &red_zone_attr.attr,
    5756             :         &poison_attr.attr,
    5757             :         &store_user_attr.attr,
    5758             :         &validate_attr.attr,
    5759             : #endif
    5760             : #ifdef CONFIG_ZONE_DMA
    5761             :         &cache_dma_attr.attr,
    5762             : #endif
    5763             : #ifdef CONFIG_NUMA
    5764             :         &remote_node_defrag_ratio_attr.attr,
    5765             : #endif
    5766             : #ifdef CONFIG_SLUB_STATS
    5767             :         &alloc_fastpath_attr.attr,
    5768             :         &alloc_slowpath_attr.attr,
    5769             :         &free_fastpath_attr.attr,
    5770             :         &free_slowpath_attr.attr,
    5771             :         &free_frozen_attr.attr,
    5772             :         &free_add_partial_attr.attr,
    5773             :         &free_remove_partial_attr.attr,
    5774             :         &alloc_from_partial_attr.attr,
    5775             :         &alloc_slab_attr.attr,
    5776             :         &alloc_refill_attr.attr,
    5777             :         &alloc_node_mismatch_attr.attr,
    5778             :         &free_slab_attr.attr,
    5779             :         &cpuslab_flush_attr.attr,
    5780             :         &deactivate_full_attr.attr,
    5781             :         &deactivate_empty_attr.attr,
    5782             :         &deactivate_to_head_attr.attr,
    5783             :         &deactivate_to_tail_attr.attr,
    5784             :         &deactivate_remote_frees_attr.attr,
    5785             :         &deactivate_bypass_attr.attr,
    5786             :         &order_fallback_attr.attr,
    5787             :         &cmpxchg_double_fail_attr.attr,
    5788             :         &cmpxchg_double_cpu_fail_attr.attr,
    5789             :         &cpu_partial_alloc_attr.attr,
    5790             :         &cpu_partial_free_attr.attr,
    5791             :         &cpu_partial_node_attr.attr,
    5792             :         &cpu_partial_drain_attr.attr,
    5793             : #endif
    5794             : #ifdef CONFIG_FAILSLAB
    5795             :         &failslab_attr.attr,
    5796             : #endif
    5797             :         &usersize_attr.attr,
    5798             : 
    5799             :         NULL
    5800             : };
    5801             : 
    5802             : static const struct attribute_group slab_attr_group = {
    5803             :         .attrs = slab_attrs,
    5804             : };
    5805             : 
    5806           0 : static ssize_t slab_attr_show(struct kobject *kobj,
    5807             :                                 struct attribute *attr,
    5808             :                                 char *buf)
    5809             : {
    5810             :         struct slab_attribute *attribute;
    5811             :         struct kmem_cache *s;
    5812             :         int err;
    5813             : 
    5814           0 :         attribute = to_slab_attr(attr);
    5815           0 :         s = to_slab(kobj);
    5816             : 
    5817           0 :         if (!attribute->show)
    5818             :                 return -EIO;
    5819             : 
    5820           0 :         err = attribute->show(s, buf);
    5821             : 
    5822           0 :         return err;
    5823             : }
    5824             : 
    5825           0 : static ssize_t slab_attr_store(struct kobject *kobj,
    5826             :                                 struct attribute *attr,
    5827             :                                 const char *buf, size_t len)
    5828             : {
    5829             :         struct slab_attribute *attribute;
    5830             :         struct kmem_cache *s;
    5831             :         int err;
    5832             : 
    5833           0 :         attribute = to_slab_attr(attr);
    5834           0 :         s = to_slab(kobj);
    5835             : 
    5836           0 :         if (!attribute->store)
    5837             :                 return -EIO;
    5838             : 
    5839           0 :         err = attribute->store(s, buf, len);
    5840           0 :         return err;
    5841             : }
    5842             : 
    5843           0 : static void kmem_cache_release(struct kobject *k)
    5844             : {
    5845           0 :         slab_kmem_cache_release(to_slab(k));
    5846           0 : }
    5847             : 
    5848             : static const struct sysfs_ops slab_sysfs_ops = {
    5849             :         .show = slab_attr_show,
    5850             :         .store = slab_attr_store,
    5851             : };
    5852             : 
    5853             : static struct kobj_type slab_ktype = {
    5854             :         .sysfs_ops = &slab_sysfs_ops,
    5855             :         .release = kmem_cache_release,
    5856             : };
    5857             : 
    5858             : static struct kset *slab_kset;
    5859             : 
    5860             : static inline struct kset *cache_kset(struct kmem_cache *s)
    5861             : {
    5862          67 :         return slab_kset;
    5863             : }
    5864             : 
    5865             : #define ID_STR_LENGTH 64
    5866             : 
    5867             : /* Create a unique string id for a slab cache:
    5868             :  *
    5869             :  * Format       :[flags-]size
    5870             :  */
    5871          24 : static char *create_unique_id(struct kmem_cache *s)
    5872             : {
    5873          24 :         char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
    5874          24 :         char *p = name;
    5875             : 
    5876          24 :         BUG_ON(!name);
    5877             : 
    5878          24 :         *p++ = ':';
    5879             :         /*
    5880             :          * First flags affecting slabcache operations. We will only
    5881             :          * get here for aliasable slabs so we do not need to support
    5882             :          * too many flags. The flags here must cover all flags that
    5883             :          * are matched during merging to guarantee that the id is
    5884             :          * unique.
    5885             :          */
    5886          24 :         if (s->flags & SLAB_CACHE_DMA)
    5887           0 :                 *p++ = 'd';
    5888          24 :         if (s->flags & SLAB_CACHE_DMA32)
    5889           0 :                 *p++ = 'D';
    5890          24 :         if (s->flags & SLAB_RECLAIM_ACCOUNT)
    5891           1 :                 *p++ = 'a';
    5892          24 :         if (s->flags & SLAB_CONSISTENCY_CHECKS)
    5893           0 :                 *p++ = 'F';
    5894             :         if (s->flags & SLAB_ACCOUNT)
    5895             :                 *p++ = 'A';
    5896          24 :         if (p != name + 1)
    5897           1 :                 *p++ = '-';
    5898          24 :         p += sprintf(p, "%07u", s->size);
    5899             : 
    5900          24 :         BUG_ON(p > name + ID_STR_LENGTH - 1);
    5901          24 :         return name;
    5902             : }
    5903             : 
    5904          67 : static int sysfs_slab_add(struct kmem_cache *s)
    5905             : {
    5906             :         int err;
    5907             :         const char *name;
    5908         134 :         struct kset *kset = cache_kset(s);
    5909          67 :         int unmergeable = slab_unmergeable(s);
    5910             : 
    5911          67 :         if (!kset) {
    5912           0 :                 kobject_init(&s->kobj, &slab_ktype);
    5913           0 :                 return 0;
    5914             :         }
    5915             : 
    5916          67 :         if (!unmergeable && disable_higher_order_debug &&
    5917           0 :                         (slub_debug & DEBUG_METADATA_FLAGS))
    5918           0 :                 unmergeable = 1;
    5919             : 
    5920          67 :         if (unmergeable) {
    5921             :                 /*
    5922             :                  * Slabcache can never be merged so we can use the name proper.
    5923             :                  * This is typically the case for debug situations. In that
    5924             :                  * case we can catch duplicate names easily.
    5925             :                  */
    5926          43 :                 sysfs_remove_link(&slab_kset->kobj, s->name);
    5927          43 :                 name = s->name;
    5928             :         } else {
    5929             :                 /*
    5930             :                  * Create a unique name for the slab as a target
    5931             :                  * for the symlinks.
    5932             :                  */
    5933          24 :                 name = create_unique_id(s);
    5934             :         }
    5935             : 
    5936          67 :         s->kobj.kset = kset;
    5937          67 :         err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
    5938          67 :         if (err)
    5939             :                 goto out;
    5940             : 
    5941          67 :         err = sysfs_create_group(&s->kobj, &slab_attr_group);
    5942          67 :         if (err)
    5943             :                 goto out_del_kobj;
    5944             : 
    5945          67 :         if (!unmergeable) {
    5946             :                 /* Setup first alias */
    5947          24 :                 sysfs_slab_alias(s, s->name);
    5948             :         }
    5949             : out:
    5950          67 :         if (!unmergeable)
    5951          24 :                 kfree(name);
    5952             :         return err;
    5953             : out_del_kobj:
    5954           0 :         kobject_del(&s->kobj);
    5955           0 :         goto out;
    5956             : }
    5957             : 
    5958           0 : void sysfs_slab_unlink(struct kmem_cache *s)
    5959             : {
    5960           0 :         if (slab_state >= FULL)
    5961           0 :                 kobject_del(&s->kobj);
    5962           0 : }
    5963             : 
    5964           0 : void sysfs_slab_release(struct kmem_cache *s)
    5965             : {
    5966           0 :         if (slab_state >= FULL)
    5967           0 :                 kobject_put(&s->kobj);
    5968           0 : }
    5969             : 
    5970             : /*
    5971             :  * Need to buffer aliases during bootup until sysfs becomes
    5972             :  * available lest we lose that information.
    5973             :  */
    5974             : struct saved_alias {
    5975             :         struct kmem_cache *s;
    5976             :         const char *name;
    5977             :         struct saved_alias *next;
    5978             : };
    5979             : 
    5980             : static struct saved_alias *alias_list;
    5981             : 
    5982          58 : static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
    5983             : {
    5984             :         struct saved_alias *al;
    5985             : 
    5986          58 :         if (slab_state == FULL) {
    5987             :                 /*
    5988             :                  * If we have a leftover link then remove it.
    5989             :                  */
    5990          45 :                 sysfs_remove_link(&slab_kset->kobj, name);
    5991          45 :                 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
    5992             :         }
    5993             : 
    5994          13 :         al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
    5995          13 :         if (!al)
    5996             :                 return -ENOMEM;
    5997             : 
    5998          13 :         al->s = s;
    5999          13 :         al->name = name;
    6000          13 :         al->next = alias_list;
    6001          13 :         alias_list = al;
    6002          13 :         return 0;
    6003             : }
    6004             : 
    6005           1 : static int __init slab_sysfs_init(void)
    6006             : {
    6007             :         struct kmem_cache *s;
    6008             :         int err;
    6009             : 
    6010           1 :         mutex_lock(&slab_mutex);
    6011             : 
    6012           1 :         slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
    6013           1 :         if (!slab_kset) {
    6014           0 :                 mutex_unlock(&slab_mutex);
    6015           0 :                 pr_err("Cannot register slab subsystem.\n");
    6016           0 :                 return -ENOSYS;
    6017             :         }
    6018             : 
    6019           1 :         slab_state = FULL;
    6020             : 
    6021          65 :         list_for_each_entry(s, &slab_caches, list) {
    6022          64 :                 err = sysfs_slab_add(s);
    6023          64 :                 if (err)
    6024           0 :                         pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
    6025             :                                s->name);
    6026             :         }
    6027             : 
    6028          14 :         while (alias_list) {
    6029          13 :                 struct saved_alias *al = alias_list;
    6030             : 
    6031          13 :                 alias_list = alias_list->next;
    6032          13 :                 err = sysfs_slab_alias(al->s, al->name);
    6033          13 :                 if (err)
    6034           0 :                         pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
    6035             :                                al->name);
    6036          13 :                 kfree(al);
    6037             :         }
    6038             : 
    6039           1 :         mutex_unlock(&slab_mutex);
    6040           1 :         return 0;
    6041             : }
    6042             : 
    6043             : __initcall(slab_sysfs_init);
    6044             : #endif /* CONFIG_SYSFS */
    6045             : 
    6046             : #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
    6047             : static int slab_debugfs_show(struct seq_file *seq, void *v)
    6048             : {
    6049             :         struct loc_track *t = seq->private;
    6050             :         struct location *l;
    6051             :         unsigned long idx;
    6052             : 
    6053             :         idx = (unsigned long) t->idx;
    6054             :         if (idx < t->count) {
    6055             :                 l = &t->loc[idx];
    6056             : 
    6057             :                 seq_printf(seq, "%7ld ", l->count);
    6058             : 
    6059             :                 if (l->addr)
    6060             :                         seq_printf(seq, "%pS", (void *)l->addr);
    6061             :                 else
    6062             :                         seq_puts(seq, "<not-available>");
    6063             : 
    6064             :                 if (l->sum_time != l->min_time) {
    6065             :                         seq_printf(seq, " age=%ld/%llu/%ld",
    6066             :                                 l->min_time, div_u64(l->sum_time, l->count),
    6067             :                                 l->max_time);
    6068             :                 } else
    6069             :                         seq_printf(seq, " age=%ld", l->min_time);
    6070             : 
    6071             :                 if (l->min_pid != l->max_pid)
    6072             :                         seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
    6073             :                 else
    6074             :                         seq_printf(seq, " pid=%ld",
    6075             :                                 l->min_pid);
    6076             : 
    6077             :                 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
    6078             :                         seq_printf(seq, " cpus=%*pbl",
    6079             :                                  cpumask_pr_args(to_cpumask(l->cpus)));
    6080             : 
    6081             :                 if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
    6082             :                         seq_printf(seq, " nodes=%*pbl",
    6083             :                                  nodemask_pr_args(&l->nodes));
    6084             : 
    6085             :                 seq_puts(seq, "\n");
    6086             :         }
    6087             : 
    6088             :         if (!idx && !t->count)
    6089             :                 seq_puts(seq, "No data\n");
    6090             : 
    6091             :         return 0;
    6092             : }
    6093             : 
    6094             : static void slab_debugfs_stop(struct seq_file *seq, void *v)
    6095             : {
    6096             : }
    6097             : 
    6098             : static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
    6099             : {
    6100             :         struct loc_track *t = seq->private;
    6101             : 
    6102             :         t->idx = ++(*ppos);
    6103             :         if (*ppos <= t->count)
    6104             :                 return ppos;
    6105             : 
    6106             :         return NULL;
    6107             : }
    6108             : 
    6109             : static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
    6110             : {
    6111             :         struct loc_track *t = seq->private;
    6112             : 
    6113             :         t->idx = *ppos;
    6114             :         return ppos;
    6115             : }
    6116             : 
    6117             : static const struct seq_operations slab_debugfs_sops = {
    6118             :         .start  = slab_debugfs_start,
    6119             :         .next   = slab_debugfs_next,
    6120             :         .stop   = slab_debugfs_stop,
    6121             :         .show   = slab_debugfs_show,
    6122             : };
    6123             : 
    6124             : static int slab_debug_trace_open(struct inode *inode, struct file *filep)
    6125             : {
    6126             : 
    6127             :         struct kmem_cache_node *n;
    6128             :         enum track_item alloc;
    6129             :         int node;
    6130             :         struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
    6131             :                                                 sizeof(struct loc_track));
    6132             :         struct kmem_cache *s = file_inode(filep)->i_private;
    6133             :         unsigned long *obj_map;
    6134             : 
    6135             :         if (!t)
    6136             :                 return -ENOMEM;
    6137             : 
    6138             :         obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
    6139             :         if (!obj_map) {
    6140             :                 seq_release_private(inode, filep);
    6141             :                 return -ENOMEM;
    6142             :         }
    6143             : 
    6144             :         if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
    6145             :                 alloc = TRACK_ALLOC;
    6146             :         else
    6147             :                 alloc = TRACK_FREE;
    6148             : 
    6149             :         if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
    6150             :                 bitmap_free(obj_map);
    6151             :                 seq_release_private(inode, filep);
    6152             :                 return -ENOMEM;
    6153             :         }
    6154             : 
    6155             :         for_each_kmem_cache_node(s, node, n) {
    6156             :                 unsigned long flags;
    6157             :                 struct slab *slab;
    6158             : 
    6159             :                 if (!atomic_long_read(&n->nr_slabs))
    6160             :                         continue;
    6161             : 
    6162             :                 spin_lock_irqsave(&n->list_lock, flags);
    6163             :                 list_for_each_entry(slab, &n->partial, slab_list)
    6164             :                         process_slab(t, s, slab, alloc, obj_map);
    6165             :                 list_for_each_entry(slab, &n->full, slab_list)
    6166             :                         process_slab(t, s, slab, alloc, obj_map);
    6167             :                 spin_unlock_irqrestore(&n->list_lock, flags);
    6168             :         }
    6169             : 
    6170             :         bitmap_free(obj_map);
    6171             :         return 0;
    6172             : }
    6173             : 
    6174             : static int slab_debug_trace_release(struct inode *inode, struct file *file)
    6175             : {
    6176             :         struct seq_file *seq = file->private_data;
    6177             :         struct loc_track *t = seq->private;
    6178             : 
    6179             :         free_loc_track(t);
    6180             :         return seq_release_private(inode, file);
    6181             : }
    6182             : 
    6183             : static const struct file_operations slab_debugfs_fops = {
    6184             :         .open    = slab_debug_trace_open,
    6185             :         .read    = seq_read,
    6186             :         .llseek  = seq_lseek,
    6187             :         .release = slab_debug_trace_release,
    6188             : };
    6189             : 
    6190             : static void debugfs_slab_add(struct kmem_cache *s)
    6191             : {
    6192             :         struct dentry *slab_cache_dir;
    6193             : 
    6194             :         if (unlikely(!slab_debugfs_root))
    6195             :                 return;
    6196             : 
    6197             :         slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
    6198             : 
    6199             :         debugfs_create_file("alloc_traces", 0400,
    6200             :                 slab_cache_dir, s, &slab_debugfs_fops);
    6201             : 
    6202             :         debugfs_create_file("free_traces", 0400,
    6203             :                 slab_cache_dir, s, &slab_debugfs_fops);
    6204             : }
    6205             : 
    6206             : void debugfs_slab_release(struct kmem_cache *s)
    6207             : {
    6208             :         debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));
    6209             : }
    6210             : 
    6211             : static int __init slab_debugfs_init(void)
    6212             : {
    6213             :         struct kmem_cache *s;
    6214             : 
    6215             :         slab_debugfs_root = debugfs_create_dir("slab", NULL);
    6216             : 
    6217             :         list_for_each_entry(s, &slab_caches, list)
    6218             :                 if (s->flags & SLAB_STORE_USER)
    6219             :                         debugfs_slab_add(s);
    6220             : 
    6221             :         return 0;
    6222             : 
    6223             : }
    6224             : __initcall(slab_debugfs_init);
    6225             : #endif
    6226             : /*
    6227             :  * The /proc/slabinfo ABI
    6228             :  */
    6229             : #ifdef CONFIG_SLUB_DEBUG
    6230           0 : void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
    6231             : {
    6232           0 :         unsigned long nr_slabs = 0;
    6233           0 :         unsigned long nr_objs = 0;
    6234           0 :         unsigned long nr_free = 0;
    6235             :         int node;
    6236             :         struct kmem_cache_node *n;
    6237             : 
    6238           0 :         for_each_kmem_cache_node(s, node, n) {
    6239           0 :                 nr_slabs += node_nr_slabs(n);
    6240           0 :                 nr_objs += node_nr_objs(n);
    6241           0 :                 nr_free += count_partial(n, count_free);
    6242             :         }
    6243             : 
    6244           0 :         sinfo->active_objs = nr_objs - nr_free;
    6245           0 :         sinfo->num_objs = nr_objs;
    6246           0 :         sinfo->active_slabs = nr_slabs;
    6247           0 :         sinfo->num_slabs = nr_slabs;
    6248           0 :         sinfo->objects_per_slab = oo_objects(s->oo);
    6249           0 :         sinfo->cache_order = oo_order(s->oo);
    6250           0 : }
    6251             : 
    6252           0 : void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
    6253             : {
    6254           0 : }
    6255             : 
    6256           0 : ssize_t slabinfo_write(struct file *file, const char __user *buffer,
    6257             :                        size_t count, loff_t *ppos)
    6258             : {
    6259           0 :         return -EIO;
    6260             : }
    6261             : #endif /* CONFIG_SLUB_DEBUG */

Generated by: LCOV version 1.14