/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/* Reuses the bits in struct page */
struct slab {
        unsigned long __page_flags;

#if defined(CONFIG_SLAB)

        union {
                struct list_head slab_list;
                struct rcu_head rcu_head;
        };
        struct kmem_cache *slab_cache;
        void *freelist; /* array of free object indexes */
        void *s_mem;    /* first object */
        unsigned int active;

#elif defined(CONFIG_SLUB)

        union {
                struct list_head slab_list;
                struct rcu_head rcu_head;
#ifdef CONFIG_SLUB_CPU_PARTIAL
                struct {
                        struct slab *next;
                        int slabs;      /* Nr of slabs left */
                };
#endif
        };
        struct kmem_cache *slab_cache;
        /* Double-word boundary */
        void *freelist;         /* first free object */
        union {
                unsigned long counters;
                struct {
                        unsigned inuse:16;
                        unsigned objects:15;
                        unsigned frozen:1;
                };
        };
        unsigned int __unused;

#elif defined(CONFIG_SLOB)

        struct list_head slab_list;
        void *__unused_1;
        void *freelist;         /* first free block */
        long units;
        unsigned int __unused_2;

#else
#error "Unexpected slab allocator configured"
#endif

        atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
        unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)                                              \
        static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_list);   /* Ensure bit 0 is clear */
#ifndef CONFIG_SLOB
SLAB_MATCH(rcu_head, rcu_head);
#endif
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
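
/*
 * For illustration, the first SLAB_MATCH() above expands to (sketch):
 *
 *      static_assert(offsetof(struct page, flags) ==
 *                    offsetof(struct slab, __page_flags));
 *
 * so any reordering of either struct that breaks the overlay fails to
 * compile instead of corrupting memory at runtime.
 */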

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)       (_Generic((folio),                      \
        const struct folio *:   (const struct slab *)(folio),           \
        struct folio *:         (struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to a
 * folio in order to communicate with the rest of the mm.  Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)           (_Generic((s),                          \
        const struct slab *:    (const struct folio *)s,                \
        struct slab *:          (struct folio *)s))
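
/*
 * Usage sketch (hypothetical caller): the two helpers are inverses, so a
 * slab can round-trip through the folio layer without open-coded casts:
 *
 *      struct folio *folio = slab_folio(slab);
 *
 *      if (folio_test_slab(folio))
 *              slab = folio_slab(folio);
 */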

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or a single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)            (_Generic((p),                          \
        const struct page *:    (const struct slab *)(p),               \
        struct page *:          (struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
        return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
        folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
        folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
        __folio_clear_active(slab_folio(slab));
}
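
/*
 * Illustrative sketch of how an allocator might use these helpers when a
 * freshly allocated folio came from the pfmemalloc reserves (assumes the
 * existing page_is_pfmemalloc() query):
 *
 *      if (page_is_pfmemalloc(folio_page(folio, 0)))
 *              slab_set_pfmemalloc(folio_slab(folio));
 */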

static inline void *slab_address(const struct slab *slab)
{
        return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
        return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
        return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
        struct folio *folio = virt_to_folio(addr);

        if (!folio_test_slab(folio))
                return NULL;

        return folio_slab(folio);
}
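
/*
 * Note the NULL return doubles as a type test: large kmalloc() objects are
 * backed by plain page allocations rather than slabs. A caller sketch:
 *
 *      struct slab *slab = virt_to_slab(ptr);
 *
 *      if (!slab)
 *              return;
 *
 * memcg_slab_free_hook() below relies on exactly this to skip
 * kmalloc_large() objects.
 */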

static inline int slab_order(const struct slab *slab)
{
        return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
        return PAGE_SIZE << slab_order(slab);
}
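
/*
 * E.g. with 4K pages an order-2 slab yields PAGE_SIZE << 2 = 16384 bytes.
 */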

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;/* The original size of the object */
        unsigned int size;      /* The aligned/padded/added on size  */
        unsigned int align;     /* Alignment as calculated */
        slab_flags_t flags;     /* Active flags on the slab */
        unsigned int useroffset;/* Usercopy region offset */
        unsigned int usersize;  /* Usercopy region size */
        const char *name;       /* Slab name for sysfs */
        int refcount;           /* Use counter */
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;
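
/*
 * Early boot code can gate on the bootstrap state before touching the
 * allocator, e.g. (sketch; the kernel wraps this check as
 * slab_is_available()):
 *
 *      if (slab_state >= UP)
 *              buf = kmalloc(len, GFP_KERNEL);
 */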

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name[NR_KMALLOC_TYPES];
        unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
                        slab_flags_t flags, unsigned int useroffset,
                        unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        unsigned int size, slab_flags_t flags,
                        unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name)
{
        return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the listed objects
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
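
/*
 * Callers reach these through the public bulk API; a usage sketch
 * (kmem_cache_alloc_bulk() returns 0 on failure, the requested count on
 * success, and the allocation is all-or-nothing):
 *
 *      void *objs[16];
 *
 *      if (kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *              kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 */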

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
        return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
        return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
        if (IS_ENABLED(CONFIG_SLUB_DEBUG))
                VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
        if (__slub_debug_enabled())
                return s->flags & flags;
        return false;
}
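
/*
 * Usage sketch: debug-only paths stay behind the static key until a
 * slub_debug option is parsed, e.g. (simplified from cache_from_obj()
 * below):
 *
 *      if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *              cachep = virt_to_cache(x);
 */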

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
        unsigned long memcg_data = READ_ONCE(slab->memcg_data);

        VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
                                                        slab_page(slab));
        VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

        return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
                                 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
                     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
        kfree(slab_objcgs(slab));
        slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
        /*
         * For each accounted object there is an extra space which is used
         * to store obj_cgroup membership. Charge it too.
         */
        return s->size + sizeof(struct obj_cgroup *);
}
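
/*
 * E.g. on a 64-bit kernel a cache with s->size == 192 charges
 * 192 + sizeof(struct obj_cgroup *) = 200 bytes per accounted object.
 */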

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct list_lru *lru,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        struct obj_cgroup *objcg;

        if (!memcg_kmem_enabled())
                return true;

        if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
                return true;

        objcg = get_obj_cgroup_from_current();
        if (!objcg)
                return true;

        if (lru) {
                int ret;
                struct mem_cgroup *memcg;

                memcg = get_mem_cgroup_from_objcg(objcg);
                ret = memcg_list_lru_alloc(memcg, lru, flags);
                css_put(&memcg->css);

                if (ret)
                        goto out;
        }

        if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
                goto out;

        *objcgp = objcg;
        return true;
out:
        obj_cgroup_put(objcg);
        return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
        struct slab *slab;
        unsigned long off;
        size_t i;

        if (!memcg_kmem_enabled() || !objcg)
                return;

        for (i = 0; i < size; i++) {
                if (likely(p[i])) {
                        slab = virt_to_slab(p[i]);

                        if (!slab_objcgs(slab) &&
                            memcg_alloc_slab_cgroups(slab, s, flags,
                                                         false)) {
                                obj_cgroup_uncharge(objcg, obj_full_size(s));
                                continue;
                        }

                        off = obj_to_index(s, slab, p[i]);
                        obj_cgroup_get(objcg);
                        slab_objcgs(slab)[off] = objcg;
                        mod_objcg_state(objcg, slab_pgdat(slab),
                                        cache_vmstat_idx(s), obj_full_size(s));
                } else {
                        obj_cgroup_uncharge(objcg, obj_full_size(s));
                }
        }
        obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
                                        void **p, int objects)
{
        struct kmem_cache *s;
        struct obj_cgroup **objcgs;
        struct obj_cgroup *objcg;
        struct slab *slab;
        unsigned int off;
        int i;

        if (!memcg_kmem_enabled())
                return;

        for (i = 0; i < objects; i++) {
                if (unlikely(!p[i]))
                        continue;

                slab = virt_to_slab(p[i]);
                /* we could be given a kmalloc_large() object, skip those */
                if (!slab)
                        continue;

                objcgs = slab_objcgs(slab);
                if (!objcgs)
                        continue;

                if (!s_orig)
                        s = slab->slab_cache;
                else
                        s = s_orig;

                off = obj_to_index(s, slab, p[i]);
                objcg = objcgs[off];
                if (!objcg)
                        continue;

                objcgs[off] = NULL;
                obj_cgroup_uncharge(objcg, obj_full_size(s));
                mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
                                -obj_full_size(s));
                obj_cgroup_put(objcg);
        }
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
        return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
        return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
                                               struct kmem_cache *s, gfp_t gfp,
                                               bool new_slab)
{
        return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct list_lru *lru,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
                                        void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifndef CONFIG_SLOB
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct slab *slab;

        slab = virt_to_slab(obj);
        if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
                                        __func__))
                return NULL;
        return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
                                         struct kmem_cache *s, gfp_t gfp)
{
        if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
                memcg_alloc_slab_cgroups(slab, s, gfp, true);

        mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                            PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
                                           struct kmem_cache *s)
{
        if (memcg_kmem_enabled())
                memcg_free_slab_cgroups(slab);

        mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                            -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;

        if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
            !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
                return s;

        cachep = virt_to_cache(x);
        if (WARN(cachep && cachep != s,
                  "%s: Wrong slab cache. %s but object is from %s\n",
                  __func__, s->name, cachep->name))
                print_tracking(cachep, x);
        return cachep;
}
#endif /* CONFIG_SLOB */

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we need to store the freelist pointer back there
         * or track user information then we can only use the
         * space before that information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     struct list_lru *lru,
                                                     struct obj_cgroup **objcgp,
                                                     size_t size, gfp_t flags)
{
        flags &= gfp_allowed_mask;

        might_alloc(flags);

        if (should_failslab(s, flags))
                return NULL;

        if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
                return NULL;

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                                        struct obj_cgroup *objcg, gfp_t flags,
                                        size_t size, void **p, bool init)
{
        size_t i;

        flags &= gfp_allowed_mask;

        /*
         * As memory initialization might be integrated into KASAN,
         * kasan_slab_alloc and initialization memset must be
         * kept together to avoid discrepancies in behavior.
         *
         * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
         */
        for (i = 0; i < size; i++) {
                p[i] = kasan_slab_alloc(s, p[i], flags, init);
                if (p[i] && init && !kasan_has_integrated_init())
                        memset(p[i], 0, s->object_size);
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
        }

        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                 if ((__n = get_node(__s, __node)))
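
/*
 * Usage sketch (hypothetical locals; nr_partial exists under CONFIG_SLUB):
 *
 *      struct kmem_cache_node *n;
 *      unsigned long partial = 0;
 *      int node;
 *
 *      for_each_kmem_cache_node(s, node, n)
 *              partial += n->nr_partial;
 */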

#endif

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                        gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                        unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
                                &init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
                        return flags & __GFP_ZERO;
                return true;
        }
        return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
                                &init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
        void *kp_ptr;
        struct slab *kp_slab;
        void *kp_objp;
        unsigned long kp_data_offset;
        struct kmem_cache *kp_slab_cache;
        void *kp_ret;
        void *kp_stack[KS_ADDRS_COUNT];
        void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
                         const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
                         const struct slab *slab, bool to_user)
{
}
#endif

#endif /* MM_SLAB_H */