LCOV - code coverage report
Current view: top level - include/linux - slub_def.h (source / functions)
Test: coverage.info
Date: 2022-12-09 01:23:36
Coverage: lines 0 of 2 hit (0.0 %), functions 0 of 0 (-)
Note: the only two instrumented lines, the return statements in __obj_to_index() and obj_to_index(), were never executed.

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS
};

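/*
 * Illustrative sketch, not part of this header: when CONFIG_SLUB_STATS is
 * enabled, mm/slub.c bumps these counters with a small helper along the
 * lines of the one below (paraphrased here for context), and each item is
 * then readable via sysfs, e.g. /sys/kernel/slab/<cache>/alloc_fastpath.
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		// Racy on preemptible kernels, but acceptable for statistics.
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 */
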
/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated frozen slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned int stat[NR_SLUB_STAT_ITEMS];
#endif
};

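/*
 * Illustrative sketch, not part of this header: the allocation fastpath in
 * mm/slub.c consumes the freelist/tid pair with a single lockless
 * cmpxchg-double, which is why their layout and alignment matter.  Roughly
 * (paraphrased; get_freepointer() and next_tid() are mm/slub.c helpers):
 *
 *	object = READ_ONCE(c->freelist);
 *	// Replace the freelist head and bump tid in one atomic step;
 *	// a tid mismatch means another context raced us, so retry.
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				     object, tid,
 *				     get_freepointer(s, object), next_tid(tid)))
 *		goto redo;
 */
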
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */

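/*
 * Illustrative sketch, not part of this header: these accessors compile
 * away to NULL/no-ops when CONFIG_SLUB_CPU_PARTIAL is off.  mm/slub.c uses
 * them along these lines (paraphrased) to pop the head of the per-cpu
 * partial list:
 *
 *	slab = slub_percpu_partial(c);		// current head
 *	slub_set_percpu_partial(c, slab);	// advance: c->partial = slab->next
 *
 * while slub_percpu_partial_read_once() permits a lockless peek at the list
 * head from another CPU, e.g. for statistics.
 */
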
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};

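/*
 * Illustrative sketch, not part of this header: mm/slub.c packs the page
 * order into the high bits and the object count into the low bits of ->x,
 * and unpacks them with helpers along these lines (paraphrased; OO_SHIFT
 * is 16 there):
 *
 *	static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 *	{
 *		return x.x >> OO_SHIFT;
 *	}
 *
 *	static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 *	{
 *		return x.x & OO_MASK;	// OO_MASK == (1 << OO_SHIFT) - 1
 *	}
 */
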
/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* The size of an object including metadata */
	unsigned int object_size;	/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};

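/*
 * Illustrative sketch, not part of this header: one descriptor like the
 * above backs every cache created through the public slab API.  With a
 * hypothetical cache (the names here are examples only):
 *
 *	struct foo { int a, b; };
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	kmem_cache_free(foo_cache, f);
 *
 * ->object_size would be sizeof(struct foo), while ->size also covers
 * metadata such as the free pointer and any debug red zones.
 */
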
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache,
				const struct slab *slab, void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

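/*
 * Worked example (values are illustrative): for a slab based at B with
 * cache->size == 512 and slab->objects == 8, a pointer x == B + 1200 rounds
 * down to object == B + 1024, the third object, since (1200 % 512) == 176
 * is the offset of x within that object.  fixup_red_left() then skips past
 * the left red zone when SLAB_RED_ZONE debugging pads each object on the
 * left.
 */
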
/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}

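/*
 * Worked example (values are illustrative): reciprocal_divide(a, R) from
 * <linux/reciprocal_div.h> computes a / d with a multiply and a shift,
 * where R = reciprocal_value(d) is precomputed once and stored in
 * cache->reciprocal_size (here d == cache->size), avoiding a hardware
 * division on every lookup.  So with cache->size == 256:
 *
 *	idx = obj_to_index(cache, slab, slab_address(slab) + 768);  // == 3
 *
 * KFENCE-managed objects short-circuit to index 0 because they are not
 * laid out inside a regular slab.
 */
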
static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}
#endif /* _LINUX_SLUB_DEF_H */

Generated by: LCOV version 1.14