LCOV - code coverage report
Current view: top level - include/linux - memcontrol.h (source / functions)
Test: coverage.info
Date: 2022-12-09 01:23:36
Coverage: Lines: 0 / 30 (0.0 %), Functions: 0 / 4 (0.0 %)

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-or-later */
       2             : /* memcontrol.h - Memory Controller
       3             :  *
       4             :  * Copyright IBM Corporation, 2007
       5             :  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
       6             :  *
       7             :  * Copyright 2007 OpenVZ SWsoft Inc
       8             :  * Author: Pavel Emelianov <xemul@openvz.org>
       9             :  */
      10             : 
      11             : #ifndef _LINUX_MEMCONTROL_H
      12             : #define _LINUX_MEMCONTROL_H
      13             : #include <linux/cgroup.h>
      14             : #include <linux/vm_event_item.h>
      15             : #include <linux/hardirq.h>
      16             : #include <linux/jump_label.h>
      17             : #include <linux/page_counter.h>
      18             : #include <linux/vmpressure.h>
      19             : #include <linux/eventfd.h>
      20             : #include <linux/mm.h>
      21             : #include <linux/vmstat.h>
      22             : #include <linux/writeback.h>
      23             : #include <linux/page-flags.h>
      24             : 
      25             : struct mem_cgroup;
      26             : struct obj_cgroup;
      27             : struct page;
      28             : struct mm_struct;
      29             : struct kmem_cache;
      30             : 
      31             : /* Cgroup-specific page state, on top of universal node page state */
      32             : enum memcg_stat_item {
      33             :         MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
      34             :         MEMCG_SOCK,
      35             :         MEMCG_PERCPU_B,
      36             :         MEMCG_VMALLOC,
      37             :         MEMCG_KMEM,
      38             :         MEMCG_NR_STAT,
      39             : };
      40             : 
      41             : enum memcg_memory_event {
      42             :         MEMCG_LOW,
      43             :         MEMCG_HIGH,
      44             :         MEMCG_MAX,
      45             :         MEMCG_OOM,
      46             :         MEMCG_OOM_KILL,
      47             :         MEMCG_OOM_GROUP_KILL,
      48             :         MEMCG_SWAP_HIGH,
      49             :         MEMCG_SWAP_MAX,
      50             :         MEMCG_SWAP_FAIL,
      51             :         MEMCG_NR_MEMORY_EVENTS,
      52             : };
      53             : 
      54             : struct mem_cgroup_reclaim_cookie {
      55             :         pg_data_t *pgdat;
      56             :         unsigned int generation;
      57             : };
      58             : 
      59             : #ifdef CONFIG_MEMCG
      60             : 
      61             : #define MEM_CGROUP_ID_SHIFT     16
      62             : #define MEM_CGROUP_ID_MAX       USHRT_MAX
      63             : 
      64             : struct mem_cgroup_id {
      65             :         int id;
      66             :         refcount_t ref;
      67             : };
      68             : 
      69             : /*
      70             :  * The per-memcg event counter is incremented on every pagein/pageout. With
      71             :  * THP, it is incremented by the number of pages. This counter is used to
      72             :  * trigger periodic events; it is simpler and cheaper than using jiffies
      73             :  * etc. to schedule periodic memcg work.
      74             :  */
      75             : enum mem_cgroup_events_target {
      76             :         MEM_CGROUP_TARGET_THRESH,
      77             :         MEM_CGROUP_TARGET_SOFTLIMIT,
      78             :         MEM_CGROUP_NTARGETS,
      79             : };
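
A sketch of how this counter drives the periodic checks, loosely modeled on
mem_cgroup_event_ratelimit() in mm/memcontrol.c (THRESHOLDS_EVENTS_TARGET is
that file's internal constant; treat the body as illustrative, not verbatim):

        static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
                                               enum mem_cgroup_events_target target)
        {
                unsigned long val, next;

                val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
                next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
                /* same wrap-safe comparison as time_after() */
                if ((long)(next - val) < 0) {
                        __this_cpu_write(memcg->vmstats_percpu->targets[target],
                                         val + THRESHOLDS_EVENTS_TARGET);
                        return true;
                }
                return false;
        }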
      80             : 
      81             : struct memcg_vmstats_percpu {
      82             :         /* Local (CPU and cgroup) page state & events */
      83             :         long                    state[MEMCG_NR_STAT];
      84             :         unsigned long           events[NR_VM_EVENT_ITEMS];
      85             : 
      86             :         /* Delta calculation for lockless upward propagation */
      87             :         long                    state_prev[MEMCG_NR_STAT];
      88             :         unsigned long           events_prev[NR_VM_EVENT_ITEMS];
      89             : 
      90             :         /* Cgroup1: threshold notifications & softlimit tree updates */
      91             :         unsigned long           nr_page_events;
      92             :         unsigned long           targets[MEM_CGROUP_NTARGETS];
      93             : };
      94             : 
      95             : struct memcg_vmstats {
      96             :         /* Aggregated (CPU and subtree) page state & events */
      97             :         long                    state[MEMCG_NR_STAT];
      98             :         unsigned long           events[NR_VM_EVENT_ITEMS];
      99             : 
     100             :         /* Pending child counts during tree propagation */
     101             :         long                    state_pending[MEMCG_NR_STAT];
     102             :         unsigned long           events_pending[NR_VM_EVENT_ITEMS];
     103             : };
     104             : 
     105             : struct mem_cgroup_reclaim_iter {
     106             :         struct mem_cgroup *position;
     107             :         /* scan generation, increased every round-trip */
     108             :         unsigned int generation;
     109             : };
     110             : 
     111             : /*
     112             :  * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
     113             :  * shrinkers, which have elements charged to this memcg.
     114             :  */
     115             : struct shrinker_info {
     116             :         struct rcu_head rcu;
     117             :         atomic_long_t *nr_deferred;
     118             :         unsigned long *map;
     119             : };
     120             : 
     121             : struct lruvec_stats_percpu {
     122             :         /* Local (CPU and cgroup) state */
     123             :         long state[NR_VM_NODE_STAT_ITEMS];
     124             : 
     125             :         /* Delta calculation for lockless upward propagation */
     126             :         long state_prev[NR_VM_NODE_STAT_ITEMS];
     127             : };
     128             : 
     129             : struct lruvec_stats {
     130             :         /* Aggregated (CPU and subtree) state */
     131             :         long state[NR_VM_NODE_STAT_ITEMS];
     132             : 
     133             :         /* Pending child counts during tree propagation */
     134             :         long state_pending[NR_VM_NODE_STAT_ITEMS];
     135             : };
     136             : 
     137             : /*
      138             :  * Per-node information in the memory controller.
     139             :  */
     140             : struct mem_cgroup_per_node {
     141             :         struct lruvec           lruvec;
     142             : 
     143             :         struct lruvec_stats_percpu __percpu     *lruvec_stats_percpu;
     144             :         struct lruvec_stats                     lruvec_stats;
     145             : 
     146             :         unsigned long           lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
     147             : 
     148             :         struct mem_cgroup_reclaim_iter  iter;
     149             : 
     150             :         struct shrinker_info __rcu      *shrinker_info;
     151             : 
     152             :         struct rb_node          tree_node;      /* RB tree node */
      153             :         unsigned long           usage_in_excess;/* Set to the value by which */
      154             :                                                 /* the soft limit is exceeded */
     155             :         bool                    on_tree;
     156             :         struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
     157             :                                                 /* use container_of        */
     158             : };
     159             : 
     160             : struct mem_cgroup_threshold {
     161             :         struct eventfd_ctx *eventfd;
     162             :         unsigned long threshold;
     163             : };
     164             : 
     165             : /* For threshold */
     166             : struct mem_cgroup_threshold_ary {
     167             :         /* An array index points to threshold just below or equal to usage. */
     168             :         int current_threshold;
     169             :         /* Size of entries[] */
     170             :         unsigned int size;
     171             :         /* Array of thresholds */
     172             :         struct mem_cgroup_threshold entries[];
     173             : };
     174             : 
     175             : struct mem_cgroup_thresholds {
     176             :         /* Primary thresholds array */
     177             :         struct mem_cgroup_threshold_ary *primary;
     178             :         /*
     179             :          * Spare threshold array.
     180             :          * This is needed to make mem_cgroup_unregister_event() "never fail".
     181             :          * It must be able to store at least primary->size - 1 entries.
     182             :          */
     183             :         struct mem_cgroup_threshold_ary *spare;
     184             : };
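
The spare array exists so that unregistering never needs to allocate. A
simplified sketch of the idea behind __mem_cgroup_usage_unregister_event() in
mm/memcontrol.c: surviving entries are copied into the preallocated spare,
which is then published as the new primary:

        struct mem_cgroup_threshold_ary *new = thresholds->spare;
        int i, size = 0;

        /* under thresholds_lock: keep everything but the removed eventfd */
        for (i = 0; i < thresholds->primary->size; i++) {
                if (thresholds->primary->entries[i].eventfd != eventfd)
                        new->entries[size++] = thresholds->primary->entries[i];
        }
        new->size = size;

        /* swap primary and spare; pending readers still see the old array */
        thresholds->spare = thresholds->primary;
        rcu_assign_pointer(thresholds->primary, new);
        synchronize_rcu();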
     185             : 
     186             : #if defined(CONFIG_SMP)
     187             : struct memcg_padding {
     188             :         char x[0];
     189             : } ____cacheline_internodealigned_in_smp;
     190             : #define MEMCG_PADDING(name)      struct memcg_padding name
     191             : #else
     192             : #define MEMCG_PADDING(name)
     193             : #endif
     194             : 
     195             : /*
      196             :  * Remember the four most recent foreign writebacks with dirty pages in this
     197             :  * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
     198             :  * one in a given round, we're likely to catch it later if it keeps
     199             :  * foreign-dirtying, so a fairly low count should be enough.
     200             :  *
     201             :  * See mem_cgroup_track_foreign_dirty_slowpath() for details.
     202             :  */
     203             : #define MEMCG_CGWB_FRN_CNT      4
     204             : 
     205             : struct memcg_cgwb_frn {
     206             :         u64 bdi_id;                     /* bdi->id of the foreign inode */
     207             :         int memcg_id;                   /* memcg->css.id of foreign inode */
     208             :         u64 at;                         /* jiffies_64 at the time of dirtying */
     209             :         struct wb_completion done;      /* tracks in-flight foreign writebacks */
     210             : };
     211             : 
     212             : /*
     213             :  * Bucket for arbitrarily byte-sized objects charged to a memory
     214             :  * cgroup. The bucket can be reparented in one piece when the cgroup
     215             :  * is destroyed, without having to round up the individual references
     216             :  * of all live memory objects in the wild.
     217             :  */
     218             : struct obj_cgroup {
     219             :         struct percpu_ref refcnt;
     220             :         struct mem_cgroup *memcg;
     221             :         atomic_t nr_charged_bytes;
     222             :         union {
     223             :                 struct list_head list; /* protected by objcg_lock */
     224             :                 struct rcu_head rcu;
     225             :         };
     226             : };
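
Reparenting "in one piece" amounts to redirecting every objcg that ever
belonged to the dying memcg, as sketched below (modeled on
memcg_reparent_objcgs() in mm/memcontrol.c):

        spin_lock_irq(&objcg_lock);

        /* the cgroup's own, still-live objcg joins the inherited list */
        list_add(&objcg->list, &memcg->objcg_list);
        /* point every inherited objcg at the parent ... */
        list_for_each_entry(iter, &memcg->objcg_list, list)
                WRITE_ONCE(iter->memcg, parent);
        /* ... and hand the whole list over */
        list_splice(&memcg->objcg_list, &parent->objcg_list);

        spin_unlock_irq(&objcg_lock);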
     227             : 
     228             : /*
     229             :  * The memory controller data structure. The memory controller controls both
     230             :  * page cache and RSS per cgroup. We would eventually like to provide
      231             :  * statistics based on the scheme developed by Rik van Riel for clock-pro,
     232             :  * to help the administrator determine what knobs to tune.
     233             :  */
     234             : struct mem_cgroup {
     235             :         struct cgroup_subsys_state css;
     236             : 
     237             :         /* Private memcg ID. Used to ID objects that outlive the cgroup */
     238             :         struct mem_cgroup_id id;
     239             : 
     240             :         /* Accounted resources */
     241             :         struct page_counter memory;             /* Both v1 & v2 */
     242             : 
     243             :         union {
     244             :                 struct page_counter swap;       /* v2 only */
     245             :                 struct page_counter memsw;      /* v1 only */
     246             :         };
     247             : 
     248             :         /* Legacy consumer-oriented counters */
     249             :         struct page_counter kmem;               /* v1 only */
     250             :         struct page_counter tcpmem;             /* v1 only */
     251             : 
     252             :         /* Range enforcement for interrupt charges */
     253             :         struct work_struct high_work;
     254             : 
     255             :         unsigned long soft_limit;
     256             : 
     257             :         /* vmpressure notifications */
     258             :         struct vmpressure vmpressure;
     259             : 
     260             :         /*
      261             :          * Should the OOM killer kill all tasks in this cgroup if it kills one?
     262             :          */
     263             :         bool oom_group;
     264             : 
     265             :         /* protected by memcg_oom_lock */
     266             :         bool            oom_lock;
     267             :         int             under_oom;
     268             : 
     269             :         int     swappiness;
     270             :         /* OOM-Killer disable */
     271             :         int             oom_kill_disable;
     272             : 
     273             :         /* memory.events and memory.events.local */
     274             :         struct cgroup_file events_file;
     275             :         struct cgroup_file events_local_file;
     276             : 
     277             :         /* handle for "memory.swap.events" */
     278             :         struct cgroup_file swap_events_file;
     279             : 
     280             :         /* protect arrays of thresholds */
     281             :         struct mutex thresholds_lock;
     282             : 
     283             :         /* thresholds for memory usage. RCU-protected */
     284             :         struct mem_cgroup_thresholds thresholds;
     285             : 
     286             :         /* thresholds for mem+swap usage. RCU-protected */
     287             :         struct mem_cgroup_thresholds memsw_thresholds;
     288             : 
     289             :         /* For oom notifier event fd */
     290             :         struct list_head oom_notify;
     291             : 
     292             :         /*
      293             :          * Should we move charges of a task when a task is moved into this
      294             :          * mem_cgroup? And what types of charges should we move?
     295             :          */
     296             :         unsigned long move_charge_at_immigrate;
     297             :         /* taken only while moving_account > 0 */
     298             :         spinlock_t              move_lock;
     299             :         unsigned long           move_lock_flags;
     300             : 
     301             :         MEMCG_PADDING(_pad1_);
     302             : 
     303             :         /* memory.stat */
     304             :         struct memcg_vmstats    vmstats;
     305             : 
     306             :         /* memory.events */
     307             :         atomic_long_t           memory_events[MEMCG_NR_MEMORY_EVENTS];
     308             :         atomic_long_t           memory_events_local[MEMCG_NR_MEMORY_EVENTS];
     309             : 
     310             :         unsigned long           socket_pressure;
     311             : 
     312             :         /* Legacy tcp memory accounting */
     313             :         bool                    tcpmem_active;
     314             :         int                     tcpmem_pressure;
     315             : 
     316             : #ifdef CONFIG_MEMCG_KMEM
     317             :         int kmemcg_id;
     318             :         struct obj_cgroup __rcu *objcg;
     319             :         /* list of inherited objcgs, protected by objcg_lock */
     320             :         struct list_head objcg_list;
     321             : #endif
     322             : 
     323             :         MEMCG_PADDING(_pad2_);
     324             : 
     325             :         /*
      326             :          * Set to > 0 while pages under this cgroup are being moved to another cgroup.
     327             :          */
     328             :         atomic_t                moving_account;
     329             :         struct task_struct      *move_lock_task;
     330             : 
     331             :         struct memcg_vmstats_percpu __percpu *vmstats_percpu;
     332             : 
     333             : #ifdef CONFIG_CGROUP_WRITEBACK
     334             :         struct list_head cgwb_list;
     335             :         struct wb_domain cgwb_domain;
     336             :         struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
     337             : #endif
     338             : 
      339             :         /* List of events which userspace wants to receive */
     340             :         struct list_head event_list;
     341             :         spinlock_t event_list_lock;
     342             : 
     343             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     344             :         struct deferred_split deferred_split_queue;
     345             : #endif
     346             : 
     347             :         struct mem_cgroup_per_node *nodeinfo[];
     348             : };
     349             : 
     350             : /*
      351             :  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
      352             :  * TODO: bigger machines may need a larger batch size.
     353             :  */
     354             : #define MEMCG_CHARGE_BATCH 32U
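
try_charge() overcharges the page_counter by up to this batch and caches the
surplus in a per-CPU "stock", so most charges never touch the shared
counters. A sketch of the fast path, abridged from consume_stock() in
mm/memcontrol.c (the per-CPU locking is elided here):

        static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
        {
                struct memcg_stock_pcp *stock;
                bool ret = false;

                if (nr_pages > MEMCG_CHARGE_BATCH)
                        return ret;

                stock = this_cpu_ptr(&memcg_stock);
                if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
                        stock->nr_pages -= nr_pages;
                        ret = true;
                }
                return ret;
        }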
     355             : 
     356             : extern struct mem_cgroup *root_mem_cgroup;
     357             : 
     358             : enum page_memcg_data_flags {
     359             :         /* page->memcg_data is a pointer to an objcgs vector */
     360             :         MEMCG_DATA_OBJCGS = (1UL << 0),
     361             :         /* page has been accounted as a non-slab kernel page */
     362             :         MEMCG_DATA_KMEM = (1UL << 1),
     363             :         /* the next bit after the last actual flag */
     364             :         __NR_MEMCG_DATA_FLAGS  = (1UL << 2),
     365             : };
     366             : 
     367             : #define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
     368             : 
     369             : static inline bool folio_memcg_kmem(struct folio *folio);
     370             : 
     371             : /*
      372             :  * After initialization, objcg->memcg always points at a valid memcg,
      373             :  * but it can be atomically swapped to the parent memcg.
     374             :  *
     375             :  * The caller must ensure that the returned memcg won't be released:
     376             :  * e.g. acquire the rcu_read_lock or css_set_lock.
     377             :  */
     378             : static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
     379             : {
     380             :         return READ_ONCE(objcg->memcg);
     381             : }
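
For a transient use, the RCU read lock alone is enough to keep the returned
memcg alive; an illustrative fragment (the stat update is arbitrary):

        rcu_read_lock();
        memcg = obj_cgroup_memcg(objcg);
        /* memcg cannot be released before rcu_read_unlock() */
        mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
        rcu_read_unlock();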
     382             : 
      383             : /**
      384             :  * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
      385             :  * @folio: Pointer to the folio.
      386             :  *
      387             :  * Returns a pointer to the memory cgroup associated with the folio,
      388             :  * or NULL. This function assumes that the folio is known to have a
      389             :  * proper memory cgroup pointer. It's not safe to call this function
      390             :  * on some types of folios, e.g. slab folios, ex-slab folios or
      391             :  * kmem folios.
      392             :  */
     393             : static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
     394             : {
     395             :         unsigned long memcg_data = folio->memcg_data;
     396             : 
     397             :         VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
     398             :         VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
     399             :         VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
     400             : 
     401             :         return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     402             : }
     403             : 
      404             : /**
      405             :  * __folio_objcg - Get the object cgroup associated with a kmem folio.
      406             :  * @folio: Pointer to the folio.
      407             :  *
      408             :  * Returns a pointer to the object cgroup associated with the folio,
      409             :  * or NULL. This function assumes that the folio is known to have a
      410             :  * proper object cgroup pointer. It's not safe to call this function
      411             :  * on some types of folios, e.g. slab folios, ex-slab folios or
      412             :  * LRU folios.
      413             :  */
     414             : static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
     415             : {
     416             :         unsigned long memcg_data = folio->memcg_data;
     417             : 
     418             :         VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
     419             :         VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
     420             :         VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
     421             : 
     422             :         return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     423             : }
     424             : 
      425             : /**
      426             :  * folio_memcg - Get the memory cgroup associated with a folio.
      427             :  * @folio: Pointer to the folio.
      428             :  *
      429             :  * Returns a pointer to the memory cgroup associated with the folio,
      430             :  * or NULL. This function assumes that the folio is known to have a
      431             :  * proper memory cgroup pointer. It's not safe to call this function
      432             :  * on some types of folios, e.g. slab folios or ex-slab folios.
      433             :  *
      434             :  * For a non-kmem folio any of the following ensures folio and memcg binding
      435             :  * stability:
      436             :  *
      437             :  * - the folio lock
      438             :  * - LRU isolation
      439             :  * - lock_page_memcg()
      440             :  * - exclusive reference
      441             :  *
      442             :  * For a kmem folio a caller should hold an RCU read lock to keep the
      443             :  * memcg associated with the folio from being released.
      444             :  */
     445             : static inline struct mem_cgroup *folio_memcg(struct folio *folio)
     446             : {
     447             :         if (folio_memcg_kmem(folio))
     448             :                 return obj_cgroup_memcg(__folio_objcg(folio));
     449             :         return __folio_memcg(folio);
     450             : }
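
For instance, holding the folio lock is enough to read a stable binding; an
illustrative fragment (the event counted is arbitrary):

        struct mem_cgroup *memcg;

        folio_lock(folio);
        memcg = folio_memcg(folio);     /* stable while the folio is locked */
        if (memcg)
                count_memcg_events(memcg, PGACTIVATE, 1);
        folio_unlock(folio);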
     451             : 
     452             : static inline struct mem_cgroup *page_memcg(struct page *page)
     453             : {
     454             :         return folio_memcg(page_folio(page));
     455             : }
     456             : 
     457             : /**
     458             :  * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
     459             :  * @folio: Pointer to the folio.
     460             :  *
     461             :  * This function assumes that the folio is known to have a
     462             :  * proper memory cgroup pointer. It's not safe to call this function
      463             :  * on some types of folios, e.g. slab folios or ex-slab folios.
     464             :  *
     465             :  * Return: A pointer to the memory cgroup associated with the folio,
     466             :  * or NULL.
     467             :  */
     468             : static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
     469             : {
     470             :         unsigned long memcg_data = READ_ONCE(folio->memcg_data);
     471             : 
     472             :         VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
     473             :         WARN_ON_ONCE(!rcu_read_lock_held());
     474             : 
     475             :         if (memcg_data & MEMCG_DATA_KMEM) {
     476             :                 struct obj_cgroup *objcg;
     477             : 
     478             :                 objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     479             :                 return obj_cgroup_memcg(objcg);
     480             :         }
     481             : 
     482             :         return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     483             : }
     484             : 
      485             : /**
      486             :  * page_memcg_check - get the memory cgroup associated with a page
      487             :  * @page: a pointer to the page struct
      488             :  *
      489             :  * Returns a pointer to the memory cgroup associated with the page,
      490             :  * or NULL. Unlike page_memcg(), this function can take any page
      491             :  * as an argument. It has to be used in cases when it's not known if a page
      492             :  * has an associated memory cgroup pointer, an object cgroups vector or
      493             :  * an object cgroup.
      494             :  *
      495             :  * For a non-kmem page any of the following ensures page and memcg binding
      496             :  * stability:
      497             :  *
      498             :  * - the page lock
      499             :  * - LRU isolation
      500             :  * - lock_page_memcg()
      501             :  * - exclusive reference
      502             :  *
      503             :  * For a kmem page a caller should hold an RCU read lock to keep the
      504             :  * memcg associated with the page from being released.
      505             :  */
     506             : static inline struct mem_cgroup *page_memcg_check(struct page *page)
     507             : {
     508             :         /*
     509             :          * Because page->memcg_data might be changed asynchronously
     510             :          * for slab pages, READ_ONCE() should be used here.
     511             :          */
     512             :         unsigned long memcg_data = READ_ONCE(page->memcg_data);
     513             : 
     514             :         if (memcg_data & MEMCG_DATA_OBJCGS)
     515             :                 return NULL;
     516             : 
     517             :         if (memcg_data & MEMCG_DATA_KMEM) {
     518             :                 struct obj_cgroup *objcg;
     519             : 
     520             :                 objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     521             :                 return obj_cgroup_memcg(objcg);
     522             :         }
     523             : 
     524             :         return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     525             : }
     526             : 
     527             : static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
     528             : {
     529             :         struct mem_cgroup *memcg;
     530             : 
     531             :         rcu_read_lock();
     532             : retry:
     533             :         memcg = obj_cgroup_memcg(objcg);
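                     :         /*
                     :          * css_tryget() can fail if this memcg is concurrently being
                     :          * destroyed; in that case the objcg is (or soon will be)
                     :          * reparented, so retrying eventually observes objcg->memcg
                     :          * pointing at an ancestor that can still be pinned.
                     :          */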
     534             :         if (unlikely(!css_tryget(&memcg->css)))
     535             :                 goto retry;
     536             :         rcu_read_unlock();
     537             : 
     538             :         return memcg;
     539             : }
     540             : 
     541             : #ifdef CONFIG_MEMCG_KMEM
      542             : /**
      543             :  * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
      544             :  * @folio: Pointer to the folio.
      545             :  *
      546             :  * Checks if the folio has the MemcgKmem flag set. The caller must ensure
      547             :  * that the folio has an associated memory cgroup. It's not safe to call
      548             :  * this function on some types of folios, e.g. slab folios.
      549             :  */
     550             : static inline bool folio_memcg_kmem(struct folio *folio)
     551             : {
     552             :         VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
     553             :         VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
     554             :         return folio->memcg_data & MEMCG_DATA_KMEM;
     555             : }
      556             : 
     558             : #else
     559             : static inline bool folio_memcg_kmem(struct folio *folio)
     560             : {
     561             :         return false;
     562             : }
     563             : 
     564             : #endif
     565             : 
     566             : static inline bool PageMemcgKmem(struct page *page)
     567             : {
     568             :         return folio_memcg_kmem(page_folio(page));
     569             : }
     570             : 
     571             : static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
     572             : {
     573             :         return (memcg == root_mem_cgroup);
     574             : }
     575             : 
     576             : static inline bool mem_cgroup_disabled(void)
     577             : {
     578             :         return !cgroup_subsys_enabled(memory_cgrp_subsys);
     579             : }
     580             : 
     581             : static inline void mem_cgroup_protection(struct mem_cgroup *root,
     582             :                                          struct mem_cgroup *memcg,
     583             :                                          unsigned long *min,
     584             :                                          unsigned long *low)
     585             : {
     586             :         *min = *low = 0;
     587             : 
     588             :         if (mem_cgroup_disabled())
     589             :                 return;
     590             : 
     591             :         /*
     592             :          * There is no reclaim protection applied to a targeted reclaim.
     593             :          * We are special casing this specific case here because
     594             :          * mem_cgroup_protected calculation is not robust enough to keep
     595             :          * the protection invariant for calculated effective values for
      596             :          * parallel reclaimers with different reclaim targets. This is
     597             :          * especially a problem for tail memcgs (as they have pages on LRU)
     598             :          * which would want to have effective values 0 for targeted reclaim
     599             :          * but a different value for external reclaim.
     600             :          *
     601             :          * Example
     602             :          * Let's have global and A's reclaim in parallel:
     603             :          *  |
     604             :          *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
     605             :          *  |\
     606             :          *  | C (low = 1G, usage = 2.5G)
     607             :          *  B (low = 1G, usage = 0.5G)
     608             :          *
     609             :          * For the global reclaim
     610             :          * A.elow = A.low
     611             :          * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
     612             :          * C.elow = min(C.usage, C.low)
     613             :          *
     614             :          * With the effective values resetting we have A reclaim
     615             :          * A.elow = 0
     616             :          * B.elow = B.low
     617             :          * C.elow = C.low
     618             :          *
     619             :          * If the global reclaim races with A's reclaim then
      620             :          * B.elow = C.elow = 0 because children_low_usage > A.elow
      621             :          * is possible and reclaiming B would violate the protection.
     623             :          */
     624             :         if (root == memcg)
     625             :                 return;
     626             : 
     627             :         *min = READ_ONCE(memcg->memory.emin);
     628             :         *low = READ_ONCE(memcg->memory.elow);
     629             : }
     630             : 
     631             : void mem_cgroup_calculate_protection(struct mem_cgroup *root,
     632             :                                      struct mem_cgroup *memcg);
     633             : 
     634             : static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
     635             : {
     636             :         /*
     637             :          * The root memcg doesn't account charges, and doesn't support
     638             :          * protection.
     639             :          */
     640             :         return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
      642             : }
     643             : 
     644             : static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
     645             : {
     646             :         if (!mem_cgroup_supports_protection(memcg))
     647             :                 return false;
     648             : 
     649             :         return READ_ONCE(memcg->memory.elow) >=
     650             :                 page_counter_read(&memcg->memory);
     651             : }
     652             : 
     653             : static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
     654             : {
     655             :         if (!mem_cgroup_supports_protection(memcg))
     656             :                 return false;
     657             : 
     658             :         return READ_ONCE(memcg->memory.emin) >=
     659             :                 page_counter_read(&memcg->memory);
     660             : }
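
Reclaim consults these helpers after computing the effective protection for
each memcg it visits. A sketch of the pattern, abridged from
shrink_node_memcgs() in mm/vmscan.c (sc is the scan_control):

        mem_cgroup_calculate_protection(target_memcg, memcg);

        if (mem_cgroup_below_min(memcg)) {
                /* hard protection: never reclaim from this memcg */
                continue;
        } else if (mem_cgroup_below_low(memcg)) {
                /* soft protection: honor it unless every memcg is protected */
                if (!sc->memcg_low_reclaim) {
                        sc->memcg_low_skipped = 1;
                        continue;
                }
                memcg_memory_event(memcg, MEMCG_LOW);
        }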
     661             : 
     662             : int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
     663             : 
     664             : /**
     665             :  * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
     666             :  * @folio: Folio to charge.
     667             :  * @mm: mm context of the allocating task.
     668             :  * @gfp: Reclaim mode.
     669             :  *
     670             :  * Try to charge @folio to the memcg that @mm belongs to, reclaiming
     671             :  * pages according to @gfp if necessary.  If @mm is NULL, try to
     672             :  * charge to the active memcg.
     673             :  *
     674             :  * Do not use this for folios allocated for swapin.
     675             :  *
     676             :  * Return: 0 on success. Otherwise, an error code is returned.
     677             :  */
     678             : static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
     679             :                                     gfp_t gfp)
     680             : {
     681             :         if (mem_cgroup_disabled())
     682             :                 return 0;
     683             :         return __mem_cgroup_charge(folio, mm, gfp);
     684             : }
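
A typical caller charges right after allocation and frees the folio on
failure; a sketch of a fault-path style call site (vma, addr and the
VM_FAULT_OOM handling stand in for the surrounding code):

        struct folio *folio;

        folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
        if (!folio)
                return VM_FAULT_OOM;
        if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) {
                folio_put(folio);
                return VM_FAULT_OOM;
        }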
     685             : 
     686             : int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
     687             :                                   gfp_t gfp, swp_entry_t entry);
     688             : void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
     689             : 
     690             : void __mem_cgroup_uncharge(struct folio *folio);
     691             : 
     692             : /**
     693             :  * mem_cgroup_uncharge - Uncharge a folio.
     694             :  * @folio: Folio to uncharge.
     695             :  *
     696             :  * Uncharge a folio previously charged with mem_cgroup_charge().
     697             :  */
     698             : static inline void mem_cgroup_uncharge(struct folio *folio)
     699             : {
     700             :         if (mem_cgroup_disabled())
     701             :                 return;
     702             :         __mem_cgroup_uncharge(folio);
     703             : }
     704             : 
     705             : void __mem_cgroup_uncharge_list(struct list_head *page_list);
     706             : static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
     707             : {
     708             :         if (mem_cgroup_disabled())
     709             :                 return;
     710             :         __mem_cgroup_uncharge_list(page_list);
     711             : }
     712             : 
     713             : void mem_cgroup_migrate(struct folio *old, struct folio *new);
     714             : 
     715             : /**
     716             :  * mem_cgroup_lruvec - get the lru list vector for a memcg & node
     717             :  * @memcg: memcg of the wanted lruvec
     718             :  * @pgdat: pglist_data
     719             :  *
     720             :  * Returns the lru list vector holding pages for a given @memcg &
     721             :  * @pgdat combination. This can be the node lruvec, if the memory
     722             :  * controller is disabled.
     723             :  */
     724             : static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
     725             :                                                struct pglist_data *pgdat)
     726             : {
     727             :         struct mem_cgroup_per_node *mz;
     728             :         struct lruvec *lruvec;
     729             : 
     730             :         if (mem_cgroup_disabled()) {
     731             :                 lruvec = &pgdat->__lruvec;
     732             :                 goto out;
     733             :         }
     734             : 
     735             :         if (!memcg)
     736             :                 memcg = root_mem_cgroup;
     737             : 
     738             :         mz = memcg->nodeinfo[pgdat->node_id];
     739             :         lruvec = &mz->lruvec;
     740             : out:
     741             :         /*
     742             :          * Since a node can be onlined after the mem_cgroup was created,
     743             :          * we have to be prepared to initialize lruvec->pgdat here;
     744             :          * and if offlined then reonlined, we need to reinitialize it.
     745             :          */
     746             :         if (unlikely(lruvec->pgdat != pgdat))
     747             :                 lruvec->pgdat = pgdat;
     748             :         return lruvec;
     749             : }
     750             : 
     751             : /**
     752             :  * folio_lruvec - return lruvec for isolating/putting an LRU folio
     753             :  * @folio: Pointer to the folio.
     754             :  *
      755             :  * This function relies on folio->memcg_data being stable.
     756             :  */
     757             : static inline struct lruvec *folio_lruvec(struct folio *folio)
     758             : {
     759             :         struct mem_cgroup *memcg = folio_memcg(folio);
     760             : 
     761             :         VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
     762             :         return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
     763             : }
     764             : 
     765             : struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
     766             : 
     767             : struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
     768             : 
     769             : struct lruvec *folio_lruvec_lock(struct folio *folio);
     770             : struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
     771             : struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
     772             :                                                 unsigned long *flags);
     773             : 
     774             : #ifdef CONFIG_DEBUG_VM
     775             : void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
     776             : #else
     777             : static inline
     778             : void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
     779             : {
     780             : }
     781             : #endif
     782             : 
     783             : static inline
      784             : struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
                     : {
     785             :         return css ? container_of(css, struct mem_cgroup, css) : NULL;
     786             : }
     787             : 
     788             : static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
     789             : {
     790             :         return percpu_ref_tryget(&objcg->refcnt);
     791             : }
     792             : 
     793             : static inline void obj_cgroup_get(struct obj_cgroup *objcg)
     794             : {
     795             :         percpu_ref_get(&objcg->refcnt);
     796             : }
     797             : 
     798             : static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
     799             :                                        unsigned long nr)
     800             : {
     801             :         percpu_ref_get_many(&objcg->refcnt, nr);
     802             : }
     803             : 
     804             : static inline void obj_cgroup_put(struct obj_cgroup *objcg)
     805             : {
     806             :         percpu_ref_put(&objcg->refcnt);
     807             : }
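
A common lookup pattern pins an objcg via RCU plus a tryget before using it
outside the read-side section; a sketch in the style of the
get_obj_cgroup_from_*() helpers in mm/memcontrol.c:

        struct obj_cgroup *objcg;

        rcu_read_lock();
        objcg = rcu_dereference(memcg->objcg);
        if (objcg && !obj_cgroup_tryget(objcg))
                objcg = NULL;
        rcu_read_unlock();

        if (objcg) {
                /* ... charge or uncharge object memory against objcg ... */
                obj_cgroup_put(objcg);
        }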
     808             : 
     809             : static inline void mem_cgroup_put(struct mem_cgroup *memcg)
     810             : {
     811             :         if (memcg)
     812             :                 css_put(&memcg->css);
     813             : }
     814             : 
     815             : #define mem_cgroup_from_counter(counter, member)        \
     816             :         container_of(counter, struct mem_cgroup, member)
     817             : 
     818             : struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
     819             :                                    struct mem_cgroup *,
     820             :                                    struct mem_cgroup_reclaim_cookie *);
     821             : void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
     822             : int mem_cgroup_scan_tasks(struct mem_cgroup *,
     823             :                           int (*)(struct task_struct *, void *), void *);
     824             : 
     825             : static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
     826             : {
     827             :         if (mem_cgroup_disabled())
     828             :                 return 0;
     829             : 
     830             :         return memcg->id.id;
     831             : }
     832             : struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
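
Because the ID can outlive the cgroup (see struct mem_cgroup_id above), it is
what gets stashed in space-constrained places such as swap entries. The
reverse lookup must run under RCU and revalidate the css; a sketch:

        unsigned short id = mem_cgroup_id(memcg);

        /* ... later, possibly long after the cgroup went offline ... */
        rcu_read_lock();
        memcg = mem_cgroup_from_id(id);
        if (memcg && !css_tryget_online(&memcg->css))
                memcg = NULL;
        rcu_read_unlock();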
     833             : 
     834             : static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
     835             : {
     836             :         return mem_cgroup_from_css(seq_css(m));
     837             : }
     838             : 
     839             : static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
     840             : {
     841             :         struct mem_cgroup_per_node *mz;
     842             : 
     843             :         if (mem_cgroup_disabled())
     844             :                 return NULL;
     845             : 
     846             :         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
     847             :         return mz->memcg;
     848             : }
     849             : 
     850             : /**
     851             :  * parent_mem_cgroup - find the accounting parent of a memcg
     852             :  * @memcg: memcg whose parent to find
     853             :  *
     854             :  * Returns the parent memcg, or NULL if this is the root or the memory
     855             :  * controller is in legacy no-hierarchy mode.
     856             :  */
     857             : static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
     858             : {
     859             :         return mem_cgroup_from_css(memcg->css.parent);
     860             : }
     861             : 
     862             : static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
     863             :                               struct mem_cgroup *root)
     864             : {
     865             :         if (root == memcg)
     866             :                 return true;
     867             :         return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
     868             : }
     869             : 
     870             : static inline bool mm_match_cgroup(struct mm_struct *mm,
     871             :                                    struct mem_cgroup *memcg)
     872             : {
     873             :         struct mem_cgroup *task_memcg;
     874             :         bool match = false;
     875             : 
     876             :         rcu_read_lock();
     877             :         task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
     878             :         if (task_memcg)
     879             :                 match = mem_cgroup_is_descendant(task_memcg, memcg);
     880             :         rcu_read_unlock();
     881             :         return match;
     882             : }
     883             : 
     884             : struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
     885             : ino_t page_cgroup_ino(struct page *page);
     886             : 
     887             : static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
     888             : {
     889             :         if (mem_cgroup_disabled())
     890             :                 return true;
     891             :         return !!(memcg->css.flags & CSS_ONLINE);
     892             : }
     893             : 
     894             : void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
     895             :                 int zid, int nr_pages);
     896             : 
     897             : static inline
     898             : unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
     899             :                 enum lru_list lru, int zone_idx)
     900             : {
     901             :         struct mem_cgroup_per_node *mz;
     902             : 
     903             :         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
     904             :         return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
     905             : }
     906             : 
     907             : void mem_cgroup_handle_over_high(void);
     908             : 
     909             : unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
     910             : 
     911             : unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
     912             : 
     913             : void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
     914             :                                 struct task_struct *p);
     915             : 
     916             : void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
     917             : 
     918             : static inline void mem_cgroup_enter_user_fault(void)
     919             : {
     920             :         WARN_ON(current->in_user_fault);
     921             :         current->in_user_fault = 1;
     922             : }
     923             : 
     924             : static inline void mem_cgroup_exit_user_fault(void)
     925             : {
     926             :         WARN_ON(!current->in_user_fault);
     927             :         current->in_user_fault = 0;
     928             : }
     929             : 
     930             : static inline bool task_in_memcg_oom(struct task_struct *p)
     931             : {
     932             :         return p->memcg_in_oom;
     933             : }
     934             : 
     935             : bool mem_cgroup_oom_synchronize(bool wait);
     936             : struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
     937             :                                             struct mem_cgroup *oom_domain);
     938             : void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
     939             : 
     940             : #ifdef CONFIG_MEMCG_SWAP
     941             : extern bool cgroup_memory_noswap;
     942             : #endif
     943             : 
     944             : void folio_memcg_lock(struct folio *folio);
     945             : void folio_memcg_unlock(struct folio *folio);
     946             : void lock_page_memcg(struct page *page);
     947             : void unlock_page_memcg(struct page *page);
     948             : 
     949             : void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
     950             : 
     951             : /* idx can be of type enum memcg_stat_item or node_stat_item */
     952             : static inline void mod_memcg_state(struct mem_cgroup *memcg,
     953             :                                    int idx, int val)
     954             : {
     955             :         unsigned long flags;
     956             : 
     957             :         local_irq_save(flags);
     958             :         __mod_memcg_state(memcg, idx, val);
     959             :         local_irq_restore(flags);
     960             : }
     961             : 
     962             : static inline void mod_memcg_page_state(struct page *page,
     963             :                                         int idx, int val)
     964             : {
     965             :         struct mem_cgroup *memcg;
     966             : 
     967             :         if (mem_cgroup_disabled())
     968             :                 return;
     969             : 
     970             :         rcu_read_lock();
     971             :         memcg = page_memcg(page);
     972             :         if (memcg)
     973             :                 mod_memcg_state(memcg, idx, val);
     974             :         rcu_read_unlock();
     975             : }
     976             : 
     977             : static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
     978             : {
     979             :         return READ_ONCE(memcg->vmstats.state[idx]);
     980             : }
     981             : 
     982             : static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
     983             :                                               enum node_stat_item idx)
     984             : {
     985             :         struct mem_cgroup_per_node *pn;
     986             : 
     987             :         if (mem_cgroup_disabled())
     988             :                 return node_page_state(lruvec_pgdat(lruvec), idx);
     989             : 
     990             :         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
     991             :         return READ_ONCE(pn->lruvec_stats.state[idx]);
     992             : }
     993             : 
     994             : static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
     995             :                                                     enum node_stat_item idx)
     996             : {
     997             :         struct mem_cgroup_per_node *pn;
     998             :         long x = 0;
     999             :         int cpu;
    1000             : 
    1001             :         if (mem_cgroup_disabled())
    1002             :                 return node_page_state(lruvec_pgdat(lruvec), idx);
    1003             : 
    1004             :         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
    1005             :         for_each_possible_cpu(cpu)
    1006             :                 x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
    1007             : #ifdef CONFIG_SMP
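                     :         /*
                     :          * Summing unsynchronized per-cpu deltas can transiently go
                     :          * negative; clamp to zero, a node stat can never really be
                     :          * negative.
                     :          */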
    1008             :         if (x < 0)
    1009             :                 x = 0;
    1010             : #endif
    1011             :         return x;
    1012             : }
    1013             : 
    1014             : void mem_cgroup_flush_stats(void);
    1015             : void mem_cgroup_flush_stats_delayed(void);
    1016             : 
    1017             : void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
    1018             :                               int val);
    1019             : void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
    1020             : 
    1021             : static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
    1022             :                                          int val)
    1023             : {
    1024             :         unsigned long flags;
    1025             : 
    1026             :         local_irq_save(flags);
    1027             :         __mod_lruvec_kmem_state(p, idx, val);
    1028             :         local_irq_restore(flags);
    1029             : }
    1030             : 
    1031             : static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
    1032             :                                           enum node_stat_item idx, int val)
    1033             : {
    1034             :         unsigned long flags;
    1035             : 
    1036             :         local_irq_save(flags);
    1037             :         __mod_memcg_lruvec_state(lruvec, idx, val);
    1038             :         local_irq_restore(flags);
    1039             : }
    1040             : 
    1041             : void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
    1042             :                           unsigned long count);
    1043             : 
    1044             : static inline void count_memcg_events(struct mem_cgroup *memcg,
    1045             :                                       enum vm_event_item idx,
    1046             :                                       unsigned long count)
    1047             : {
    1048             :         unsigned long flags;
    1049             : 
    1050             :         local_irq_save(flags);
    1051             :         __count_memcg_events(memcg, idx, count);
    1052             :         local_irq_restore(flags);
    1053             : }
    1054             : 
    1055             : static inline void count_memcg_page_event(struct page *page,
    1056             :                                           enum vm_event_item idx)
    1057             : {
    1058             :         struct mem_cgroup *memcg = page_memcg(page);
    1059             : 
    1060             :         if (memcg)
    1061             :                 count_memcg_events(memcg, idx, 1);
    1062             : }
    1063             : 
    1064             : static inline void count_memcg_event_mm(struct mm_struct *mm,
    1065             :                                         enum vm_event_item idx)
    1066             : {
    1067             :         struct mem_cgroup *memcg;
    1068             : 
    1069             :         if (mem_cgroup_disabled())
    1070             :                 return;
    1071             : 
    1072             :         rcu_read_lock();
    1073             :         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
    1074             :         if (likely(memcg))
    1075             :                 count_memcg_events(memcg, idx, 1);
    1076             :         rcu_read_unlock();
    1077             : }
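
/*
 * Editor's sketch (not part of the upstream header): fault paths typically
 * have only an mm_struct in hand, so a major fault is counted against the
 * owning task's memcg with something like:
 *
 *	count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
 *
 * The rcu_read_lock() above is what keeps mm->owner's memcg from being
 * freed while the event is counted; no reference is taken or dropped.
 */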
    1078             : 
    1079             : static inline void memcg_memory_event(struct mem_cgroup *memcg,
    1080             :                                       enum memcg_memory_event event)
    1081             : {
    1082             :         bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
    1083             :                           event == MEMCG_SWAP_FAIL;
    1084             : 
    1085             :         atomic_long_inc(&memcg->memory_events_local[event]);
    1086             :         if (!swap_event)
    1087             :                 cgroup_file_notify(&memcg->events_local_file);
    1088             : 
    1089             :         do {
    1090             :                 atomic_long_inc(&memcg->memory_events[event]);
    1091             :                 if (swap_event)
    1092             :                         cgroup_file_notify(&memcg->swap_events_file);
    1093             :                 else
    1094             :                         cgroup_file_notify(&memcg->events_file);
    1095             : 
    1096             :                 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
    1097             :                         break;
    1098             :                 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
    1099             :                         break;
    1100             :         } while ((memcg = parent_mem_cgroup(memcg)) &&
    1101             :                  !mem_cgroup_is_root(memcg));
    1102             : }
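
/*
 * Editor's note (not part of the upstream header): memory_events_local is
 * bumped only at the memcg where the event occurred, while memory_events
 * is propagated up the hierarchy, stopping before the root, unless the
 * cgroup1 interface or the cgroup2 "memory_localevents" mount option is
 * in use. So a hypothetical OOM kill in /A/B, reported as
 *
 *	memcg_memory_event(memcg_of_B, MEMCG_OOM_KILL);
 *
 * appears in both B's and A's memory.events but only in B's
 * memory.events.local.
 */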
    1103             : 
    1104             : static inline void memcg_memory_event_mm(struct mm_struct *mm,
    1105             :                                          enum memcg_memory_event event)
    1106             : {
    1107             :         struct mem_cgroup *memcg;
    1108             : 
    1109             :         if (mem_cgroup_disabled())
    1110             :                 return;
    1111             : 
    1112             :         rcu_read_lock();
    1113             :         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
    1114             :         if (likely(memcg))
    1115             :                 memcg_memory_event(memcg, event);
    1116             :         rcu_read_unlock();
    1117             : }
    1118             : 
    1119             : void split_page_memcg(struct page *head, unsigned int nr);
    1120             : 
    1121             : unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
    1122             :                                                 gfp_t gfp_mask,
    1123             :                                                 unsigned long *total_scanned);
    1124             : 
    1125             : #else /* CONFIG_MEMCG */
    1126             : 
    1127             : #define MEM_CGROUP_ID_SHIFT     0
    1128             : #define MEM_CGROUP_ID_MAX       0
    1129             : 
    1130             : static inline struct mem_cgroup *folio_memcg(struct folio *folio)
    1131             : {
    1132             :         return NULL;
    1133             : }
    1134             : 
    1135             : static inline struct mem_cgroup *page_memcg(struct page *page)
    1136             : {
    1137             :         return NULL;
    1138             : }
    1139             : 
    1140             : static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
    1141             : {
    1142           0 :         WARN_ON_ONCE(!rcu_read_lock_held());
    1143             :         return NULL;
    1144             : }
    1145             : 
    1146             : static inline struct mem_cgroup *page_memcg_check(struct page *page)
    1147             : {
    1148             :         return NULL;
    1149             : }
    1150             : 
    1151             : static inline bool folio_memcg_kmem(struct folio *folio)
    1152             : {
    1153             :         return false;
    1154             : }
    1155             : 
    1156             : static inline bool PageMemcgKmem(struct page *page)
    1157             : {
    1158             :         return false;
    1159             : }
    1160             : 
    1161             : static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
    1162             : {
    1163             :         return true;
    1164             : }
    1165             : 
    1166             : static inline bool mem_cgroup_disabled(void)
    1167             : {
    1168             :         return true;
    1169             : }
    1170             : 
    1171             : static inline void memcg_memory_event(struct mem_cgroup *memcg,
    1172             :                                       enum memcg_memory_event event)
    1173             : {
    1174             : }
    1175             : 
    1176             : static inline void memcg_memory_event_mm(struct mm_struct *mm,
    1177             :                                          enum memcg_memory_event event)
    1178             : {
    1179             : }
    1180             : 
    1181             : static inline void mem_cgroup_protection(struct mem_cgroup *root,
    1182             :                                          struct mem_cgroup *memcg,
    1183             :                                          unsigned long *min,
    1184             :                                          unsigned long *low)
    1185             : {
    1186           0 :         *min = *low = 0;
    1187             : }
    1188             : 
    1189             : static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
    1190             :                                                    struct mem_cgroup *memcg)
    1191             : {
    1192             : }
    1193             : 
    1194             : static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
    1195             : {
    1196             :         return false;
    1197             : }
    1198             : 
    1199             : static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
    1200             : {
    1201             :         return false;
    1202             : }
    1203             : 
    1204             : static inline int mem_cgroup_charge(struct folio *folio,
    1205             :                 struct mm_struct *mm, gfp_t gfp)
    1206             : {
    1207             :         return 0;
    1208             : }
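
/*
 * Editor's note (not part of the upstream header): these !CONFIG_MEMCG
 * stubs are shaped so call sites need no #ifdefs: charges "succeed" by
 * returning 0, lookups return NULL or fall back to node-level state, and
 * predicates give the permissive answer. A page-cache path can therefore
 * charge unconditionally:
 *
 *	int error = mem_cgroup_charge(folio, mm, gfp);
 *	if (error)
 *		return error;
 */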
    1209             : 
    1210             : static inline int mem_cgroup_swapin_charge_page(struct page *page,
    1211             :                         struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
    1212             : {
    1213             :         return 0;
    1214             : }
    1215             : 
    1216             : static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
    1217             : {
    1218             : }
    1219             : 
    1220             : static inline void mem_cgroup_uncharge(struct folio *folio)
    1221             : {
    1222             : }
    1223             : 
    1224             : static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
    1225             : {
    1226             : }
    1227             : 
    1228             : static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
    1229             : {
    1230             : }
    1231             : 
    1232             : static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
    1233             :                                                struct pglist_data *pgdat)
    1234             : {
    1235           0 :         return &pgdat->__lruvec;
    1236             : }
    1237             : 
    1238             : static inline struct lruvec *folio_lruvec(struct folio *folio)
    1239             : {
    1240           0 :         struct pglist_data *pgdat = folio_pgdat(folio);
    1241             :         return &pgdat->__lruvec;
    1242             : }
    1243             : 
    1244             : static inline
    1245             : void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
    1246             : {
    1247             : }
    1248             : 
    1249             : static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
    1250             : {
    1251             :         return NULL;
    1252             : }
    1253             : 
    1254             : static inline bool mm_match_cgroup(struct mm_struct *mm,
    1255             :                 struct mem_cgroup *memcg)
    1256             : {
    1257             :         return true;
    1258             : }
    1259             : 
    1260             : static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
    1261             : {
    1262             :         return NULL;
    1263             : }
    1264             : 
    1265             : static inline
    1266             : struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
    1267             : {
    1268             :         return NULL;
    1269             : }
    1270             : 
    1271             : static inline void mem_cgroup_put(struct mem_cgroup *memcg)
    1272             : {
    1273             : }
    1274             : 
    1275             : static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
    1276             : {
    1277             :         struct pglist_data *pgdat = folio_pgdat(folio);
    1278             : 
    1279             :         spin_lock(&pgdat->__lruvec.lru_lock);
    1280             :         return &pgdat->__lruvec;
    1281             : }
    1282             : 
    1283             : static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
    1284             : {
    1285           0 :         struct pglist_data *pgdat = folio_pgdat(folio);
    1286             : 
    1287           0 :         spin_lock_irq(&pgdat->__lruvec.lru_lock);
    1288             :         return &pgdat->__lruvec;
    1289             : }
    1290             : 
    1291             : static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
    1292             :                 unsigned long *flagsp)
    1293             : {
    1294           0 :         struct pglist_data *pgdat = folio_pgdat(folio);
    1295             : 
    1296           0 :         spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
    1297             :         return &pgdat->__lruvec;
    1298             : }
    1299             : 
    1300             : static inline struct mem_cgroup *
    1301             : mem_cgroup_iter(struct mem_cgroup *root,
    1302             :                 struct mem_cgroup *prev,
    1303             :                 struct mem_cgroup_reclaim_cookie *reclaim)
    1304             : {
    1305             :         return NULL;
    1306             : }
    1307             : 
    1308             : static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
    1309             :                                          struct mem_cgroup *prev)
    1310             : {
    1311             : }
    1312             : 
    1313             : static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
    1314             :                 int (*fn)(struct task_struct *, void *), void *arg)
    1315             : {
    1316             :         return 0;
    1317             : }
    1318             : 
    1319             : static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
    1320             : {
    1321             :         return 0;
    1322             : }
    1323             : 
    1324           0 : static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
    1325             : {
    1326           0 :         WARN_ON_ONCE(id);
    1327             :         /* XXX: This should always return root_mem_cgroup */
    1328           0 :         return NULL;
    1329             : }
    1330             : 
    1331             : static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
    1332             : {
    1333             :         return NULL;
    1334             : }
    1335             : 
    1336             : static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
    1337             : {
    1338             :         return NULL;
    1339             : }
    1340             : 
    1341             : static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
    1342             : {
    1343             :         return true;
    1344             : }
    1345             : 
    1346             : static inline
    1347             : unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
    1348             :                 enum lru_list lru, int zone_idx)
    1349             : {
    1350             :         return 0;
    1351             : }
    1352             : 
    1353             : static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
    1354             : {
    1355             :         return 0;
    1356             : }
    1357             : 
    1358             : static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
    1359             : {
    1360             :         return 0;
    1361             : }
    1362             : 
    1363             : static inline void
    1364             : mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
    1365             : {
    1366             : }
    1367             : 
    1368             : static inline void
    1369             : mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
    1370             : {
    1371             : }
    1372             : 
    1373             : static inline void lock_page_memcg(struct page *page)
    1374             : {
    1375             : }
    1376             : 
    1377             : static inline void unlock_page_memcg(struct page *page)
    1378             : {
    1379             : }
    1380             : 
    1381             : static inline void folio_memcg_lock(struct folio *folio)
    1382             : {
    1383             : }
    1384             : 
    1385             : static inline void folio_memcg_unlock(struct folio *folio)
    1386             : {
    1387             : }
    1388             : 
    1389             : static inline void mem_cgroup_handle_over_high(void)
    1390             : {
    1391             : }
    1392             : 
    1393             : static inline void mem_cgroup_enter_user_fault(void)
    1394             : {
    1395             : }
    1396             : 
    1397             : static inline void mem_cgroup_exit_user_fault(void)
    1398             : {
    1399             : }
    1400             : 
    1401             : static inline bool task_in_memcg_oom(struct task_struct *p)
    1402             : {
    1403             :         return false;
    1404             : }
    1405             : 
    1406             : static inline bool mem_cgroup_oom_synchronize(bool wait)
    1407             : {
    1408             :         return false;
    1409             : }
    1410             : 
    1411             : static inline struct mem_cgroup *mem_cgroup_get_oom_group(
    1412             :         struct task_struct *victim, struct mem_cgroup *oom_domain)
    1413             : {
    1414             :         return NULL;
    1415             : }
    1416             : 
    1417             : static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
    1418             : {
    1419             : }
    1420             : 
    1421             : static inline void __mod_memcg_state(struct mem_cgroup *memcg,
    1422             :                                      int idx,
    1423             :                                      int nr)
    1424             : {
    1425             : }
    1426             : 
    1427             : static inline void mod_memcg_state(struct mem_cgroup *memcg,
    1428             :                                    int idx,
    1429             :                                    int nr)
    1430             : {
    1431             : }
    1432             : 
    1433             : static inline void mod_memcg_page_state(struct page *page,
    1434             :                                         int idx, int val)
    1435             : {
    1436             : }
    1437             : 
    1438             : static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
    1439             : {
    1440             :         return 0;
    1441             : }
    1442             : 
    1443             : static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
    1444             :                                               enum node_stat_item idx)
    1445             : {
    1446           0 :         return node_page_state(lruvec_pgdat(lruvec), idx);
    1447             : }
    1448             : 
    1449             : static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
    1450             :                                                     enum node_stat_item idx)
    1451             : {
    1452             :         return node_page_state(lruvec_pgdat(lruvec), idx);
    1453             : }
    1454             : 
    1455             : static inline void mem_cgroup_flush_stats(void)
    1456             : {
    1457             : }
    1458             : 
    1459             : static inline void mem_cgroup_flush_stats_delayed(void)
    1460             : {
    1461             : }
    1462             : 
    1463             : static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
    1464             :                                             enum node_stat_item idx, int val)
    1465             : {
    1466             : }
    1467             : 
    1468             : static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
    1469             :                                            int val)
    1470             : {
    1471           0 :         struct page *page = virt_to_head_page(p);
    1472             : 
    1473           0 :         __mod_node_page_state(page_pgdat(page), idx, val);
    1474             : }
    1475             : 
    1476             : static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
    1477             :                                          int val)
    1478             : {
    1479             :         struct page *page = virt_to_head_page(p);
    1480             : 
    1481             :         mod_node_page_state(page_pgdat(page), idx, val);
    1482             : }
    1483             : 
    1484             : static inline void count_memcg_events(struct mem_cgroup *memcg,
    1485             :                                       enum vm_event_item idx,
    1486             :                                       unsigned long count)
    1487             : {
    1488             : }
    1489             : 
    1490             : static inline void __count_memcg_events(struct mem_cgroup *memcg,
    1491             :                                         enum vm_event_item idx,
    1492             :                                         unsigned long count)
    1493             : {
    1494             : }
    1495             : 
    1496             : static inline void count_memcg_page_event(struct page *page,
    1497             :                                           int idx)
    1498             : {
    1499             : }
    1500             : 
    1501             : static inline
    1502             : void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
    1503             : {
    1504             : }
    1505             : 
    1506             : static inline void split_page_memcg(struct page *head, unsigned int nr)
    1507             : {
    1508             : }
    1509             : 
    1510             : static inline
    1511             : unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
    1512             :                                             gfp_t gfp_mask,
    1513             :                                             unsigned long *total_scanned)
    1514             : {
    1515             :         return 0;
    1516             : }
    1517             : #endif /* CONFIG_MEMCG */
    1518             : 
    1519             : static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
    1520             : {
    1521           0 :         __mod_lruvec_kmem_state(p, idx, 1);
    1522             : }
    1523             : 
    1524             : static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
    1525             : {
    1526           0 :         __mod_lruvec_kmem_state(p, idx, -1);
    1527             : }
    1528             : 
    1529             : static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
    1530             : {
    1531             :         struct mem_cgroup *memcg;
    1532             : 
    1533           0 :         memcg = lruvec_memcg(lruvec);
    1534             :         if (!memcg)
    1535             :                 return NULL;
    1536             :         memcg = parent_mem_cgroup(memcg);
    1537             :         if (!memcg)
    1538             :                 return NULL;
    1539             :         return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
    1540             : }
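
/*
 * Editor's sketch (not part of the upstream header): parent_lruvec() turns
 * hierarchical accounting into a plain loop; reclaim-cost style code can
 * charge every ancestor with:
 *
 *	do {
 *		... account at this level ...
 *	} while ((lruvec = parent_lruvec(lruvec)));
 *
 * The walk includes the root memcg's lruvec and then stops, since
 * parent_mem_cgroup() returns NULL for the root.
 */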
    1541             : 
    1542             : static inline void unlock_page_lruvec(struct lruvec *lruvec)
    1543             : {
    1544             :         spin_unlock(&lruvec->lru_lock);
    1545             : }
    1546             : 
    1547             : static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
    1548             : {
    1549           0 :         spin_unlock_irq(&lruvec->lru_lock);
    1550             : }
    1551             : 
    1552             : static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
    1553             :                 unsigned long flags)
    1554             : {
    1555           0 :         spin_unlock_irqrestore(&lruvec->lru_lock, flags);
    1556             : }
    1557             : 
     1558             : /* Test requires a stable folio->memcg binding, see folio_memcg() */
    1559             : static inline bool folio_matches_lruvec(struct folio *folio,
    1560             :                 struct lruvec *lruvec)
    1561             : {
    1562           0 :         return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
    1563             :                lruvec_memcg(lruvec) == folio_memcg(folio);
    1564             : }
    1565             : 
     1566             : /* Don't lock again if the folio's lruvec is already locked */
    1567           0 : static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
    1568             :                 struct lruvec *locked_lruvec)
    1569             : {
    1570           0 :         if (locked_lruvec) {
    1571           0 :                 if (folio_matches_lruvec(folio, locked_lruvec))
    1572             :                         return locked_lruvec;
    1573             : 
    1574           0 :                 unlock_page_lruvec_irq(locked_lruvec);
    1575             :         }
    1576             : 
    1577           0 :         return folio_lruvec_lock_irq(folio);
    1578             : }
    1579             : 
     1580             : /* Don't lock again if the folio's lruvec is already locked */
    1581           0 : static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
    1582             :                 struct lruvec *locked_lruvec, unsigned long *flags)
    1583             : {
    1584           0 :         if (locked_lruvec) {
    1585           0 :                 if (folio_matches_lruvec(folio, locked_lruvec))
    1586             :                         return locked_lruvec;
    1587             : 
    1588           0 :                 unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
    1589             :         }
    1590             : 
    1591           0 :         return folio_lruvec_lock_irqsave(folio, flags);
    1592             : }
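
/*
 * Editor's sketch (not part of the upstream header): the relock helpers
 * let batched code walk a list of folios from assorted cgroups while
 * holding at most one lru_lock at a time, roughly:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(folio, list, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		... operate on folio under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */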
    1593             : 
    1594             : #ifdef CONFIG_CGROUP_WRITEBACK
    1595             : 
    1596             : struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
    1597             : void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
    1598             :                          unsigned long *pheadroom, unsigned long *pdirty,
    1599             :                          unsigned long *pwriteback);
    1600             : 
    1601             : void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
    1602             :                                              struct bdi_writeback *wb);
    1603             : 
    1604             : static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
    1605             :                                                   struct bdi_writeback *wb)
    1606             : {
    1607             :         if (mem_cgroup_disabled())
    1608             :                 return;
    1609             : 
    1610             :         if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
    1611             :                 mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
    1612             : }
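
/*
 * Editor's note (not part of the upstream header): this is deliberately a
 * two-branch fast path; only a folio dirtied against a "foreign" writeback
 * domain (e.g. after a task moved between cgroups) takes the slowpath
 * bookkeeping. The dirtying code invokes it roughly as:
 *
 *	mem_cgroup_track_foreign_dirty(folio, inode_to_wb(inode));
 */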
    1613             : 
    1614             : void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
    1615             : 
    1616             : #else   /* CONFIG_CGROUP_WRITEBACK */
    1617             : 
    1618             : static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
    1619             : {
    1620             :         return NULL;
    1621             : }
    1622             : 
    1623             : static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
    1624             :                                        unsigned long *pfilepages,
    1625             :                                        unsigned long *pheadroom,
    1626             :                                        unsigned long *pdirty,
    1627             :                                        unsigned long *pwriteback)
    1628             : {
    1629             : }
    1630             : 
    1631             : static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
    1632             :                                                   struct bdi_writeback *wb)
    1633             : {
    1634             : }
    1635             : 
    1636             : static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
    1637             : {
    1638             : }
    1639             : 
    1640             : #endif  /* CONFIG_CGROUP_WRITEBACK */
    1641             : 
    1642             : struct sock;
    1643             : bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
    1644             :                              gfp_t gfp_mask);
    1645             : void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
    1646             : #ifdef CONFIG_MEMCG
    1647             : extern struct static_key_false memcg_sockets_enabled_key;
    1648             : #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
    1649             : void mem_cgroup_sk_alloc(struct sock *sk);
    1650             : void mem_cgroup_sk_free(struct sock *sk);
    1651             : static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
    1652             : {
    1653             :         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
    1654             :                 return true;
    1655             :         do {
    1656             :                 if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
    1657             :                         return true;
    1658             :         } while ((memcg = parent_mem_cgroup(memcg)));
    1659             :         return false;
    1660             : }
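
/*
 * Editor's sketch (not part of the upstream header): the network stack
 * consults this behind the mem_cgroup_sockets_enabled static branch when
 * deciding whether a socket is under memory pressure, roughly:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		return true;
 *
 * On cgroup1 the per-memcg tcpmem_pressure flag decides; on cgroup2 the
 * jiffies-based socket_pressure window is checked up the hierarchy.
 */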
    1661             : 
    1662             : int alloc_shrinker_info(struct mem_cgroup *memcg);
    1663             : void free_shrinker_info(struct mem_cgroup *memcg);
    1664             : void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
    1665             : void reparent_shrinker_deferred(struct mem_cgroup *memcg);
    1666             : #else
    1667             : #define mem_cgroup_sockets_enabled 0
     1668             : static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
     1669             : static inline void mem_cgroup_sk_free(struct sock *sk) { }
    1670             : static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
    1671             : {
    1672             :         return false;
    1673             : }
    1674             : 
    1675             : static inline void set_shrinker_bit(struct mem_cgroup *memcg,
    1676             :                                     int nid, int shrinker_id)
    1677             : {
    1678             : }
    1679             : #endif
    1680             : 
    1681             : #ifdef CONFIG_MEMCG_KMEM
    1682             : bool mem_cgroup_kmem_disabled(void);
    1683             : int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
    1684             : void __memcg_kmem_uncharge_page(struct page *page, int order);
    1685             : 
    1686             : struct obj_cgroup *get_obj_cgroup_from_current(void);
    1687             : 
    1688             : int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
    1689             : void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
    1690             : 
    1691             : extern struct static_key_false memcg_kmem_enabled_key;
    1692             : 
    1693             : static inline bool memcg_kmem_enabled(void)
    1694             : {
    1695             :         return static_branch_likely(&memcg_kmem_enabled_key);
    1696             : }
    1697             : 
    1698             : static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1699             :                                          int order)
    1700             : {
    1701             :         if (memcg_kmem_enabled())
    1702             :                 return __memcg_kmem_charge_page(page, gfp, order);
    1703             :         return 0;
    1704             : }
    1705             : 
    1706             : static inline void memcg_kmem_uncharge_page(struct page *page, int order)
    1707             : {
    1708             :         if (memcg_kmem_enabled())
    1709             :                 __memcg_kmem_uncharge_page(page, order);
    1710             : }
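
/*
 * Editor's sketch (not part of the upstream header): the static branch
 * keeps both wrappers a patched-out no-op when kmem accounting is
 * disabled (e.g. booting with cgroup.memory=nokmem). The page allocator
 * charges __GFP_ACCOUNT allocations essentially like:
 *
 *	if (memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 */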
    1711             : 
     1712             : /*
     1713             :  * A helper for accessing a memcg's kmem_id; used to look up the
     1714             :  * corresponding memcg-aware LRU lists.
     1715             :  */
    1716             : static inline int memcg_kmem_id(struct mem_cgroup *memcg)
    1717             : {
    1718             :         return memcg ? memcg->kmemcg_id : -1;
    1719             : }
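
/*
 * Editor's note (not part of the upstream header): memcg-aware list_lru
 * code keys its per-memcg lists by this id; a lookup internal to
 * mm/list_lru.c is shaped roughly like:
 *
 *	struct list_lru_one *l =
 *		list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
 *
 * The -1 returned for a NULL memcg selects the memcg-unaware part of the
 * list_lru.
 */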
    1720             : 
    1721             : struct mem_cgroup *mem_cgroup_from_obj(void *p);
    1722             : 
    1723             : #else
    1724             : static inline bool mem_cgroup_kmem_disabled(void)
    1725             : {
    1726             :         return true;
    1727             : }
    1728             : 
    1729             : static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1730             :                                          int order)
    1731             : {
    1732             :         return 0;
    1733             : }
    1734             : 
    1735             : static inline void memcg_kmem_uncharge_page(struct page *page, int order)
    1736             : {
    1737             : }
    1738             : 
    1739             : static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1740             :                                            int order)
    1741             : {
    1742             :         return 0;
    1743             : }
    1744             : 
    1745             : static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
    1746             : {
    1747             : }
    1748             : 
    1749             : static inline bool memcg_kmem_enabled(void)
    1750             : {
    1751             :         return false;
    1752             : }
    1753             : 
    1754             : static inline int memcg_kmem_id(struct mem_cgroup *memcg)
    1755             : {
    1756             :         return -1;
    1757             : }
    1758             : 
    1759             : static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
    1760             : {
     1761             :         return NULL;
    1762             : }
    1763             : 
    1764             : #endif /* CONFIG_MEMCG_KMEM */
    1765             : 
    1766             : #endif /* _LINUX_MEMCONTROL_H */

Generated by: LCOV version 1.14