/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem-backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio, or an
 * otherwise RAM- or swap-backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
        return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
        return folio_is_file_lru(page_folio(page));
}
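
/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * the integer result doubles as a selector when picking an LRU list, as
 * folio_lru_list() below does. A lazily freed anonymous folio (MADV_FREE)
 * has PG_swapbacked cleared, so it reports as "file" here and ages on the
 * file LRU, where reclaim can drop it without swap I/O:
 *
 *	enum lru_list lru = folio_is_file_lru(folio) ?
 *				LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
 */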

static __always_inline void update_lru_size(struct lruvec *lruvec,
                                enum lru_list lru, enum zone_type zid,
                                long nr_pages)
{
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);

        __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
        __mod_zone_page_state(&pgdat->node_zones[zid],
                                NR_ZONE_LRU_BASE + lru, nr_pages);
#ifdef CONFIG_MEMCG
        mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
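
/*
 * Illustrative note (not part of the original comments): @nr_pages may be
 * negative; lruvec_del_folio() below passes -folio_nr_pages(), so the same
 * helper keeps the node-level lruvec counter, the per-zone counter and
 * (with CONFIG_MEMCG) the memcg's per-zone LRU size in sync for both
 * additions and removals.
 */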

/**
 * __folio_clear_lru_flags - Clear the LRU flags of a folio before releasing it.
 * @folio: The folio that was on the LRU and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

        __folio_clear_lru(folio);

        /* this shouldn't happen, so leave the flags to bad_page() */
        if (folio_test_active(folio) && folio_test_unevictable(folio))
                return;

        __folio_clear_active(folio);
        __folio_clear_unevictable(folio);
}

static __always_inline void __clear_page_lru_flags(struct page *page)
{
        __folio_clear_lru_flags(page_folio(page));
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
        enum lru_list lru;

        VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

        if (folio_test_unevictable(folio))
                return LRU_UNEVICTABLE;

        lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
        if (folio_test_active(folio))
                lru += LRU_ACTIVE;

        return lru;
}
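
/*
 * Illustrative sketch of the index arithmetic above (assumes the usual
 * enum lru_list layout from <linux/mmzone.h>, where LRU_ACTIVE == 1 and
 * the lists are ordered INACTIVE_ANON, ACTIVE_ANON, INACTIVE_FILE,
 * ACTIVE_FILE, UNEVICTABLE):
 *
 *	anon, inactive:	LRU_INACTIVE_ANON
 *	anon, active:	LRU_INACTIVE_ANON + LRU_ACTIVE == LRU_ACTIVE_ANON
 *	file, inactive:	LRU_INACTIVE_FILE
 *	file, active:	LRU_INACTIVE_FILE + LRU_ACTIVE == LRU_ACTIVE_FILE
 */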

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        if (lru != LRU_UNEVICTABLE)
                list_add(&folio->lru, &lruvec->lists[lru]);
}
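
/*
 * Illustrative note (inferred from the check above, not an original comment):
 * unevictable folios are still accounted via update_lru_size(), but are not
 * linked onto lruvec->lists[LRU_UNEVICTABLE]; while unevictable they are only
 * counted, not kept on a list.
 */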

static __always_inline void add_page_to_lru_list(struct page *page,
                                struct lruvec *lruvec)
{
        lruvec_add_folio(lruvec, page_folio(page));
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        /* This is not expected to be used on LRU_UNEVICTABLE */
        list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
                                struct lruvec *lruvec)
{
        lruvec_add_folio_tail(lruvec, page_folio(page));
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru != LRU_UNEVICTABLE)
                list_del(&folio->lru);
        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        -folio_nr_pages(folio));
}

static __always_inline void del_page_from_lru_list(struct page *page,
                                struct lruvec *lruvec)
{
        lruvec_del_folio(lruvec, page_folio(page));
}
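
/*
 * Illustrative sketch (hypothetical helper, not part of this header): moving
 * a folio between lists is a delete/re-add pair around the flag change, e.g.
 * activating an inactive folio while holding the lruvec lock:
 *
 *	static void example_activate(struct lruvec *lruvec, struct folio *folio)
 *	{
 *		lruvec_del_folio(lruvec, folio);
 *		folio_set_active(folio);
 *		lruvec_add_folio(lruvec, folio);
 *	}
 */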

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). The caller
 * should either keep holding the lock while using the returned pointer or
 * raise the anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
        if (anon_name)
                kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
        if (anon_name)
                kref_put(&anon_name->kref, anon_vma_name_free);
}
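
/*
 * Illustrative usage sketch (hypothetical caller): per the locking comment
 * above, take a reference before dropping mmap_lock if the name is used
 * afterwards:
 *
 *	mmap_read_lock(mm);
 *	anon_name = anon_vma_name(vma);
 *	anon_vma_name_get(anon_name);
 *	mmap_read_unlock(mm);
 *	... use anon_name->name (get/put both accept NULL) ...
 *	anon_vma_name_put(anon_name);
 */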

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
        /* Prevent anon_name refcount saturation early on */
        if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
                anon_vma_name_get(anon_name);
                return anon_name;
        }
        return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                                     struct vm_area_struct *new_vma)
{
        struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

        if (anon_name)
                new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
        /*
         * Not using anon_vma_name because it generates a warning if mmap_lock
         * is not held, which might be the case here.
         */
        if (!vma->vm_file)
                anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                                    struct anon_vma_name *anon_name2)
{
        if (anon_name1 == anon_name2)
                return true;

        return anon_name1 && anon_name2 &&
               !strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
        return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                                     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                                    struct anon_vma_name *anon_name2)
{
        return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_inc(&mm->tlb_flush_pending);
        /*
         * The only time this value is relevant is when there are indeed pages
         * to flush. And we'll only flush pages after changing them, which
         * requires the PTL.
         *
         * So the ordering here is:
         *
         *	atomic_inc(&mm->tlb_flush_pending);
         *	spin_lock(&ptl);
         *	...
         *	set_pte_at();
         *	spin_unlock(&ptl);
         *
         *	spin_lock(&ptl)
         *	mm_tlb_flush_pending();
         *	....
         *	spin_unlock(&ptl);
         *
         *	flush_tlb_range();
         *	atomic_dec(&mm->tlb_flush_pending);
         *
         * Where the increment is constrained by the PTL unlock, it thus
         * ensures that the increment is visible if the PTE modification is
         * visible. After all, if there is no PTE modification, nobody cares
         * about TLB flushes either.
         *
         * This very much relies on users (mm_tlb_flush_pending() and
         * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
         * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
         * locks (PPC) the unlock of one doesn't order against the lock of
         * another PTL.
         *
         * The decrement is ordered by the flush_tlb_range(), such that
         * mm_tlb_flush_pending() will not return false unless all flushes have
         * completed.
         */
}
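
/*
 * Illustrative writer-side sketch of the ordering described above
 * (hypothetical unmap path; in the kernel this is typically driven by the
 * mmu_gather code rather than open-coded):
 *
 *	inc_tlb_flush_pending(mm);
 *	spin_lock(ptl);
 *	... clear or modify PTEs ...
 *	spin_unlock(ptl);
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 */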

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * See inc_tlb_flush_pending().
         *
         * This cannot be smp_mb__before_atomic() because smp_mb() simply does
         * not order against TLB invalidate completion, which is what we need.
         *
         * Therefore we must rely on tlb_flush_*() to guarantee order.
         */
        atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * Must be called after having acquired the PTL; orders against that
         * PTL's release and therefore ensures that if we observe the modified
         * PTE we must also observe the increment from inc_tlb_flush_pending().
         *
         * That is, it only guarantees to return true if there is a flush
         * pending for _this_ PTL.
         */
        return atomic_read(&mm->tlb_flush_pending);
}
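
/*
 * Illustrative reader-side sketch (hypothetical caller): after taking the
 * PTL covering a PTE of interest, a pending flush means another CPU may
 * still hold a stale TLB entry for it, so act conservatively:
 *
 *	spin_lock(ptl);
 *	if (mm_tlb_flush_pending(mm))
 *		flush_tlb_range(vma, start, end);
 *	... rely on the PTE ...
 *	spin_unlock(ptl);
 */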

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
        /*
         * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
         * for which there is a TLB flush pending in order to guarantee
         * we've seen both that PTE modification and the increment.
         *
         * (no requirement on actually still holding the PTL, that is irrelevant)
         */
        return atomic_read(&mm->tlb_flush_pending) > 1;
}

#endif