Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : /*
3 : * Macros for manipulating and testing page->flags
4 : */
5 :
6 : #ifndef PAGE_FLAGS_H
7 : #define PAGE_FLAGS_H
8 :
9 : #include <linux/types.h>
10 : #include <linux/bug.h>
11 : #include <linux/mmdebug.h>
12 : #ifndef __GENERATING_BOUNDS_H
13 : #include <linux/mm_types.h>
14 : #include <generated/bounds.h>
15 : #endif /* !__GENERATING_BOUNDS_H */
16 :
17 : /*
18 : * Various page->flags bits:
19 : *
20 : * PG_reserved is set for special pages. The "struct page" of such a page
21 : * should in general not be touched (e.g. set dirty) except by its owner.
22 : * Pages marked as PG_reserved include:
23 : * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
24 : * initrd, HW tables)
25 : * - Pages reserved or allocated early during boot (before the page allocator
26 : * was initialized). This includes (depending on the architecture) the
27 : * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
28 : * much more. Once (if ever) freed, PG_reserved is cleared and they will
29 : * be given to the page allocator.
30 : * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
31 : * to read/write these pages might end badly. Don't touch!
32 : * - The zero page(s)
33 : * - Pages not added to the page allocator when onlining a section because
34 : * they were excluded via the online_page_callback() or because they are
35 : * PG_hwpoison.
36 : * - Pages allocated in the context of kexec/kdump (loaded kernel image,
37 : * control pages, vmcoreinfo)
38 : * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are
39 : * not marked PG_reserved (as they might be in use by somebody else who does
40 : * not respect the caching strategy).
41 : * - Pages part of an offline section (struct pages of offline sections should
42 : * not be trusted as they will be initialized when first onlined).
43 : * - MCA pages on ia64
44 : * - Pages holding CPU notes for POWER Firmware Assisted Dump
45 : * - Device memory (e.g. PMEM, DAX, HMM)
46 : * Some PG_reserved pages will be excluded from the hibernation image.
47 : * PG_reserved does not, in general, hinder anybody from dumping or swapping
48 : * and is no longer required for remap_pfn_range(). ioremap might require it.
49 : * Consequently, PG_reserved for a page mapped into user space can indicate
50 : * the zero page, the vDSO, MMIO pages or device memory.
51 : *
52 : * The PG_private bitflag is set on pagecache pages if they contain filesystem
53 : * specific data (which is normally at page->private). It can be used by
54 : * private allocations for their own purposes.
55 : *
56 : * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
57 : * and cleared when writeback _starts_ or when read _completes_. PG_writeback
58 : * is set before writeback starts and cleared when it finishes.
59 : *
60 : * PG_locked also pins a page in pagecache, and blocks truncation of the file
61 : * while it is held.
62 : *
63 : * page_waitqueue(page) is a wait queue of all tasks waiting for the page
64 : * to become unlocked.
65 : *
66 : * PG_swapbacked is set when a page uses swap as its backing storage. These are
67 : * usually PageAnon or shmem pages, but please note that even anonymous pages
68 : * might lose their PG_swapbacked flag when they can simply be dropped (e.g. as
69 : * a result of MADV_FREE).
70 : *
71 : * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
72 : * file-backed pagecache (see mm/vmscan.c).
73 : *
74 : * PG_error is set to indicate that an I/O error occurred on this page.
75 : *
76 : * PG_arch_1 is an architecture specific page state bit. The generic code
77 : * guarantees that this bit is cleared for a page when it is first entered into
78 : * the page cache.
79 : *
80 : * PG_hwpoison indicates that a page got corrupted in hardware and contains
81 : * data with incorrect ECC bits that triggered a machine check. Accessing such
82 : * a page is not safe since it may cause another machine check. Don't touch!
83 : */
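/*
 * Illustrative sketch (not part of this header) of the PG_locked/PG_writeback
 * ordering described above, loosely following a typical ->writepage
 * implementation.  The "example_" names are hypothetical; set_page_writeback(),
 * unlock_page() and end_page_writeback() are the real kernel helpers.
 */
#if 0	/* example only */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	/* The VM hands us the page with PG_locked already set. */
	set_page_writeback(page);	/* PG_writeback set before I/O starts */
	unlock_page(page);		/* PG_locked cleared once writeback has started */
	/* ... submit the bio; the completion handler below runs later ... */
	return 0;
}

static void example_write_end_io(struct page *page)
{
	end_page_writeback(page);	/* PG_writeback cleared when the I/O finishes */
}
#endif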
84 :
85 : /*
86 : * Don't use the pageflags directly. Use the PageFoo macros.
87 : *
88 : * The page flags field is split into two parts, the main flags area
89 : * which extends from the low bits upwards, and the fields area which
90 : * extends from the high bits downwards.
91 : *
92 : * | FIELD | ... | FLAGS |
93 : * N-1 ^ 0
94 : * (NR_PAGEFLAGS)
95 : *
96 : * The fields area is reserved for fields mapping zone, node (for NUMA) and
97 : * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
98 : * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
99 : */
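/*
 * Standalone model (illustrative only, not kernel code) of the layout shown
 * above: flag bits grow upwards from bit 0 while packed fields (zone, node,
 * section) are carved out of the top of the word.  The widths below are
 * invented for the example; the real ones come from <generated/bounds.h> and
 * the layout macros in <linux/mm.h>.  Assumes a 64-bit unsigned long.
 */
#include <assert.h>

#define MODEL_ZONE_BITS		3
#define MODEL_ZONE_PGSHIFT	(64 - MODEL_ZONE_BITS)	/* field area at the top */
#define MODEL_ZONE_MASK		((1UL << MODEL_ZONE_BITS) - 1)
#define MODEL_PG_DIRTY		3			/* a "flag" bit near the bottom */

int main(void)
{
	unsigned long flags = 0;

	flags |= 1UL << MODEL_PG_DIRTY;			/* set a flag in the low bits */
	flags |= 2UL << MODEL_ZONE_PGSHIFT;		/* store zone id 2 in the field area */

	assert((flags >> MODEL_PG_DIRTY) & 1);
	assert(((flags >> MODEL_ZONE_PGSHIFT) & MODEL_ZONE_MASK) == 2);
	return 0;
}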
100 : enum pageflags {
101 : PG_locked, /* Page is locked. Don't touch. */
102 : PG_referenced,
103 : PG_uptodate,
104 : PG_dirty,
105 : PG_lru,
106 : PG_active,
107 : PG_workingset,
108 : PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
109 : PG_error,
110 : PG_slab,
111 : PG_owner_priv_1, /* Owner use. If pagecache, fs may use */
112 : PG_arch_1,
113 : PG_reserved,
114 : PG_private, /* If pagecache, has fs-private data */
115 : PG_private_2, /* If pagecache, has fs aux data */
116 : PG_writeback, /* Page is under writeback */
117 : PG_head, /* A head page */
118 : PG_mappedtodisk, /* Has blocks allocated on-disk */
119 : PG_reclaim, /* To be reclaimed asap */
120 : PG_swapbacked, /* Page is backed by RAM/swap */
121 : PG_unevictable, /* Page is "unevictable" */
122 : #ifdef CONFIG_MMU
123 : PG_mlocked, /* Page is vma mlocked */
124 : #endif
125 : #ifdef CONFIG_ARCH_USES_PG_UNCACHED
126 : PG_uncached, /* Page has been mapped as uncached */
127 : #endif
128 : #ifdef CONFIG_MEMORY_FAILURE
129 : PG_hwpoison, /* hardware poisoned page. Don't touch */
130 : #endif
131 : #if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
132 : PG_young,
133 : PG_idle,
134 : #endif
135 : #ifdef CONFIG_64BIT
136 : PG_arch_2,
137 : #endif
138 : #ifdef CONFIG_KASAN_HW_TAGS
139 : PG_skip_kasan_poison,
140 : #endif
141 : __NR_PAGEFLAGS,
142 :
143 : PG_readahead = PG_reclaim,
144 :
145 : /* Filesystems */
146 : PG_checked = PG_owner_priv_1,
147 :
148 : /* SwapBacked */
149 : PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
150 :
151 : /* Two page bits are conscripted by FS-Cache to maintain local caching
152 : * state. These bits are set on pages belonging to the netfs's inodes
153 : * when those inodes are being locally cached.
154 : */
155 : PG_fscache = PG_private_2, /* page backed by cache */
156 :
157 : /* XEN */
158 : /* Pinned in Xen as a read-only pagetable page. */
159 : PG_pinned = PG_owner_priv_1,
160 : /* Pinned as part of domain save (see xen_mm_pin_all()). */
161 : PG_savepinned = PG_dirty,
162 : /* Has a grant mapping of another (foreign) domain's page. */
163 : PG_foreign = PG_owner_priv_1,
164 : /* Remapped by swiotlb-xen. */
165 : PG_xen_remapped = PG_owner_priv_1,
166 :
167 : /* SLOB */
168 : PG_slob_free = PG_private,
169 :
170 : /* Compound pages. Stored in first tail page's flags */
171 : PG_double_map = PG_workingset,
172 :
173 : #ifdef CONFIG_MEMORY_FAILURE
174 : /*
175 : * Compound pages. Stored in first tail page's flags.
176 : * Indicates that at least one subpage is hwpoisoned in the
177 : * THP.
178 : */
179 : PG_has_hwpoisoned = PG_mappedtodisk,
180 : #endif
181 :
182 : /* non-lru isolated movable page */
183 : PG_isolated = PG_reclaim,
184 :
185 : /* Only valid for buddy pages. Used to track pages that are reported */
186 : PG_reported = PG_uptodate,
187 : };
188 :
189 : #define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
190 :
191 : #ifndef __GENERATING_BOUNDS_H
192 :
193 : #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
194 : DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
195 : hugetlb_free_vmemmap_enabled_key);
196 :
197 : static __always_inline bool hugetlb_free_vmemmap_enabled(void)
198 : {
199 : return static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
200 : &hugetlb_free_vmemmap_enabled_key);
201 : }
202 :
203 : /*
204 : * If the feature of freeing some vmemmap pages associated with each HugeTLB
205 : * page is enabled, the head vmemmap page frame is reused and all of the tail
206 : * vmemmap addresses map to the head vmemmap page frame (for further details,
207 : * refer to the figure at the head of mm/hugetlb_vmemmap.c). In other words,
208 : * there is more than one page struct with PG_head associated with each
209 : * HugeTLB page. We __know__ that there is only one head page struct; the
210 : * tail page structs with PG_head are fake head page structs. We need an
211 : * approach to distinguish between those two different types of page structs
212 : * so that compound_head() can return the real head page struct when the
213 : * parameter is a tail page struct that nevertheless has PG_head set.
214 : *
215 : * page_fixed_fake_head() returns the real head page struct if @page is a
216 : * fake head page; otherwise it returns @page, which can be either a true
217 : * head page or a tail page.
218 : */
219 : static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
220 : {
221 : if (!hugetlb_free_vmemmap_enabled())
222 : return page;
223 :
224 : /*
225 : * Only a struct page whose address is aligned to PAGE_SIZE may be a fake
226 : * head struct page. The alignment check avoids needlessly accessing the
227 : * fields (e.g. compound_head) of @page[1], which could touch a (possibly)
228 : * cold cacheline in some cases.
229 : */
230 : if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
231 : test_bit(PG_head, &page->flags)) {
232 : /*
233 : * We can safely access the fields of @page[1] because @page, which
234 : * has PG_head set, is part of a compound page composed of at least
235 : * two contiguous pages.
236 : */
237 : unsigned long head = READ_ONCE(page[1].compound_head);
238 :
239 : if (likely(head & 1))
240 : return (const struct page *)(head - 1);
241 : }
242 : return page;
243 : }
244 : #else
245 : static inline const struct page *page_fixed_fake_head(const struct page *page)
246 : {
247 : return page;
248 : }
249 :
250 : static inline bool hugetlb_free_vmemmap_enabled(void)
251 : {
252 : return false;
253 : }
254 : #endif
255 :
256 : static __always_inline int page_is_fake_head(struct page *page)
257 : {
258 11 : return page_fixed_fake_head(page) != page;
259 : }
260 :
261 0 : static inline unsigned long _compound_head(const struct page *page)
262 : {
263 5547 : unsigned long head = READ_ONCE(page->compound_head);
264 :
265 5547 : if (unlikely(head & 1))
266 379 : return head - 1;
267 5168 : return (unsigned long)page_fixed_fake_head(page);
268 : }
269 :
270 : #define compound_head(page) ((typeof(page))_compound_head(page))
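/*
 * Standalone model (illustrative only, not kernel code) of the tagged-pointer
 * encoding decoded by _compound_head() above: set_compound_head(), defined
 * further down in this file, stores the head page pointer with bit 0 set in
 * ->compound_head, so a tail page decodes as "head - 1".
 */
#include <assert.h>
#include <stdint.h>

struct model_page { uintptr_t compound_head; };

static struct model_page *model_compound_head(struct model_page *p)
{
	uintptr_t head = p->compound_head;

	return (head & 1) ? (struct model_page *)(head - 1) : p;
}

int main(void)
{
	struct model_page pages[4] = { { 0 } };
	int i;

	/* mimic set_compound_head(): tail pages point at pages[0], with bit 0 set */
	for (i = 1; i < 4; i++)
		pages[i].compound_head = (uintptr_t)&pages[0] + 1;

	assert(model_compound_head(&pages[0]) == &pages[0]);	/* head maps to itself */
	assert(model_compound_head(&pages[2]) == &pages[0]);	/* tail maps to head */
	return 0;
}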
271 :
272 : /**
273 : * page_folio - Converts from page to folio.
274 : * @p: The page.
275 : *
276 : * Every page is part of a folio. This function cannot be called on a
277 : * NULL pointer.
278 : *
279 : * Context: Neither a reference nor a lock is required on @p. If the caller
280 : * does not hold a reference, this call may race with a folio split, so
281 : * it should re-check the folio still contains this page after gaining
282 : * a reference on the folio.
283 : * Return: The folio which contains this page.
284 : */
285 : #define page_folio(p) (_Generic((p), \
286 : const struct page *: (const struct folio *)_compound_head(p), \
287 : struct page *: (struct folio *)_compound_head(p)))
288 :
289 : /**
290 : * folio_page - Return a page from a folio.
291 : * @folio: The folio.
292 : * @n: The page number to return.
293 : *
294 : * @n is relative to the start of the folio. This function does not
295 : * check that the page number lies within @folio; the caller is presumed
296 : * to have a reference to the page.
297 : */
298 : #define folio_page(folio, n) nth_page(&(folio)->page, n)
299 :
300 : static __always_inline int PageTail(struct page *page)
301 : {
302 0 : return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
303 : }
304 :
305 : static __always_inline int PageCompound(struct page *page)
306 : {
307 781 : return test_bit(PG_head, &page->flags) ||
308 253 : READ_ONCE(page->compound_head) & 1;
309 : }
310 :
311 : #define PAGE_POISON_PATTERN -1l
312 : static inline int PagePoisoned(const struct page *page)
313 : {
314 0 : return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
315 : }
316 :
317 : #ifdef CONFIG_DEBUG_VM
318 : void page_init_poison(struct page *page, size_t size);
319 : #else
320 : static inline void page_init_poison(struct page *page, size_t size)
321 : {
322 : }
323 : #endif
324 :
325 : static unsigned long *folio_flags(struct folio *folio, unsigned n)
326 : {
327 7015 : struct page *page = &folio->page;
328 :
329 : VM_BUG_ON_PGFLAGS(PageTail(page), page);
330 : VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
331 457 : return &page[n].flags;
332 : }
333 :
334 : /*
335 : * Page flags policies wrt compound pages
336 : *
337 : * PF_POISONED_CHECK:
338 : * check if this struct page is poisoned/uninitialized
339 : *
340 : * PF_ANY:
341 : * the page flag is relevant for small, head and tail pages.
342 : *
343 : * PF_HEAD:
344 : * for compound pages, all operations related to the page flag are
345 : * applied to the head page.
346 : *
347 : * PF_ONLY_HEAD:
348 : * for compound pages, callers only ever operate on the head page.
349 : *
350 : * PF_NO_TAIL:
351 : * modifications of the page flag must be done on small or head pages,
352 : * checks can be done on tail pages too.
353 : *
354 : * PF_NO_COMPOUND:
355 : * the page flag is not relevant for compound pages.
356 : *
357 : * PF_SECOND:
358 : * the page flag is stored in the first tail page.
359 : */
360 : #define PF_POISONED_CHECK(page) ({ \
361 : VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \
362 : page; })
363 : #define PF_ANY(page, enforce) PF_POISONED_CHECK(page)
364 : #define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
365 : #define PF_ONLY_HEAD(page, enforce) ({ \
366 : VM_BUG_ON_PGFLAGS(PageTail(page), page); \
367 : PF_POISONED_CHECK(page); })
368 : #define PF_NO_TAIL(page, enforce) ({ \
369 : VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
370 : PF_POISONED_CHECK(compound_head(page)); })
371 : #define PF_NO_COMPOUND(page, enforce) ({ \
372 : VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
373 : PF_POISONED_CHECK(page); })
374 : #define PF_SECOND(page, enforce) ({ \
375 : VM_BUG_ON_PGFLAGS(!PageHead(page), page); \
376 : PF_POISONED_CHECK(&page[1]); })
377 :
378 : /* Which page is the flag stored in */
379 : #define FOLIO_PF_ANY 0
380 : #define FOLIO_PF_HEAD 0
381 : #define FOLIO_PF_ONLY_HEAD 0
382 : #define FOLIO_PF_NO_TAIL 0
383 : #define FOLIO_PF_NO_COMPOUND 0
384 : #define FOLIO_PF_SECOND 1
385 :
386 : /*
387 : * Macros to create function definitions for page flags
388 : */
389 : #define TESTPAGEFLAG(uname, lname, policy) \
390 : static __always_inline bool folio_test_##lname(struct folio *folio) \
391 : { return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
392 : static __always_inline int Page##uname(struct page *page) \
393 : { return test_bit(PG_##lname, &policy(page, 0)->flags); }
394 :
395 : #define SETPAGEFLAG(uname, lname, policy) \
396 : static __always_inline \
397 : void folio_set_##lname(struct folio *folio) \
398 : { set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
399 : static __always_inline void SetPage##uname(struct page *page) \
400 : { set_bit(PG_##lname, &policy(page, 1)->flags); }
401 :
402 : #define CLEARPAGEFLAG(uname, lname, policy) \
403 : static __always_inline \
404 : void folio_clear_##lname(struct folio *folio) \
405 : { clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
406 : static __always_inline void ClearPage##uname(struct page *page) \
407 : { clear_bit(PG_##lname, &policy(page, 1)->flags); }
408 :
409 : #define __SETPAGEFLAG(uname, lname, policy) \
410 : static __always_inline \
411 : void __folio_set_##lname(struct folio *folio) \
412 : { __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
413 : static __always_inline void __SetPage##uname(struct page *page) \
414 : { __set_bit(PG_##lname, &policy(page, 1)->flags); }
415 :
416 : #define __CLEARPAGEFLAG(uname, lname, policy) \
417 : static __always_inline \
418 : void __folio_clear_##lname(struct folio *folio) \
419 : { __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
420 : static __always_inline void __ClearPage##uname(struct page *page) \
421 : { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
422 :
423 : #define TESTSETFLAG(uname, lname, policy) \
424 : static __always_inline \
425 : bool folio_test_set_##lname(struct folio *folio) \
426 : { return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
427 : static __always_inline int TestSetPage##uname(struct page *page) \
428 : { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
429 :
430 : #define TESTCLEARFLAG(uname, lname, policy) \
431 : static __always_inline \
432 : bool folio_test_clear_##lname(struct folio *folio) \
433 : { return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
434 : static __always_inline int TestClearPage##uname(struct page *page) \
435 : { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
436 :
437 : #define PAGEFLAG(uname, lname, policy) \
438 : TESTPAGEFLAG(uname, lname, policy) \
439 : SETPAGEFLAG(uname, lname, policy) \
440 : CLEARPAGEFLAG(uname, lname, policy)
441 :
442 : #define __PAGEFLAG(uname, lname, policy) \
443 : TESTPAGEFLAG(uname, lname, policy) \
444 : __SETPAGEFLAG(uname, lname, policy) \
445 : __CLEARPAGEFLAG(uname, lname, policy)
446 :
447 : #define TESTSCFLAG(uname, lname, policy) \
448 : TESTSETFLAG(uname, lname, policy) \
449 : TESTCLEARFLAG(uname, lname, policy)
450 :
451 : #define TESTPAGEFLAG_FALSE(uname, lname) \
452 : static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
453 : static inline int Page##uname(const struct page *page) { return 0; }
454 :
455 : #define SETPAGEFLAG_NOOP(uname, lname) \
456 : static inline void folio_set_##lname(struct folio *folio) { } \
457 : static inline void SetPage##uname(struct page *page) { }
458 :
459 : #define CLEARPAGEFLAG_NOOP(uname, lname) \
460 : static inline void folio_clear_##lname(struct folio *folio) { } \
461 : static inline void ClearPage##uname(struct page *page) { }
462 :
463 : #define __CLEARPAGEFLAG_NOOP(uname, lname) \
464 : static inline void __folio_clear_##lname(struct folio *folio) { } \
465 : static inline void __ClearPage##uname(struct page *page) { }
466 :
467 : #define TESTSETFLAG_FALSE(uname, lname) \
468 : static inline bool folio_test_set_##lname(struct folio *folio) \
469 : { return 0; } \
470 : static inline int TestSetPage##uname(struct page *page) { return 0; }
471 :
472 : #define TESTCLEARFLAG_FALSE(uname, lname) \
473 : static inline bool folio_test_clear_##lname(struct folio *folio) \
474 : { return 0; } \
475 : static inline int TestClearPage##uname(struct page *page) { return 0; }
476 :
477 : #define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
478 : SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)
479 :
480 : #define TESTSCFLAG_FALSE(uname, lname) \
481 : TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
482 :
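/*
 * For orientation (illustrative expansion, not additional definitions):
 * PAGEFLAG(Dirty, dirty, PF_HEAD), used further down, expands roughly to the
 * functions below once FOLIO_PF_HEAD (0) and PF_HEAD() (which resolves to
 * PF_POISONED_CHECK(compound_head(page))) are substituted in.
 */
#if 0	/* expansion sketch only */
static __always_inline bool folio_test_dirty(struct folio *folio)
{ return test_bit(PG_dirty, folio_flags(folio, 0)); }
static __always_inline int PageDirty(struct page *page)
{ return test_bit(PG_dirty, &compound_head(page)->flags); }
static __always_inline void folio_set_dirty(struct folio *folio)
{ set_bit(PG_dirty, folio_flags(folio, 0)); }
static __always_inline void SetPageDirty(struct page *page)
{ set_bit(PG_dirty, &compound_head(page)->flags); }
static __always_inline void folio_clear_dirty(struct folio *folio)
{ clear_bit(PG_dirty, folio_flags(folio, 0)); }
static __always_inline void ClearPageDirty(struct page *page)
{ clear_bit(PG_dirty, &compound_head(page)->flags); }
#endif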
483 0 : __PAGEFLAG(Locked, locked, PF_NO_TAIL)
484 0 : PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
485 0 : PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
486 0 : PAGEFLAG(Referenced, referenced, PF_HEAD)
487 0 : TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
488 0 : __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
489 0 : PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
490 : __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
491 0 : PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
492 0 : TESTCLEARFLAG(LRU, lru, PF_HEAD)
493 2022 : PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
494 0 : TESTCLEARFLAG(Active, active, PF_HEAD)
495 0 : PAGEFLAG(Workingset, workingset, PF_HEAD)
496 : TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
497 12008 : __PAGEFLAG(Slab, slab, PF_NO_TAIL)
498 : __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
499 0 : PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
500 :
501 : /* Xen */
502 : PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
503 : TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
504 : PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
505 : PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
506 : PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
507 : TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
508 :
509 0 : PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
510 502096 : __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
511 30170 : __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
512 0 : PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
513 : __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
514 0 : __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
515 :
516 : /*
517 : * Private page markings that may be used by the filesystem that owns the page
518 : * for its own purposes.
519 : * - PG_private and PG_private_2 cause releasepage() and co to be invoked
520 : */
521 0 : PAGEFLAG(Private, private, PF_ANY)
522 0 : PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
523 : PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
524 : TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
525 :
526 : /*
527 : * Only test-and-set operations exist for PG_writeback. The unconditional
528 : * operators are risky: they bypass page accounting.
529 : */
530 0 : TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
531 0 : TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
532 0 : PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
533 :
534 : /* PG_readahead is only used for reads; PG_reclaim is only for writes */
535 0 : PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
536 : TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
537 0 : PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
538 0 : TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
539 :
540 : #ifdef CONFIG_HIGHMEM
541 : /*
542 : * Must use a macro here due to header dependency issues. page_zone() is not
543 : * available at this point.
544 : */
545 : #define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
546 : #else
547 : PAGEFLAG_FALSE(HighMem, highmem)
548 : #endif
549 :
550 : #ifdef CONFIG_SWAP
551 : static __always_inline bool folio_test_swapcache(struct folio *folio)
552 : {
553 0 : return folio_test_swapbacked(folio) &&
554 0 : test_bit(PG_swapcache, folio_flags(folio, 0));
555 : }
556 :
557 : static __always_inline bool PageSwapCache(struct page *page)
558 : {
559 0 : return folio_test_swapcache(page_folio(page));
560 : }
561 :
562 0 : SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
563 0 : CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
564 : #else
565 : PAGEFLAG_FALSE(SwapCache, swapcache)
566 : #endif
567 :
568 0 : PAGEFLAG(Unevictable, unevictable, PF_HEAD)
569 0 : __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
570 0 : TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
571 :
572 : #ifdef CONFIG_MMU
573 0 : PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
574 0 : __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
575 0 : TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
576 : #else
577 : PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
578 : TESTSCFLAG_FALSE(Mlocked, mlocked)
579 : #endif
580 :
581 : #ifdef CONFIG_ARCH_USES_PG_UNCACHED
582 : PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
583 : #else
584 : PAGEFLAG_FALSE(Uncached, uncached)
585 : #endif
586 :
587 : #ifdef CONFIG_MEMORY_FAILURE
588 : PAGEFLAG(HWPoison, hwpoison, PF_ANY)
589 : TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
590 : #define __PG_HWPOISON (1UL << PG_hwpoison)
591 : #define MAGIC_HWPOISON 0x48575053U /* HWPS */
592 : extern void SetPageHWPoisonTakenOff(struct page *page);
593 : extern void ClearPageHWPoisonTakenOff(struct page *page);
594 : extern bool take_page_off_buddy(struct page *page);
595 : extern bool put_page_back_buddy(struct page *page);
596 : #else
597 : PAGEFLAG_FALSE(HWPoison, hwpoison)
598 : #define __PG_HWPOISON 0
599 : #endif
600 :
601 : #if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
602 : TESTPAGEFLAG(Young, young, PF_ANY)
603 : SETPAGEFLAG(Young, young, PF_ANY)
604 : TESTCLEARFLAG(Young, young, PF_ANY)
605 : PAGEFLAG(Idle, idle, PF_ANY)
606 : #endif
607 :
608 : #ifdef CONFIG_KASAN_HW_TAGS
609 : PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
610 : #else
611 : PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
612 : #endif
613 :
614 : /*
615 : * PageReported() is used to track reported free pages within the Buddy
616 : * allocator. We can use the non-atomic version of the test and set
617 : * operations as both should be shielded with the zone lock to prevent
618 : * any possible races on the setting or clearing of the bit.
619 : */
620 : __PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
621 :
622 : /*
623 : * On an anonymous page mapped into a user virtual memory area,
624 : * page->mapping points to its anon_vma, not to a struct address_space;
625 : * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
626 : *
627 : * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
628 : * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
629 : * bit; and then page->mapping points, not to an anon_vma, but to a private
630 : * structure which KSM associates with that merged page. See ksm.h.
631 : *
632 : * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
633 : * pages, and then page->mapping points to a struct address_space.
634 : *
635 : * Please note that, confusingly, "page_mapping" refers to the inode
636 : * address_space which maps the page from disk; whereas "page_mapped"
637 : * refers to user virtual address space into which the page is mapped.
638 : */
639 : #define PAGE_MAPPING_ANON 0x1
640 : #define PAGE_MAPPING_MOVABLE 0x2
641 : #define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
642 : #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
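/*
 * Standalone model (illustrative only, not kernel code) of the encoding
 * described above: the two low bits of page->mapping distinguish an
 * address_space pointer (00), an anon_vma (01), a non-LRU movable page (10)
 * and a KSM stable-tree node (11).
 */
#include <assert.h>
#include <stdint.h>

#define MODEL_MAPPING_ANON	0x1
#define MODEL_MAPPING_MOVABLE	0x2
#define MODEL_MAPPING_KSM	(MODEL_MAPPING_ANON | MODEL_MAPPING_MOVABLE)
#define MODEL_MAPPING_FLAGS	(MODEL_MAPPING_ANON | MODEL_MAPPING_MOVABLE)

int main(void)
{
	uintptr_t anon_vma = 0x1000;	/* stand-in for a real anon_vma pointer */
	uintptr_t mapping;

	mapping = anon_vma | MODEL_MAPPING_ANON;		/* anonymous page */
	assert(mapping & MODEL_MAPPING_ANON);			/* "PageAnon" is true */
	assert((mapping & MODEL_MAPPING_FLAGS) != MODEL_MAPPING_KSM);

	mapping = anon_vma | MODEL_MAPPING_KSM;			/* KSM page */
	assert((mapping & MODEL_MAPPING_FLAGS) == MODEL_MAPPING_KSM);
	return 0;
}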
643 :
644 : static __always_inline int PageMappingFlags(struct page *page)
645 : {
646 266 : return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
647 : }
648 :
649 : static __always_inline bool folio_test_anon(struct folio *folio)
650 : {
651 0 : return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
652 : }
653 :
654 : static __always_inline bool PageAnon(struct page *page)
655 : {
656 0 : return folio_test_anon(page_folio(page));
657 : }
658 :
659 : static __always_inline int __PageMovable(struct page *page)
660 : {
661 0 : return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
662 : PAGE_MAPPING_MOVABLE;
663 : }
664 :
665 : #ifdef CONFIG_KSM
666 : /*
667 : * A KSM page is one of those write-protected "shared pages" or "merged pages"
668 : * which KSM maps into multiple mms, wherever identical anonymous page content
669 : * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
670 : * anon_vma, but to that page's node of the stable tree.
671 : */
672 : static __always_inline bool folio_test_ksm(struct folio *folio)
673 : {
674 : return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
675 : PAGE_MAPPING_KSM;
676 : }
677 :
678 : static __always_inline bool PageKsm(struct page *page)
679 : {
680 : return folio_test_ksm(page_folio(page));
681 : }
682 : #else
683 : TESTPAGEFLAG_FALSE(Ksm, ksm)
684 : #endif
685 :
686 : u64 stable_page_flags(struct page *page);
687 :
688 : /**
689 : * folio_test_uptodate - Is this folio up to date?
690 : * @folio: The folio.
691 : *
692 : * The uptodate flag is set on a folio when every byte in the folio is
693 : * at least as new as the corresponding bytes on storage. Anonymous
694 : * and CoW folios are always uptodate. If the folio is not uptodate,
695 : * some of the bytes in it may be; see the is_partially_uptodate()
696 : * address_space operation.
697 : */
698 : static inline bool folio_test_uptodate(struct folio *folio)
699 : {
700 0 : bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
701 : /*
702 : * Must ensure that the data we read out of the folio is loaded
703 : * _after_ we've loaded folio->flags to check the uptodate bit.
704 : * We can skip the barrier if the folio is not uptodate, because
705 : * we wouldn't be reading anything from it.
706 : *
707 : * See folio_mark_uptodate() for the other side of the story.
708 : */
709 0 : if (ret)
710 0 : smp_rmb();
711 :
712 : return ret;
713 : }
714 :
715 : static inline int PageUptodate(struct page *page)
716 : {
717 0 : return folio_test_uptodate(page_folio(page));
718 : }
719 :
720 : static __always_inline void __folio_mark_uptodate(struct folio *folio)
721 : {
722 0 : smp_wmb();
723 0 : __set_bit(PG_uptodate, folio_flags(folio, 0));
724 : }
725 :
726 : static __always_inline void folio_mark_uptodate(struct folio *folio)
727 : {
728 : /*
729 : * Memory barrier must be issued before setting the PG_uptodate bit,
730 : * so that all previous stores issued in order to bring the folio
731 : * uptodate are actually visible before folio_test_uptodate becomes true.
732 : */
733 0 : smp_wmb();
734 0 : set_bit(PG_uptodate, folio_flags(folio, 0));
735 : }
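/*
 * Usage sketch (illustrative only; the "example_" names are hypothetical):
 * the smp_wmb() in folio_mark_uptodate() pairs with the smp_rmb() in
 * folio_test_uptodate(), so data written before marking the folio uptodate is
 * visible to anyone who observes the bit as set.
 */
#if 0	/* example only */
static void example_read_completed(struct folio *folio)
{
	/* ...all stores filling the folio with data from disk happen first... */
	folio_mark_uptodate(folio);		/* smp_wmb(), then set PG_uptodate */
}

static bool example_ok_to_copy_out(struct folio *folio)
{
	/* if this returns true, the smp_rmb() orders our data reads after the bit test */
	return folio_test_uptodate(folio);
}
#endif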
736 :
737 : static __always_inline void __SetPageUptodate(struct page *page)
738 : {
739 0 : __folio_mark_uptodate((struct folio *)page);
740 : }
741 :
742 : static __always_inline void SetPageUptodate(struct page *page)
743 : {
744 0 : folio_mark_uptodate((struct folio *)page);
745 : }
746 :
747 0 : CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
748 :
749 : bool __folio_start_writeback(struct folio *folio, bool keep_write);
750 : bool set_page_writeback(struct page *page);
751 :
752 : #define folio_start_writeback(folio) \
753 : __folio_start_writeback(folio, false)
754 : #define folio_start_writeback_keepwrite(folio) \
755 : __folio_start_writeback(folio, true)
756 :
757 : static inline void set_page_writeback_keepwrite(struct page *page)
758 : {
759 : folio_start_writeback_keepwrite(page_folio(page));
760 : }
761 :
762 : static inline bool test_set_page_writeback(struct page *page)
763 : {
764 : return set_page_writeback(page);
765 : }
766 :
767 : static __always_inline bool folio_test_head(struct folio *folio)
768 : {
769 0 : return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
770 : }
771 :
772 : static __always_inline int PageHead(struct page *page)
773 : {
774 : PF_POISONED_CHECK(page);
775 33 : return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
776 : }
777 :
778 218 : __SETPAGEFLAG(Head, head, PF_ANY)
779 : __CLEARPAGEFLAG(Head, head, PF_ANY)
780 : CLEARPAGEFLAG(Head, head, PF_ANY)
781 :
782 : /**
783 : * folio_test_large() - Does this folio contain more than one page?
784 : * @folio: The folio to test.
785 : *
786 : * Return: True if the folio is larger than one page.
787 : */
788 : static inline bool folio_test_large(struct folio *folio)
789 : {
790 0 : return folio_test_head(folio);
791 : }
792 :
793 : static __always_inline void set_compound_head(struct page *page, struct page *head)
794 : {
795 407 : WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
796 : }
797 :
798 : static __always_inline void clear_compound_head(struct page *page)
799 : {
800 251 : WRITE_ONCE(page->compound_head, 0);
801 : }
802 :
803 : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
804 : static inline void ClearPageCompound(struct page *page)
805 : {
806 : BUG_ON(!PageHead(page));
807 : ClearPageHead(page);
808 : }
809 : #endif
810 :
811 : #define PG_head_mask ((1UL << PG_head))
812 :
813 : #ifdef CONFIG_HUGETLB_PAGE
814 : int PageHuge(struct page *page);
815 : int PageHeadHuge(struct page *page);
816 : static inline bool folio_test_hugetlb(struct folio *folio)
817 : {
818 : return PageHeadHuge(&folio->page);
819 : }
820 : #else
821 : TESTPAGEFLAG_FALSE(Huge, hugetlb)
822 : TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
823 : #endif
824 :
825 : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
826 : /*
827 : * PageHuge() only returns true for hugetlbfs pages, but not for
828 : * normal or transparent huge pages.
829 : *
830 : * PageTransHuge() returns true for both transparent huge and
831 : * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
832 : * called in the core VM paths where hugetlbfs pages can't exist.
833 : */
834 : static inline int PageTransHuge(struct page *page)
835 : {
836 : VM_BUG_ON_PAGE(PageTail(page), page);
837 : return PageHead(page);
838 : }
839 :
840 : static inline bool folio_test_transhuge(struct folio *folio)
841 : {
842 : return folio_test_head(folio);
843 : }
844 :
845 : /*
846 : * PageTransCompound returns true for both transparent huge pages
847 : * and hugetlbfs pages, so it should only be called when it's known
848 : * that hugetlbfs pages aren't involved.
849 : */
850 : static inline int PageTransCompound(struct page *page)
851 : {
852 : return PageCompound(page);
853 : }
854 :
855 : /*
856 : * PageTransTail returns true for both transparent huge pages
857 : * and hugetlbfs pages, so it should only be called when it's known
858 : * that hugetlbfs pages aren't involved.
859 : */
860 : static inline int PageTransTail(struct page *page)
861 : {
862 : return PageTail(page);
863 : }
864 :
865 : /*
866 : * PageDoubleMap indicates that the compound page is mapped with PTEs as well
867 : * as PMDs.
868 : *
869 : * This is required for optimization of rmap operations for THP: we can postpone
870 : * per small page mapcount accounting (and its overhead from atomic operations)
871 : * until the first PMD split.
872 : *
873 : * For such a page, PageDoubleMap means ->_mapcount in all sub-pages is offset
874 : * up by one. This reference will go away with the last compound_mapcount.
875 : *
876 : * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
877 : */
878 : PAGEFLAG(DoubleMap, double_map, PF_SECOND)
879 : TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
880 : #else
881 : TESTPAGEFLAG_FALSE(TransHuge, transhuge)
882 : TESTPAGEFLAG_FALSE(TransCompound, transcompound)
883 : TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
884 : TESTPAGEFLAG_FALSE(TransTail, transtail)
885 : PAGEFLAG_FALSE(DoubleMap, double_map)
886 : TESTSCFLAG_FALSE(DoubleMap, double_map)
887 : #endif
888 :
889 : #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
890 : /*
891 : * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
892 : * compound page.
893 : *
894 : * This flag is set by hwpoison handler. Cleared by THP split or free page.
895 : */
896 : PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
897 : TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
898 : #else
899 : PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
900 : TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
901 : #endif
902 :
903 : /*
904 : * Check if a page is currently marked HWPoisoned. Note that this check is
905 : * best effort only and inherently racy: there is no way to synchronize with
906 : * failing hardware.
907 : */
908 : static inline bool is_page_hwpoison(struct page *page)
909 : {
910 : if (PageHWPoison(page))
911 : return true;
912 : return PageHuge(page) && PageHWPoison(compound_head(page));
913 : }
914 :
915 : /*
916 : * For pages that are never mapped to userspace (and aren't PageSlab),
917 : * page_type may be used. Because it is initialised to -1, we invert the
918 : * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
919 : * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
920 : * low bits so that an underflow or overflow of page_mapcount() won't be
921 : * mistaken for a page type value.
922 : */
923 :
924 : #define PAGE_TYPE_BASE 0xf0000000
925 : /* Reserve 0x0000007f to catch underflows of page_mapcount */
926 : #define PAGE_MAPCOUNT_RESERVE -128
927 : #define PG_buddy 0x00000080
928 : #define PG_offline 0x00000100
929 : #define PG_table 0x00000200
930 : #define PG_guard 0x00000400
931 :
932 : #define PageType(page, flag) \
933 : ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
934 :
935 : static inline int page_has_type(struct page *page)
936 : {
937 0 : return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
938 : }
939 :
940 : #define PAGE_TYPE_OPS(uname, lname) \
941 : static __always_inline int Page##uname(struct page *page) \
942 : { \
943 : return PageType(page, PG_##lname); \
944 : } \
945 : static __always_inline void __SetPage##uname(struct page *page) \
946 : { \
947 : VM_BUG_ON_PAGE(!PageType(page, 0), page); \
948 : page->page_type &= ~PG_##lname; \
949 : } \
950 : static __always_inline void __ClearPage##uname(struct page *page) \
951 : { \
952 : VM_BUG_ON_PAGE(!Page##uname(page), page); \
953 : page->page_type |= PG_##lname; \
954 : }
955 :
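/*
 * Standalone model (illustrative only, not kernel code) of the inverted
 * page_type encoding described above: page_type starts life as -1 (all bits
 * set), "setting" a type clears its bit, and PageType() checks that the base
 * bits are still set while the type bit is clear.
 */
#include <assert.h>

#define MODEL_TYPE_BASE	0xf0000000u
#define MODEL_BUDDY	0x00000080u

#define model_page_type(v, flag) \
	(((v) & (MODEL_TYPE_BASE | (flag))) == MODEL_TYPE_BASE)

int main(void)
{
	unsigned int page_type = 0xffffffffu;	/* freshly initialised: no type */

	assert(!model_page_type(page_type, MODEL_BUDDY));
	page_type &= ~MODEL_BUDDY;		/* like __SetPageBuddy() */
	assert(model_page_type(page_type, MODEL_BUDDY));
	page_type |= MODEL_BUDDY;		/* like __ClearPageBuddy() */
	assert(!model_page_type(page_type, MODEL_BUDDY));
	return 0;
}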
956 : /*
957 : * PageBuddy() indicates that the page is free and in the buddy system
958 : * (see mm/page_alloc.c).
959 : */
960 1696 : PAGE_TYPE_OPS(Buddy, buddy)
961 :
962 : /*
963 : * PageOffline() indicates that the page is logically offline although the
964 : * containing section is online (e.g. inflated in a balloon driver or
965 : * not onlined when onlining the section).
966 : * The content of these pages is effectively stale. Such pages should not
967 : * be touched (read/write/dump/save) except by their owner.
968 : *
969 : * If a driver wants to allow to offline unmovable PageOffline() pages without
970 : * putting them back to the buddy, it can do so via the memory notifier by
971 : * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
972 : * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
973 : * pages (now with a reference count of zero) are treated like free pages,
974 : * allowing the containing memory block to get offlined. A driver that
975 : * relies on this feature is aware that re-onlining the memory block will
976 : * require re-setting the pages PageOffline() and not giving them to the
977 : * buddy via online_page_callback_t.
978 : *
979 : * There are drivers that mark a page PageOffline() and expect there won't be
980 : * any further access to page content. PFN walkers that read content of random
981 : * pages should check PageOffline() and synchronize with such drivers using
982 : * page_offline_freeze()/page_offline_thaw().
983 : */
984 0 : PAGE_TYPE_OPS(Offline, offline)
985 :
986 : extern void page_offline_freeze(void);
987 : extern void page_offline_thaw(void);
988 : extern void page_offline_begin(void);
989 : extern void page_offline_end(void);
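/*
 * Sketch (illustrative only, not a real driver) of the memory-notifier scheme
 * described above for unmovable PageOffline() pages: drop the reference in
 * MEM_GOING_OFFLINE and take it back in MEM_CANCEL_OFFLINE.  All identifiers
 * prefixed "example_" are hypothetical.
 */
#if 0	/* example only */
static int example_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *arg)
{
	struct memory_notify *mhp = arg;
	unsigned long pfn;

	for (pfn = mhp->start_pfn; pfn < mhp->start_pfn + mhp->nr_pages; pfn++) {
		struct page *page = pfn_to_page(pfn);

		if (!PageOffline(page) || !example_page_is_ours(page))
			continue;
		if (action == MEM_GOING_OFFLINE)
			page_ref_dec(page);	/* refcount 0: treated like a free page */
		else if (action == MEM_CANCEL_OFFLINE)
			page_ref_inc(page);	/* offlining aborted, take the reference back */
	}
	return NOTIFY_OK;
}
#endif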
990 :
991 : /*
992 : * Marks pages in use as page tables.
993 : */
994 1 : PAGE_TYPE_OPS(Table, table)
995 :
996 : /*
997 : * Marks guardpages used with debug_pagealloc.
998 : */
999 : PAGE_TYPE_OPS(Guard, guard)
1000 :
1001 : extern bool is_free_buddy_page(struct page *page);
1002 :
1003 0 : PAGEFLAG(Isolated, isolated, PF_ANY);
1004 :
1005 : #ifdef CONFIG_MMU
1006 : #define __PG_MLOCKED (1UL << PG_mlocked)
1007 : #else
1008 : #define __PG_MLOCKED 0
1009 : #endif
1010 :
1011 : /*
1012 : * Flags checked when a page is freed. Pages being freed should not have
1013 : * these flags set. If they are, there is a problem.
1014 : */
1015 : #define PAGE_FLAGS_CHECK_AT_FREE \
1016 : (1UL << PG_lru | 1UL << PG_locked | \
1017 : 1UL << PG_private | 1UL << PG_private_2 | \
1018 : 1UL << PG_writeback | 1UL << PG_reserved | \
1019 : 1UL << PG_slab | 1UL << PG_active | \
1020 : 1UL << PG_unevictable | __PG_MLOCKED)
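/*
 * Sketch (illustrative only; the real checks live in mm/page_alloc.c) of how
 * the mask above is meant to be used when a page is freed.
 */
#if 0	/* example only */
static bool example_page_expected_state(struct page *page)
{
	/* a page being freed must not carry any of these flags */
	return !(page->flags & PAGE_FLAGS_CHECK_AT_FREE);
}
#endif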
1021 :
1022 : /*
1023 : * Flags checked when a page is prepped for return by the page allocator.
1024 : * Pages being prepped should not have these flags set. If they are set,
1025 : * there has been a kernel bug or struct page corruption.
1026 : *
1027 : * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
1028 : * alloc-free cycle to prevent the page from being reused.
1029 : */
1030 : #define PAGE_FLAGS_CHECK_AT_PREP \
1031 : (PAGEFLAGS_MASK & ~__PG_HWPOISON)
1032 :
1033 : #define PAGE_FLAGS_PRIVATE \
1034 : (1UL << PG_private | 1UL << PG_private_2)
1035 : /**
1036 : * page_has_private - Determine if page has private stuff
1037 : * @page: The page to be checked
1038 : *
1039 : * Determine if a page has private stuff, indicating that release routines
1040 : * should be invoked upon it.
1041 : */
1042 : static inline int page_has_private(struct page *page)
1043 : {
1044 0 : return !!(page->flags & PAGE_FLAGS_PRIVATE);
1045 : }
1046 :
1047 : static inline bool folio_has_private(struct folio *folio)
1048 : {
1049 0 : return page_has_private(&folio->page);
1050 : }
1051 :
1052 : #undef PF_ANY
1053 : #undef PF_HEAD
1054 : #undef PF_ONLY_HEAD
1055 : #undef PF_NO_TAIL
1056 : #undef PF_NO_COMPOUND
1057 : #undef PF_SECOND
1058 : #endif /* !__GENERATING_BOUNDS_H */
1059 :
1060 : #endif /* PAGE_FLAGS_H */
|