/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages, but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it is first entered
 * into the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the pageflags directly. Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
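/*
 * Illustrative sketch, not part of the upstream header: the upper "fields"
 * area is read with shift-and-mask helpers whose shift/mask values are
 * generated per configuration. For example, page_zonenum() in <linux/mm.h>
 * extracts the zone roughly like this:
 *
 *	static inline enum zone_type page_zonenum(const struct page *page)
 *	{
 *		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	}
 */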
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	PG_arch_2,
	PG_arch_3,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
	PG_skip_kasan_poison,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state. These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

#ifdef CONFIG_MEMORY_FAILURE
	/*
	 * Compound pages. Stored in first tail page's flags.
	 * Indicates that at least one subpage is hwpoisoned in the
	 * THP.
	 */
	PG_has_hwpoisoned = PG_error,
#endif

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct if the @page is a fake head page,
 * otherwise return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned to PAGE_SIZE of struct page may be fake head
	 * struct pages. The alignment check aims to avoid accessing the fields
	 * (e.g. compound_head) of @page[1], which can avoid touching a
	 * (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the fields of @page[1] with PG_head
		 * because @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))

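/*
 * Illustrative sketch, not part of the upstream header: for a tail page,
 * bit 0 of ->compound_head is set and the remaining bits point at the head
 * page, so compound_head() strips the tag; for any other page it returns
 * the page itself:
 *
 *	struct page *head = compound_head(page);	// head page, or page itself
 *	compound_head(head) == head;			// idempotent
 */
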
/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio. This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p. If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))
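
/*
 * Illustrative sketch, not part of the upstream header: the lockless-lookup
 * pattern described in the Context note above, for a caller starting out
 * with only a struct page and no reference:
 *
 *	struct folio *folio = page_folio(page);
 *
 *	if (!folio_try_get(folio))
 *		return NULL;			// folio was freed under us
 *	if (unlikely(page_folio(page) != folio)) {
 *		folio_put(folio);		// raced with a split
 *		return NULL;
 *	}
 */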

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio. This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
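
/*
 * Illustrative sketch, not part of the upstream header:
 * folio_page(folio, 0) is the folio's head page, i.e. &folio->page, and
 * folio_page(folio, 1) is the first tail page of a large folio.
 */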

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     check if this struct page is poisoned/uninitialized.
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_ONLY_HEAD	0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1
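
/*
 * Illustrative sketch, not part of the upstream header: the FOLIO_PF_*
 * constants select which struct page of a folio carries the bit when the
 * folio_* accessors below call folio_flags(). Every policy except PF_SECOND
 * uses page 0 (the head page); PF_SECOND flags live in the first tail page:
 *
 *	folio_flags(folio, FOLIO_PF_HEAD);	// &folio->page.flags
 *	folio_flags(folio, FOLIO_PF_SECOND);	// &(&folio->page)[1].flags
 */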

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline bool folio_test_##lname(struct folio *folio)	\
{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline int Page##uname(struct page *page)		\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_set_##lname(struct folio *folio)				\
{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_clear_##lname(struct folio *folio)				\
{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_set_##lname(struct folio *folio)				\
{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_clear_##lname(struct folio *folio)			\
{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_set_##lname(struct folio *folio)			\
{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_clear_##lname(struct folio *folio)			\
{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

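/*
 * Illustrative sketch, not part of the upstream header: as an example,
 * PAGEFLAG(Dirty, dirty, PF_HEAD) below generates six accessors, roughly:
 *
 *	bool folio_test_dirty(struct folio *folio);	// test PG_dirty
 *	int  PageDirty(struct page *page);		// on compound_head(page)
 *	void folio_set_dirty(struct folio *folio);	// atomic set_bit()
 *	void SetPageDirty(struct page *page);
 *	void folio_clear_dirty(struct folio *folio);	// atomic clear_bit()
 *	void ClearPageDirty(struct page *page);
 */
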
#define TESTPAGEFLAG_FALSE(uname, lname)				\
static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
static inline void folio_set_##lname(struct folio *folio) { }		\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void folio_clear_##lname(struct folio *folio) { }	\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void __folio_clear_##lname(struct folio *folio) { }	\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
static inline bool folio_test_set_##lname(struct folio *folio)		\
{ return 0; }								\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
static inline bool folio_test_clear_##lname(struct folio *folio)	\
{ return 0; }								\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
/* For self-hosted memmap pages */
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page. See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then page->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/*
 * Different from the flags above, this flag is used only for fsdax mode. It
 * indicates that this page->mapping is now shared through reflink.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)
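
/*
 * Illustrative sketch, not part of the upstream header: decoding the low
 * bits of a non-NULL page->mapping per the comment above:
 *
 *	unsigned long m = (unsigned long)page->mapping;
 *
 *	(m & PAGE_MAPPING_FLAGS) == 0			-> struct address_space
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON	-> struct anon_vma
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM	-> KSM stable tree node
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE -> struct movable_operations
 */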

static __always_inline bool folio_mapping_flags(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(struct page *page);

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage. Anonymous
 * and CoW folios are always uptodate. If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline int PageUptodate(struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

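/*
 * Illustrative sketch, not part of the upstream header: how the smp_wmb()
 * in folio_mark_uptodate() pairs with the smp_rmb() in folio_test_uptodate().
 * fill_folio()/read_folio_data() are hypothetical stand-ins:
 *
 *	// writer (e.g. I/O completion)
 *	fill_folio(folio);		// store the folio's contents
 *	folio_mark_uptodate(folio);	// smp_wmb(), then set PG_uptodate
 *
 *	// reader
 *	if (folio_test_uptodate(folio))	// test PG_uptodate, then smp_rmb()
 *		read_folio_data(folio);	// sees the writer's stores
 */
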
bool __folio_start_writeback(struct folio *folio, bool keep_write);
bool set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)		\
	__folio_start_writeback(folio, true)

static inline void set_page_writeback_keepwrite(struct page *page)
{
	folio_start_writeback_keepwrite(page_folio(page));
}

static inline bool test_set_page_writeback(struct page *page)
{
	return set_page_writeback(page);
}

static __always_inline bool folio_test_head(struct folio *folio)
{
	return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

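/*
 * Illustrative sketch, not part of the upstream header: set_compound_head()
 * stores the head pointer with bit 0 set, the same tag that PageTail() and
 * compound_head() test for:
 *
 *	set_compound_head(tail, head);	// tail->compound_head = head | 1
 *	PageTail(tail);			// true: bit 0 is set
 *	compound_head(tail) == head;	// true: the tag is stripped
 */
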
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
static inline bool folio_test_hugetlb(struct folio *folio)
{
	return PageHeadHuge(&folio->page);
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

static inline bool folio_test_transhuge(struct folio *folio)
{
	return folio_test_head(folio);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler. Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used. Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve 0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

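/*
 * Illustrative sketch, not part of the upstream header: the inverted sense
 * in action, starting from the initial page_type value of -1 (all bits set):
 *
 *	page->page_type == 0xffffffff;	// no type set
 *	PageBuddy(page) == false;	// masked value is 0xf0000080
 *	__SetPageBuddy(page);		// page_type &= ~PG_buddy
 *	page->page_type == 0xffffff7f;
 *	PageBuddy(page) == true;	// masked value is 0xf0000000
 */
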
#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining unmovable PageOffline() pages without
 * putting them back to the buddy, it can do so via the memory notifier by
 * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
 * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
 * pages (now with a reference count of zero) are treated like free pages,
 * allowing the containing memory block to get offlined. A driver that
 * relies on this feature is aware that re-onlining the memory block will
 * require re-setting the pages PageOffline() instead of giving them to the
 * buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guard pages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(struct folio *folio)
{
	return page_has_private(&folio->page);
}
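
/*
 * Illustrative sketch, not part of the upstream header: reclaim-style
 * callers typically use this to decide whether the filesystem must be
 * asked to release its private data before the folio can be freed:
 *
 *	if (folio_has_private(folio) && !filemap_release_folio(folio, gfp))
 *		goto keep;	// the fs still needs the folio; skip it
 */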

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */