/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremap of pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * In general, PG_reserved does not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
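
/*
 * Illustrative sketch of the PG_locked/PG_writeback ordering described
 * above, for a simple synchronous read path (hypothetical caller; the
 * folio lock helpers live in linux/pagemap.h, folio_mark_uptodate() is
 * defined later in this file):
 *
 *	folio_lock(folio);		// PG_locked set, folio pinned
 *	...issue the read and wait for completion...
 *	folio_mark_uptodate(folio);	// data now valid
 *	folio_unlock(folio);		// read completed: PG_locked cleared
 *
 * For writeback the roles differ: PG_locked may be dropped once writeback
 * *starts*, and PG_writeback (set via folio_start_writeback(), below) is
 * what the completion path clears.
 */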

/*
 * Don't use the pageflags directly. Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_head,		/* Must be in bit 6 */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_active,
	PG_workingset,
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	PG_arch_2,
	PG_arch_3,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state. These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

	/*
	 * Flags only valid for compound pages. Stored in first tail page's
	 * flags word. Cannot use the first 8 flags or any flag marked as
	 * PF_ANY.
	 */

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned	= PG_error,
	PG_hugetlb		= PG_active,
	PG_large_rmappable	= PG_workingset, /* anon or file-backed */
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)
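
/*
 * Worked example (illustrative only; the real NR_PAGEFLAGS value is
 * config- and arch-dependent, see generated/bounds.h): if NR_PAGEFLAGS
 * were 24, then
 *
 *	PAGEFLAGS_MASK == (1UL << 24) - 1 == 0x00ffffff
 *
 * i.e. the low 24 bits of page->flags would hold flag bits, and everything
 * above them would belong to the zone/node/section fields area described
 * above.
 */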

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
	 * struct page. The alignment check aims to avoid accessing the fields
	 * (e.g. compound_head) of the @page[1], which can avoid touching a
	 * (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of the @page[1] with PG_head
		 * because the @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio. This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference or lock is required on @page. If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))
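
/*
 * Illustrative sketch of the re-check described in the page_folio()
 * kernel-doc above (hypothetical caller; folio_try_get() and folio_put()
 * are declared in linux/mm.h):
 *
 *	struct folio *folio;
 * retry:
 *	folio = page_folio(page);
 *	if (!folio_try_get(folio))
 *		return NULL;
 *	if (unlikely(page_folio(page) != folio)) {
 *		folio_put(folio);	// raced with a split, try again
 *		goto retry;
 *	}
 */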

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio. This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned or uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_ONLY_HEAD	0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1
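
/*
 * Worked example of the policies above (illustrative): for a tail page of
 * a compound page, SetPageDirty() (generated later in this file with the
 * PF_HEAD policy) effectively does
 *
 *	set_bit(PG_dirty, &compound_head(page)->flags);
 *
 * after the poison check, i.e. the operation is redirected to the head
 * page. With PF_NO_COMPOUND and enforcement requested, the same call on
 * any compound page would instead trip VM_BUG_ON_PGFLAGS().
 */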

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline bool folio_test_##lname(struct folio *folio)	\
{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline int Page##uname(struct page *page)		\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_set_##lname(struct folio *folio)				\
{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_clear_##lname(struct folio *folio)				\
{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_set_##lname(struct folio *folio)				\
{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_clear_##lname(struct folio *folio)				\
{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_set_##lname(struct folio *folio)			\
{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_clear_##lname(struct folio *folio)			\
{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
static inline void folio_set_##lname(struct folio *folio) { }		\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void folio_clear_##lname(struct folio *folio) { }	\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void __folio_clear_##lname(struct folio *folio) { }	\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
static inline bool folio_test_set_##lname(struct folio *folio)		\
{ return 0; }								\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
static inline bool folio_test_clear_##lname(struct folio *folio)	\
{ return 0; }								\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
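
/*
 * For reference (illustrative): a single instantiation such as
 *
 *	PAGEFLAG(Dirty, dirty, PF_HEAD)
 *
 * below expands to six accessors: folio_test_dirty(), PageDirty(),
 * folio_set_dirty(), SetPageDirty(), folio_clear_dirty() and
 * ClearPageDirty(), all operating on PG_dirty under the PF_HEAD policy.
 */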

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
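
/*
 * Illustrative sketch of the PG_readahead/PG_reclaim overlap (hypothetical
 * read-side caller): on a folio marked for readahead, a read path would
 * typically do
 *
 *	if (folio_test_clear_readahead(folio))
 *		...kick off the next readahead window...
 *
 * which cannot be confused with the PG_reclaim meaning of the same bit,
 * because, as noted above, that meaning only applies on the write side.
 */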

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p)	is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page. See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then page->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal states, the page->mapping does not exist as such, nor do these
 * flags below. So in order to avoid testing non-existent bits, please
 * make sure that PageSlab(page) actually evaluates to false before calling
 * the following functions (e.g., PageAnon). See mm/slab.h.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
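
/*
 * Worked example of decoding the bits above (illustrative), with m =
 * (unsigned long)folio->mapping:
 *
 *	(m & PAGE_MAPPING_FLAGS) == 0			  -> struct address_space
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON	  -> struct anon_vma
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM	  -> KSM stable tree node
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE  -> movable_operations
 *
 * which is exactly what folio_test_anon(), folio_test_ksm() and
 * __folio_test_movable() below test.
 */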

/*
 * Unlike the flags above, this flag is used only for fsdax mode. It
 * indicates that this page->mapping is shared in a reflink case.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)

static __always_inline bool folio_mapping_flags(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(struct page *page);

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage. Anonymous
 * and CoW folios are always uptodate. If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline int PageUptodate(struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
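
/*
 * Illustrative pairing of the barriers above (hypothetical caller;
 * folio_address() is declared in linux/mm.h, use() is a stand-in):
 *
 *	// writer side, e.g. read completion
 *	memcpy(folio_address(folio), src, len);
 *	folio_mark_uptodate(folio);	// smp_wmb(), then set PG_uptodate
 *
 *	// reader side
 *	if (folio_test_uptodate(folio))	// test PG_uptodate, then smp_rmb()
 *		use(folio_address(folio));
 *
 * A reader that observes PG_uptodate is thus guaranteed to also observe
 * the stores that made the folio uptodate.
 */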

bool __folio_start_writeback(struct folio *folio, bool keep_write);
bool set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)		\
	__folio_start_writeback(folio, true)

static inline bool test_set_page_writeback(struct page *page)
{
	return set_page_writeback(page);
}

static __always_inline bool folio_test_head(struct folio *folio)
{
	return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}
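
/*
 * Worked example of the encoding used above (illustrative): after
 * set_compound_head(tail, head), tail->compound_head holds
 * (unsigned long)head + 1. Since struct page is always at least
 * word-aligned, bit 0 of a real pointer is zero, so bit 0 doubles as
 * the PageTail() marker and _compound_head() recovers the pointer by
 * subtracting 1.
 */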

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)

/**
 * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
 * @folio: The folio to test.
 *
 * Context: Any context. Caller should have a reference on the folio to
 * prevent it from being turned into a tail page.
 * Return: True for hugetlbfs folios, false for anon folios or folios
 * belonging to other filesystems.
 */
static inline bool folio_test_hugetlb(struct folio *folio)
{
	return folio_test_large(folio) &&
		test_bit(PG_hugetlb, folio_flags(folio, 1));
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() returns true only for hugetlbfs pages, not for normal or
 * transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and hugetlbfs
 * pages, but not for normal pages. PageTransHuge() can only be called in
 * core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif
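
/*
 * Quick reference for the predicates above (illustrative):
 *
 *	page		PageHuge()	PageTransHuge()	PageTransCompound()
 *	THP head	false		true		true
 *	hugetlbfs head	true		true		true
 *	order-0 page	false		false		false
 *
 * hence the requirement that the PageTrans*() helpers only be used where
 * hugetlbfs pages cannot appear.
 */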

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by the hwpoison handler and cleared by THP split or
 * page freeing.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used. Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve 0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
#define folio_test_type(folio, flag)					\
	((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_type_has_type(unsigned int page_type)
{
	return (int)page_type < PAGE_MAPCOUNT_RESERVE;
}

static inline int page_has_type(struct page *page)
{
	return page_type_has_type(page->page_type);
}
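
/*
 * Worked example of the inverted encoding (illustrative): page_type is
 * initialised to -1 (0xffffffff). __SetPageBuddy() (generated below)
 * clears PG_buddy, giving 0xffffff7f, and PageType(page, PG_buddy) then
 * checks
 *
 *	(0xffffff7f & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE
 *	(0xffffff7f & 0xf0000080)		   == 0xf0000000
 *
 * which holds. A field being used as a mapcount instead (a value near 0)
 * fails the check because the PAGE_TYPE_BASE bits are not all set.
 */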

#define PAGE_TYPE_OPS(uname, lname, fname)				\
static __always_inline int Page##uname(const struct page *page)	\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline int folio_test_##fname(const struct folio *folio)\
{									\
	return folio_test_type(folio, PG_##lname);			\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio);		\
	folio->page.page_type &= ~PG_##lname;				\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
	folio->page.page_type |= PG_##lname;				\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined.
 * A driver that relies on this feature is aware that re-onlining the memory
 * block will require it to re-set the pages PageOffline() rather than
 * giving them to the buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page. They may overlap
 * the CHECK_AT_FREE flags above, so need to be cleared.
 */
#define PAGE_FLAGS_SECOND						\
	(0xffUL /* order */	| 1UL << PG_has_hwpoisoned |		\
	 1UL << PG_hugetlb	| 1UL << PG_large_rmappable)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(struct folio *folio)
{
	return page_has_private(&folio->page);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */