Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: introduce memdesc_flags_t

Patch series "Add and use memdesc_flags_t".

At some point struct page will be separated from struct slab and struct
folio. This is a step towards that by introducing a type for the 'flags'
word of all three structures. This gives us a certain amount of type
safety by establishing that some of these unsigned longs are different
from other unsigned longs in that they contain things like node ID,
section number and zone number in the upper bits. That lets us have
functions that can be easily called by anyone who has a slab, folio or
page (but not easily by anyone else) to get the node or zone.

There's going to be some unusual merge problems with this as some odd bits
of the kernel decide they want to print out the flags value or something
similar by writing page->flags and now they'll need to write page->flags.f
instead. That's most of the churn here. Maybe we should be removing
these things from the debug output?


This patch (of 11):

Wrap the unsigned long flags in a typedef. In upcoming patches, this will
provide a strong hint that you can't just pass a random unsigned long to
functions which take this as an argument.

[willy@infradead.org: s/flags/flags.f/ in several architectures]
Link: https://lkml.kernel.org/r/aKMgPRLD-WnkPxYm@casper.infradead.org
[nicola.vetrini@gmail.com: mips: fix compilation error]
Link: https://lore.kernel.org/lkml/CA+G9fYvkpmqGr6wjBNHY=dRp71PLCoi2341JxOudi60yqaeUdg@mail.gmail.com/
Link: https://lkml.kernel.org/r/20250825214245.1838158-1-nicola.vetrini@gmail.com
Link: https://lkml.kernel.org/r/20250805172307.1302730-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20250805172307.1302730-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Matthew Wilcox (Oracle); committed by Andrew Morton.
53fbef56 4e915656

+195 -190
+4 -4
arch/arc/mm/cache.c
··· 704 704 705 705 void flush_dcache_folio(struct folio *folio) 706 706 { 707 - clear_bit(PG_dc_clean, &folio->flags); 707 + clear_bit(PG_dc_clean, &folio->flags.f); 708 708 return; 709 709 } 710 710 EXPORT_SYMBOL(flush_dcache_folio); ··· 889 889 890 890 copy_page(kto, kfrom); 891 891 892 - clear_bit(PG_dc_clean, &dst->flags); 893 - clear_bit(PG_dc_clean, &src->flags); 892 + clear_bit(PG_dc_clean, &dst->flags.f); 893 + clear_bit(PG_dc_clean, &src->flags.f); 894 894 895 895 kunmap_atomic(kto); 896 896 kunmap_atomic(kfrom); ··· 900 900 { 901 901 struct folio *folio = page_folio(page); 902 902 clear_page(to); 903 - clear_bit(PG_dc_clean, &folio->flags); 903 + clear_bit(PG_dc_clean, &folio->flags.f); 904 904 } 905 905 EXPORT_SYMBOL(clear_user_page); 906 906
+1 -1
arch/arc/mm/tlb.c
··· 488 488 */ 489 489 if (vma->vm_flags & VM_EXEC) { 490 490 struct folio *folio = page_folio(page); 491 - int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags); 491 + int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f); 492 492 if (dirty) { 493 493 unsigned long offset = offset_in_folio(folio, paddr); 494 494 nr = folio_nr_pages(folio);
+1 -1
arch/arm/include/asm/hugetlb.h
··· 17 17 18 18 static inline void arch_clear_hugetlb_flags(struct folio *folio) 19 19 { 20 - clear_bit(PG_dcache_clean, &folio->flags); 20 + clear_bit(PG_dcache_clean, &folio->flags.f); 21 21 } 22 22 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags 23 23
+1 -1
arch/arm/mm/copypage-v4mc.c
··· 67 67 struct folio *src = page_folio(from); 68 68 void *kto = kmap_atomic(to); 69 69 70 - if (!test_and_set_bit(PG_dcache_clean, &src->flags)) 70 + if (!test_and_set_bit(PG_dcache_clean, &src->flags.f)) 71 71 __flush_dcache_folio(folio_flush_mapping(src), src); 72 72 73 73 raw_spin_lock(&minicache_lock);
+1 -1
arch/arm/mm/copypage-v6.c
··· 73 73 unsigned int offset = CACHE_COLOUR(vaddr); 74 74 unsigned long kfrom, kto; 75 75 76 - if (!test_and_set_bit(PG_dcache_clean, &src->flags)) 76 + if (!test_and_set_bit(PG_dcache_clean, &src->flags.f)) 77 77 __flush_dcache_folio(folio_flush_mapping(src), src); 78 78 79 79 /* FIXME: not highmem safe */
+1 -1
arch/arm/mm/copypage-xscale.c
··· 87 87 struct folio *src = page_folio(from); 88 88 void *kto = kmap_atomic(to); 89 89 90 - if (!test_and_set_bit(PG_dcache_clean, &src->flags)) 90 + if (!test_and_set_bit(PG_dcache_clean, &src->flags.f)) 91 91 __flush_dcache_folio(folio_flush_mapping(src), src); 92 92 93 93 raw_spin_lock(&minicache_lock);
+1 -1
arch/arm/mm/dma-mapping.c
··· 718 718 if (size < sz) 719 719 break; 720 720 if (!offset) 721 - set_bit(PG_dcache_clean, &folio->flags); 721 + set_bit(PG_dcache_clean, &folio->flags.f); 722 722 offset = 0; 723 723 size -= sz; 724 724 if (!size)
+1 -1
arch/arm/mm/fault-armv.c
··· 203 203 204 204 folio = page_folio(pfn_to_page(pfn)); 205 205 mapping = folio_flush_mapping(folio); 206 - if (!test_and_set_bit(PG_dcache_clean, &folio->flags)) 206 + if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f)) 207 207 __flush_dcache_folio(mapping, folio); 208 208 if (mapping) { 209 209 if (cache_is_vivt())
+5 -5
arch/arm/mm/flush.c
··· 304 304 else 305 305 mapping = NULL; 306 306 307 - if (!test_and_set_bit(PG_dcache_clean, &folio->flags)) 307 + if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f)) 308 308 __flush_dcache_folio(mapping, folio); 309 309 310 310 if (pte_exec(pteval)) ··· 343 343 return; 344 344 345 345 if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) { 346 - if (test_bit(PG_dcache_clean, &folio->flags)) 347 - clear_bit(PG_dcache_clean, &folio->flags); 346 + if (test_bit(PG_dcache_clean, &folio->flags.f)) 347 + clear_bit(PG_dcache_clean, &folio->flags.f); 348 348 return; 349 349 } 350 350 ··· 352 352 353 353 if (!cache_ops_need_broadcast() && 354 354 mapping && !folio_mapped(folio)) 355 - clear_bit(PG_dcache_clean, &folio->flags); 355 + clear_bit(PG_dcache_clean, &folio->flags.f); 356 356 else { 357 357 __flush_dcache_folio(mapping, folio); 358 358 if (mapping && cache_is_vivt()) 359 359 __flush_dcache_aliases(mapping, folio); 360 360 else if (mapping) 361 361 __flush_icache_all(); 362 - set_bit(PG_dcache_clean, &folio->flags); 362 + set_bit(PG_dcache_clean, &folio->flags.f); 363 363 } 364 364 } 365 365 EXPORT_SYMBOL(flush_dcache_folio);
+3 -3
arch/arm64/include/asm/hugetlb.h
··· 21 21 22 22 static inline void arch_clear_hugetlb_flags(struct folio *folio) 23 23 { 24 - clear_bit(PG_dcache_clean, &folio->flags); 24 + clear_bit(PG_dcache_clean, &folio->flags.f); 25 25 26 26 #ifdef CONFIG_ARM64_MTE 27 27 if (system_supports_mte()) { 28 - clear_bit(PG_mte_tagged, &folio->flags); 29 - clear_bit(PG_mte_lock, &folio->flags); 28 + clear_bit(PG_mte_tagged, &folio->flags.f); 29 + clear_bit(PG_mte_lock, &folio->flags.f); 30 30 } 31 31 #endif 32 32 }
+8 -8
arch/arm64/include/asm/mte.h
··· 48 48 * before the page flags update. 49 49 */ 50 50 smp_wmb(); 51 - set_bit(PG_mte_tagged, &page->flags); 51 + set_bit(PG_mte_tagged, &page->flags.f); 52 52 } 53 53 54 54 static inline bool page_mte_tagged(struct page *page) 55 55 { 56 - bool ret = test_bit(PG_mte_tagged, &page->flags); 56 + bool ret = test_bit(PG_mte_tagged, &page->flags.f); 57 57 58 58 VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page))); 59 59 ··· 82 82 { 83 83 VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page))); 84 84 85 - if (!test_and_set_bit(PG_mte_lock, &page->flags)) 85 + if (!test_and_set_bit(PG_mte_lock, &page->flags.f)) 86 86 return true; 87 87 88 88 /* ··· 90 90 * already. Check if the PG_mte_tagged flag has been set or wait 91 91 * otherwise. 92 92 */ 93 - smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged)); 93 + smp_cond_load_acquire(&page->flags.f, VAL & (1UL << PG_mte_tagged)); 94 94 95 95 return false; 96 96 } ··· 173 173 * before the folio flags update. 174 174 */ 175 175 smp_wmb(); 176 - set_bit(PG_mte_tagged, &folio->flags); 176 + set_bit(PG_mte_tagged, &folio->flags.f); 177 177 178 178 } 179 179 180 180 static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio) 181 181 { 182 - bool ret = test_bit(PG_mte_tagged, &folio->flags); 182 + bool ret = test_bit(PG_mte_tagged, &folio->flags.f); 183 183 184 184 VM_WARN_ON_ONCE(!folio_test_hugetlb(folio)); 185 185 ··· 196 196 { 197 197 VM_WARN_ON_ONCE(!folio_test_hugetlb(folio)); 198 198 199 - if (!test_and_set_bit(PG_mte_lock, &folio->flags)) 199 + if (!test_and_set_bit(PG_mte_lock, &folio->flags.f)) 200 200 return true; 201 201 202 202 /* ··· 204 204 * already. Check if the PG_mte_tagged flag has been set or wait 205 205 * otherwise. 206 206 */ 207 - smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged)); 207 + smp_cond_load_acquire(&folio->flags.f, VAL & (1UL << PG_mte_tagged)); 208 208 209 209 return false; 210 210 }
+4 -4
arch/arm64/mm/flush.c
··· 53 53 { 54 54 struct folio *folio = page_folio(pte_page(pte)); 55 55 56 - if (!test_bit(PG_dcache_clean, &folio->flags)) { 56 + if (!test_bit(PG_dcache_clean, &folio->flags.f)) { 57 57 sync_icache_aliases((unsigned long)folio_address(folio), 58 58 (unsigned long)folio_address(folio) + 59 59 folio_size(folio)); 60 - set_bit(PG_dcache_clean, &folio->flags); 60 + set_bit(PG_dcache_clean, &folio->flags.f); 61 61 } 62 62 } 63 63 EXPORT_SYMBOL_GPL(__sync_icache_dcache); ··· 69 69 */ 70 70 void flush_dcache_folio(struct folio *folio) 71 71 { 72 - if (test_bit(PG_dcache_clean, &folio->flags)) 73 - clear_bit(PG_dcache_clean, &folio->flags); 72 + if (test_bit(PG_dcache_clean, &folio->flags.f)) 73 + clear_bit(PG_dcache_clean, &folio->flags.f); 74 74 } 75 75 EXPORT_SYMBOL(flush_dcache_folio); 76 76
+3 -3
arch/csky/abiv1/cacheflush.c
··· 25 25 mapping = folio_flush_mapping(folio); 26 26 27 27 if (mapping && !folio_mapped(folio)) 28 - clear_bit(PG_dcache_clean, &folio->flags); 28 + clear_bit(PG_dcache_clean, &folio->flags.f); 29 29 else { 30 30 dcache_wbinv_all(); 31 31 if (mapping) 32 32 icache_inv_all(); 33 - set_bit(PG_dcache_clean, &folio->flags); 33 + set_bit(PG_dcache_clean, &folio->flags.f); 34 34 } 35 35 } 36 36 EXPORT_SYMBOL(flush_dcache_folio); ··· 56 56 return; 57 57 58 58 folio = page_folio(pfn_to_page(pfn)); 59 - if (!test_and_set_bit(PG_dcache_clean, &folio->flags)) 59 + if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f)) 60 60 dcache_wbinv_all(); 61 61 62 62 if (folio_flush_mapping(folio)) {
+3 -3
arch/mips/include/asm/cacheflush.h
··· 37 37 #define PG_dcache_dirty PG_arch_1 38 38 39 39 #define folio_test_dcache_dirty(folio) \ 40 - test_bit(PG_dcache_dirty, &(folio)->flags) 40 + test_bit(PG_dcache_dirty, &(folio)->flags.f) 41 41 #define folio_set_dcache_dirty(folio) \ 42 - set_bit(PG_dcache_dirty, &(folio)->flags) 42 + set_bit(PG_dcache_dirty, &(folio)->flags.f) 43 43 #define folio_clear_dcache_dirty(folio) \ 44 - clear_bit(PG_dcache_dirty, &(folio)->flags) 44 + clear_bit(PG_dcache_dirty, &(folio)->flags.f) 45 45 46 46 extern void (*flush_cache_all)(void); 47 47 extern void (*__flush_cache_all)(void);
+3 -3
arch/nios2/mm/cacheflush.c
··· 187 187 188 188 /* Flush this page if there are aliases. */ 189 189 if (mapping && !mapping_mapped(mapping)) { 190 - clear_bit(PG_dcache_clean, &folio->flags); 190 + clear_bit(PG_dcache_clean, &folio->flags.f); 191 191 } else { 192 192 __flush_dcache_folio(folio); 193 193 if (mapping) { ··· 195 195 flush_aliases(mapping, folio); 196 196 flush_icache_range(start, start + folio_size(folio)); 197 197 } 198 - set_bit(PG_dcache_clean, &folio->flags); 198 + set_bit(PG_dcache_clean, &folio->flags.f); 199 199 } 200 200 } 201 201 EXPORT_SYMBOL(flush_dcache_folio); ··· 227 227 return; 228 228 229 229 folio = page_folio(pfn_to_page(pfn)); 230 - if (!test_and_set_bit(PG_dcache_clean, &folio->flags)) 230 + if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f)) 231 231 __flush_dcache_folio(folio); 232 232 233 233 mapping = folio_flush_mapping(folio);
+1 -1
arch/openrisc/include/asm/cacheflush.h
··· 75 75 76 76 static inline void flush_dcache_folio(struct folio *folio) 77 77 { 78 - clear_bit(PG_dc_clean, &folio->flags); 78 + clear_bit(PG_dc_clean, &folio->flags.f); 79 79 } 80 80 #define flush_dcache_folio flush_dcache_folio 81 81
+1 -1
arch/openrisc/mm/cache.c
··· 83 83 { 84 84 unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT; 85 85 struct folio *folio = page_folio(pfn_to_page(pfn)); 86 - int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags); 86 + int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f); 87 87 88 88 /* 89 89 * Since icaches do not snoop for updated data on OpenRISC, we
+3 -3
arch/parisc/kernel/cache.c
··· 122 122 pfn = folio_pfn(folio); 123 123 nr = folio_nr_pages(folio); 124 124 if (folio_flush_mapping(folio) && 125 - test_bit(PG_dcache_dirty, &folio->flags)) { 125 + test_bit(PG_dcache_dirty, &folio->flags.f)) { 126 126 while (nr--) 127 127 flush_kernel_dcache_page_addr(pfn_va(pfn + nr)); 128 - clear_bit(PG_dcache_dirty, &folio->flags); 128 + clear_bit(PG_dcache_dirty, &folio->flags.f); 129 129 } else if (parisc_requires_coherency()) 130 130 while (nr--) 131 131 flush_kernel_dcache_page_addr(pfn_va(pfn + nr)); ··· 481 481 pgoff_t pgoff; 482 482 483 483 if (mapping && !mapping_mapped(mapping)) { 484 - set_bit(PG_dcache_dirty, &folio->flags); 484 + set_bit(PG_dcache_dirty, &folio->flags.f); 485 485 return; 486 486 } 487 487
+2 -2
arch/powerpc/include/asm/cacheflush.h
··· 40 40 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) 41 41 return; 42 42 /* avoid an atomic op if possible */ 43 - if (test_bit(PG_dcache_clean, &folio->flags)) 44 - clear_bit(PG_dcache_clean, &folio->flags); 43 + if (test_bit(PG_dcache_clean, &folio->flags.f)) 44 + clear_bit(PG_dcache_clean, &folio->flags.f); 45 45 } 46 46 #define flush_dcache_folio flush_dcache_folio 47 47
+2 -2
arch/powerpc/include/asm/kvm_ppc.h
··· 939 939 940 940 /* Clear i-cache for new pages */ 941 941 folio = page_folio(pfn_to_page(pfn)); 942 - if (!test_bit(PG_dcache_clean, &folio->flags)) { 942 + if (!test_bit(PG_dcache_clean, &folio->flags.f)) { 943 943 flush_dcache_icache_folio(folio); 944 - set_bit(PG_dcache_clean, &folio->flags); 944 + set_bit(PG_dcache_clean, &folio->flags.f); 945 945 } 946 946 } 947 947
+2 -2
arch/powerpc/mm/book3s64/hash_utils.c
··· 1562 1562 folio = page_folio(pte_page(pte)); 1563 1563 1564 1564 /* page is dirty */ 1565 - if (!test_bit(PG_dcache_clean, &folio->flags) && 1565 + if (!test_bit(PG_dcache_clean, &folio->flags.f) && 1566 1566 !folio_test_reserved(folio)) { 1567 1567 if (trap == INTERRUPT_INST_STORAGE) { 1568 1568 flush_dcache_icache_folio(folio); 1569 - set_bit(PG_dcache_clean, &folio->flags); 1569 + set_bit(PG_dcache_clean, &folio->flags.f); 1570 1570 } else 1571 1571 pp |= HPTE_R_N; 1572 1572 }
+6 -6
arch/powerpc/mm/pgtable.c
··· 87 87 struct folio *folio = maybe_pte_to_folio(pte); 88 88 if (!folio) 89 89 return pte; 90 - if (!test_bit(PG_dcache_clean, &folio->flags)) { 90 + if (!test_bit(PG_dcache_clean, &folio->flags.f)) { 91 91 flush_dcache_icache_folio(folio); 92 - set_bit(PG_dcache_clean, &folio->flags); 92 + set_bit(PG_dcache_clean, &folio->flags.f); 93 93 } 94 94 } 95 95 return pte; ··· 127 127 return pte; 128 128 129 129 /* If the page clean, we move on */ 130 - if (test_bit(PG_dcache_clean, &folio->flags)) 130 + if (test_bit(PG_dcache_clean, &folio->flags.f)) 131 131 return pte; 132 132 133 133 /* If it's an exec fault, we flush the cache and make it clean */ 134 134 if (is_exec_fault()) { 135 135 flush_dcache_icache_folio(folio); 136 - set_bit(PG_dcache_clean, &folio->flags); 136 + set_bit(PG_dcache_clean, &folio->flags.f); 137 137 return pte; 138 138 } 139 139 ··· 175 175 goto bail; 176 176 177 177 /* If the page is already clean, we move on */ 178 - if (test_bit(PG_dcache_clean, &folio->flags)) 178 + if (test_bit(PG_dcache_clean, &folio->flags.f)) 179 179 goto bail; 180 180 181 181 /* Clean the page and set PG_dcache_clean */ 182 182 flush_dcache_icache_folio(folio); 183 - set_bit(PG_dcache_clean, &folio->flags); 183 + set_bit(PG_dcache_clean, &folio->flags.f); 184 184 185 185 bail: 186 186 return pte_mkexec(pte);
+2 -2
arch/riscv/include/asm/cacheflush.h
··· 23 23 24 24 static inline void flush_dcache_folio(struct folio *folio) 25 25 { 26 - if (test_bit(PG_dcache_clean, &folio->flags)) 27 - clear_bit(PG_dcache_clean, &folio->flags); 26 + if (test_bit(PG_dcache_clean, &folio->flags.f)) 27 + clear_bit(PG_dcache_clean, &folio->flags.f); 28 28 } 29 29 #define flush_dcache_folio flush_dcache_folio 30 30 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+1 -1
arch/riscv/include/asm/hugetlb.h
··· 7 7 8 8 static inline void arch_clear_hugetlb_flags(struct folio *folio) 9 9 { 10 - clear_bit(PG_dcache_clean, &folio->flags); 10 + clear_bit(PG_dcache_clean, &folio->flags.f); 11 11 } 12 12 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags 13 13
+2 -2
arch/riscv/mm/cacheflush.c
··· 101 101 { 102 102 struct folio *folio = page_folio(pte_page(pte)); 103 103 104 - if (!test_bit(PG_dcache_clean, &folio->flags)) { 104 + if (!test_bit(PG_dcache_clean, &folio->flags.f)) { 105 105 flush_icache_mm(mm, false); 106 - set_bit(PG_dcache_clean, &folio->flags); 106 + set_bit(PG_dcache_clean, &folio->flags.f); 107 107 } 108 108 } 109 109 #endif /* CONFIG_MMU */
+1 -1
arch/s390/include/asm/hugetlb.h
··· 39 39 40 40 static inline void arch_clear_hugetlb_flags(struct folio *folio) 41 41 { 42 - clear_bit(PG_arch_1, &folio->flags); 42 + clear_bit(PG_arch_1, &folio->flags.f); 43 43 } 44 44 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags 45 45
+6 -6
arch/s390/kernel/uv.c
··· 144 144 folio_get(folio); 145 145 rc = uv_destroy(folio_to_phys(folio)); 146 146 if (!rc) 147 - clear_bit(PG_arch_1, &folio->flags); 147 + clear_bit(PG_arch_1, &folio->flags.f); 148 148 folio_put(folio); 149 149 return rc; 150 150 } ··· 193 193 folio_get(folio); 194 194 rc = uv_convert_from_secure(folio_to_phys(folio)); 195 195 if (!rc) 196 - clear_bit(PG_arch_1, &folio->flags); 196 + clear_bit(PG_arch_1, &folio->flags.f); 197 197 folio_put(folio); 198 198 return rc; 199 199 } ··· 289 289 expected = expected_folio_refs(folio) + 1; 290 290 if (!folio_ref_freeze(folio, expected)) 291 291 return -EBUSY; 292 - set_bit(PG_arch_1, &folio->flags); 292 + set_bit(PG_arch_1, &folio->flags.f); 293 293 /* 294 294 * If the UVC does not succeed or fail immediately, we don't want to 295 295 * loop for long, or we might get stall notifications. ··· 483 483 * convert_to_secure. 484 484 * As secure pages are never large folios, both variants can co-exists. 485 485 */ 486 - if (!test_bit(PG_arch_1, &folio->flags)) 486 + if (!test_bit(PG_arch_1, &folio->flags.f)) 487 487 return 0; 488 488 489 489 rc = uv_pin_shared(folio_to_phys(folio)); 490 490 if (!rc) { 491 - clear_bit(PG_arch_1, &folio->flags); 491 + clear_bit(PG_arch_1, &folio->flags.f); 492 492 return 0; 493 493 } 494 494 495 495 rc = uv_convert_from_secure(folio_to_phys(folio)); 496 496 if (!rc) { 497 - clear_bit(PG_arch_1, &folio->flags); 497 + clear_bit(PG_arch_1, &folio->flags.f); 498 498 return 0; 499 499 } 500 500
+1 -1
arch/s390/mm/gmap.c
··· 2272 2272 start = pmd_val(*pmd) & HPAGE_MASK; 2273 2273 end = start + HPAGE_SIZE; 2274 2274 __storage_key_init_range(start, end); 2275 - set_bit(PG_arch_1, &folio->flags); 2275 + set_bit(PG_arch_1, &folio->flags.f); 2276 2276 cond_resched(); 2277 2277 return 0; 2278 2278 }
+1 -1
arch/s390/mm/hugetlbpage.c
··· 155 155 paddr = rste & PMD_MASK; 156 156 } 157 157 158 - if (!test_and_set_bit(PG_arch_1, &folio->flags)) 158 + if (!test_and_set_bit(PG_arch_1, &folio->flags.f)) 159 159 __storage_key_init_range(paddr, paddr + size); 160 160 } 161 161
+1 -1
arch/sh/include/asm/hugetlb.h
··· 14 14 15 15 static inline void arch_clear_hugetlb_flags(struct folio *folio) 16 16 { 17 - clear_bit(PG_dcache_clean, &folio->flags); 17 + clear_bit(PG_dcache_clean, &folio->flags.f); 18 18 } 19 19 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags 20 20
+1 -1
arch/sh/mm/cache-sh4.c
··· 114 114 struct address_space *mapping = folio_flush_mapping(folio); 115 115 116 116 if (mapping && !mapping_mapped(mapping)) 117 - clear_bit(PG_dcache_clean, &folio->flags); 117 + clear_bit(PG_dcache_clean, &folio->flags.f); 118 118 else 119 119 #endif 120 120 {
+1 -1
arch/sh/mm/cache-sh7705.c
··· 138 138 struct address_space *mapping = folio_flush_mapping(folio); 139 139 140 140 if (mapping && !mapping_mapped(mapping)) 141 - clear_bit(PG_dcache_clean, &folio->flags); 141 + clear_bit(PG_dcache_clean, &folio->flags.f); 142 142 else { 143 143 unsigned long pfn = folio_pfn(folio); 144 144 unsigned int i, nr = folio_nr_pages(folio);
+7 -7
arch/sh/mm/cache.c
··· 64 64 struct folio *folio = page_folio(page); 65 65 66 66 if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) && 67 - test_bit(PG_dcache_clean, &folio->flags)) { 67 + test_bit(PG_dcache_clean, &folio->flags.f)) { 68 68 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); 69 69 memcpy(vto, src, len); 70 70 kunmap_coherent(vto); 71 71 } else { 72 72 memcpy(dst, src, len); 73 73 if (boot_cpu_data.dcache.n_aliases) 74 - clear_bit(PG_dcache_clean, &folio->flags); 74 + clear_bit(PG_dcache_clean, &folio->flags.f); 75 75 } 76 76 77 77 if (vma->vm_flags & VM_EXEC) ··· 85 85 struct folio *folio = page_folio(page); 86 86 87 87 if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) && 88 - test_bit(PG_dcache_clean, &folio->flags)) { 88 + test_bit(PG_dcache_clean, &folio->flags.f)) { 89 89 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); 90 90 memcpy(dst, vfrom, len); 91 91 kunmap_coherent(vfrom); 92 92 } else { 93 93 memcpy(dst, src, len); 94 94 if (boot_cpu_data.dcache.n_aliases) 95 - clear_bit(PG_dcache_clean, &folio->flags); 95 + clear_bit(PG_dcache_clean, &folio->flags.f); 96 96 } 97 97 } 98 98 ··· 105 105 vto = kmap_atomic(to); 106 106 107 107 if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) && 108 - test_bit(PG_dcache_clean, &src->flags)) { 108 + test_bit(PG_dcache_clean, &src->flags.f)) { 109 109 vfrom = kmap_coherent(from, vaddr); 110 110 copy_page(vto, vfrom); 111 111 kunmap_coherent(vfrom); ··· 148 148 149 149 if (pfn_valid(pfn)) { 150 150 struct folio *folio = page_folio(pfn_to_page(pfn)); 151 - int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags); 151 + int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags.f); 152 152 if (dirty) 153 153 __flush_purge_region(folio_address(folio), 154 154 folio_size(folio)); ··· 162 162 163 163 if (pages_do_alias(addr, vmaddr)) { 164 164 if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) && 165 - test_bit(PG_dcache_clean, &folio->flags)) { 165 + 
test_bit(PG_dcache_clean, &folio->flags)) { 165 + test_bit(PG_dcache_clean, &folio->flags.f)) { 166 166 void *kaddr; 167 167 168 168 kaddr = kmap_coherent(page, vmaddr);
+1 -1
arch/sh/mm/kmap.c
··· 31 31 enum fixed_addresses idx; 32 32 unsigned long vaddr; 33 33 34 - BUG_ON(!test_bit(PG_dcache_clean, &folio->flags)); 34 + BUG_ON(!test_bit(PG_dcache_clean, &folio->flags.f)); 35 35 36 36 preempt_disable(); 37 37 pagefault_disable();
+5 -5
arch/sparc/mm/init_64.c
··· 224 224 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL) 225 225 226 226 #define dcache_dirty_cpu(folio) \ 227 - (((folio)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) 227 + (((folio)->flags.f >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) 228 228 229 229 static inline void set_dcache_dirty(struct folio *folio, int this_cpu) 230 230 { ··· 243 243 "bne,pn %%xcc, 1b\n\t" 244 244 " nop" 245 245 : /* no outputs */ 246 - : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags) 246 + : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags.f) 247 247 : "g1", "g7"); 248 248 } 249 249 ··· 265 265 " nop\n" 266 266 "2:" 267 267 : /* no outputs */ 268 - : "r" (cpu), "r" (mask), "r" (&folio->flags), 268 + : "r" (cpu), "r" (mask), "r" (&folio->flags.f), 269 269 "i" (PG_dcache_cpu_mask), 270 270 "i" (PG_dcache_cpu_shift) 271 271 : "g1", "g7"); ··· 292 292 struct folio *folio = page_folio(page); 293 293 unsigned long pg_flags; 294 294 295 - pg_flags = folio->flags; 295 + pg_flags = folio->flags.f; 296 296 if (pg_flags & (1UL << PG_dcache_dirty)) { 297 297 int cpu = ((pg_flags >> PG_dcache_cpu_shift) & 298 298 PG_dcache_cpu_mask); ··· 480 480 481 481 mapping = folio_flush_mapping(folio); 482 482 if (mapping && !mapping_mapped(mapping)) { 483 - bool dirty = test_bit(PG_dcache_dirty, &folio->flags); 483 + bool dirty = test_bit(PG_dcache_dirty, &folio->flags.f); 484 484 if (dirty) { 485 485 int dirty_cpu = dcache_dirty_cpu(folio); 486 486
+3 -3
arch/x86/mm/pat/memtype.c
··· 126 126 127 127 static inline enum page_cache_mode get_page_memtype(struct page *pg) 128 128 { 129 - unsigned long pg_flags = pg->flags & _PGMT_MASK; 129 + unsigned long pg_flags = pg->flags.f & _PGMT_MASK; 130 130 131 131 if (pg_flags == _PGMT_WB) 132 132 return _PAGE_CACHE_MODE_WB; ··· 161 161 break; 162 162 } 163 163 164 - old_flags = READ_ONCE(pg->flags); 164 + old_flags = READ_ONCE(pg->flags.f); 165 165 do { 166 166 new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags; 167 - } while (!try_cmpxchg(&pg->flags, &old_flags, new_flags)); 167 + } while (!try_cmpxchg(&pg->flags.f, &old_flags, new_flags)); 168 168 } 169 169 #else 170 170 static inline enum page_cache_mode get_page_memtype(struct page *pg)
+6 -6
arch/xtensa/mm/cache.c
··· 134 134 */ 135 135 136 136 if (mapping && !mapping_mapped(mapping)) { 137 - if (!test_bit(PG_arch_1, &folio->flags)) 138 - set_bit(PG_arch_1, &folio->flags); 137 + if (!test_bit(PG_arch_1, &folio->flags.f)) 138 + set_bit(PG_arch_1, &folio->flags.f); 139 139 return; 140 140 141 141 } else { ··· 232 232 233 233 #if (DCACHE_WAY_SIZE > PAGE_SIZE) 234 234 235 - if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) { 235 + if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags.f)) { 236 236 unsigned long phys = folio_pfn(folio) * PAGE_SIZE; 237 237 unsigned long tmp; 238 238 ··· 247 247 } 248 248 preempt_enable(); 249 249 250 - clear_bit(PG_arch_1, &folio->flags); 250 + clear_bit(PG_arch_1, &folio->flags.f); 251 251 } 252 252 #else 253 - if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags) 253 + if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags.f) 254 254 && (vma->vm_flags & VM_EXEC) != 0) { 255 255 for (i = 0; i < nr; i++) { 256 256 void *paddr = kmap_local_folio(folio, i * PAGE_SIZE); ··· 258 258 __invalidate_icache_page((unsigned long)paddr); 259 259 kunmap_local(paddr); 260 260 } 261 - set_bit(PG_arch_1, &folio->flags); 261 + set_bit(PG_arch_1, &folio->flags.f); 262 262 } 263 263 #endif 264 264 }
+1 -1
fs/fuse/dev.c
··· 935 935 { 936 936 if (folio_mapped(folio) || 937 937 folio->mapping != NULL || 938 - (folio->flags & PAGE_FLAGS_CHECK_AT_PREP & 938 + (folio->flags.f & PAGE_FLAGS_CHECK_AT_PREP & 939 939 ~(1 << PG_locked | 940 940 1 << PG_referenced | 941 941 1 << PG_lru |
+1 -1
fs/gfs2/glops.c
··· 40 40 "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page " 41 41 "state 0x%lx\n", 42 42 bh, (unsigned long long)bh->b_blocknr, bh->b_state, 43 - bh->b_folio->mapping, bh->b_folio->flags); 43 + bh->b_folio->mapping, bh->b_folio->flags.f); 44 44 fs_err(sdp, "AIL glock %u:%llu mapping %p\n", 45 45 gl->gl_name.ln_type, gl->gl_name.ln_number, 46 46 gfs2_glock2aspace(gl));
+2 -2
fs/jffs2/file.c
··· 230 230 goto release_sem; 231 231 } 232 232 } 233 - jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags); 233 + jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags.f); 234 234 235 235 release_sem: 236 236 mutex_unlock(&c->alloc_sem); ··· 259 259 260 260 jffs2_dbg(1, "%s(): ino #%lu, page at 0x%llx, range %d-%d, flags %lx\n", 261 261 __func__, inode->i_ino, folio_pos(folio), 262 - start, end, folio->flags); 262 + start, end, folio->flags.f); 263 263 264 264 /* We need to avoid deadlock with page_cache_read() in 265 265 jffs2_garbage_collect_pass(). So the folio must be
+1 -1
fs/nilfs2/page.c
··· 167 167 printk(KERN_CRIT "NILFS_FOLIO_BUG(%p): cnt=%d index#=%llu flags=0x%lx " 168 168 "mapping=%p ino=%lu\n", 169 169 folio, folio_ref_count(folio), 170 - (unsigned long long)folio->index, folio->flags, m, ino); 170 + (unsigned long long)folio->index, folio->flags.f, m, ino); 171 171 172 172 head = folio_buffers(folio); 173 173 if (head) {
+2 -2
fs/proc/page.c
··· 163 163 snapshot_page(&ps, page); 164 164 folio = &ps.folio_snapshot; 165 165 166 - k = folio->flags; 166 + k = folio->flags.f; 167 167 mapping = (unsigned long)folio->mapping; 168 168 is_anon = mapping & FOLIO_MAPPING_ANON; 169 169 ··· 238 238 if (u & (1 << KPF_HUGE)) 239 239 u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); 240 240 else 241 - u |= kpf_copy_bit(ps.page_snapshot.flags, KPF_HWPOISON, PG_hwpoison); 241 + u |= kpf_copy_bit(ps.page_snapshot.flags.f, KPF_HWPOISON, PG_hwpoison); 242 242 #endif 243 243 244 244 u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
+3 -3
fs/ubifs/file.c
··· 107 107 size_t offset = 0; 108 108 109 109 dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx", 110 - inode->i_ino, folio->index, i_size, folio->flags); 110 + inode->i_ino, folio->index, i_size, folio->flags.f); 111 111 ubifs_assert(c, !folio_test_checked(folio)); 112 112 ubifs_assert(c, !folio->private); 113 113 ··· 600 600 pgoff_t end_index; 601 601 602 602 dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx", 603 - inode->i_ino, folio->index, i_size, folio->flags); 603 + inode->i_ino, folio->index, i_size, folio->flags.f); 604 604 605 605 end_index = (i_size - 1) >> PAGE_SHIFT; 606 606 if (!i_size || folio->index > end_index) { ··· 988 988 int err, len = folio_size(folio); 989 989 990 990 dbg_gen("ino %lu, pg %lu, pg flags %#lx", 991 - inode->i_ino, folio->index, folio->flags); 991 + inode->i_ino, folio->index, folio->flags.f); 992 992 ubifs_assert(c, folio->private != NULL); 993 993 994 994 /* Is the folio fully outside @i_size? (truncate in progress) */
+16 -16
include/linux/mm.h
··· 1024 1024 { 1025 1025 struct folio *folio = (struct folio *)page; 1026 1026 1027 - if (!test_bit(PG_head, &folio->flags)) 1027 + if (!test_bit(PG_head, &folio->flags.f)) 1028 1028 return 0; 1029 1029 return folio_large_order(folio); 1030 1030 } ··· 1554 1554 */ 1555 1555 static inline int page_zone_id(struct page *page) 1556 1556 { 1557 - return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; 1557 + return (page->flags.f >> ZONEID_PGSHIFT) & ZONEID_MASK; 1558 1558 } 1559 1559 1560 1560 #ifdef NODE_NOT_IN_PAGE_FLAGS ··· 1562 1562 #else 1563 1563 static inline int page_to_nid(const struct page *page) 1564 1564 { 1565 - return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK; 1565 + return (PF_POISONED_CHECK(page)->flags.f >> NODES_PGSHIFT) & NODES_MASK; 1566 1566 } 1567 1567 #endif 1568 1568 ··· 1637 1637 #else 1638 1638 static inline int folio_last_cpupid(struct folio *folio) 1639 1639 { 1640 - return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; 1640 + return (folio->flags.f >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; 1641 1641 } 1642 1642 1643 1643 int folio_xchg_last_cpupid(struct folio *folio, int cpupid); 1644 1644 1645 1645 static inline void page_cpupid_reset_last(struct page *page) 1646 1646 { 1647 - page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; 1647 + page->flags.f |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; 1648 1648 } 1649 1649 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ 1650 1650 ··· 1740 1740 u8 tag = KASAN_TAG_KERNEL; 1741 1741 1742 1742 if (kasan_enabled()) { 1743 - tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; 1743 + tag = (page->flags.f >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; 1744 1744 tag ^= 0xff; 1745 1745 } 1746 1746 ··· 1755 1755 return; 1756 1756 1757 1757 tag ^= 0xff; 1758 - old_flags = READ_ONCE(page->flags); 1758 + old_flags = READ_ONCE(page->flags.f); 1759 1759 do { 1760 1760 flags = old_flags; 1761 1761 flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); 1762 1762 flags |= (tag & 
KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; 1763 - } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); 1763 + } while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags))); 1764 1764 } 1765 1765 1766 1766 static inline void page_kasan_tag_reset(struct page *page) ··· 1804 1804 #ifdef SECTION_IN_PAGE_FLAGS 1805 1805 static inline void set_page_section(struct page *page, unsigned long section) 1806 1806 { 1807 - page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); 1808 - page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; 1807 + page->flags.f &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); 1808 + page->flags.f |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; 1809 1809 } 1810 1810 1811 1811 static inline unsigned long page_to_section(const struct page *page) 1812 1812 { 1813 - return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; 1813 + return (page->flags.f >> SECTIONS_PGSHIFT) & SECTIONS_MASK; 1814 1814 } 1815 1815 #endif 1816 1816 ··· 2015 2015 2016 2016 static inline void set_page_zone(struct page *page, enum zone_type zone) 2017 2017 { 2018 - page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); 2019 - page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; 2018 + page->flags.f &= ~(ZONES_MASK << ZONES_PGSHIFT); 2019 + page->flags.f |= (zone & ZONES_MASK) << ZONES_PGSHIFT; 2020 2020 } 2021 2021 2022 2022 static inline void set_page_node(struct page *page, unsigned long node) 2023 2023 { 2024 - page->flags &= ~(NODES_MASK << NODES_PGSHIFT); 2025 - page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; 2024 + page->flags.f &= ~(NODES_MASK << NODES_PGSHIFT); 2025 + page->flags.f |= (node & NODES_MASK) << NODES_PGSHIFT; 2026 2026 } 2027 2027 2028 2028 static inline void set_page_links(struct page *page, enum zone_type zone, ··· 2064 2064 { 2065 2065 struct folio *folio = (struct folio *)page; 2066 2066 2067 - if (!test_bit(PG_head, &folio->flags)) 2067 + if (!test_bit(PG_head, &folio->flags.f)) 2068 2068 return 1; 2069 2069 return 
folio_large_nr_pages(folio); 2070 2070 }
+6 -6
include/linux/mm_inline.h
··· 143 143 144 144 static inline int folio_lru_refs(struct folio *folio) 145 145 { 146 - unsigned long flags = READ_ONCE(folio->flags); 146 + unsigned long flags = READ_ONCE(folio->flags.f); 147 147 148 148 if (!(flags & BIT(PG_referenced))) 149 149 return 0; ··· 156 156 157 157 static inline int folio_lru_gen(struct folio *folio) 158 158 { 159 - unsigned long flags = READ_ONCE(folio->flags); 159 + unsigned long flags = READ_ONCE(folio->flags.f); 160 160 161 161 return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; 162 162 } ··· 268 268 gen = lru_gen_from_seq(seq); 269 269 flags = (gen + 1UL) << LRU_GEN_PGOFF; 270 270 /* see the comment on MIN_NR_GENS about PG_active */ 271 - set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags); 271 + set_mask_bits(&folio->flags.f, LRU_GEN_MASK | BIT(PG_active), flags); 272 272 273 273 lru_gen_update_size(lruvec, folio, -1, gen); 274 274 /* for folio_rotate_reclaimable() */ ··· 293 293 294 294 /* for folio_migrate_flags() */ 295 295 flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0; 296 - flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags); 296 + flags = set_mask_bits(&folio->flags.f, LRU_GEN_MASK, flags); 297 297 gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; 298 298 299 299 lru_gen_update_size(lruvec, folio, gen, -1); ··· 304 304 305 305 static inline void folio_migrate_refs(struct folio *new, struct folio *old) 306 306 { 307 - unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK; 307 + unsigned long refs = READ_ONCE(old->flags.f) & LRU_REFS_MASK; 308 308 309 - set_mask_bits(&new->flags, LRU_REFS_MASK, refs); 309 + set_mask_bits(&new->flags.f, LRU_REFS_MASK, refs); 310 310 } 311 311 #else /* !CONFIG_LRU_GEN */ 312 312
+6 -2
include/linux/mm_types.h
··· 34 34 struct futex_private_hash; 35 35 struct mem_cgroup; 36 36 37 + typedef struct { 38 + unsigned long f; 39 + } memdesc_flags_t; 40 + 37 41 /* 38 42 * Each physical page in the system has a struct page associated with 39 43 * it to keep track of whatever it is we are using the page for at the ··· 76 72 #endif 77 73 78 74 struct page { 79 - unsigned long flags; /* Atomic flags, some possibly 75 + memdesc_flags_t flags; /* Atomic flags, some possibly 80 76 * updated asynchronously */ 81 77 /* 82 78 * Five words (20/40 bytes) are available in this union. ··· 387 383 union { 388 384 struct { 389 385 /* public: */ 390 - unsigned long flags; 386 + memdesc_flags_t flags; 391 387 union { 392 388 struct list_head lru; 393 389 /* private: avoid cluttering the output */
+1 -1
include/linux/mmzone.h
··· 1186 1186 static inline enum zone_type page_zonenum(const struct page *page) 1187 1187 { 1188 1188 ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); 1189 - return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; 1189 + return (page->flags.f >> ZONES_PGSHIFT) & ZONES_MASK; 1190 1190 } 1191 1191 1192 1192 static inline enum zone_type folio_zonenum(const struct folio *folio)
+20 -20
include/linux/page-flags.h
··· 217 217 * cold cacheline in some cases. 218 218 */ 219 219 if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) && 220 - test_bit(PG_head, &page->flags)) { 220 + test_bit(PG_head, &page->flags.f)) { 221 221 /* 222 222 * We can safely access the field of the @page[1] with PG_head 223 223 * because the @page is a compound page composed with at least ··· 325 325 326 326 static __always_inline int PageCompound(const struct page *page) 327 327 { 328 - return test_bit(PG_head, &page->flags) || 328 + return test_bit(PG_head, &page->flags.f) || 329 329 READ_ONCE(page->compound_head) & 1; 330 330 } 331 331 332 332 #define PAGE_POISON_PATTERN -1l 333 333 static inline int PagePoisoned(const struct page *page) 334 334 { 335 - return READ_ONCE(page->flags) == PAGE_POISON_PATTERN; 335 + return READ_ONCE(page->flags.f) == PAGE_POISON_PATTERN; 336 336 } 337 337 338 338 #ifdef CONFIG_DEBUG_VM ··· 349 349 const struct page *page = &folio->page; 350 350 351 351 VM_BUG_ON_PGFLAGS(page->compound_head & 1, page); 352 - VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page); 353 - return &page[n].flags; 352 + VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page); 353 + return &page[n].flags.f; 354 354 } 355 355 356 356 static unsigned long *folio_flags(struct folio *folio, unsigned n) ··· 358 358 struct page *page = &folio->page; 359 359 360 360 VM_BUG_ON_PGFLAGS(page->compound_head & 1, page); 361 - VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page); 362 - return &page[n].flags; 361 + VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page); 362 + return &page[n].flags.f; 363 363 } 364 364 365 365 /* ··· 449 449 #define TESTPAGEFLAG(uname, lname, policy) \ 450 450 FOLIO_TEST_FLAG(lname, FOLIO_##policy) \ 451 451 static __always_inline int Page##uname(const struct page *page) \ 452 - { return test_bit(PG_##lname, &policy(page, 0)->flags); } 452 + { return test_bit(PG_##lname, &policy(page, 0)->flags.f); } 453 453 454 454 #define 
SETPAGEFLAG(uname, lname, policy) \ 455 455 FOLIO_SET_FLAG(lname, FOLIO_##policy) \ 456 456 static __always_inline void SetPage##uname(struct page *page) \ 457 - { set_bit(PG_##lname, &policy(page, 1)->flags); } 457 + { set_bit(PG_##lname, &policy(page, 1)->flags.f); } 458 458 459 459 #define CLEARPAGEFLAG(uname, lname, policy) \ 460 460 FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \ 461 461 static __always_inline void ClearPage##uname(struct page *page) \ 462 - { clear_bit(PG_##lname, &policy(page, 1)->flags); } 462 + { clear_bit(PG_##lname, &policy(page, 1)->flags.f); } 463 463 464 464 #define __SETPAGEFLAG(uname, lname, policy) \ 465 465 __FOLIO_SET_FLAG(lname, FOLIO_##policy) \ 466 466 static __always_inline void __SetPage##uname(struct page *page) \ 467 - { __set_bit(PG_##lname, &policy(page, 1)->flags); } 467 + { __set_bit(PG_##lname, &policy(page, 1)->flags.f); } 468 468 469 469 #define __CLEARPAGEFLAG(uname, lname, policy) \ 470 470 __FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \ 471 471 static __always_inline void __ClearPage##uname(struct page *page) \ 472 - { __clear_bit(PG_##lname, &policy(page, 1)->flags); } 472 + { __clear_bit(PG_##lname, &policy(page, 1)->flags.f); } 473 473 474 474 #define TESTSETFLAG(uname, lname, policy) \ 475 475 FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy) \ 476 476 static __always_inline int TestSetPage##uname(struct page *page) \ 477 - { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); } 477 + { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags.f); } 478 478 479 479 #define TESTCLEARFLAG(uname, lname, policy) \ 480 480 FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy) \ 481 481 static __always_inline int TestClearPage##uname(struct page *page) \ 482 - { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } 482 + { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags.f); } 483 483 484 484 #define PAGEFLAG(uname, lname, policy) \ 485 485 TESTPAGEFLAG(uname, lname, policy) \ ··· 846 846 static 
__always_inline int PageHead(const struct page *page) 847 847 { 848 848 PF_POISONED_CHECK(page); 849 - return test_bit(PG_head, &page->flags) && !page_is_fake_head(page); 849 + return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page); 850 850 } 851 851 852 852 __SETPAGEFLAG(Head, head, PF_ANY) ··· 1170 1170 */ 1171 1171 if (PageHuge(page)) 1172 1172 page = compound_head(page); 1173 - return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); 1173 + return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f); 1174 1174 } 1175 1175 1176 1176 static __always_inline void SetPageAnonExclusive(struct page *page) 1177 1177 { 1178 1178 VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page); 1179 1179 VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); 1180 - set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); 1180 + set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f); 1181 1181 } 1182 1182 1183 1183 static __always_inline void ClearPageAnonExclusive(struct page *page) 1184 1184 { 1185 1185 VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page); 1186 1186 VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); 1187 - clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); 1187 + clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f); 1188 1188 } 1189 1189 1190 1190 static __always_inline void __ClearPageAnonExclusive(struct page *page) 1191 1191 { 1192 1192 VM_BUG_ON_PGFLAGS(!PageAnon(page), page); 1193 1193 VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); 1194 - __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); 1194 + __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f); 1195 1195 } 1196 1196 1197 1197 #ifdef CONFIG_MMU ··· 1241 1241 */ 1242 1242 static inline int folio_has_private(const struct folio *folio) 1243 1243 { 1244 - return !!(folio->flags & PAGE_FLAGS_PRIVATE); 1244 + return !!(folio->flags.f & PAGE_FLAGS_PRIVATE); 1245 1245 } 1246 1246 1247 1247 #undef PF_ANY
+4 -3
include/linux/pgalloc_tag.h
··· 107 107 if (static_key_enabled(&mem_profiling_compressed)) { 108 108 pgalloc_tag_idx idx; 109 109 110 - idx = (page->flags >> alloc_tag_ref_offs) & alloc_tag_ref_mask; 110 + idx = (page->flags.f >> alloc_tag_ref_offs) & 111 + alloc_tag_ref_mask; 111 112 idx_to_ref(idx, ref); 112 113 handle->page = page; 113 114 } else { ··· 150 149 idx = (unsigned long)ref_to_idx(ref); 151 150 idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs; 152 151 do { 153 - old_flags = READ_ONCE(page->flags); 152 + old_flags = READ_ONCE(page->flags.f); 154 153 flags = old_flags; 155 154 flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs); 156 155 flags |= idx; 157 - } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); 156 + } while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags))); 158 157 } else { 159 158 if (WARN_ON(!handle.ref || !ref)) 160 159 return;
+2 -2
include/trace/events/page_ref.h
··· 28 28 29 29 TP_fast_assign( 30 30 __entry->pfn = page_to_pfn(page); 31 - __entry->flags = page->flags; 31 + __entry->flags = page->flags.f; 32 32 __entry->count = page_ref_count(page); 33 33 __entry->mapcount = atomic_read(&page->_mapcount); 34 34 __entry->mapping = page->mapping; ··· 77 77 78 78 TP_fast_assign( 79 79 __entry->pfn = page_to_pfn(page); 80 - __entry->flags = page->flags; 80 + __entry->flags = page->flags.f; 81 81 __entry->count = page_ref_count(page); 82 82 __entry->mapcount = atomic_read(&page->_mapcount); 83 83 __entry->mapping = page->mapping;
+4 -4
mm/filemap.c
··· 1140 1140 */ 1141 1141 flags = wait->flags; 1142 1142 if (flags & WQ_FLAG_EXCLUSIVE) { 1143 - if (test_bit(key->bit_nr, &key->folio->flags)) 1143 + if (test_bit(key->bit_nr, &key->folio->flags.f)) 1144 1144 return -1; 1145 1145 if (flags & WQ_FLAG_CUSTOM) { 1146 - if (test_and_set_bit(key->bit_nr, &key->folio->flags)) 1146 + if (test_and_set_bit(key->bit_nr, &key->folio->flags.f)) 1147 1147 return -1; 1148 1148 flags |= WQ_FLAG_DONE; 1149 1149 } ··· 1226 1226 struct wait_queue_entry *wait) 1227 1227 { 1228 1228 if (wait->flags & WQ_FLAG_EXCLUSIVE) { 1229 - if (test_and_set_bit(bit_nr, &folio->flags)) 1229 + if (test_and_set_bit(bit_nr, &folio->flags.f)) 1230 1230 return false; 1231 - } else if (test_bit(bit_nr, &folio->flags)) 1231 + } else if (test_bit(bit_nr, &folio->flags.f)) 1232 1232 return false; 1233 1233 1234 1234 wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
+2 -2
mm/huge_memory.c
··· 3303 3303 * unreferenced sub-pages of an anonymous THP: we can simply drop 3304 3304 * PG_anon_exclusive (-> PG_mappedtodisk) for these here. 3305 3305 */ 3306 - new_folio->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 3307 - new_folio->flags |= (folio->flags & 3306 + new_folio->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; 3307 + new_folio->flags.f |= (folio->flags.f & 3308 3308 ((1L << PG_referenced) | 3309 3309 (1L << PG_swapbacked) | 3310 3310 (1L << PG_swapcache) |
+6 -6
mm/memory-failure.c
··· 1707 1707 * carried out only if the first check can't determine the page status. 1708 1708 */ 1709 1709 for (ps = error_states;; ps++) 1710 - if ((p->flags & ps->mask) == ps->res) 1710 + if ((p->flags.f & ps->mask) == ps->res) 1711 1711 break; 1712 1712 1713 - page_flags |= (p->flags & (1UL << PG_dirty)); 1713 + page_flags |= (p->flags.f & (1UL << PG_dirty)); 1714 1714 1715 1715 if (!ps->mask) 1716 1716 for (ps = error_states;; ps++) ··· 2137 2137 return action_result(pfn, MF_MSG_FREE_HUGE, res); 2138 2138 } 2139 2139 2140 - page_flags = folio->flags; 2140 + page_flags = folio->flags.f; 2141 2141 2142 2142 if (!hwpoison_user_mappings(folio, p, pfn, flags)) { 2143 2143 folio_unlock(folio); ··· 2398 2398 * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page 2399 2399 * status correctly, we save a copy of the page flags at this time. 2400 2400 */ 2401 - page_flags = folio->flags; 2401 + page_flags = folio->flags.f; 2402 2402 2403 2403 /* 2404 2404 * __munlock_folio() may clear a writeback folio's LRU flag without ··· 2744 2744 putback_movable_pages(&pagelist); 2745 2745 2746 2746 pr_info("%#lx: %s migration failed %ld, type %pGp\n", 2747 - pfn, msg_page[huge], ret, &page->flags); 2747 + pfn, msg_page[huge], ret, &page->flags.f); 2748 2748 if (ret > 0) 2749 2749 ret = -EBUSY; 2750 2750 } 2751 2751 } else { 2752 2752 pr_info("%#lx: %s isolation failed, page count %d, type %pGp\n", 2753 - pfn, msg_page[huge], page_count(page), &page->flags); 2753 + pfn, msg_page[huge], page_count(page), &page->flags.f); 2754 2754 ret = -EBUSY; 2755 2755 } 2756 2756 return ret;
+2 -2
mm/mmzone.c
··· 99 99 unsigned long old_flags, flags; 100 100 int last_cpupid; 101 101 102 - old_flags = READ_ONCE(folio->flags); 102 + old_flags = READ_ONCE(folio->flags.f); 103 103 do { 104 104 flags = old_flags; 105 105 last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; 106 106 107 107 flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT); 108 108 flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT; 109 - } while (unlikely(!try_cmpxchg(&folio->flags, &old_flags, flags))); 109 + } while (unlikely(!try_cmpxchg(&folio->flags.f, &old_flags, flags))); 110 110 111 111 return last_cpupid; 112 112 }
+6 -6
mm/page_alloc.c
··· 950 950 bool to_tail; 951 951 952 952 VM_BUG_ON(!zone_is_initialized(zone)); 953 - VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); 953 + VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page); 954 954 955 955 VM_BUG_ON(migratetype == -1); 956 956 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); ··· 1043 1043 page->memcg_data | 1044 1044 #endif 1045 1045 page_pool_page_is_pp(page) | 1046 - (page->flags & check_flags))) 1046 + (page->flags.f & check_flags))) 1047 1047 return false; 1048 1048 1049 1049 return true; ··· 1059 1059 bad_reason = "non-NULL mapping"; 1060 1060 if (unlikely(page_ref_count(page) != 0)) 1061 1061 bad_reason = "nonzero _refcount"; 1062 - if (unlikely(page->flags & flags)) { 1062 + if (unlikely(page->flags.f & flags)) { 1063 1063 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 1064 1064 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 1065 1065 else ··· 1358 1358 int i; 1359 1359 1360 1360 if (compound) { 1361 - page[1].flags &= ~PAGE_FLAGS_SECOND; 1361 + page[1].flags.f &= ~PAGE_FLAGS_SECOND; 1362 1362 #ifdef NR_PAGES_IN_LARGE_FOLIO 1363 1363 folio->_nr_pages = 0; 1364 1364 #endif ··· 1372 1372 continue; 1373 1373 } 1374 1374 } 1375 - (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1375 + (page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; 1376 1376 } 1377 1377 } 1378 1378 if (folio_test_anon(folio)) { ··· 1391 1391 } 1392 1392 1393 1393 page_cpupid_reset_last(page); 1394 - page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1394 + page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; 1395 1395 reset_page_owner(page, order); 1396 1396 page_table_check_free(page, order); 1397 1397 pgalloc_tag_sub(page, 1 << order);
+4 -4
mm/swap.c
··· 387 387 388 388 static void lru_gen_inc_refs(struct folio *folio) 389 389 { 390 - unsigned long new_flags, old_flags = READ_ONCE(folio->flags); 390 + unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f); 391 391 392 392 if (folio_test_unevictable(folio)) 393 393 return; 394 394 395 395 /* see the comment on LRU_REFS_FLAGS */ 396 396 if (!folio_test_referenced(folio)) { 397 - set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); 397 + set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced)); 398 398 return; 399 399 } 400 400 ··· 406 406 } 407 407 408 408 new_flags = old_flags + BIT(LRU_REFS_PGOFF); 409 - } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); 409 + } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags)); 410 410 } 411 411 412 412 static bool lru_gen_clear_refs(struct folio *folio) ··· 418 418 if (gen < 0) 419 419 return true; 420 420 421 - set_mask_bits(&folio->flags, LRU_REFS_FLAGS | BIT(PG_workingset), 0); 421 + set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS | BIT(PG_workingset), 0); 422 422 423 423 lrugen = &folio_lruvec(folio)->lrugen; 424 424 /* whether can do without shuffling under the LRU lock */
+9 -9
mm/vmscan.c
··· 888 888 { 889 889 /* see the comment on LRU_REFS_FLAGS */ 890 890 if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) { 891 - set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); 891 + set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced)); 892 892 return false; 893 893 } 894 894 895 - set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_workingset)); 895 + set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_workingset)); 896 896 return true; 897 897 } 898 898 #else ··· 3257 3257 /* promote pages accessed through page tables */ 3258 3258 static int folio_update_gen(struct folio *folio, int gen) 3259 3259 { 3260 - unsigned long new_flags, old_flags = READ_ONCE(folio->flags); 3260 + unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f); 3261 3261 3262 3262 VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); 3263 3263 3264 3264 /* see the comment on LRU_REFS_FLAGS */ 3265 3265 if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) { 3266 - set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); 3266 + set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced)); 3267 3267 return -1; 3268 3268 } 3269 3269 ··· 3274 3274 3275 3275 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS); 3276 3276 new_flags |= ((gen + 1UL) << LRU_GEN_PGOFF) | BIT(PG_workingset); 3277 - } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); 3277 + } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags)); 3278 3278 3279 3279 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; 3280 3280 } ··· 3285 3285 int type = folio_is_file_lru(folio); 3286 3286 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3287 3287 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); 3288 - unsigned long new_flags, old_flags = READ_ONCE(folio->flags); 3288 + unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f); 3289 3289 3290 3290 VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio); 3291 3291 ··· 3302 3302 
/* for folio_end_writeback() */ 3303 3303 if (reclaiming) 3304 3304 new_flags |= BIT(PG_reclaim); 3305 - } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); 3305 + } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags)); 3306 3306 3307 3307 lru_gen_update_size(lruvec, folio, old_gen, new_gen); 3308 3308 ··· 4553 4553 4554 4554 /* see the comment on LRU_REFS_FLAGS */ 4555 4555 if (!folio_test_referenced(folio)) 4556 - set_mask_bits(&folio->flags, LRU_REFS_MASK, 0); 4556 + set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0); 4557 4557 4558 4558 /* for shrink_folio_list() */ 4559 4559 folio_clear_reclaim(folio); ··· 4766 4766 4767 4767 /* don't add rejected folios to the oldest generation */ 4768 4768 if (lru_gen_folio_seq(lruvec, folio, false) == min_seq[type]) 4769 - set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_active)); 4769 + set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_active)); 4770 4770 } 4771 4771 4772 4772 spin_lock_irq(&lruvec->lru_lock);
+1 -1
mm/workingset.c
··· 318 318 folio_set_workingset(folio); 319 319 mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta); 320 320 } else 321 - set_mask_bits(&folio->flags, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF); 321 + set_mask_bits(&folio->flags.f, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF); 322 322 unlock: 323 323 rcu_read_unlock(); 324 324 }