Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

proc: rewrite stable_page_flags()

Reduce the usage of PageFlag tests and reduce the number of
compound_head() calls.

For multi-page folios, we'll now show all pages as having the flags that
apply to them, e.g. if it's dirty, all pages will have the dirty flag set
instead of just the head page. The mapped flag is still per page, as is
the hwpoison flag.

[willy@infradead.org: fix up some bits vs masks]
Link: https://lkml.kernel.org/r/20240403173112.1450721-1-willy@infradead.org
[willy@infradead.org: fix warnings]
Link: https://lkml.kernel.org/r/ZhBPtCYfSuFuUMEz@casper.infradead.org
Link: https://lkml.kernel.org/r/20240326171045.410737-11-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Svetly Todorov <svetly.todorov@memverge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Matthew Wilcox (Oracle); committed by Andrew Morton.
dee3d0be 4dc7d373

+44 -40
+39 -34
fs/proc/page.c
··· 107 107 return ((kflags >> kbit) & 1) << ubit; 108 108 } 109 109 110 - u64 stable_page_flags(struct page *page) 110 + u64 stable_page_flags(const struct page *page) 111 111 { 112 - u64 k; 113 - u64 u; 112 + const struct folio *folio; 113 + unsigned long k; 114 + unsigned long mapping; 115 + bool is_anon; 116 + u64 u = 0; 114 117 115 118 /* 116 119 * pseudo flag: KPF_NOPAGE ··· 121 118 */ 122 119 if (!page) 123 120 return 1 << KPF_NOPAGE; 121 + folio = page_folio(page); 124 122 125 - k = page->flags; 126 - u = 0; 123 + k = folio->flags; 124 + mapping = (unsigned long)folio->mapping; 125 + is_anon = mapping & PAGE_MAPPING_ANON; 127 126 128 127 /* 129 128 * pseudo flags for the well known (anonymous) memory mapped pages 130 129 */ 131 130 if (page_mapped(page)) 132 131 u |= 1 << KPF_MMAP; 133 - if (PageAnon(page)) 132 + if (is_anon) { 134 133 u |= 1 << KPF_ANON; 135 - if (PageKsm(page)) 136 - u |= 1 << KPF_KSM; 134 + if (mapping & PAGE_MAPPING_KSM) 135 + u |= 1 << KPF_KSM; 136 + } 137 137 138 138 /* 139 139 * compound pages: export both head/tail info 140 140 * they together define a compound page's start/end pos and order 141 141 */ 142 - if (PageHead(page)) 143 - u |= 1 << KPF_COMPOUND_HEAD; 144 - if (PageTail(page)) 142 + if (page == &folio->page) 143 + u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head); 144 + else 145 145 u |= 1 << KPF_COMPOUND_TAIL; 146 - if (PageHuge(page)) 146 + if (folio_test_hugetlb(folio)) 147 147 u |= 1 << KPF_HUGE; 148 148 /* 149 - * PageTransCompound can be true for non-huge compound pages (slab 150 - * pages or pages allocated by drivers with __GFP_COMP) because it 151 - * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon 149 + * We need to check PageLRU/PageAnon 152 150 * to make sure a given page is a thp, not a non-huge compound page. 
153 151 */ 154 - else if (PageTransCompound(page)) { 155 - struct page *head = compound_head(page); 156 - 157 - if (PageLRU(head) || PageAnon(head)) 152 + else if (folio_test_large(folio)) { 153 + if ((k & (1 << PG_lru)) || is_anon) 158 154 u |= 1 << KPF_THP; 159 - else if (is_huge_zero_page(head)) { 155 + else if (is_huge_zero_page(&folio->page)) { 160 156 u |= 1 << KPF_ZERO_PAGE; 161 157 u |= 1 << KPF_THP; 162 158 } 163 159 } else if (is_zero_pfn(page_to_pfn(page))) 164 160 u |= 1 << KPF_ZERO_PAGE; 165 - 166 161 167 162 /* 168 163 * Caveats on high order pages: PG_buddy and PG_slab will only be set ··· 175 174 u |= 1 << KPF_OFFLINE; 176 175 if (PageTable(page)) 177 176 u |= 1 << KPF_PGTABLE; 178 - 179 - if (page_is_idle(page)) 180 - u |= 1 << KPF_IDLE; 181 - 182 - u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked); 183 - 184 - u |= kpf_copy_bit(k, KPF_SLAB, PG_slab); 185 - if (PageTail(page) && PageSlab(page)) 177 + if (folio_test_slab(folio)) 186 178 u |= 1 << KPF_SLAB; 187 179 180 + #if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) 181 + u |= kpf_copy_bit(k, KPF_IDLE, PG_idle); 182 + #else 183 + if (folio_test_idle(folio)) 184 + u |= 1 << KPF_IDLE; 185 + #endif 186 + 187 + u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked); 188 188 u |= kpf_copy_bit(k, KPF_ERROR, PG_error); 189 189 u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty); 190 190 u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate); ··· 196 194 u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active); 197 195 u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim); 198 196 199 - if (PageSwapCache(page)) 197 + #define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache)) 198 + if ((k & SWAPCACHE) == SWAPCACHE) 200 199 u |= 1 << KPF_SWAPCACHE; 201 200 u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked); 202 201 ··· 205 202 u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked); 206 203 207 204 #ifdef CONFIG_MEMORY_FAILURE 208 - u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); 205 + if (u & (1 << KPF_HUGE)) 206 + u |= kpf_copy_bit(k, KPF_HWPOISON, 
PG_hwpoison); 207 + else 208 + u |= kpf_copy_bit(page->flags, KPF_HWPOISON, PG_hwpoison); 209 209 #endif 210 210 211 211 #ifdef CONFIG_ARCH_USES_PG_UNCACHED ··· 234 228 { 235 229 const unsigned long max_dump_pfn = get_max_dump_pfn(); 236 230 u64 __user *out = (u64 __user *)buf; 237 - struct page *ppage; 238 231 unsigned long src = *ppos; 239 232 unsigned long pfn; 240 233 ssize_t ret = 0; ··· 250 245 * TODO: ZONE_DEVICE support requires to identify 251 246 * memmaps that were actually initialized. 252 247 */ 253 - ppage = pfn_to_online_page(pfn); 248 + struct page *page = pfn_to_online_page(pfn); 254 249 255 - if (put_user(stable_page_flags(ppage), out)) { 250 + if (put_user(stable_page_flags(page), out)) { 256 251 ret = -EFAULT; 257 252 break; 258 253 }
+2 -2
include/linux/huge_mm.h
··· 351 351 extern struct page *huge_zero_page; 352 352 extern unsigned long huge_zero_pfn; 353 353 354 - static inline bool is_huge_zero_page(struct page *page) 354 + static inline bool is_huge_zero_page(const struct page *page) 355 355 { 356 356 return READ_ONCE(huge_zero_page) == page; 357 357 } ··· 480 480 return 0; 481 481 } 482 482 483 - static inline bool is_huge_zero_page(struct page *page) 483 + static inline bool is_huge_zero_page(const struct page *page) 484 484 { 485 485 return false; 486 486 }
+1 -1
include/linux/page-flags.h
··· 734 734 TESTPAGEFLAG_FALSE(Ksm, ksm) 735 735 #endif 736 736 737 - u64 stable_page_flags(struct page *page); 737 + u64 stable_page_flags(const struct page *page); 738 738 739 739 /** 740 740 * folio_xor_flags_has_waiters - Change some folio flags.
+2 -3
tools/cgroup/memcg_slabinfo.py
··· 146 146 147 147 148 148 def for_each_slab(prog): 149 - PGSlab = 1 << prog.constant('PG_slab') 150 - PGHead = 1 << prog.constant('PG_head') 149 + PGSlab = ~prog.constant('PG_slab') 151 150 152 151 for page in for_each_page(prog): 153 152 try: 154 - if page.flags.value_() & PGSlab: 153 + if page.page_type.value_() == PGSlab: 155 154 yield cast('struct slab *', page) 156 155 except FaultError: 157 156 pass