Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/page_table_check: Batch-check pmds/puds just like ptes

Convert page_table_check_p[mu]d_set(...) to
page_table_check_p[mu]ds_set(..., nr) to allow checking a contiguous set
of pmds/puds in a single batch. We retain page_table_check_p[mu]d_set(...)
as macros that call the new batch functions with nr=1 for compatibility.

arm64 is about to reorganise its pte/pmd/pud helpers to reuse more code
and to allow the implementation for huge_pte to more efficiently set
ptes/pmds/puds in batches. We need these batch-helpers to make the
refactoring possible.

Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Tested-by: Luiz Capitulino <luizcap@redhat.com>
Link: https://lore.kernel.org/r/20250422081822.1836315-4-ryan.roberts@arm.com
Signed-off-by: Will Deacon <will@kernel.org>

authored by

Ryan Roberts and committed by
Will Deacon
91e40668 5b3f8917

+38 -26
+18 -12
include/linux/page_table_check.h
··· 19 19 void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud); 20 20 void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte, 21 21 unsigned int nr); 22 - void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd); 23 - void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud); 22 + void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd, 23 + unsigned int nr); 24 + void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud, 25 + unsigned int nr); 24 26 void __page_table_check_pte_clear_range(struct mm_struct *mm, 25 27 unsigned long addr, 26 28 pmd_t pmd); ··· 76 74 __page_table_check_ptes_set(mm, ptep, pte, nr); 77 75 } 78 76 79 - static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, 80 - pmd_t pmd) 77 + static inline void page_table_check_pmds_set(struct mm_struct *mm, 78 + pmd_t *pmdp, pmd_t pmd, unsigned int nr) 81 79 { 82 80 if (static_branch_likely(&page_table_check_disabled)) 83 81 return; 84 82 85 - __page_table_check_pmd_set(mm, pmdp, pmd); 83 + __page_table_check_pmds_set(mm, pmdp, pmd, nr); 86 84 } 87 85 88 - static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, 89 - pud_t pud) 86 + static inline void page_table_check_puds_set(struct mm_struct *mm, 87 + pud_t *pudp, pud_t pud, unsigned int nr) 90 88 { 91 89 if (static_branch_likely(&page_table_check_disabled)) 92 90 return; 93 91 94 - __page_table_check_pud_set(mm, pudp, pud); 92 + __page_table_check_puds_set(mm, pudp, pud, nr); 95 93 } 96 94 97 95 static inline void page_table_check_pte_clear_range(struct mm_struct *mm, ··· 131 129 { 132 130 } 133 131 134 - static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, 135 - pmd_t pmd) 132 + static inline void page_table_check_pmds_set(struct mm_struct *mm, 133 + pmd_t *pmdp, pmd_t pmd, unsigned int nr) 136 134 { 137 135 } 138 136 139 - static inline void 
page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, 140 - pud_t pud) 137 + static inline void page_table_check_puds_set(struct mm_struct *mm, 138 + pud_t *pudp, pud_t pud, unsigned int nr) 141 139 { 142 140 } 143 141 ··· 148 146 } 149 147 150 148 #endif /* CONFIG_PAGE_TABLE_CHECK */ 149 + 150 + #define page_table_check_pmd_set(mm, pmdp, pmd) page_table_check_pmds_set(mm, pmdp, pmd, 1) 151 + #define page_table_check_pud_set(mm, pudp, pud) page_table_check_puds_set(mm, pudp, pud, 1) 152 + 151 153 #endif /* __LINUX_PAGE_TABLE_CHECK_H */
+20 -14
mm/page_table_check.c
··· 218 218 WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd))); 219 219 } 220 220 221 - void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd) 221 + void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd, 222 + unsigned int nr) 222 223 { 224 + unsigned long stride = PMD_SIZE >> PAGE_SHIFT; 225 + unsigned int i; 226 + 223 227 if (&init_mm == mm) 224 228 return; 225 229 226 230 page_table_check_pmd_flags(pmd); 227 231 228 - __page_table_check_pmd_clear(mm, *pmdp); 229 - if (pmd_user_accessible_page(pmd)) { 230 - page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT, 231 - pmd_write(pmd)); 232 - } 232 + for (i = 0; i < nr; i++) 233 + __page_table_check_pmd_clear(mm, *(pmdp + i)); 234 + if (pmd_user_accessible_page(pmd)) 235 + page_table_check_set(pmd_pfn(pmd), stride * nr, pmd_write(pmd)); 233 236 } 234 - EXPORT_SYMBOL(__page_table_check_pmd_set); 237 + EXPORT_SYMBOL(__page_table_check_pmds_set); 235 238 236 - void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud) 239 + void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud, 240 + unsigned int nr) 237 241 { 242 + unsigned long stride = PUD_SIZE >> PAGE_SHIFT; 243 + unsigned int i; 244 + 238 245 if (&init_mm == mm) 239 246 return; 240 247 241 - __page_table_check_pud_clear(mm, *pudp); 242 - if (pud_user_accessible_page(pud)) { 243 - page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT, 244 - pud_write(pud)); 245 - } 248 + for (i = 0; i < nr; i++) 249 + __page_table_check_pud_clear(mm, *(pudp + i)); 250 + if (pud_user_accessible_page(pud)) 251 + page_table_check_set(pud_pfn(pud), stride * nr, pud_write(pud)); 246 252 } 247 - EXPORT_SYMBOL(__page_table_check_pud_set); 253 + EXPORT_SYMBOL(__page_table_check_puds_set); 248 254 249 255 void __page_table_check_pte_clear_range(struct mm_struct *mm, 250 256 unsigned long addr,