Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/treewide: replace pXd_huge() with pXd_leaf()

Now that we're sure all pXd_huge() definitions are the same as pXd_leaf(),
reuse it. Luckily, pXd_huge() isn't widely used.

Link: https://lkml.kernel.org/r/20240318200404.448346-12-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Bjorn Andersson <andersson@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fabio Estevam <festevam@denx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Konrad Dybcio <konrad.dybcio@linaro.org>
Cc: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
Cc: Lucas Stach <l.stach@pengutronix.de>
Cc: Mark Salter <msalter@redhat.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Shawn Guo <shawnguo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Peter Xu and committed by Andrew Morton
1965e933 7db86dc3

+15 -15
+1 -1
arch/arm/include/asm/pgtable-3level.h
··· 190 190 #define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY)) 191 191 192 192 #define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd)) 193 - #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd)) 193 + #define pmd_thp_or_huge(pmd) (pmd_leaf(pmd) || pmd_trans_huge(pmd)) 194 194 195 195 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 196 196 #define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
+1 -1
arch/arm64/include/asm/pgtable.h
··· 517 517 return pmd; 518 518 } 519 519 520 - #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd)) 520 + #define pmd_thp_or_huge(pmd) (pmd_leaf(pmd) || pmd_trans_huge(pmd)) 521 521 522 522 #define pmd_write(pmd) pte_write(pmd_pte(pmd)) 523 523
+2 -2
arch/arm64/mm/hugetlbpage.c
··· 321 321 if (sz != PUD_SIZE && pud_none(pud)) 322 322 return NULL; 323 323 /* hugepage or swap? */ 324 - if (pud_huge(pud) || !pud_present(pud)) 324 + if (pud_leaf(pud) || !pud_present(pud)) 325 325 return (pte_t *)pudp; 326 326 /* table; check the next level */ 327 327 ··· 333 333 if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && 334 334 pmd_none(pmd)) 335 335 return NULL; 336 - if (pmd_huge(pmd) || !pmd_present(pmd)) 336 + if (pmd_leaf(pmd) || !pmd_present(pmd)) 337 337 return (pte_t *)pmdp; 338 338 339 339 if (sz == CONT_PTE_SIZE)
+1 -1
arch/loongarch/mm/hugetlbpage.c
··· 64 64 { 65 65 uint64_t val; 66 66 /* PMD as PTE. Must be huge page */ 67 - if (!pmd_huge(__pmd(pmd_val))) 67 + if (!pmd_leaf(__pmd(pmd_val))) 68 68 panic("%s", __func__); 69 69 70 70 val = pmd_val ^ _PAGE_HUGE;
+1 -1
arch/mips/mm/tlb-r4k.c
··· 326 326 idx = read_c0_index(); 327 327 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 328 328 /* this could be a huge page */ 329 - if (pmd_huge(*pmdp)) { 329 + if (pmd_leaf(*pmdp)) { 330 330 unsigned long lo; 331 331 write_c0_pagemask(PM_HUGE_MASK); 332 332 ptep = (pte_t *)pmdp;
+3 -3
arch/powerpc/mm/pgtable_64.c
··· 102 102 { 103 103 if (p4d_leaf(p4d)) { 104 104 if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) 105 - VM_WARN_ON(!p4d_huge(p4d)); 105 + VM_WARN_ON(!p4d_leaf(p4d)); 106 106 return pte_page(p4d_pte(p4d)); 107 107 } 108 108 return virt_to_page(p4d_pgtable(p4d)); ··· 113 113 { 114 114 if (pud_leaf(pud)) { 115 115 if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) 116 - VM_WARN_ON(!pud_huge(pud)); 116 + VM_WARN_ON(!pud_leaf(pud)); 117 117 return pte_page(pud_pte(pud)); 118 118 } 119 119 return virt_to_page(pud_pgtable(pud)); ··· 132 132 * enabled so these checks can't be used. 133 133 */ 134 134 if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) 135 - VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd))); 135 + VM_WARN_ON(!pmd_leaf(pmd)); 136 136 return pte_page(pmd_pte(pmd)); 137 137 } 138 138 return virt_to_page(pmd_page_vaddr(pmd));
+2 -2
arch/x86/mm/pgtable.c
··· 731 731 return 0; 732 732 733 733 /* Bail out if we are we on a populated non-leaf entry: */ 734 - if (pud_present(*pud) && !pud_huge(*pud)) 734 + if (pud_present(*pud) && !pud_leaf(*pud)) 735 735 return 0; 736 736 737 737 set_pte((pte_t *)pud, pfn_pte( ··· 760 760 } 761 761 762 762 /* Bail out if we are we on a populated non-leaf entry: */ 763 - if (pmd_present(*pmd) && !pmd_huge(*pmd)) 763 + if (pmd_present(*pmd) && !pmd_leaf(*pmd)) 764 764 return 0; 765 765 766 766 set_pte((pte_t *)pmd, pfn_pte(
+2 -2
mm/gup.c
··· 778 778 p4d = READ_ONCE(*p4dp); 779 779 if (!p4d_present(p4d)) 780 780 return no_page_table(vma, flags); 781 - BUILD_BUG_ON(p4d_huge(p4d)); 781 + BUILD_BUG_ON(p4d_leaf(p4d)); 782 782 if (unlikely(p4d_bad(p4d))) 783 783 return no_page_table(vma, flags); 784 784 ··· 3082 3082 next = p4d_addr_end(addr, end); 3083 3083 if (!p4d_present(p4d)) 3084 3084 return 0; 3085 - BUILD_BUG_ON(p4d_huge(p4d)); 3085 + BUILD_BUG_ON(p4d_leaf(p4d)); 3086 3086 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { 3087 3087 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, 3088 3088 P4D_SHIFT, next, flags, pages, nr))
+1 -1
mm/hmm.c
··· 429 429 return hmm_vma_walk_hole(start, end, -1, walk); 430 430 } 431 431 432 - if (pud_huge(pud) && pud_devmap(pud)) { 432 + if (pud_leaf(pud) && pud_devmap(pud)) { 433 433 unsigned long i, npages, pfn; 434 434 unsigned int required_fault; 435 435 unsigned long *hmm_pfns;
+1 -1
mm/memory.c
··· 2765 2765 unsigned long next; 2766 2766 int err = 0; 2767 2767 2768 - BUG_ON(pud_huge(*pud)); 2768 + BUG_ON(pud_leaf(*pud)); 2769 2769 2770 2770 if (create) { 2771 2771 pmd = pmd_alloc_track(mm, pud, addr, mask);