Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm/treewide: replace pmd_large() with pmd_leaf()

pmd_large() is always defined as pmd_leaf(). Merge their usages. pmd_leaf() was
chosen because it is the generic, cross-architecture API, while pmd_large() is not.
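
For background, by this point in the series the old helper carries no semantics
of its own: each architecture's definition has already been reduced to a thin
alias for the generic helper, roughly of the shape below (an illustrative
sketch, not copied from any particular arch header).

/*
 * Illustrative only: pmd_large() simply forwards to the generic pmd_leaf()
 * test, i.e. "does this PMD entry map a leaf huge page directly rather than
 * point to a lower-level page table?".
 */
#define pmd_large(pmd)  pmd_leaf(pmd)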

Link: https://lkml.kernel.org/r/20240305043750.93762-8-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Peter Xu, committed by Andrew Morton (2f709f7b, b6c9d5a9).

+49 -49
+2 -2
arch/arm/mm/dump.c
@@ -349,12 +349,12 @@
         for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
                 addr = start + i * PMD_SIZE;
                 domain = get_domain_name(pmd);
-                if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
+                if (pmd_none(*pmd) || pmd_leaf(*pmd) || !pmd_present(*pmd))
                         note_page(st, addr, 4, pmd_val(*pmd), domain);
                 else
                         walk_pte(st, pmd, addr, domain);

-                if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
+                if (SECTION_SIZE < PMD_SIZE && pmd_leaf(pmd[1])) {
                         addr += SECTION_SIZE;
                         pmd++;
                         domain = get_domain_name(pmd);
+1 -1
arch/powerpc/mm/book3s64/pgtable.c
@@ -113,7 +113,7 @@

         WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
         assert_spin_locked(pmd_lockptr(mm, pmdp));
-        WARN_ON(!(pmd_large(pmd)));
+        WARN_ON(!(pmd_leaf(pmd)));
 #endif
         trace_hugepage_set_pmd(addr, pmd_val(pmd));
         return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
+1 -1
arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -924,7 +924,7 @@
 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
                                 unsigned long addr, unsigned long next)
 {
-        int large = pmd_large(*pmdp);
+        int large = pmd_leaf(*pmdp);

         if (large)
                 vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
+1 -1
arch/powerpc/mm/pgtable_64.c
@@ -132,7 +132,7 @@
                  * enabled so these checks can't be used.
                  */
                 if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
-                        VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+                        VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd)));
                 return pte_page(pmd_pte(pmd));
         }
         return virt_to_page(pmd_page_vaddr(pmd));
+1 -1
arch/s390/boot/vmem.c
@@ -333,7 +333,7 @@
                         }
                         pte = boot_pte_alloc();
                         pmd_populate(&init_mm, pmd, pte);
-                } else if (pmd_large(*pmd)) {
+                } else if (pmd_leaf(*pmd)) {
                         continue;
                 }
                 pgtable_pte_populate(pmd, addr, next, mode);
+4 -4
arch/s390/include/asm/pgtable.h
@@ -721,7 +721,7 @@

 static inline int pmd_bad(pmd_t pmd)
 {
-        if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
+        if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
                 return 1;
         return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
 }
@@ -820,8 +820,8 @@

 static inline int pmd_protnone(pmd_t pmd)
 {
-        /* pmd_large(pmd) implies pmd_present(pmd) */
-        return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
+        /* pmd_leaf(pmd) implies pmd_present(pmd) */
+        return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
 }
 #endif

@@ -1385,7 +1385,7 @@
         unsigned long origin_mask;

         origin_mask = _SEGMENT_ENTRY_ORIGIN;
-        if (pmd_large(pmd))
+        if (pmd_leaf(pmd))
                 origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
         return (unsigned long)__va(pmd_val(pmd) & origin_mask);
 }
+6 -6
arch/s390/mm/gmap.c
@@ -603,7 +603,7 @@
         pmd = pmd_offset(pud, vmaddr);
         VM_BUG_ON(pmd_none(*pmd));
         /* Are we allowed to use huge pages? */
-        if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
+        if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
                 return -EFAULT;
         /* Link gmap segment table entry location to page table. */
         rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
@@ -615,7 +615,7 @@
         rc = radix_tree_insert(&gmap->host_to_guest,
                                vmaddr >> PMD_SHIFT, table);
         if (!rc) {
-                if (pmd_large(*pmd)) {
+                if (pmd_leaf(*pmd)) {
                         *table = (pmd_val(*pmd) &
                                   _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
                                   | _SEGMENT_ENTRY_GMAP_UC;
@@ -945,7 +945,7 @@
         }

         /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
-        if (!pmd_large(*pmdp))
+        if (!pmd_leaf(*pmdp))
                 spin_unlock(&gmap->guest_table_lock);
         return pmdp;
 }
@@ -957,7 +957,7 @@
  */
 static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
 {
-        if (pmd_large(*pmdp))
+        if (pmd_leaf(*pmdp))
                 spin_unlock(&gmap->guest_table_lock);
 }

@@ -1068,7 +1068,7 @@
                 rc = -EAGAIN;
                 pmdp = gmap_pmd_op_walk(gmap, gaddr);
                 if (pmdp) {
-                        if (!pmd_large(*pmdp)) {
+                        if (!pmd_leaf(*pmdp)) {
                                 rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
                                                       bits);
                                 if (!rc) {
@@ -2500,7 +2500,7 @@
         if (!pmdp)
                 return;

-        if (pmd_large(*pmdp)) {
+        if (pmd_leaf(*pmdp)) {
                 if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
                         bitmap_fill(bitmap, _PAGE_ENTRIES);
         } else {
+1 -1
arch/s390/mm/hugetlbpage.c
@@ -235,7 +235,7 @@

 int pmd_huge(pmd_t pmd)
 {
-        return pmd_large(pmd);
+        return pmd_leaf(pmd);
 }

 int pud_huge(pud_t pud)
+1 -1
arch/s390/mm/pageattr.c
@@ -185,7 +185,7 @@
                 if (pmd_none(*pmdp))
                         return -EINVAL;
                 next = pmd_addr_end(addr, end);
-                if (pmd_large(*pmdp)) {
+                if (pmd_leaf(*pmdp)) {
                         need_split = !!(flags & SET_MEMORY_4K);
                         need_split |= !!(addr & ~PMD_MASK);
                         need_split |= !!(addr + PMD_SIZE > next);
+3 -3
arch/s390/mm/pgtable.c
@@ -827,7 +827,7 @@
                 return key ? -EFAULT : 0;
         }

-        if (pmd_large(*pmdp)) {
+        if (pmd_leaf(*pmdp)) {
                 paddr = pmd_val(*pmdp) & HPAGE_MASK;
                 paddr |= addr & ~HPAGE_MASK;
                 /*
@@ -938,7 +938,7 @@
                 return 0;
         }

-        if (pmd_large(*pmdp)) {
+        if (pmd_leaf(*pmdp)) {
                 paddr = pmd_val(*pmdp) & HPAGE_MASK;
                 paddr |= addr & ~HPAGE_MASK;
                 cc = page_reset_referenced(paddr);
@@ -1002,7 +1002,7 @@
                 return 0;
         }

-        if (pmd_large(*pmdp)) {
+        if (pmd_leaf(*pmdp)) {
                 paddr = pmd_val(*pmdp) & HPAGE_MASK;
                 paddr |= addr & ~HPAGE_MASK;
                 *key = page_get_storage_key(paddr);
+3 -3
arch/s390/mm/vmem.c
@@ -236,7 +236,7 @@
                 if (!add) {
                         if (pmd_none(*pmd))
                                 continue;
-                        if (pmd_large(*pmd)) {
+                        if (pmd_leaf(*pmd)) {
                                 if (IS_ALIGNED(addr, PMD_SIZE) &&
                                     IS_ALIGNED(next, PMD_SIZE)) {
                                         if (!direct)
@@ -281,7 +281,7 @@
                         if (!pte)
                                 goto out;
                         pmd_populate(&init_mm, pmd, pte);
-                } else if (pmd_large(*pmd)) {
+                } else if (pmd_leaf(*pmd)) {
                         if (!direct)
                                 vmemmap_use_sub_pmd(addr, next);
                         continue;
@@ -610,7 +610,7 @@
                 if (!pte)
                         goto out;
                 pmd_populate(&init_mm, pmd, pte);
-        } else if (WARN_ON_ONCE(pmd_large(*pmd))) {
+        } else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
                 goto out;
         }
         ptep = pte_offset_kernel(pmd, addr);
+2 -2
arch/sparc/mm/init_64.c
@@ -1672,7 +1672,7 @@
         if (pmd_none(*pmd))
                 return false;

-        if (pmd_large(*pmd))
+        if (pmd_leaf(*pmd))
                 return pfn_valid(pmd_pfn(*pmd));

         pte = pte_offset_kernel(pmd, addr);
@@ -2968,7 +2968,7 @@
         struct mm_struct *mm;
         pmd_t entry = *pmd;

-        if (!pmd_large(entry) || !pmd_young(entry))
+        if (!pmd_leaf(entry) || !pmd_young(entry))
                 return;

         pte = pmd_val(entry);
+1 -1
arch/x86/boot/compressed/ident_map_64.c
@@ -284,7 +284,7 @@
         pudp = pud_offset(p4dp, address);
         pmdp = pmd_offset(pudp, address);

-        if (pmd_large(*pmdp))
+        if (pmd_leaf(*pmdp))
                 ptep = split_large_pmd(info, pmdp, address);
         else
                 ptep = pte_offset_kernel(pmdp, address);
+1 -1
arch/x86/kvm/mmu/mmu.c
@@ -3135,7 +3135,7 @@
         if (pmd_none(pmd) || !pmd_present(pmd))
                 goto out;

-        if (pmd_large(pmd))
+        if (pmd_leaf(pmd))
                 level = PG_LEVEL_2M;

 out:
+4 -4
arch/x86/mm/fault.c
@@ -250,7 +250,7 @@
         if (!pmd_k)
                 return -1;

-        if (pmd_large(*pmd_k))
+        if (pmd_leaf(*pmd_k))
                 return 0;

         pte_k = pte_offset_kernel(pmd_k, address);
@@ -319,7 +319,7 @@
          * And let's rather not kmap-atomic the pte, just in case
          * it's allocated already:
          */
-        if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
+        if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_leaf(*pmd))
                 goto out;

         pte = pte_offset_kernel(pmd, address);
@@ -384,7 +384,7 @@
                 goto bad;

         pr_cont("PMD %lx ", pmd_val(*pmd));
-        if (!pmd_present(*pmd) || pmd_large(*pmd))
+        if (!pmd_present(*pmd) || pmd_leaf(*pmd))
                 goto out;

         pte = pte_offset_kernel(pmd, address);
@@ -1053,7 +1053,7 @@
         if (!pmd_present(*pmd))
                 return 0;

-        if (pmd_large(*pmd))
+        if (pmd_leaf(*pmd))
                 return spurious_kernel_fault_check(error_code, (pte_t *) pmd);

         pte = pte_offset_kernel(pmd, address);
+1 -1
arch/x86/mm/init_32.c
@@ -463,7 +463,7 @@
                         break;

                 /* should not be large page here */
-                if (pmd_large(*pmd)) {
+                if (pmd_leaf(*pmd)) {
                         pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
                                 pfn, pmd, __pa(pmd));
                         BUG_ON(1);
+4 -4
arch/x86/mm/init_64.c
@@ -530,7 +530,7 @@
                 }

                 if (!pmd_none(*pmd)) {
-                        if (!pmd_large(*pmd)) {
+                        if (!pmd_leaf(*pmd)) {
                                 spin_lock(&init_mm.page_table_lock);
                                 pte = (pte_t *)pmd_page_vaddr(*pmd);
                                 paddr_last = phys_pte_init(pte, paddr,
@@ -1114,7 +1114,7 @@
                 if (!pmd_present(*pmd))
                         continue;

-                if (pmd_large(*pmd)) {
+                if (pmd_leaf(*pmd)) {
                         if (IS_ALIGNED(addr, PMD_SIZE) &&
                             IS_ALIGNED(next, PMD_SIZE)) {
                                 if (!direct)
@@ -1520,9 +1520,9 @@
 int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
                                 unsigned long addr, unsigned long next)
 {
-        int large = pmd_large(*pmd);
+        int large = pmd_leaf(*pmd);

-        if (pmd_large(*pmd)) {
+        if (pmd_leaf(*pmd)) {
                 vmemmap_verify((pte_t *)pmd, node, addr, next);
                 vmemmap_use_sub_pmd(addr, next);
         }
+1 -1
arch/x86/mm/kasan_init_64.c
@@ -95,7 +95,7 @@
         pmd = pmd_offset(pud, addr);
         do {
                 next = pmd_addr_end(addr, end);
-                if (!pmd_large(*pmd))
+                if (!pmd_leaf(*pmd))
                         kasan_populate_pmd(pmd, addr, next, nid);
         } while (pmd++, addr = next, addr != end);
 }
+2 -2
arch/x86/mm/mem_encrypt_identity.c
@@ -161,7 +161,7 @@
                 return;

         pmd = pmd_offset(pud, ppd->vaddr);
-        if (pmd_large(*pmd))
+        if (pmd_leaf(*pmd))
                 return;

         set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
@@ -185,7 +185,7 @@
                 set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
         }

-        if (pmd_large(*pmd))
+        if (pmd_leaf(*pmd))
                 return;

         pte = pte_offset_kernel(pmd, ppd->vaddr);
+2 -2
arch/x86/mm/pat/set_memory.c
@@ -692,7 +692,7 @@
                 return NULL;

         *level = PG_LEVEL_2M;
-        if (pmd_large(*pmd) || !pmd_present(*pmd))
+        if (pmd_leaf(*pmd) || !pmd_present(*pmd))
                 return (pte_t *)pmd;

         *level = PG_LEVEL_4K;
@@ -1229,7 +1229,7 @@
          * Try to unmap in 2M chunks.
          */
         while (end - start >= PMD_SIZE) {
-                if (pmd_large(*pmd))
+                if (pmd_leaf(*pmd))
                         pmd_clear(pmd);
                 else
                         __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
+1 -1
arch/x86/mm/pgtable.c
@@ -792,7 +792,7 @@
  */
 int pmd_clear_huge(pmd_t *pmd)
 {
-        if (pmd_large(*pmd)) {
+        if (pmd_leaf(*pmd)) {
                 pmd_clear(pmd);
                 return 1;
         }
+2 -2
arch/x86/mm/pti.c
@@ -252,7 +252,7 @@
                 return NULL;

         /* We can't do anything sensible if we hit a large mapping. */
-        if (pmd_large(*pmd)) {
+        if (pmd_leaf(*pmd)) {
                 WARN_ON(1);
                 return NULL;
         }
@@ -341,7 +341,7 @@
                         continue;
                 }

-                if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
+                if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
                         target_pmd = pti_user_pagetable_walk_pmd(addr);
                         if (WARN_ON(!target_pmd))
                                 return;
+1 -1
arch/x86/power/hibernate.c
@@ -175,7 +175,7 @@
                 goto out;
         }
         pmd = pmd_offset(pud, relocated_restore_code);
-        if (pmd_large(*pmd)) {
+        if (pmd_leaf(*pmd)) {
                 set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
                 goto out;
         }
+2 -2
arch/x86/xen/mmu_pv.c
@@ -1059,7 +1059,7 @@
         pte_t *pte_tbl;
         int i;

-        if (pmd_large(*pmd)) {
+        if (pmd_leaf(*pmd)) {
                 pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
                 xen_free_ro_pages(pa, PMD_SIZE);
                 return;
@@ -1871,7 +1871,7 @@
         if (!pmd_present(pmd))
                 return 0;
         pa = pmd_val(pmd) & PTE_PFN_MASK;
-        if (pmd_large(pmd))
+        if (pmd_leaf(pmd))
                 return pa + (vaddr & ~PMD_MASK);

         pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
+1 -1
drivers/misc/sgi-gru/grufault.c
@@ -227,7 +227,7 @@
         if (unlikely(pmd_none(*pmdp)))
                 goto err;
 #ifdef CONFIG_X86_64
-        if (unlikely(pmd_large(*pmdp)))
+        if (unlikely(pmd_leaf(*pmdp)))
                 pte = ptep_get((pte_t *)pmdp);
         else
 #endif