
mm: cleanup *pte_alloc* interfaces

There are a few things about the *pte_alloc*() helpers worth cleaning up:

 - the 'vma' argument is unused, let's drop it;

 - most __pte_alloc() callers do a speculative check for pmd_none()
   before taking the ptl: let's introduce a pte_alloc() macro which does
   the check (a before/after sketch of a typical caller follows this list).

   The only direct user of __pte_alloc() left is userfaultfd, which has
   a different expectation about atomicity with respect to the pmd.

 - pte_alloc_map() and pte_alloc_map_lock() are redefined using
   pte_alloc().
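
For a typical caller the change looks as follows (lifted from the mm/memory.c
hunk further down; mm, vma, pmd and address are the fault-path locals there):

        /* Before: open-coded speculative pmd_none() check plus __pte_alloc() */
        if (unlikely(pmd_none(*pmd)) &&
            unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;

        /* After: pte_alloc() folds the check in, and the unused 'vma' is gone */
        if (unlikely(pte_alloc(mm, pmd, address)))
                return VM_FAULT_OOM;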

[sudeep.holla@arm.com: fix build for arm64 hugetlbpage]
[sfr@canb.auug.org.au: fix arch/arm/mm/mmu.c some more]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Kirill A. Shutemov, committed by Linus Torvalds
commit 3ed3a4f0, parent 5057dcd0

16 files changed: +27 -32
arch/arm/mm/mmu.c (+3 -3)

···
         return ptr;
 }
 
-static pte_t * __init pte_alloc(pmd_t *pmd, unsigned long addr,
+static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr,
                                 unsigned long prot,
                                 void *(*alloc)(unsigned long sz))
 {
···
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
                                       unsigned long prot)
 {
-        return pte_alloc(pmd, addr, prot, early_alloc);
+        return arm_pte_alloc(pmd, addr, prot, early_alloc);
 }
 
 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
···
                             void *(*alloc)(unsigned long sz),
                             bool ng)
 {
-        pte_t *pte = pte_alloc(pmd, addr, type->prot_l1, alloc);
+        pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc);
         do {
                 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
                             ng ? PTE_EXT_NG : 0);
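The rename is needed because <linux/mm.h> now defines a function-like macro
named pte_alloc() (see the include/linux/mm.h hunk below); a file-local
function of the same name would be expanded by the preprocessor at its own
definition and call sites. An illustrative sketch of the collision, not
kernel code:

        #define pte_alloc(mm, pmd, address) \
                (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))

        /* With that macro visible, the old arm definition would no longer
         * compile: the preprocessor sees 'pte_alloc(' and tries to expand
         * the three-argument macro right here, with four arguments:
         *
         *   static pte_t * __init pte_alloc(pmd_t *pmd, unsigned long addr,
         *                                   unsigned long prot,
         *                                   void *(*alloc)(unsigned long sz))
         *
         * Hence the arm_pte_alloc() rename. */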
arch/arm/mm/pgd.c (+1 -1)

···
         if (!new_pmd)
                 goto no_pmd;
 
-        new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
+        new_pte = pte_alloc_map(mm, new_pmd, 0);
         if (!new_pte)
                 goto no_pte;
 
arch/arm64/mm/hugetlbpage.c (+1 -1)

···
                  * will be no pte_unmap() to correspond with this
                  * pte_alloc_map().
                  */
-                pte = pte_alloc_map(mm, NULL, pmd, addr);
+                pte = pte_alloc_map(mm, pmd, addr);
         } else if (sz == PMD_SIZE) {
                 if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
                     pud_none(*pud))
arch/ia64/mm/hugetlbpage.c (+1 -1)

···
         if (pud) {
                 pmd = pmd_alloc(mm, pud, taddr);
                 if (pmd)
-                        pte = pte_alloc_map(mm, NULL, pmd, taddr);
+                        pte = pte_alloc_map(mm, pmd, taddr);
         }
         return pte;
 }
arch/metag/mm/hugetlbpage.c (+1 -1)

···
         pgd = pgd_offset(mm, addr);
         pud = pud_offset(pgd, addr);
         pmd = pmd_offset(pud, addr);
-        pte = pte_alloc_map(mm, NULL, pmd, addr);
+        pte = pte_alloc_map(mm, pmd, addr);
         pgd->pgd &= ~_PAGE_SZ_MASK;
         pgd->pgd |= _PAGE_SZHUGE;
 
arch/parisc/mm/hugetlbpage.c (+1 -1)

···
         if (pud) {
                 pmd = pmd_alloc(mm, pud, addr);
                 if (pmd)
-                        pte = pte_alloc_map(mm, NULL, pmd, addr);
+                        pte = pte_alloc_map(mm, pmd, addr);
         }
         return pte;
 }
arch/sh/mm/hugetlbpage.c (+1 -1)

···
                 if (pud) {
                         pmd = pmd_alloc(mm, pud, addr);
                         if (pmd)
-                                pte = pte_alloc_map(mm, NULL, pmd, addr);
+                                pte = pte_alloc_map(mm, pmd, addr);
                 }
         }
 
arch/sparc/mm/hugetlbpage.c (+1 -1)

···
         if (pud) {
                 pmd = pmd_alloc(mm, pud, addr);
                 if (pmd)
-                        pte = pte_alloc_map(mm, NULL, pmd, addr);
+                        pte = pte_alloc_map(mm, pmd, addr);
         }
         return pte;
 }
arch/tile/mm/hugetlbpage.c (+1 -1)

···
         else {
                 if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
                         panic("Unexpected page size %#lx\n", sz);
-                return pte_alloc_map(mm, NULL, pmd, addr);
+                return pte_alloc_map(mm, pmd, addr);
         }
 }
 #else
arch/um/kernel/skas/mmu.c (+1 -1)

···
         if (!pmd)
                 goto out_pmd;
 
-        pte = pte_alloc_map(mm, NULL, pmd, proc);
+        pte = pte_alloc_map(mm, pmd, proc);
         if (!pte)
                 goto out_pte;
 
arch/unicore32/mm/pgd.c (+1 -1)

···
         if (!new_pmd)
                 goto no_pmd;
 
-        new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
+        new_pte = pte_alloc_map(mm, new_pmd, 0);
         if (!new_pte)
                 goto no_pte;
 
arch/x86/kernel/tboot.c (+1 -1)

···
         pmd = pmd_alloc(&tboot_mm, pud, vaddr);
         if (!pmd)
                 return -1;
-        pte = pte_alloc_map(&tboot_mm, NULL, pmd, vaddr);
+        pte = pte_alloc_map(&tboot_mm, pmd, vaddr);
         if (!pte)
                 return -1;
         set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
include/linux/mm.h (+8 -9)

···
 }
 #endif
 
-int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
-                pmd_t *pmd, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
 
 /*
···
         pte_unmap(pte);                                         \
 } while (0)
 
-#define pte_alloc_map(mm, vma, pmd, address)                            \
-        ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,            \
-                                                        pmd, address))? \
-         NULL: pte_offset_map(pmd, address))
+#define pte_alloc(mm, pmd, address)                     \
+        (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
+
+#define pte_alloc_map(mm, pmd, address)                 \
+        (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
 
 #define pte_alloc_map_lock(mm, pmd, address, ptlp)      \
-        ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,           \
-                                                        pmd, address))? \
-         NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
+        (pte_alloc(mm, pmd, address) ?                  \
+         NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
 
 #define pte_alloc_kernel(pmd, address)                  \
         ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
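To show how the rebuilt macros compose, here is a hypothetical helper built on
pte_alloc_map_lock(); set_one_pte() is not part of the patch, and the -ENOMEM
convention is assumed from the macro's NULL-on-allocation-failure contract:

        #include <linux/mm.h>

        /* Allocate a page table under pmd if needed, then map, lock and
         * install one pte. NULL from pte_alloc_map_lock() means pte_alloc()
         * failed to allocate the page table. */
        static int set_one_pte(struct mm_struct *mm, pmd_t *pmd,
                               unsigned long addr, pte_t entry)
        {
                spinlock_t *ptl;
                pte_t *pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);

                if (!pte)
                        return -ENOMEM;
                set_pte_at(mm, addr, pte, entry);
                pte_unmap_unlock(pte, ptl);
                return 0;
        }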
mm/memory.c (+3 -5)

···
         }
 }
 
-int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
-                pmd_t *pmd, unsigned long address)
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
         spinlock_t *ptl;
         pgtable_t new = pte_alloc_one(mm, address);
···
         }
 
         /*
-         * Use __pte_alloc instead of pte_alloc_map, because we can't
+         * Use pte_alloc() instead of pte_alloc_map, because we can't
          * run pte_offset_map on the pmd, if an huge pmd could
          * materialize from under us from a different thread.
          */
-        if (unlikely(pmd_none(*pmd)) &&
-            unlikely(__pte_alloc(mm, vma, pmd, address)))
+        if (unlikely(pte_alloc(mm, pmd, address)))
                 return VM_FAULT_OOM;
         /*
          * If a huge pmd materialized under us just retry later. Use
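The comment in the second hunk is about a THP race: between the allocation and
a later pte_offset_map(), another thread can install a huge pmd, so the fault
path only allocates here and re-checks the pmd before mapping. A condensed
sketch of the pattern, simplified from that era's __handle_mm_fault():

        if (unlikely(pte_alloc(mm, pmd, address)))
                return VM_FAULT_OOM;    /* page-table allocation failed */
        if (unlikely(pmd_trans_unstable(pmd)))
                return 0;               /* huge pmd raced in: retry the fault */
        /* only now is it safe to pte_offset_map_lock() this pmd */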
mm/mremap.c (+1 -2)

···
                         continue;
                 VM_BUG_ON(pmd_trans_huge(*old_pmd));
         }
-        if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
-                                              new_pmd, new_addr))
+        if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
                 break;
         next = (new_addr + PMD_SIZE) & PMD_MASK;
         if (extent > next - new_addr)
mm/userfaultfd.c (+1 -2)

···
                         break;
                 }
                 if (unlikely(pmd_none(dst_pmdval)) &&
-                    unlikely(__pte_alloc(dst_mm, dst_vma, dst_pmd,
-                                         dst_addr))) {
+                    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
                         err = -ENOMEM;
                         break;
                 }
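This is the "different expectation about atomicity" from the commit message:
the copy loop here tests a snapshot of the pmd (dst_pmdval) taken earlier, not
the live *dst_pmd that the pte_alloc() macro would re-read, so the open-coded
__pte_alloc() call stays. Roughly, with the snapshot paraphrased from the
surrounding loop:

        pmd_t dst_pmdval = pmd_read_atomic(dst_pmd);    /* snapshot, read once */

        /* Decide on the snapshot: pte_alloc() would re-read *dst_pmd and
         * could observe a state that appeared after the snapshot had already
         * been validated, breaking the function's atomicity assumptions. */
        if (unlikely(pmd_none(dst_pmdval)) &&
            unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
                err = -ENOMEM;
                break;
        }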