Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: rename pud_page_vaddr to pud_pgtable and make it return pmd_t *

No functional change in this patch.

[aneesh.kumar@linux.ibm.com: fix]
Link: https://lkml.kernel.org/r/87wnqtnb60.fsf@linux.ibm.com
[sfr@canb.auug.org.au: another fix]
Link: https://lkml.kernel.org/r/20210619134410.89559-1-aneesh.kumar@linux.ibm.com

Link: https://lkml.kernel.org/r/20210615110859.320299-1-aneesh.kumar@linux.ibm.com
Link: https://lore.kernel.org/linuxppc-dev/CAHk-=wi+J+iodze9FtjM3Zi4j4OeS+qqbKxME9QN4roxPEXH9Q@mail.gmail.com/
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Hugh Dickins <hughd@google.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Aneesh Kumar K.V; committed by Linus Torvalds.
9cf6fa24 44e8a5e9

+46 -36
+5 -3
arch/alpha/include/asm/pgtable.h
··· 236 236 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32)) 237 237 #define pud_page(pud) (pfn_to_page(pud_val(pud) >> 32)) 238 238 239 - extern inline unsigned long pud_page_vaddr(pud_t pgd) 240 - { return PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); } 239 + extern inline pmd_t *pud_pgtable(pud_t pgd) 240 + { 241 + return (pmd_t *)(PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT))); 242 + } 241 243 242 244 extern inline int pte_none(pte_t pte) { return !pte_val(pte); } 243 245 extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; } ··· 289 287 /* Find an entry in the second-level page table.. */ 290 288 extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address) 291 289 { 292 - pmd_t *ret = (pmd_t *) pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); 290 + pmd_t *ret = pud_pgtable(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); 293 291 smp_rmb(); /* see above */ 294 292 return ret; 295 293 }
+1 -1
arch/arm/include/asm/pgtable-3level.h
··· 130 130 flush_pmd_entry(pudp); \ 131 131 } while (0) 132 132 133 - static inline pmd_t *pud_page_vaddr(pud_t pud) 133 + static inline pmd_t *pud_pgtable(pud_t pud) 134 134 { 135 135 return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK); 136 136 }
+2 -2
arch/arm64/include/asm/pgtable.h
··· 649 649 return __pud_to_phys(pud); 650 650 } 651 651 652 - static inline unsigned long pud_page_vaddr(pud_t pud) 652 + static inline pmd_t *pud_pgtable(pud_t pud) 653 653 { 654 - return (unsigned long)__va(pud_page_paddr(pud)); 654 + return (pmd_t *)__va(pud_page_paddr(pud)); 655 655 } 656 656 657 657 /* Find an entry in the second-level page table. */
+1 -1
arch/ia64/include/asm/pgtable.h
··· 273 273 #define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud))) 274 274 #define pud_present(pud) (pud_val(pud) != 0UL) 275 275 #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) 276 - #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK)) 276 + #define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & _PFN_MASK)) 277 277 #define pud_page(pud) virt_to_page((pud_val(pud) + PAGE_OFFSET)) 278 278 279 279 #if CONFIG_PGTABLE_LEVELS == 4
+1 -1
arch/m68k/include/asm/motorola_pgtable.h
··· 131 131 132 132 #define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK)) 133 133 #define pmd_page_vaddr(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK)) 134 - #define pud_page_vaddr(pud) ((unsigned long)__va(pud_val(pud) & _TABLE_MASK)) 134 + #define pud_pgtable(pud) ((pmd_t *)__va(pud_val(pud) & _TABLE_MASK)) 135 135 136 136 137 137 #define pte_none(pte) (!pte_val(pte))
+2 -2
arch/mips/include/asm/pgtable-64.h
··· 313 313 #endif 314 314 315 315 #ifndef __PAGETABLE_PMD_FOLDED 316 - static inline unsigned long pud_page_vaddr(pud_t pud) 316 + static inline pmd_t *pud_pgtable(pud_t pud) 317 317 { 318 - return pud_val(pud); 318 + return (pmd_t *)pud_val(pud); 319 319 } 320 320 #define pud_phys(pud) virt_to_phys((void *)pud_val(pud)) 321 321 #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
+2 -2
arch/parisc/include/asm/pgtable.h
··· 322 322 323 323 324 324 #if CONFIG_PGTABLE_LEVELS == 3 325 - #define pud_page_vaddr(pud) ((unsigned long) __va(pud_address(pud))) 326 - #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) 325 + #define pud_pgtable(pud) ((pmd_t *) __va(pud_address(pud))) 326 + #define pud_page(pud) virt_to_page((void *)pud_pgtable(pud)) 327 327 328 328 /* For 64 bit we have three level tables */ 329 329
+5 -1
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 1051 1051 /* Pointers in the page table tree are physical addresses */ 1052 1052 #define __pgtable_ptr_val(ptr) __pa(ptr) 1053 1053 1054 - #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) 1055 1054 #define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS) 1055 + 1056 + static inline pmd_t *pud_pgtable(pud_t pud) 1057 + { 1058 + return (pmd_t *)__va(pud_val(pud) & ~PUD_MASKED_BITS); 1059 + } 1056 1060 1057 1061 #define pte_ERROR(e) \ 1058 1062 pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+5 -1
arch/powerpc/include/asm/nohash/64/pgtable.h
··· 162 162 #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ 163 163 || (pud_val(pud) & PUD_BAD_BITS)) 164 164 #define pud_present(pud) (pud_val(pud) != 0) 165 - #define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) 165 + 166 + static inline pmd_t *pud_pgtable(pud_t pud) 167 + { 168 + return (pmd_t *)(pud_val(pud) & ~PUD_MASKED_BITS); 169 + } 166 170 167 171 extern struct page *pud_page(pud_t pud); 168 172
+2 -2
arch/powerpc/mm/book3s64/radix_pgtable.c
··· 820 820 continue; 821 821 } 822 822 823 - pmd_base = (pmd_t *)pud_page_vaddr(*pud); 823 + pmd_base = pud_pgtable(*pud); 824 824 remove_pmd_table(pmd_base, addr, next); 825 825 free_pmd_table(pmd_base, pud); 826 826 } ··· 1105 1105 pmd_t *pmd; 1106 1106 int i; 1107 1107 1108 - pmd = (pmd_t *)pud_page_vaddr(*pud); 1108 + pmd = pud_pgtable(*pud); 1109 1109 pud_clear(pud); 1110 1110 1111 1111 flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+1 -1
arch/powerpc/mm/pgtable_64.c
··· 115 115 VM_WARN_ON(!pud_huge(pud)); 116 116 return pte_page(pud_pte(pud)); 117 117 } 118 - return virt_to_page(pud_page_vaddr(pud)); 118 + return virt_to_page(pud_pgtable(pud)); 119 119 } 120 120 121 121 /*
+2 -2
arch/riscv/include/asm/pgtable-64.h
··· 60 60 set_pud(pudp, __pud(0)); 61 61 } 62 62 63 - static inline unsigned long pud_page_vaddr(pud_t pud) 63 + static inline pmd_t *pud_pgtable(pud_t pud) 64 64 { 65 - return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT); 65 + return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT); 66 66 } 67 67 68 68 static inline struct page *pud_page(pud_t pud)
+2 -2
arch/sh/include/asm/pgtable-3level.h
··· 32 32 #define pmd_val(x) ((x).pmd) 33 33 #define __pmd(x) ((pmd_t) { (x) } ) 34 34 35 - static inline unsigned long pud_page_vaddr(pud_t pud) 35 + static inline pmd_t *pud_pgtable(pud_t pud) 36 36 { 37 - return pud_val(pud); 37 + return (pmd_t *)pud_val(pud); 38 38 } 39 39 40 40 /* only used by the stubbed out hugetlb gup code, should never be called */
+3 -3
arch/sparc/include/asm/pgtable_32.h
··· 151 151 return (unsigned long)__nocache_va(v << 4); 152 152 } 153 153 154 - static inline unsigned long pud_page_vaddr(pud_t pud) 154 + static inline pmd_t *pud_pgtable(pud_t pud) 155 155 { 156 156 if (srmmu_device_memory(pud_val(pud))) { 157 - return ~0; 157 + return (pmd_t *)~0; 158 158 } else { 159 159 unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK; 160 - return (unsigned long)__nocache_va(v << 4); 160 + return (pmd_t *)__nocache_va(v << 4); 161 161 } 162 162 } 163 163
+3 -3
arch/sparc/include/asm/pgtable_64.h
··· 841 841 return ((unsigned long) __va(pfn << PAGE_SHIFT)); 842 842 } 843 843 844 - static inline unsigned long pud_page_vaddr(pud_t pud) 844 + static inline pmd_t *pud_pgtable(pud_t pud) 845 845 { 846 846 pte_t pte = __pte(pud_val(pud)); 847 847 unsigned long pfn; 848 848 849 849 pfn = pte_pfn(pte); 850 850 851 - return ((unsigned long) __va(pfn << PAGE_SHIFT)); 851 + return ((pmd_t *) __va(pfn << PAGE_SHIFT)); 852 852 } 853 853 854 854 #define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd)) 855 - #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) 855 + #define pud_page(pud) virt_to_page((void *)pud_pgtable(pud)) 856 856 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) 857 857 #define pud_present(pud) (pud_val(pud) != 0U) 858 858 #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
+1 -1
arch/um/include/asm/pgtable-3level.h
··· 83 83 } 84 84 85 85 #define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK) 86 - #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) 86 + #define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & PAGE_MASK)) 87 87 88 88 static inline unsigned long pte_pfn(pte_t pte) 89 89 {
+2 -2
arch/x86/include/asm/pgtable.h
··· 836 836 return pud_flags(pud) & _PAGE_PRESENT; 837 837 } 838 838 839 - static inline unsigned long pud_page_vaddr(pud_t pud) 839 + static inline pmd_t *pud_pgtable(pud_t pud) 840 840 { 841 - return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud)); 841 + return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud)); 842 842 } 843 843 844 844 /*
+2 -2
arch/x86/mm/pat/set_memory.c
··· 1134 1134 unsigned long start, unsigned long end) 1135 1135 { 1136 1136 if (unmap_pte_range(pmd, start, end)) 1137 - if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) 1137 + if (try_to_free_pmd_page(pud_pgtable(*pud))) 1138 1138 pud_clear(pud); 1139 1139 } 1140 1140 ··· 1178 1178 * Try again to free the PMD page if haven't succeeded above. 1179 1179 */ 1180 1180 if (!pud_none(*pud)) 1181 - if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) 1181 + if (try_to_free_pmd_page(pud_pgtable(*pud))) 1182 1182 pud_clear(pud); 1183 1183 } 1184 1184
+1 -1
arch/x86/mm/pgtable.c
··· 801 801 pte_t *pte; 802 802 int i; 803 803 804 - pmd = (pmd_t *)pud_page_vaddr(*pud); 804 + pmd = pud_pgtable(*pud); 805 805 pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL); 806 806 if (!pmd_sv) 807 807 return 0;
+1 -1
include/asm-generic/pgtable-nopmd.h
··· 51 51 #define __pmd(x) ((pmd_t) { __pud(x) } ) 52 52 53 53 #define pud_page(pud) (pmd_page((pmd_t){ pud })) 54 - #define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud })) 54 + #define pud_pgtable(pud) ((pmd_t *)(pmd_page_vaddr((pmd_t){ pud }))) 55 55 56 56 /* 57 57 * allocating and freeing a pmd is trivial: the 1-entry pmd is
+1 -1
include/asm-generic/pgtable-nopud.h
··· 49 49 #define __pud(x) ((pud_t) { __p4d(x) }) 50 50 51 51 #define p4d_page(p4d) (pud_page((pud_t){ p4d })) 52 - #define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d })) 52 + #define p4d_page_vaddr(p4d) (pud_pgtable((pud_t){ p4d })) 53 53 54 54 /* 55 55 * allocating and freeing a pud is trivial: the 1-entry pud is
+1 -1
include/linux/pgtable.h
··· 106 106 #ifndef pmd_offset 107 107 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) 108 108 { 109 - return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); 109 + return pud_pgtable(*pud) + pmd_index(address); 110 110 } 111 111 #define pmd_offset pmd_offset 112 112 #endif