Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mips: add support for folded p4d page tables

Implement primitives necessary for the 4th level folding, add walks of p4d
level where appropriate, replace 5level-fixup.h with pgtable-nop4d.h and
drop usage of __ARCH_USE_5LEVEL_HACK.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Paul Burton <paulburton@kernel.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: Mike Rapoport <rppt@kernel.org>

authored by

Mike Rapoport and committed by
Paul Burton
2bee1b58 31168f03

+75 -39
+1 -1
arch/mips/include/asm/fixmap.h
··· 70 70 #include <asm-generic/fixmap.h> 71 71 72 72 #define kmap_get_fixmap_pte(vaddr) \ 73 - pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) 73 + pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)), (vaddr)) 74 74 75 75 /* 76 76 * Called from pgtable_init()
+2 -2
arch/mips/include/asm/pgalloc.h
··· 96 96 free_pages((unsigned long)pud, PUD_ORDER); 97 97 } 98 98 99 - static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) 99 + static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) 100 100 { 101 - set_pgd(pgd, __pgd((unsigned long)pud)); 101 + set_p4d(p4d, __p4d((unsigned long)pud)); 102 102 } 103 103 104 104 #define __pud_free_tlb(tlb, x, addr) pud_free((tlb)->mm, x)
-1
arch/mips/include/asm/pgtable-32.h
··· 16 16 #include <asm/cachectl.h> 17 17 #include <asm/fixmap.h> 18 18 19 - #define __ARCH_USE_5LEVEL_HACK 20 19 #include <asm-generic/pgtable-nopmd.h> 21 20 22 21 #ifdef CONFIG_HIGHMEM
+19 -18
arch/mips/include/asm/pgtable-64.h
··· 17 17 #include <asm/cachectl.h> 18 18 #include <asm/fixmap.h> 19 19 20 - #define __ARCH_USE_5LEVEL_HACK 21 20 #if CONFIG_PGTABLE_LEVELS == 2 22 21 #include <asm-generic/pgtable-nopmd.h> 23 22 #elif CONFIG_PGTABLE_LEVELS == 3 24 23 #include <asm-generic/pgtable-nopud.h> 25 24 #else 26 - #include <asm-generic/5level-fixup.h> 25 + #include <asm-generic/pgtable-nop4d.h> 27 26 #endif 28 27 29 28 /* ··· 187 188 /* 188 189 * Empty pgd entries point to the invalid_pud_table. 189 190 */ 190 - static inline int pgd_none(pgd_t pgd) 191 + static inline int p4d_none(p4d_t p4d) 191 192 { 192 - return pgd_val(pgd) == (unsigned long)invalid_pud_table; 193 + return p4d_val(p4d) == (unsigned long)invalid_pud_table; 193 194 } 194 195 195 - static inline int pgd_bad(pgd_t pgd) 196 + static inline int p4d_bad(p4d_t p4d) 196 197 { 197 - if (unlikely(pgd_val(pgd) & ~PAGE_MASK)) 198 + if (unlikely(p4d_val(p4d) & ~PAGE_MASK)) 198 199 return 1; 199 200 200 201 return 0; 201 202 } 202 203 203 - static inline int pgd_present(pgd_t pgd) 204 + static inline int p4d_present(p4d_t p4d) 204 205 { 205 - return pgd_val(pgd) != (unsigned long)invalid_pud_table; 206 + return p4d_val(p4d) != (unsigned long)invalid_pud_table; 206 207 } 207 208 208 - static inline void pgd_clear(pgd_t *pgdp) 209 + static inline void p4d_clear(p4d_t *p4dp) 209 210 { 210 - pgd_val(*pgdp) = (unsigned long)invalid_pud_table; 211 + p4d_val(*p4dp) = (unsigned long)invalid_pud_table; 211 212 } 212 213 213 214 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) 214 215 215 - static inline unsigned long pgd_page_vaddr(pgd_t pgd) 216 + static inline unsigned long p4d_page_vaddr(p4d_t p4d) 216 217 { 217 - return pgd_val(pgd); 218 + return p4d_val(p4d); 218 219 } 219 220 220 - #define pgd_phys(pgd) virt_to_phys((void *)pgd_val(pgd)) 221 - #define pgd_page(pgd) (pfn_to_page(pgd_phys(pgd) >> PAGE_SHIFT)) 221 + #define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d)) 222 + #define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT)) 222 223 223 - static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) 224 + #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) 225 + 226 + static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) 224 227 { 225 - return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address); 228 + return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address); 226 229 } 227 230 228 - static inline void set_pgd(pgd_t *pgd, pgd_t pgdval) 231 + static inline void set_p4d(p4d_t *p4d, p4d_t p4dval) 229 232 { 230 - *pgd = pgdval; 233 + *p4d = p4dval; 231 234 } 232 235 233 236 #endif
+12 -4
arch/mips/kvm/mmu.c
··· 136 136 static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache, 137 137 unsigned long addr) 138 138 { 139 + p4d_t *p4d; 139 140 pud_t *pud; 140 141 pmd_t *pmd; 141 142 ··· 146 145 BUG(); 147 146 return NULL; 148 147 } 149 - pud = pud_offset(pgd, addr); 148 + p4d = p4d_offset(pgd, addr); 149 + pud = pud_offset(p4d, addr); 150 150 if (pud_none(*pud)) { 151 151 pmd_t *new_pmd; 152 152 ··· 260 258 static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa, 261 259 unsigned long end_gpa) 262 260 { 261 + p4d_t *p4d; 263 262 pud_t *pud; 264 263 unsigned long end = ~0ul; 265 264 int i_min = pgd_index(start_gpa); ··· 272 269 if (!pgd_present(pgd[i])) 273 270 continue; 274 271 275 - pud = pud_offset(pgd + i, 0); 272 + p4d = p4d_offset(pgd, 0); 273 + pud = pud_offset(p4d + i, 0); 276 274 if (i == i_max) 277 275 end = end_gpa; 278 276 ··· 382 378 unsigned long end) \ 383 379 { \ 384 380 int ret = 0; \ 381 + p4d_t *p4d; \ 385 382 pud_t *pud; \ 386 383 unsigned long cur_end = ~0ul; \ 387 384 int i_min = pgd_index(start); \ ··· 393 388 if (!pgd_present(pgd[i])) \ 394 389 continue; \ 395 390 \ 396 - pud = pud_offset(pgd + i, 0); \ 391 + p4d = p4d_offset(pgd, 0); \ 392 + pud = pud_offset(p4d + i, 0); \ 397 393 if (i == i_max) \ 398 394 cur_end = end; \ 399 395 \ ··· 922 916 static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva, 923 917 unsigned long end_gva) 924 918 { 919 + p4d_t *p4d; 925 920 pud_t *pud; 926 921 unsigned long end = ~0ul; 927 922 int i_min = pgd_index(start_gva); ··· 934 927 if (!pgd_present(pgd[i])) 935 928 continue; 936 929 937 - pud = pud_offset(pgd + i, 0); 930 + p4d = p4d_offset(pgd, 0); 931 + pud = pud_offset(p4d + i, 0); 938 932 if (i == i_max) 939 933 end = end_gva; 940 934
+3 -1
arch/mips/kvm/trap_emul.c
··· 564 564 /* Don't free host kernel page tables copied from init_mm.pgd */ 565 565 const unsigned long end = 0x80000000; 566 566 unsigned long pgd_va, pud_va, pmd_va; 567 + p4d_t *p4d; 567 568 pud_t *pud; 568 569 pmd_t *pmd; 569 570 pte_t *pte; ··· 577 576 pgd_va = (unsigned long)i << PGDIR_SHIFT; 578 577 if (pgd_va >= end) 579 578 break; 580 - pud = pud_offset(pgd + i, 0); 579 + p4d = p4d_offset(pgd, 0); 580 + pud = pud_offset(p4d + i, 0); 581 581 for (j = 0; j < PTRS_PER_PUD; j++) { 582 582 if (pud_none(pud[j])) 583 583 continue;
+3 -1
arch/mips/mm/c-r3k.c
··· 241 241 int exec = vma->vm_flags & VM_EXEC; 242 242 struct mm_struct *mm = vma->vm_mm; 243 243 pgd_t *pgdp; 244 + p4d_t *p4dp; 244 245 pud_t *pudp; 245 246 pmd_t *pmdp; 246 247 pte_t *ptep; ··· 254 253 return; 255 254 256 255 pgdp = pgd_offset(mm, addr); 257 - pudp = pud_offset(pgdp, addr); 256 + p4dp = p4d_offset(pgdp, addr); 257 + pudp = pud_offset(p4dp, addr); 258 258 pmdp = pmd_offset(pudp, addr); 259 259 ptep = pte_offset(pmdp, addr); 260 260
+3 -1
arch/mips/mm/c-r4k.c
··· 654 654 struct mm_struct *mm = vma->vm_mm; 655 655 int map_coherent = 0; 656 656 pgd_t *pgdp; 657 + p4d_t *p4dp; 657 658 pud_t *pudp; 658 659 pmd_t *pmdp; 659 660 pte_t *ptep; ··· 669 668 670 669 addr &= PAGE_MASK; 671 670 pgdp = pgd_offset(mm, addr); 672 - pudp = pud_offset(pgdp, addr); 671 + p4dp = p4d_offset(pgdp, addr); 672 + pudp = pud_offset(p4dp, addr); 673 673 pmdp = pmd_offset(pudp, addr); 674 674 ptep = pte_offset(pmdp, addr); 675 675
+3 -1
arch/mips/mm/c-tx39.c
··· 170 170 int exec = vma->vm_flags & VM_EXEC; 171 171 struct mm_struct *mm = vma->vm_mm; 172 172 pgd_t *pgdp; 173 + p4d_t *p4dp; 173 174 pud_t *pudp; 174 175 pmd_t *pmdp; 175 176 pte_t *ptep; ··· 184 183 185 184 page &= PAGE_MASK; 186 185 pgdp = pgd_offset(mm, page); 187 - pudp = pud_offset(pgdp, page); 186 + p4dp = p4d_offset(pgdp, page); 187 + pudp = pud_offset(p4dp, page); 188 188 pmdp = pmd_offset(pudp, page); 189 189 ptep = pte_offset(pmdp, page); 190 190
+8 -2
arch/mips/mm/fault.c
··· 294 294 */ 295 295 int offset = pgd_index(address); 296 296 pgd_t *pgd, *pgd_k; 297 + p4d_t *p4d, *p4d_k; 297 298 pud_t *pud, *pud_k; 298 299 pmd_t *pmd, *pmd_k; 299 300 pte_t *pte_k; ··· 306 305 goto no_context; 307 306 set_pgd(pgd, *pgd_k); 308 307 309 - pud = pud_offset(pgd, address); 310 - pud_k = pud_offset(pgd_k, address); 308 + p4d = p4d_offset(pgd, address); 309 + p4d_k = p4d_offset(pgd_k, address); 310 + if (!p4d_present(*p4d_k)) 311 + goto no_context; 312 + 313 + pud = pud_offset(p4d, address); 314 + pud_k = pud_offset(p4d_k, address); 311 315 if (!pud_present(*pud_k)) 312 316 goto no_context; 313 317
+10 -4
arch/mips/mm/hugetlbpage.c
··· 25 25 unsigned long sz) 26 26 { 27 27 pgd_t *pgd; 28 + p4d_t *p4d; 28 29 pud_t *pud; 29 30 pte_t *pte = NULL; 30 31 31 32 pgd = pgd_offset(mm, addr); 32 - pud = pud_alloc(mm, pgd, addr); 33 + p4d = p4d_alloc(mm, pgd, addr); 34 + pud = pud_alloc(mm, p4d, addr); 33 35 if (pud) 34 36 pte = (pte_t *)pmd_alloc(mm, pud, addr); 35 37 ··· 42 40 unsigned long sz) 43 41 { 44 42 pgd_t *pgd; 43 + p4d_t *p4d; 45 44 pud_t *pud; 46 45 pmd_t *pmd = NULL; 47 46 48 47 pgd = pgd_offset(mm, addr); 49 48 if (pgd_present(*pgd)) { 50 - pud = pud_offset(pgd, addr); 51 - if (pud_present(*pud)) 52 - pmd = pmd_offset(pud, addr); 49 + p4d = p4d_offset(pgd, addr); 50 + if (p4d_present(*p4d)) { 51 + pud = pud_offset(p4d, addr); 52 + if (pud_present(*pud)) 53 + pmd = pmd_offset(pud, addr); 54 + } 53 55 } 54 56 return (pte_t *) pmd; 55 57 }
+5 -1
arch/mips/mm/ioremap.c
··· 78 78 flush_cache_all(); 79 79 BUG_ON(address >= end); 80 80 do { 81 + p4d_t *p4d; 81 82 pud_t *pud; 82 83 pmd_t *pmd; 83 84 84 85 error = -ENOMEM; 85 - pud = pud_alloc(&init_mm, dir, address); 86 + p4d = p4d_alloc(&init_mm, dir, address); 87 + if (!p4d) 88 + break; 89 + pud = pud_alloc(&init_mm, p4d, address); 86 90 if (!pud) 87 91 break; 88 92 pmd = pmd_alloc(&init_mm, pud, address);
+3 -1
arch/mips/mm/pgtable-32.c
··· 56 56 pgd_t *pgd_base; 57 57 #ifdef CONFIG_HIGHMEM 58 58 pgd_t *pgd; 59 + p4d_t *p4d; 59 60 pud_t *pud; 60 61 pmd_t *pmd; 61 62 pte_t *pte; ··· 83 82 fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); 84 83 85 84 pgd = swapper_pg_dir + pgd_index(vaddr); 86 - pud = pud_offset(pgd, vaddr); 85 + p4d = p4d_offset(pgd, vaddr); 86 + pud = pud_offset(p4d, vaddr); 87 87 pmd = pmd_offset(pud, vaddr); 88 88 pte = pte_offset_kernel(pmd, vaddr); 89 89 pkmap_page_table = pte;
+3 -1
arch/mips/mm/tlb-r4k.c
··· 295 295 { 296 296 unsigned long flags; 297 297 pgd_t *pgdp; 298 + p4d_t *p4dp; 298 299 pud_t *pudp; 299 300 pmd_t *pmdp; 300 301 pte_t *ptep; ··· 321 320 mtc0_tlbw_hazard(); 322 321 tlb_probe(); 323 322 tlb_probe_hazard(); 324 - pudp = pud_offset(pgdp, address); 323 + p4dp = p4d_offset(pgdp, address); 324 + pudp = pud_offset(p4dp, address); 325 325 pmdp = pmd_offset(pudp, address); 326 326 idx = read_c0_index(); 327 327 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT