Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ia64: add support for folded p4d page tables

Implement primitives necessary for the 4th level folding, add walks of p4d
level where appropriate, remove usage of __ARCH_USE_5LEVEL_HACK and
replace 5level-fixup.h with pgtable-nop4d.h

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: James Morse <james.morse@arm.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200414153455.21744-6-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mike Rapoport; committed by Linus Torvalds.
c03ab9e3 00b13def

+52 -22
+2 -2
arch/ia64/include/asm/pgalloc.h
 #if CONFIG_PGTABLE_LEVELS == 4
 static inline void
-pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+p4d_populate(struct mm_struct *mm, p4d_t * p4d_entry, pud_t * pud)
 {
-	pgd_val(*pgd_entry) = __pa(pud);
+	p4d_val(*p4d_entry) = __pa(pud);
 }

 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+8 -9
arch/ia64/include/asm/pgtable.h
 #define pud_page(pud)			virt_to_page((pud_val(pud) + PAGE_OFFSET))

 #if CONFIG_PGTABLE_LEVELS == 4
-#define pgd_none(pgd)			(!pgd_val(pgd))
-#define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
-#define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
-#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
-#define pgd_page_vaddr(pgd)		((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
-#define pgd_page(pgd)			virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
+#define p4d_none(p4d)			(!p4d_val(p4d))
+#define p4d_bad(p4d)			(!ia64_phys_addr_valid(p4d_val(p4d)))
+#define p4d_present(p4d)		(p4d_val(p4d) != 0UL)
+#define p4d_clear(p4dp)			(p4d_val(*(p4dp)) = 0UL)
+#define p4d_page_vaddr(p4d)		((unsigned long) __va(p4d_val(p4d) & _PFN_MASK))
+#define p4d_page(p4d)			virt_to_page((p4d_val(p4d) + PAGE_OFFSET))
 #endif

 /*
···
 #if CONFIG_PGTABLE_LEVELS == 4
 /* Find an entry in the second-level page table.. */
 #define pud_offset(dir,addr) \
-	((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+	((pud_t *) p4d_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
 #endif

 /* Find an entry in the third-level page table.. */
···
 #if CONFIG_PGTABLE_LEVELS == 3
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #endif
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
 #include <asm-generic/pgtable.h>

 #endif /* _ASM_IA64_PGTABLE_H */
+6 -1
arch/ia64/mm/fault.c
 mapped_kernel_page_is_present (unsigned long address)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *ptep, pte;
···
 	if (pgd_none(*pgd) || pgd_bad(*pgd))
 		return 0;

-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	if (p4d_none(*p4d) || p4d_bad(*p4d))
+		return 0;
+
+	pud = pud_offset(p4d, address);
 	if (pud_none(*pud) || pud_bad(*pud))
 		return 0;
+12 -6
arch/ia64/mm/hugetlbpage.c
 {
 	unsigned long taddr = htlbpage_to_page(addr);
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte = NULL;

 	pgd = pgd_offset(mm, taddr);
-	pud = pud_alloc(mm, pgd, taddr);
+	p4d = p4d_offset(pgd, taddr);
+	pud = pud_alloc(mm, p4d, taddr);
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, taddr);
 		if (pmd)
···
 {
 	unsigned long taddr = htlbpage_to_page(addr);
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte = NULL;

 	pgd = pgd_offset(mm, taddr);
 	if (pgd_present(*pgd)) {
-		pud = pud_offset(pgd, taddr);
-		if (pud_present(*pud)) {
-			pmd = pmd_offset(pud, taddr);
-			if (pmd_present(*pmd))
-				pte = pte_offset_map(pmd, taddr);
+		p4d = p4d_offset(pgd, addr);
+		if (p4d_present(*p4d)) {
+			pud = pud_offset(p4d, taddr);
+			if (pud_present(*pud)) {
+				pmd = pmd_offset(pud, taddr);
+				if (pmd_present(*pmd))
+					pte = pte_offset_map(pmd, taddr);
+			}
 		}
 	}
+24 -4
arch/ia64/mm/init.c
 put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
···
 	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

 	{
-		pud = pud_alloc(&init_mm, pgd, address);
+		p4d = p4d_alloc(&init_mm, pgd, address);
+		if (!p4d)
+			goto out;
+		pud = pud_alloc(&init_mm, p4d, address);
 		if (!pud)
 			goto out;
 		pmd = pmd_alloc(&init_mm, pud, address);
···
 	do {
 		pgd_t *pgd;
+		p4d_t *p4d;
 		pud_t *pud;
 		pmd_t *pmd;
 		pte_t *pte;
···
 			continue;
 		}

-		pud = pud_offset(pgd, end_address);
+		p4d = p4d_offset(pgd, end_address);
+		if (p4d_none(*p4d)) {
+			end_address += P4D_SIZE;
+			continue;
+		}
+
+		pud = pud_offset(p4d, end_address);
 		if (pud_none(*pud)) {
 			end_address += PUD_SIZE;
 			continue;
 		}
···
 	struct page *map_start, *map_end;
 	int node;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
···
 	for (address = start_page; address < end_page; address += PAGE_SIZE) {
 		pgd = pgd_offset_k(address);
 		if (pgd_none(*pgd)) {
+			p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!p4d)
+				goto err_alloc;
+			pgd_populate(&init_mm, pgd, p4d);
+		}
+		p4d = p4d_offset(pgd, address);
+
+		if (p4d_none(*p4d)) {
 			pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
 			if (!pud)
 				goto err_alloc;
-			pgd_populate(&init_mm, pgd, pud);
+			p4d_populate(&init_mm, p4d, pud);
 		}
-		pud = pud_offset(pgd, address);
+		pud = pud_offset(p4d, address);

 		if (pud_none(*pud)) {
 			pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);