Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

parisc: use pgtable-nopXd instead of 4level-fixup

parisc has two or three levels of page tables and can use appropriate
pgtable-nopXd and folding of the upper layers.

Replace usage of include/asm-generic/4level-fixup.h and explicit
definitions of __PAGETABLE_PxD_FOLDED in parisc with
include/asm-generic/pgtable-nopmd.h for two-level configurations and
with include/asm-generic/pgtable-nopud.h for three-level configurations
and adjust page table manipulation macros and functions accordingly.

Link: http://lkml.kernel.org/r/1572938135-31886-9-git-send-email-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Helge Deller <deller@gmx.de>
Cc: Anatoly Pugachev <matorola@gmail.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Peter Rosin <peda@axentia.se>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rolf Eike Beer <eike-kernel@sf-tec.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Sam Creasey <sammy@sammy.net>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Mike Rapoport and committed by
Linus Torvalds
d96885e2 7c2763c4

+81 -76
+18 -12
arch/parisc/include/asm/page.h
··· 42 42 43 43 /* NOTE: even on 64 bits, these entries are __u32 because we allocate 44 44 * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */ 45 - typedef struct { __u32 pmd; } pmd_t; 46 45 typedef struct { __u32 pgd; } pgd_t; 47 46 typedef struct { unsigned long pgprot; } pgprot_t; 48 47 49 - #define pte_val(x) ((x).pte) 50 - /* These do not work lvalues, so make sure we don't use them as such. */ 48 + #if CONFIG_PGTABLE_LEVELS == 3 49 + typedef struct { __u32 pmd; } pmd_t; 50 + #define __pmd(x) ((pmd_t) { (x) } ) 51 + /* pXd_val() do not work as lvalues, so make sure we don't use them as such. */ 51 52 #define pmd_val(x) ((x).pmd + 0) 53 + #endif 54 + 55 + #define pte_val(x) ((x).pte) 52 56 #define pgd_val(x) ((x).pgd + 0) 53 57 #define pgprot_val(x) ((x).pgprot) 54 58 55 59 #define __pte(x) ((pte_t) { (x) } ) 56 - #define __pmd(x) ((pmd_t) { (x) } ) 57 60 #define __pgd(x) ((pgd_t) { (x) } ) 58 61 #define __pgprot(x) ((pgprot_t) { (x) } ) 59 - 60 - #define __pmd_val_set(x,n) (x).pmd = (n) 61 - #define __pgd_val_set(x,n) (x).pgd = (n) 62 62 63 63 #else 64 64 /* 65 65 * .. while these make it easier on the compiler 66 66 */ 67 67 typedef unsigned long pte_t; 68 + 69 + #if CONFIG_PGTABLE_LEVELS == 3 68 70 typedef __u32 pmd_t; 71 + #define pmd_val(x) (x) 72 + #define __pmd(x) (x) 73 + #endif 74 + 69 75 typedef __u32 pgd_t; 70 76 typedef unsigned long pgprot_t; 71 77 72 78 #define pte_val(x) (x) 73 - #define pmd_val(x) (x) 74 79 #define pgd_val(x) (x) 75 80 #define pgprot_val(x) (x) 76 81 77 82 #define __pte(x) (x) 78 - #define __pmd(x) (x) 79 83 #define __pgd(x) (x) 80 84 #define __pgprot(x) (x) 81 85 82 - #define __pmd_val_set(x,n) (x) = (n) 83 - #define __pgd_val_set(x,n) (x) = (n) 84 - 85 86 #endif /* STRICT_MM_TYPECHECKS */ 87 + 88 + #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) 89 + #if CONFIG_PGTABLE_LEVELS == 3 90 + #define set_pud(pudptr, pudval) (*(pudptr) = (pudval)) 91 + #endif 86 92 87 93 typedef struct page *pgtable_t; 88 94
+14 -27
arch/parisc/include/asm/pgalloc.h
··· 34 34 /* Populate first pmd with allocated memory. We mark it 35 35 * with PxD_FLAG_ATTACHED as a signal to the system that this 36 36 * pmd entry may not be cleared. */ 37 - __pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT | 38 - PxD_FLAG_VALID | 39 - PxD_FLAG_ATTACHED) 40 - + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); 37 + set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT | 38 + PxD_FLAG_VALID | 39 + PxD_FLAG_ATTACHED) 40 + + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT))); 41 41 /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as 42 42 * a signal that this pmd may not be freed */ 43 - __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); 43 + set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED)); 44 44 #endif 45 45 } 46 46 spin_lock_init(pgd_spinlock(actual_pgd)); ··· 59 59 60 60 /* Three Level Page Table Support for pmd's */ 61 61 62 - static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) 62 + static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) 63 63 { 64 - __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) + 65 - (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)); 64 + set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) + 65 + (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT))); 66 66 } 67 67 68 68 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) ··· 88 88 free_pages((unsigned long)pmd, PMD_ORDER); 89 89 } 90 90 91 - #else 92 - 93 - /* Two Level Page Table Support for pmd's */ 94 - 95 - /* 96 - * allocating and freeing a pmd is trivial: the 1-entry pmd is 97 - * inside the pgd, so has no extra memory associated with it. 
98 - */ 99 - 100 - #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) 101 - #define pmd_free(mm, x) do { } while (0) 102 - #define pgd_populate(mm, pmd, pte) BUG() 103 - 104 91 #endif 105 92 106 93 static inline void ··· 97 110 /* preserve the gateway marker if this is the beginning of 98 111 * the permanent pmd */ 99 112 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) 100 - __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | 101 - PxD_FLAG_VALID | 102 - PxD_FLAG_ATTACHED) 103 - + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)); 113 + set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | 114 + PxD_FLAG_VALID | 115 + PxD_FLAG_ATTACHED) 116 + + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); 104 117 else 105 118 #endif 106 - __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) 107 - + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)); 119 + set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID) 120 + + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); 108 121 } 109 122 110 123 #define pmd_populate(mm, pmd, pte_page) \
+25 -27
arch/parisc/include/asm/pgtable.h
··· 3 3 #define _PARISC_PGTABLE_H 4 4 5 5 #include <asm/page.h> 6 - #include <asm-generic/4level-fixup.h> 6 + 7 + #if CONFIG_PGTABLE_LEVELS == 3 8 + #include <asm-generic/pgtable-nopud.h> 9 + #elif CONFIG_PGTABLE_LEVELS == 2 10 + #include <asm-generic/pgtable-nopmd.h> 11 + #endif 7 12 8 13 #include <asm/fixmap.h> 9 14 ··· 106 101 107 102 #define pte_ERROR(e) \ 108 103 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 104 + #if CONFIG_PGTABLE_LEVELS == 3 109 105 #define pmd_ERROR(e) \ 110 106 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e)) 107 + #endif 111 108 #define pgd_ERROR(e) \ 112 109 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) 113 110 ··· 139 132 #define PTRS_PER_PTE (1UL << BITS_PER_PTE) 140 133 141 134 /* Definitions for 2nd level */ 135 + #if CONFIG_PGTABLE_LEVELS == 3 142 136 #define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE) 143 137 #define PMD_SIZE (1UL << PMD_SHIFT) 144 138 #define PMD_MASK (~(PMD_SIZE-1)) 145 - #if CONFIG_PGTABLE_LEVELS == 3 146 139 #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) 140 + #define PTRS_PER_PMD (1UL << BITS_PER_PMD) 147 141 #else 148 - #define __PAGETABLE_PMD_FOLDED 1 149 142 #define BITS_PER_PMD 0 150 143 #endif 151 - #define PTRS_PER_PMD (1UL << BITS_PER_PMD) 152 144 153 145 /* Definitions for 1st level */ 154 - #define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD) 146 + #define PGDIR_SHIFT (PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD) 155 147 #if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG 156 148 #define BITS_PER_PGD (BITS_PER_LONG - PGDIR_SHIFT) 157 149 #else ··· 323 317 324 318 #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) 325 319 #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) 320 + #define pud_flag(x) (pud_val(x) & PxD_FLAG_MASK) 321 + #define pud_address(x) ((unsigned long)(pud_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) 326 322 #define pgd_flag(x) 
(pgd_val(x) & PxD_FLAG_MASK) 327 323 #define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) 328 324 ··· 342 334 if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) 343 335 /* This is the entry pointing to the permanent pmd 344 336 * attached to the pgd; cannot clear it */ 345 - __pmd_val_set(*pmd, PxD_FLAG_ATTACHED); 337 + set_pmd(pmd, __pmd(PxD_FLAG_ATTACHED)); 346 338 else 347 339 #endif 348 - __pmd_val_set(*pmd, 0); 340 + set_pmd(pmd, __pmd(0)); 349 341 } 350 342 351 343 352 344 353 345 #if CONFIG_PGTABLE_LEVELS == 3 354 - #define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd))) 355 - #define pgd_page(pgd) virt_to_page((void *)pgd_page_vaddr(pgd)) 346 + #define pud_page_vaddr(pud) ((unsigned long) __va(pud_address(pud))) 347 + #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) 356 348 357 349 /* For 64 bit we have three level tables */ 358 350 359 - #define pgd_none(x) (!pgd_val(x)) 360 - #define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID)) 361 - #define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT) 362 - static inline void pgd_clear(pgd_t *pgd) { 351 + #define pud_none(x) (!pud_val(x)) 352 + #define pud_bad(x) (!(pud_flag(x) & PxD_FLAG_VALID)) 353 + #define pud_present(x) (pud_flag(x) & PxD_FLAG_PRESENT) 354 + static inline void pud_clear(pud_t *pud) { 363 355 #if CONFIG_PGTABLE_LEVELS == 3 364 - if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED) 365 - /* This is the permanent pmd attached to the pgd; cannot 356 + if(pud_flag(*pud) & PxD_FLAG_ATTACHED) 357 + /* This is the permanent pmd attached to the pud; cannot 366 358 * free it */ 367 359 return; 368 360 #endif 369 - __pgd_val_set(*pgd, 0); 361 + set_pud(pud, __pud(0)); 370 362 } 371 - #else 372 - /* 373 - * The "pgd_xxx()" functions here are trivial for a folded two-level 374 - * setup: the pgd is never bad, and a pmd always exists (as it's folded 375 - * into the pgd entry) 376 - */ 377 - static inline int pgd_none(pgd_t pgd) { return 0; } 378 - static inline int 
pgd_bad(pgd_t pgd) { return 0; } 379 - static inline int pgd_present(pgd_t pgd) { return 1; } 380 - static inline void pgd_clear(pgd_t * pgdp) { } 381 363 #endif 382 364 383 365 /* ··· 450 452 #if CONFIG_PGTABLE_LEVELS == 3 451 453 #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) 452 454 #define pmd_offset(dir,address) \ 453 - ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address)) 455 + ((pmd_t *) pud_page_vaddr(*(dir)) + pmd_index(address)) 454 456 #else 455 457 #define pmd_offset(dir,addr) ((pmd_t *) dir) 456 458 #endif
+2
arch/parisc/include/asm/tlb.h
··· 4 4 5 5 #include <asm-generic/tlb.h> 6 6 7 + #if CONFIG_PGTABLE_LEVELS == 3 7 8 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) 9 + #endif 8 10 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) 9 11 10 12 #endif
+8 -5
arch/parisc/kernel/cache.c
··· 534 534 pte_t *ptep = NULL; 535 535 536 536 if (!pgd_none(*pgd)) { 537 - pud_t *pud = pud_offset(pgd, addr); 538 - if (!pud_none(*pud)) { 539 - pmd_t *pmd = pmd_offset(pud, addr); 540 - if (!pmd_none(*pmd)) 541 - ptep = pte_offset_map(pmd, addr); 537 + p4d_t *p4d = p4d_offset(pgd, addr); 538 + if (!p4d_none(*p4d)) { 539 + pud_t *pud = pud_offset(p4d, addr); 540 + if (!pud_none(*pud)) { 541 + pmd_t *pmd = pmd_offset(pud, addr); 542 + if (!pmd_none(*pmd)) 543 + ptep = pte_offset_map(pmd, addr); 544 + } 542 545 } 543 546 } 544 547 return ptep;
+7 -2
arch/parisc/kernel/pci-dma.c
··· 133 133 134 134 dir = pgd_offset_k(vaddr); 135 135 do { 136 + p4d_t *p4d; 137 + pud_t *pud; 136 138 pmd_t *pmd; 137 - 138 - pmd = pmd_alloc(NULL, dir, vaddr); 139 + 140 + p4d = p4d_offset(dir, vaddr); 141 + pud = pud_offset(p4d, vaddr); 142 + pmd = pmd_alloc(NULL, pud, vaddr); 143 + 139 144 if (!pmd) 140 145 return -ENOMEM; 141 146 if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
+7 -3
arch/parisc/mm/fixmap.c
··· 14 14 { 15 15 unsigned long vaddr = __fix_to_virt(idx); 16 16 pgd_t *pgd = pgd_offset_k(vaddr); 17 - pmd_t *pmd = pmd_offset(pgd, vaddr); 17 + p4d_t *p4d = p4d_offset(pgd, vaddr); 18 + pud_t *pud = pud_offset(p4d, vaddr); 19 + pmd_t *pmd = pmd_offset(pud, vaddr); 18 20 pte_t *pte; 19 21 20 22 if (pmd_none(*pmd)) 21 - pmd = pmd_alloc(NULL, pgd, vaddr); 23 + pmd = pmd_alloc(NULL, pud, vaddr); 22 24 23 25 pte = pte_offset_kernel(pmd, vaddr); 24 26 if (pte_none(*pte)) ··· 34 32 { 35 33 unsigned long vaddr = __fix_to_virt(idx); 36 34 pgd_t *pgd = pgd_offset_k(vaddr); 37 - pmd_t *pmd = pmd_offset(pgd, vaddr); 35 + p4d_t *p4d = p4d_offset(pgd, vaddr); 36 + pud_t *pud = pud_offset(p4d, vaddr); 37 + pmd_t *pmd = pmd_offset(pud, vaddr); 38 38 pte_t *pte = pte_offset_kernel(pmd, vaddr); 39 39 40 40 if (WARN_ON(pte_none(*pte)))