Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[POWERPC] Remove use of 4level-fixup.h for ppc32

For 32-bit systems, powerpc still relies on the 4level-fixup.h hack,
to pretend that the generic pagetable handling stuff is 3 levels
rather than 4. This patch removes this, instead using the newer
pgtable-nopmd.h to handle the elision of both the pud and pmd
pagetable levels (ppc32 pagetables are actually 2 levels).

This removes a little extraneous code, and makes it more easily
compared to the 64-bit pagetable code.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>

Authored by David Gibson; committed by Paul Mackerras.
d1953c88 00c2ae35

+31 -47
+3 -1
arch/powerpc/lib/dma-noncoherent.c
··· 306 306 static int __init dma_alloc_init(void) 307 307 { 308 308 pgd_t *pgd; 309 + pud_t *pud; 309 310 pmd_t *pmd; 310 311 pte_t *pte; 311 312 int ret = 0; 312 313 313 314 do { 314 315 pgd = pgd_offset(&init_mm, CONSISTENT_BASE); 315 - pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE); 316 + pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE); 317 + pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE); 316 318 if (!pmd) { 317 319 printk(KERN_ERR "%s: no pmd tables\n", __func__); 318 320 ret = -ENOMEM;
+16 -12
arch/powerpc/mm/pgtable_32.c
··· 261 261 int err = -ENOMEM; 262 262 263 263 /* Use upper 10 bits of VA to index the first level map */ 264 - pd = pmd_offset(pgd_offset_k(va), va); 264 + pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va); 265 265 /* Use middle 10 bits of VA to index the second-level map */ 266 266 pg = pte_alloc_kernel(pd, va); 267 267 if (pg != 0) { ··· 354 354 get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp) 355 355 { 356 356 pgd_t *pgd; 357 + pud_t *pud; 357 358 pmd_t *pmd; 358 359 pte_t *pte; 359 360 int retval = 0; 360 361 361 362 pgd = pgd_offset(mm, addr & PAGE_MASK); 362 363 if (pgd) { 363 - pmd = pmd_offset(pgd, addr & PAGE_MASK); 364 - if (pmd_present(*pmd)) { 365 - pte = pte_offset_map(pmd, addr & PAGE_MASK); 366 - if (pte) { 367 - retval = 1; 368 - *ptep = pte; 369 - if (pmdp) 370 - *pmdp = pmd; 371 - /* XXX caller needs to do pte_unmap, yuck */ 372 - } 373 - } 364 + pud = pud_offset(pgd, addr & PAGE_MASK); 365 + if (pud && pud_present(*pud)) { 366 + pmd = pmd_offset(pud, addr & PAGE_MASK); 367 + if (pmd_present(*pmd)) { 368 + pte = pte_offset_map(pmd, addr & PAGE_MASK); 369 + if (pte) { 370 + retval = 1; 371 + *ptep = pte; 372 + if (pmdp) 373 + *pmdp = pmd; 374 + /* XXX caller needs to do pte_unmap, yuck */ 375 + } 376 + } 377 + } 374 378 } 375 379 return(retval); 376 380 }
+8 -4
include/asm-powerpc/page.h
··· 121 121 #endif 122 122 123 123 /* PMD level */ 124 + #ifdef CONFIG_PPC64 124 125 typedef struct { unsigned long pmd; } pmd_t; 125 126 #define pmd_val(x) ((x).pmd) 126 127 #define __pmd(x) ((pmd_t) { (x) }) 127 128 128 129 /* PUD level exusts only on 4k pages */ 129 - #if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_64K_PAGES) 130 + #ifndef CONFIG_PPC_64K_PAGES 130 131 typedef struct { unsigned long pud; } pud_t; 131 132 #define pud_val(x) ((x).pud) 132 133 #define __pud(x) ((pud_t) { (x) }) 133 - #endif 134 + #endif /* !CONFIG_PPC_64K_PAGES */ 135 + #endif /* CONFIG_PPC64 */ 134 136 135 137 /* PGD level */ 136 138 typedef struct { unsigned long pgd; } pgd_t; ··· 161 159 #endif 162 160 163 161 162 + #ifdef CONFIG_PPC64 164 163 typedef unsigned long pmd_t; 165 164 #define pmd_val(x) (x) 166 165 #define __pmd(x) (x) 167 166 168 - #if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_64K_PAGES) 167 + #ifndef CONFIG_PPC_64K_PAGES 169 168 typedef unsigned long pud_t; 170 169 #define pud_val(x) (x) 171 170 #define __pud(x) (x) 172 - #endif 171 + #endif /* !CONFIG_PPC_64K_PAGES */ 172 + #endif /* CONFIG_PPC64 */ 173 173 174 174 typedef unsigned long pgd_t; 175 175 #define pgd_val(x) (x)
+2 -2
include/asm-powerpc/pgalloc-32.h
··· 12 12 * We don't have any real pmd's, and this code never triggers because 13 13 * the pgd will always be present.. 14 14 */ 15 - #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) 15 + /* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */ 16 16 #define pmd_free(x) do { } while (0) 17 17 #define __pmd_free_tlb(tlb,x) do { } while (0) 18 - #define pgd_populate(mm, pmd, pte) BUG() 18 + /* #define pgd_populate(mm, pmd, pte) BUG() */ 19 19 20 20 #ifndef CONFIG_BOOKE 21 21 #define pmd_populate_kernel(mm, pmd, pte) \
+2 -28
include/asm-powerpc/pgtable-ppc32.h
··· 1 1 #ifndef _ASM_POWERPC_PGTABLE_PPC32_H 2 2 #define _ASM_POWERPC_PGTABLE_PPC32_H 3 3 4 - #include <asm-generic/4level-fixup.h> 4 + #include <asm-generic/pgtable-nopmd.h> 5 5 6 6 #ifndef __ASSEMBLY__ 7 7 #include <linux/sched.h> ··· 76 76 * level has 2048 entries and the second level has 512 64-bit PTE entries. 77 77 * -Matt 78 78 */ 79 - /* PMD_SHIFT determines the size of the area mapped by the PTE pages */ 80 - #define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT) 81 - #define PMD_SIZE (1UL << PMD_SHIFT) 82 - #define PMD_MASK (~(PMD_SIZE-1)) 83 - 84 79 /* PGDIR_SHIFT determines what a top-level page table entry can map */ 85 - #define PGDIR_SHIFT PMD_SHIFT 80 + #define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) 86 81 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 87 82 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 88 83 ··· 98 103 #define pte_ERROR(e) \ 99 104 printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \ 100 105 (unsigned long long)pte_val(e)) 101 - #define pmd_ERROR(e) \ 102 - printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) 103 106 #define pgd_ERROR(e) \ 104 107 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) 105 108 ··· 509 516 510 517 #ifndef __ASSEMBLY__ 511 518 /* 512 - * The "pgd_xxx()" functions here are trivial for a folded two-level 513 - * setup: the pgd is never bad, and a pmd always exists (as it's folded 514 - * into the pgd entry) 515 - */ 516 - static inline int pgd_none(pgd_t pgd) { return 0; } 517 - static inline int pgd_bad(pgd_t pgd) { return 0; } 518 - static inline int pgd_present(pgd_t pgd) { return 1; } 519 - #define pgd_clear(xp) do { } while (0) 520 - 521 - #define pgd_page_vaddr(pgd) \ 522 - ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK)) 523 - 524 - /* 525 519 * The following only work if pte_present() is true. 526 520 * Undefined behaviour if not.. 
527 521 */ ··· 716 736 /* to find an entry in a page-table-directory */ 717 737 #define pgd_index(address) ((address) >> PGDIR_SHIFT) 718 738 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 719 - 720 - /* Find an entry in the second-level page table.. */ 721 - static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) 722 - { 723 - return (pmd_t *) dir; 724 - } 725 739 726 740 /* Find an entry in the third-level page table.. */ 727 741 #define pte_index(address) \