Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Update MIPS to use the 4-level pagetable code thereby getting rid of the compacrapability headers.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

+179 -101
+8 -2
arch/mips/lib-32/dump_tlb.c
··· 139 139 void dump_list_process(struct task_struct *t, void *address) 140 140 { 141 141 pgd_t *page_dir, *pgd; 142 + pud_t *pud; 142 143 pmd_t *pmd; 143 144 pte_t *pte, page; 144 145 unsigned long addr, val; ··· 163 162 pgd = pgd_offset(t->mm, addr); 164 163 printk("pgd == %08x, ", (unsigned int) pgd); 165 164 166 - pmd = pmd_offset(pgd, addr); 165 + pud = pud_offset(pgd, addr); 166 + printk("pud == %08x, ", (unsigned int) pud); 167 + 168 + pmd = pmd_offset(pud, addr); 167 169 printk("pmd == %08x, ", (unsigned int) pmd); 168 170 169 171 pte = pte_offset(pmd, addr); ··· 199 195 unsigned int vtop(void *address) 200 196 { 201 197 pgd_t *pgd; 198 + pud_t *pud; 202 199 pmd_t *pmd; 203 200 pte_t *pte; 204 201 unsigned int addr, paddr; 205 202 206 203 addr = (unsigned long) address; 207 204 pgd = pgd_offset(current->mm, addr); 208 - pmd = pmd_offset(pgd, addr); 205 + pud = pud_offset(pgd, addr); 206 + pmd = pmd_offset(pud, addr); 209 207 pte = pte_offset(pmd, addr); 210 208 paddr = (KSEG1 | (unsigned int) pte_val(*pte)) & PAGE_MASK; 211 209 paddr |= (addr & ~PAGE_MASK);
+8 -2
arch/mips/lib-32/r3k_dump_tlb.c
··· 105 105 void dump_list_process(struct task_struct *t, void *address) 106 106 { 107 107 pgd_t *page_dir, *pgd; 108 + pud_t *pud; 108 109 pmd_t *pmd; 109 110 pte_t *pte, page; 110 111 unsigned int addr; ··· 122 121 pgd = pgd_offset(t->mm, addr); 123 122 printk("pgd == %08x, ", (unsigned int) pgd); 124 123 125 - pmd = pmd_offset(pgd, addr); 124 + pud = pud_offset(pgd, addr); 125 + printk("pud == %08x, ", (unsigned int) pud); 126 + 127 + pmd = pmd_offset(pud, addr); 126 128 printk("pmd == %08x, ", (unsigned int) pmd); 127 129 128 130 pte = pte_offset(pmd, addr); ··· 153 149 unsigned int vtop(void *address) 154 150 { 155 151 pgd_t *pgd; 152 + pud_t *pud; 156 153 pmd_t *pmd; 157 154 pte_t *pte; 158 155 unsigned int addr, paddr; 159 156 160 157 addr = (unsigned long) address; 161 158 pgd = pgd_offset(current->mm, addr); 162 - pmd = pmd_offset(pgd, addr); 159 + pud = pud_offset(pgd, addr); 160 + pmd = pmd_offset(pud, addr); 163 161 pte = pte_offset(pmd, addr); 164 162 paddr = (KSEG1 | (unsigned int) pte_val(*pte)) & PAGE_MASK; 165 163 paddr |= (addr & ~PAGE_MASK);
+8 -2
arch/mips/lib-64/dump_tlb.c
··· 140 140 void dump_list_process(struct task_struct *t, void *address) 141 141 { 142 142 pgd_t *page_dir, *pgd; 143 + pud_t *pud; 143 144 pmd_t *pmd; 144 145 pte_t *pte, page; 145 146 unsigned long addr, val; ··· 156 155 pgd = pgd_offset(t->mm, addr); 157 156 printk("pgd == %016lx\n", (unsigned long) pgd); 158 157 159 - pmd = pmd_offset(pgd, addr); 158 + pud = pud_offset(pgd, addr); 159 + printk("pud == %016lx\n", (unsigned long) pud); 160 + 161 + pmd = pmd_offset(pud, addr); 160 162 printk("pmd == %016lx\n", (unsigned long) pmd); 161 163 162 164 pte = pte_offset(pmd, addr); ··· 188 184 unsigned int vtop(void *address) 189 185 { 190 186 pgd_t *pgd; 187 + pud_t *pud; 191 188 pmd_t *pmd; 192 189 pte_t *pte; 193 190 unsigned int addr, paddr; 194 191 195 192 addr = (unsigned long) address; 196 193 pgd = pgd_offset(current->mm, addr); 197 - pmd = pmd_offset(pgd, addr); 194 + pud = pud_offset(pgd, addr); 195 + pmd = pmd_offset(pud, addr); 198 196 pte = pte_offset(pmd, addr); 199 197 paddr = (CKSEG1 | (unsigned int) pte_val(*pte)) & PAGE_MASK; 200 198 paddr |= (addr & ~PAGE_MASK);
+3 -1
arch/mips/mm/c-r3k.c
··· 221 221 struct mm_struct *mm) 222 222 { 223 223 pgd_t *pgd; 224 + pud_t *pud; 224 225 pmd_t *pmd; 225 226 pte_t *pte; 226 227 unsigned long physpage; 227 228 228 229 pgd = pgd_offset(mm, addr); 229 - pmd = pmd_offset(pgd, addr); 230 + pud = pud_offset(pgd, addr); 231 + pmd = pmd_offset(pud, addr); 230 232 pte = pte_offset(pmd, addr); 231 233 232 234 if ((physpage = pte_val(*pte)) & _PAGE_VALID)
+3 -1
arch/mips/mm/c-r4k.c
··· 372 372 int exec = vma->vm_flags & VM_EXEC; 373 373 struct mm_struct *mm = vma->vm_mm; 374 374 pgd_t *pgdp; 375 + pud_t *pudp; 375 376 pmd_t *pmdp; 376 377 pte_t *ptep; 377 378 378 379 page &= PAGE_MASK; 379 380 pgdp = pgd_offset(mm, page); 380 - pmdp = pmd_offset(pgdp, page); 381 + pudp = pud_offset(pgdp, page); 382 + pmdp = pmd_offset(pudp, page); 381 383 ptep = pte_offset(pmdp, page); 382 384 383 385 /*
+3 -1
arch/mips/mm/c-tx39.c
··· 183 183 int exec = vma->vm_flags & VM_EXEC; 184 184 struct mm_struct *mm = vma->vm_mm; 185 185 pgd_t *pgdp; 186 + pud_t *pudp; 186 187 pmd_t *pmdp; 187 188 pte_t *ptep; 188 189 ··· 196 195 197 196 page &= PAGE_MASK; 198 197 pgdp = pgd_offset(mm, page); 199 - pmdp = pmd_offset(pgdp, page); 198 + pudp = pud_offset(pgdp, page); 199 + pmdp = pmd_offset(pudp, page); 200 200 ptep = pte_offset(pmdp, page); 201 201 202 202 /*
+8 -2
arch/mips/mm/fault.c
··· 212 212 */ 213 213 int offset = __pgd_offset(address); 214 214 pgd_t *pgd, *pgd_k; 215 + pud_t *pud, *pud_k; 215 216 pmd_t *pmd, *pmd_k; 216 217 pte_t *pte_k; 217 218 ··· 223 222 goto no_context; 224 223 set_pgd(pgd, *pgd_k); 225 224 226 - pmd = pmd_offset(pgd, address); 227 - pmd_k = pmd_offset(pgd_k, address); 225 + pud = pud_offset(pgd, address); 226 + pud_k = pud_offset(pgd_k, address); 227 + if (!pud_present(*pud_k)) 228 + goto no_context; 229 + 230 + pmd = pmd_offset(pud, address); 231 + pmd_k = pmd_offset(pud_k, address); 228 232 if (!pmd_present(*pmd_k)) 229 233 goto no_context; 230 234 set_pmd(pmd, *pmd_k);
+17 -11
arch/mips/mm/init.c
··· 83 83 pgprot_t kmap_prot; 84 84 85 85 #define kmap_get_fixmap_pte(vaddr) \ 86 - pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)) 86 + pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) 87 87 88 88 static void __init kmap_init(void) 89 89 { ··· 101 101 pgd_t *pgd_base) 102 102 { 103 103 pgd_t *pgd; 104 + pud_t *pud; 104 105 pmd_t *pmd; 105 106 pte_t *pte; 106 - int i, j; 107 + int i, j, k; 107 108 unsigned long vaddr; 108 109 109 110 vaddr = start; 110 111 i = __pgd_offset(vaddr); 111 - j = __pmd_offset(vaddr); 112 + j = __pud_offset(vaddr); 113 + k = __pmd_offset(vaddr); 112 114 pgd = pgd_base + i; 113 115 114 116 for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { 115 - pmd = (pmd_t *)pgd; 116 - for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) { 117 - if (pmd_none(*pmd)) { 118 - pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); 119 - set_pmd(pmd, __pmd(pte)); 120 - if (pte != pte_offset_kernel(pmd, 0)) 121 - BUG(); 117 + pud = (pud_t *)pgd; 118 + for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { 119 + pmd = (pmd_t *)pud; 120 + for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { 121 + if (pmd_none(*pmd)) { 122 + pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); 123 + set_pmd(pmd, __pmd(pte)); 124 + if (pte != pte_offset_kernel(pmd, 0)) 125 + BUG(); 126 + } 127 + vaddr += PMD_SIZE; 122 128 } 123 - vaddr += PMD_SIZE; 129 + k = 0; 124 130 } 125 131 j = 0; 126 132 }
+9 -6
arch/mips/mm/ioremap.c
··· 79 79 BUG(); 80 80 spin_lock(&init_mm.page_table_lock); 81 81 do { 82 + pud_t *pud; 82 83 pmd_t *pmd; 83 - pmd = pmd_alloc(&init_mm, dir, address); 84 + 84 85 error = -ENOMEM; 86 + pud = pud_alloc(&init_mm, dir, address); 87 + if (!pud) 88 + break; 89 + pmd = pmd_alloc(&init_mm, pud, address); 85 90 if (!pmd) 86 91 break; 87 92 if (remap_area_pmd(pmd, address, end - address, ··· 146 141 */ 147 142 if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) && 148 143 flags == _CACHE_UNCACHED) 149 - return (void *) KSEG1ADDR(phys_addr); 144 + return (void *) CKSEG1ADDR(phys_addr); 150 145 151 146 /* 152 147 * Don't allow anybody to remap normal RAM that we're using.. ··· 185 180 return (void *) (offset + (char *)addr); 186 181 } 187 182 188 - #define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1) 183 + #define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1) 189 184 190 185 void __iounmap(volatile void __iomem *addr) 191 186 { ··· 195 190 return; 196 191 197 192 p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr)); 198 - if (!p) { 193 + if (!p) 199 194 printk(KERN_ERR "iounmap: bad address %p\n", addr); 200 - return; 201 - } 202 195 203 196 kfree(p); 204 197 }
+3 -1
arch/mips/mm/pgtable-32.c
··· 35 35 #ifdef CONFIG_HIGHMEM 36 36 unsigned long vaddr; 37 37 pgd_t *pgd, *pgd_base; 38 + pud_t *pud; 38 39 pmd_t *pmd; 39 40 pte_t *pte; 40 41 #endif ··· 61 60 fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); 62 61 63 62 pgd = swapper_pg_dir + __pgd_offset(vaddr); 64 - pmd = pmd_offset(pgd, vaddr); 63 + pud = pud_offset(pgd, vaddr); 64 + pmd = pmd_offset(pud, vaddr); 65 65 pte = pte_offset_kernel(pmd, vaddr); 66 66 pkmap_page_table = pte; 67 67 #endif
+3 -1
arch/mips/mm/tlb-andes.c
··· 195 195 { 196 196 unsigned long flags; 197 197 pgd_t *pgdp; 198 + pud_t *pudp; 198 199 pmd_t *pmdp; 199 200 pte_t *ptep; 200 201 int idx, pid; ··· 221 220 write_c0_entryhi(address | (pid)); 222 221 pgdp = pgd_offset(vma->vm_mm, address); 223 222 tlb_probe(); 224 - pmdp = pmd_offset(pgdp, address); 223 + pudp = pud_offset(pgdp, address); 224 + pmdp = pmd_offset(pudp, address); 225 225 idx = read_c0_index(); 226 226 ptep = pte_offset_map(pmdp, address); 227 227 write_c0_entrylo0(pte_val(*ptep++) >> 6);
+3 -1
arch/mips/mm/tlb-r4k.c
··· 227 227 { 228 228 unsigned long flags; 229 229 pgd_t *pgdp; 230 + pud_t *pudp; 230 231 pmd_t *pmdp; 231 232 pte_t *ptep; 232 233 int idx, pid; ··· 247 246 mtc0_tlbw_hazard(); 248 247 tlb_probe(); 249 248 BARRIER; 250 - pmdp = pmd_offset(pgdp, address); 249 + pudp = pud_offset(pgdp, address); 250 + pmdp = pmd_offset(pudp, address); 251 251 idx = read_c0_index(); 252 252 ptep = pte_offset_map(pmdp, address); 253 253
+35 -9
include/asm-mips/page.h
··· 87 87 typedef struct { unsigned long pte; } pte_t; 88 88 #define pte_val(x) ((x).pte) 89 89 #endif 90 + #define __pte(x) ((pte_t) { (x) } ) 91 + 92 + /* 93 + * For 3-level pagetables we define these ourselves, for 2-level the 94 + * definitions are supplied by <asm-generic/pgtable-nopmd.h>. 95 + */ 96 + #ifdef CONFIG_64BIT 90 97 91 98 typedef struct { unsigned long pmd; } pmd_t; 92 - typedef struct { unsigned long pgd; } pgd_t; 93 - typedef struct { unsigned long pgprot; } pgprot_t; 94 - 95 99 #define pmd_val(x) ((x).pmd) 96 - #define pgd_val(x) ((x).pgd) 97 - #define pgprot_val(x) ((x).pgprot) 98 - 99 - #define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) 100 - 101 - #define __pte(x) ((pte_t) { (x) } ) 102 100 #define __pmd(x) ((pmd_t) { (x) } ) 101 + 102 + #endif 103 + 104 + /* 105 + * Right now we don't support 4-level pagetables, so all pud-related 106 + * definitions come from <asm-generic/pgtable-nopud.h>. 107 + */ 108 + 109 + /* 110 + * Finally, the top of the hierarchy, the pgd 111 + */ 112 + typedef struct { unsigned long pgd; } pgd_t; 113 + #define pgd_val(x) ((x).pgd) 103 114 #define __pgd(x) ((pgd_t) { (x) } ) 115 + 116 + /* 117 + * Manipulate page protection bits 118 + */ 119 + typedef struct { unsigned long pgprot; } pgprot_t; 120 + #define pgprot_val(x) ((x).pgprot) 104 121 #define __pgprot(x) ((pgprot_t) { (x) } ) 122 + 123 + /* 124 + * On R4000-style MMUs where a TLB entry is mapping an adjacent even / odd 125 + * pair of pages we only have a single global bit per pair of pages. When 126 + * writing to the TLB make sure we always have the bit set for both pages 127 + * or none. This macro is used to access the `buddy' of the pte we're just 128 + * working on. 129 + */ 130 + #define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) 105 131 106 132 #endif /* !__ASSEMBLY__ */ 107 133
+14 -5
include/asm-mips/pgalloc.h
··· 26 26 } 27 27 28 28 /* 29 + * Initialize a new pmd table with invalid pointers. 30 + */ 31 + extern void pmd_init(unsigned long page, unsigned long pagetable); 32 + 33 + #ifdef CONFIG_64BIT 34 + 35 + static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) 36 + { 37 + set_pud(pud, __pud((unsigned long)pmd)); 38 + } 39 + #endif 40 + 41 + /* 29 42 * Initialize a new pgd / pmd table with invalid pointers. 30 43 */ 31 44 extern void pgd_init(unsigned long page); 32 - extern void pmd_init(unsigned long page, unsigned long pagetable); 33 45 34 46 static inline pgd_t *pgd_alloc(struct mm_struct *mm) 35 47 { ··· 98 86 #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) 99 87 100 88 #ifdef CONFIG_32BIT 101 - #define pgd_populate(mm, pmd, pte) BUG() 102 89 103 90 /* 104 91 * allocating and freeing a pmd is trivial: the 1-entry pmd is 105 92 * inside the pgd, so has no extra memory associated with it. 106 93 */ 107 - #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) 108 94 #define pmd_free(x) do { } while (0) 109 95 #define __pmd_free_tlb(tlb,x) do { } while (0) 96 + 110 97 #endif 111 98 112 99 #ifdef CONFIG_64BIT 113 - 114 - #define pgd_populate(mm, pgd, pmd) set_pgd(pgd, __pgd(pmd)) 115 100 116 101 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) 117 102 {
+12 -28
include/asm-mips/pgtable-32.h
··· 17 17 #include <asm/cachectl.h> 18 18 #include <asm/fixmap.h> 19 19 20 + #include <asm-generic/pgtable-nopmd.h> 21 + 20 22 /* 21 23 * - add_wired_entry() add a fixed TLB entry, and move wired register 22 24 */ ··· 44 42 */ 45 43 46 44 /* PMD_SHIFT determines the size of the area a second-level page table can map */ 47 - #ifdef CONFIG_64BIT_PHYS_ADDR 48 - #define PMD_SHIFT 21 49 - #else 50 - #define PMD_SHIFT 22 51 - #endif 52 45 #define PMD_SIZE (1UL << PMD_SHIFT) 53 46 #define PMD_MASK (~(PMD_SIZE-1)) 54 47 55 48 /* PGDIR_SHIFT determines what a third-level page table entry can map */ 56 - #define PGDIR_SHIFT PMD_SHIFT 49 + #ifdef CONFIG_64BIT_PHYS_ADDR 50 + #define PGDIR_SHIFT 21 51 + #else 52 + #define PGDIR_SHIFT 22 53 + #endif 57 54 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 58 55 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 59 56 60 57 /* 61 58 * Entries per page directory level: we use two-level, so 62 - * we don't really have any PMD directory physically. 59 + * we don't really have any PUD/PMD directory physically. 
63 60 */ 64 61 #ifdef CONFIG_64BIT_PHYS_ADDR 65 62 #define PGD_ORDER 1 66 - #define PMD_ORDER 0 63 + #define PUD_ORDER aieeee_attempt_to_allocate_pud 64 + #define PMD_ORDER 1 67 65 #define PTE_ORDER 0 68 66 #else 69 67 #define PGD_ORDER 0 70 - #define PMD_ORDER 0 68 + #define PUD_ORDER aieeee_attempt_to_allocate_pud 69 + #define PMD_ORDER 1 71 70 #define PTE_ORDER 0 72 71 #endif 73 72 74 73 #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) 75 - #define PTRS_PER_PMD 1 76 74 #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) 77 75 78 76 #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) ··· 93 91 #define pte_ERROR(e) \ 94 92 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 95 93 #endif 96 - #define pmd_ERROR(e) \ 97 - printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) 98 94 #define pgd_ERROR(e) \ 99 95 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) 100 96 ··· 119 119 { 120 120 pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); 121 121 } 122 - 123 - /* 124 - * The "pgd_xxx()" functions here are trivial for a folded two-level 125 - * setup: the pgd is never bad, and a pmd always exists (as it's folded 126 - * into the pgd entry) 127 - */ 128 - static inline int pgd_none(pgd_t pgd) { return 0; } 129 - static inline int pgd_bad(pgd_t pgd) { return 0; } 130 - static inline int pgd_present(pgd_t pgd) { return 1; } 131 - static inline void pgd_clear(pgd_t *pgdp) { } 132 122 133 123 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 134 124 #define pte_page(x) pfn_to_page(pte_pfn(x)) ··· 155 165 156 166 /* to find an entry in a page-table-directory */ 157 167 #define pgd_offset(mm,addr) ((mm)->pgd + pgd_index(addr)) 158 - 159 - /* Find an entry in the second-level page table.. */ 160 - static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address) 161 - { 162 - return (pmd_t *) dir; 163 - } 164 168 165 169 /* Find an entry in the third-level page table.. 
*/ 166 170 #define __pte_offset(address) \
+33 -24
include/asm-mips/pgtable-64.h
··· 16 16 #include <asm/page.h> 17 17 #include <asm/cachectl.h> 18 18 19 + #include <asm-generic/pgtable-nopud.h> 20 + 19 21 /* 20 22 * Each address space has 2 4K pages as its page directory, giving 1024 21 23 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a 22 - * pair of 4K pages, giving 1024 (== PTRS_PER_PMD) 8 byte pointers to 23 - * page tables. Each page table is a single 4K page, giving 512 (== 24 - * PTRS_PER_PTE) 8 byte ptes. Each pgde is initialized to point to 25 - * invalid_pmd_table, each pmde is initialized to point to 24 + * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page 25 + * tables. Each page table is also a single 4K page, giving 512 (== 26 + * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to 27 + * invalid_pmd_table, each pmd entry is initialized to point to 26 28 * invalid_pte_table, each pte is initialized to 0. When memory is low, 27 29 * and a pmd table or a page table allocation fails, empty_bad_pmd_table 28 30 * and empty_bad_page_table is returned back to higher layer code, so ··· 38 36 */ 39 37 40 38 /* PMD_SHIFT determines the size of the area a second-level page table can map */ 41 - #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) 39 + #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3)) 42 40 #define PMD_SIZE (1UL << PMD_SHIFT) 43 41 #define PMD_MASK (~(PMD_SIZE-1)) 44 42 45 43 /* PGDIR_SHIFT determines what a third-level page table entry can map */ 46 - #define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + 1 - 3)) 44 + #define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) 47 45 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 48 46 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 49 47 50 48 /* 51 - * For 4kB page size we use a 3 level page tree and a 8kB pmd and pgds which 49 + * For 4kB page size we use a 3 level page tree and an 8kB pud, which 52 50 * permits us mapping 40 bits of virtual address space. 
53 51 * 54 52 * We used to implement 41 bits by having an order 1 pmd level but that seemed ··· 67 65 */ 68 66 #ifdef CONFIG_PAGE_SIZE_4KB 69 67 #define PGD_ORDER 1 68 + #define PUD_ORDER aieeee_attempt_to_allocate_pud 70 69 #define PMD_ORDER 0 71 70 #define PTE_ORDER 0 72 71 #endif 73 72 #ifdef CONFIG_PAGE_SIZE_8KB 74 73 #define PGD_ORDER 0 74 + #define PUD_ORDER aieeee_attempt_to_allocate_pud 75 75 #define PMD_ORDER 0 76 76 #define PTE_ORDER 0 77 77 #endif 78 78 #ifdef CONFIG_PAGE_SIZE_16KB 79 79 #define PGD_ORDER 0 80 + #define PUD_ORDER aieeee_attempt_to_allocate_pud 80 81 #define PMD_ORDER 0 81 82 #define PTE_ORDER 0 82 83 #endif 83 84 #ifdef CONFIG_PAGE_SIZE_64KB 84 85 #define PGD_ORDER 0 86 + #define PUD_ORDER aieeee_attempt_to_allocate_pud 85 87 #define PMD_ORDER 0 86 88 #define PTE_ORDER 0 87 89 #endif ··· 108 102 #define pgd_ERROR(e) \ 109 103 printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) 110 104 111 - extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)]; 112 - extern pte_t empty_bad_page_table[PAGE_SIZE/sizeof(pte_t)]; 113 - extern pmd_t invalid_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)]; 114 - extern pmd_t empty_bad_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)]; 105 + extern pte_t invalid_pte_table[PTRS_PER_PTE]; 106 + extern pte_t empty_bad_page_table[PTRS_PER_PTE]; 107 + extern pmd_t invalid_pmd_table[PTRS_PER_PMD]; 108 + extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD]; 115 109 116 110 /* 117 111 * Empty pmd entries point to the invalid_pte_table. ··· 136 130 /* 137 131 * Empty pgd entries point to the invalid_pmd_table. 
138 132 */ 139 - static inline int pgd_none(pgd_t pgd) 133 + static inline int pud_none(pud_t pud) 140 134 { 141 - return pgd_val(pgd) == (unsigned long) invalid_pmd_table; 135 + return pud_val(pud) == (unsigned long) invalid_pmd_table; 142 136 } 143 137 144 - #define pgd_bad(pgd) (pgd_val(pgd) &~ PAGE_MASK) 145 - 146 - static inline int pgd_present(pgd_t pgd) 138 + static inline int pud_bad(pud_t pud) 147 139 { 148 - return pgd_val(pgd) != (unsigned long) invalid_pmd_table; 140 + return pud_val(pud) & ~PAGE_MASK; 149 141 } 150 142 151 - static inline void pgd_clear(pgd_t *pgdp) 143 + static inline int pud_present(pud_t pud) 152 144 { 153 - pgd_val(*pgdp) = ((unsigned long) invalid_pmd_table); 145 + return pud_val(pud) != (unsigned long) invalid_pmd_table; 146 + } 147 + 148 + static inline void pud_clear(pud_t *pudp) 149 + { 150 + pud_val(*pudp) = ((unsigned long) invalid_pmd_table); 154 151 } 155 152 156 153 #define pte_page(x) pfn_to_page((unsigned long)((pte_val(x) >> PAGE_SHIFT))) ··· 171 162 /* to find an entry in a kernel page-table-directory */ 172 163 #define pgd_offset_k(address) pgd_offset(&init_mm, 0) 173 164 174 - #define pgd_index(address) ((address) >> PGDIR_SHIFT) 165 + #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 175 166 176 167 /* to find an entry in a page-table-directory */ 177 168 #define pgd_offset(mm,addr) ((mm)->pgd + pgd_index(addr)) 178 169 179 - static inline unsigned long pgd_page(pgd_t pgd) 170 + static inline unsigned long pud_page(pud_t pud) 180 171 { 181 - return pgd_val(pgd); 172 + return pud_val(pud); 182 173 } 183 174 184 175 /* Find an entry in the second-level page table.. */ 185 - static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address) 176 + static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address) 186 177 { 187 - return (pmd_t *) pgd_page(*dir) + 178 + return (pmd_t *) pud_page(*pud) + 188 179 ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); 189 180 } 190 181
+9 -4
include/asm-mips/pgtable.h
··· 8 8 #ifndef _ASM_PGTABLE_H 9 9 #define _ASM_PGTABLE_H 10 10 11 - #include <asm-generic/4level-fixup.h> 12 - 13 11 #include <linux/config.h> 14 12 #ifdef CONFIG_32BIT 15 13 #include <asm/pgtable-32.h> ··· 146 148 #endif 147 149 148 150 /* 149 - * (pmds are folded into pgds so this doesn't get actually called, 151 + * (pmds are folded into puds so this doesn't get actually called, 150 152 * but the define is needed for a generic inline function.) 151 153 */ 152 154 #define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0) 153 - #define set_pgd(pgdptr, pgdval) do { *(pgdptr) = (pgdval); } while(0) 155 + 156 + #ifdef CONFIG_64BIT 157 + /* 158 + * (puds are folded into pgds so this doesn't get actually called, 159 + * but the define is needed for a generic inline function.) 160 + */ 161 + #define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0) 162 + #endif 154 163 155 164 #define PGD_T_LOG2 ffz(~sizeof(pgd_t)) 156 165 #define PMD_T_LOG2 ffz(~sizeof(pmd_t))