Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: pgtable: add pud-level code

Add pud_offset() et al. between the pgd and pmd code in preparation for
using pgtable-nopud.h rather than 4level-fixup.h.

This incorporates a fix from Jamie Iles <jamie@jamieiles.com> for
uaccess_with_memcpy.c.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

+116 -22
+1
arch/arm/include/asm/pgtable.h
··· 301 301 #define pgd_present(pgd) (1) 302 302 #define pgd_clear(pgdp) do { } while (0) 303 303 #define set_pgd(pgd,pgdp) do { } while (0) 304 + #define set_pud(pud,pudp) do { } while (0) 304 305 305 306 306 307 /* Find an entry in the second-level page table.. */
+6 -1
arch/arm/lib/uaccess_with_memcpy.c
··· 27 27 pgd_t *pgd; 28 28 pmd_t *pmd; 29 29 pte_t *pte; 30 + pud_t *pud; 30 31 spinlock_t *ptl; 31 32 32 33 pgd = pgd_offset(current->mm, addr); 33 34 if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd))) 34 35 return 0; 35 36 36 - pmd = pmd_offset(pgd, addr); 37 + pud = pud_offset(pgd, addr); 38 + if (unlikely(pud_none(*pud) || pud_bad(*pud))) 39 + return 0; 40 + 41 + pmd = pmd_offset(pud, addr); 37 42 if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd))) 38 43 return 0; 39 44
+10 -1
arch/arm/mm/dma-mapping.c
··· 148 148 { 149 149 int ret = 0; 150 150 pgd_t *pgd; 151 + pud_t *pud; 151 152 pmd_t *pmd; 152 153 pte_t *pte; 153 154 int i = 0; ··· 156 155 157 156 do { 158 157 pgd = pgd_offset(&init_mm, base); 159 - pmd = pmd_alloc(&init_mm, pgd, base); 158 + 159 + pud = pud_alloc(&init_mm, pgd, base); 160 + if (!pud) { 161 + printk(KERN_ERR "%s: no pud tables\n", __func__); 162 + ret = -ENOMEM; 163 + break; 164 + } 165 + 166 + pmd = pmd_alloc(&init_mm, pud, base); 160 167 if (!pmd) { 161 168 printk(KERN_ERR "%s: no pmd tables\n", __func__); 162 169 ret = -ENOMEM;
+6 -1
arch/arm/mm/fault-armv.c
··· 95 95 { 96 96 spinlock_t *ptl; 97 97 pgd_t *pgd; 98 + pud_t *pud; 98 99 pmd_t *pmd; 99 100 pte_t *pte; 100 101 int ret; ··· 104 103 if (pgd_none_or_clear_bad(pgd)) 105 104 return 0; 106 105 107 - pmd = pmd_offset(pgd, address); 106 + pud = pud_offset(pgd, address); 107 + if (pud_none_or_clear_bad(pud)) 108 + return 0; 109 + 110 + pmd = pmd_offset(pud, address); 108 111 if (pmd_none_or_clear_bad(pmd)) 109 112 return 0; 110 113
+25 -4
arch/arm/mm/fault.c
··· 80 80 addr, (long long)pgd_val(*pgd)); 81 81 82 82 do { 83 + pud_t *pud; 83 84 pmd_t *pmd; 84 85 pte_t *pte; 85 86 ··· 92 91 break; 93 92 } 94 93 95 - pmd = pmd_offset(pgd, addr); 94 + pud = pud_offset(pgd, addr); 95 + if (PTRS_PER_PUD != 1) 96 + printk(", *pud=%08lx", pud_val(*pud)); 97 + 98 + if (pud_none(*pud)) 99 + break; 100 + 101 + if (pud_bad(*pud)) { 102 + printk("(bad)"); 103 + break; 104 + } 105 + 106 + pmd = pmd_offset(pud, addr); 96 107 if (PTRS_PER_PMD != 1) 97 108 printk(", *pmd=%08llx", (long long)pmd_val(*pmd)); 98 109 ··· 403 390 { 404 391 unsigned int index; 405 392 pgd_t *pgd, *pgd_k; 393 + pud_t *pud, *pud_k; 406 394 pmd_t *pmd, *pmd_k; 407 395 408 396 if (addr < TASK_SIZE) ··· 422 408 423 409 if (pgd_none(*pgd_k)) 424 410 goto bad_area; 425 - 426 411 if (!pgd_present(*pgd)) 427 412 set_pgd(pgd, *pgd_k); 428 413 429 - pmd_k = pmd_offset(pgd_k, addr); 430 - pmd = pmd_offset(pgd, addr); 414 + pud = pud_offset(pgd, addr); 415 + pud_k = pud_offset(pgd_k, addr); 416 + 417 + if (pud_none(*pud_k)) 418 + goto bad_area; 419 + if (!pud_present(*pud)) 420 + set_pud(pud, *pud_k); 421 + 422 + pmd = pmd_offset(pud, addr); 423 + pmd_k = pmd_offset(pud_k, addr); 431 424 432 425 /* 433 426 * On ARM one Linux PGD entry contains two hardware entries (see page
+29 -6
arch/arm/mm/idmap.c
··· 4 4 #include <asm/pgalloc.h> 5 5 #include <asm/pgtable.h> 6 6 7 - static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end, 7 + static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, 8 8 unsigned long prot) 9 9 { 10 - pmd_t *pmd = pmd_offset(pgd, addr); 10 + pmd_t *pmd = pmd_offset(pud, addr); 11 11 12 12 addr = (addr & PMD_MASK) | prot; 13 13 pmd[0] = __pmd(addr); 14 14 addr += SECTION_SIZE; 15 15 pmd[1] = __pmd(addr); 16 16 flush_pmd_entry(pmd); 17 + } 18 + 19 + static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, 20 + unsigned long prot) 21 + { 22 + pud_t *pud = pud_offset(pgd, addr); 23 + unsigned long next; 24 + 25 + do { 26 + next = pud_addr_end(addr, end); 27 + idmap_add_pmd(pud, addr, next, prot); 28 + } while (pud++, addr = next, addr != end); 17 29 } 18 30 19 31 void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) ··· 39 27 pgd += pgd_index(addr); 40 28 do { 41 29 next = pgd_addr_end(addr, end); 42 - idmap_add_pmd(pgd, addr, next, prot); 30 + idmap_add_pud(pgd, addr, next, prot); 43 31 } while (pgd++, addr = next, addr != end); 44 32 } 45 33 46 34 #ifdef CONFIG_SMP 47 - static void idmap_del_pmd(pgd_t *pgd, unsigned long addr, unsigned long end) 35 + static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end) 48 36 { 49 - pmd_t *pmd = pmd_offset(pgd, addr); 37 + pmd_t *pmd = pmd_offset(pud, addr); 50 38 pmd_clear(pmd); 39 + } 40 + 41 + static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end) 42 + { 43 + pud_t *pud = pud_offset(pgd, addr); 44 + unsigned long next; 45 + 46 + do { 47 + next = pud_addr_end(addr, end); 48 + idmap_del_pmd(pud, addr, next); 49 + } while (pud++, addr = next, addr != end); 51 50 } 52 51 53 52 void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) ··· 68 45 pgd += pgd_index(addr); 69 46 do { 70 47 next = pgd_addr_end(addr, end); 71 - idmap_del_pmd(pgd, addr, next); 48 + 
idmap_del_pmd(pgd, addr, next); 48 + idmap_del_pud(pgd, addr, next); 72 49 } while (pgd++, addr = next, addr != end); 73 50 } 74 51 #endif
+1 -1
arch/arm/mm/mm.h
··· 7 7 8 8 static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt) 9 9 { 10 - return pmd_offset(pgd, virt); 10 + return pmd_offset(pud_offset(pgd, virt), virt); 11 11 } 12 12 13 13 static inline pmd_t *pmd_off_k(unsigned long virt)
+18 -4
arch/arm/mm/mmu.c
··· 550 550 } while (pte++, addr += PAGE_SIZE, addr != end); 551 551 } 552 552 553 - static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, 553 + static void __init alloc_init_section(pud_t *pud, unsigned long addr, 554 554 unsigned long end, phys_addr_t phys, 555 555 const struct mem_type *type) 556 556 { 557 - pmd_t *pmd = pmd_offset(pgd, addr); 557 + pmd_t *pmd = pmd_offset(pud, addr); 558 558 559 559 /* 560 560 * Try a section mapping - end, addr and phys must all be aligned ··· 581 581 */ 582 582 alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); 583 583 } 584 + } 585 + 586 + static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, 587 + unsigned long phys, const struct mem_type *type) 588 + { 589 + pud_t *pud = pud_offset(pgd, addr); 590 + unsigned long next; 591 + 592 + do { 593 + next = pud_addr_end(addr, end); 594 + alloc_init_section(pud, addr, next, phys, type); 595 + phys += next - addr; 596 + } while (pud++, addr = next, addr != end); 584 597 } 585 598 586 599 static void __init create_36bit_mapping(struct map_desc *md, ··· 643 630 pgd = pgd_offset_k(addr); 644 631 end = addr + length; 645 632 do { 646 - pmd_t *pmd = pmd_offset(pgd, addr); 633 + pud_t *pud = pud_offset(pgd, addr); 634 + pmd_t *pmd = pmd_offset(pud, addr); 647 635 int i; 648 636 649 637 for (i = 0; i < 16; i++) ··· 710 696 do { 711 697 unsigned long next = pgd_addr_end(addr, end); 712 698 713 - alloc_init_section(pgd, addr, next, phys, type); 699 + alloc_init_pud(pgd, addr, next, phys, type); 714 700 715 701 phys += next - addr; 716 702 addr = next;
+20 -4
arch/arm/mm/pgd.c
··· 23 23 pgd_t *pgd_alloc(struct mm_struct *mm) 24 24 { 25 25 pgd_t *new_pgd, *init_pgd; 26 + pud_t *new_pud, *init_pud; 26 27 pmd_t *new_pmd, *init_pmd; 27 28 pte_t *new_pte, *init_pte; 28 29 ··· 47 46 * On ARM, first page must always be allocated since it 48 47 * contains the machine vectors. 49 48 */ 50 - new_pmd = pmd_alloc(mm, new_pgd, 0); 49 + new_pud = pud_alloc(mm, new_pgd, 0); 50 + if (!new_pud) 51 + goto no_pud; 52 + 53 + new_pmd = pmd_alloc(mm, new_pud, 0); 51 54 if (!new_pmd) 52 55 goto no_pmd; 53 56 ··· 59 54 if (!new_pte) 60 55 goto no_pte; 61 56 62 - init_pmd = pmd_offset(init_pgd, 0); 57 + init_pud = pud_offset(init_pgd, 0); 58 + init_pmd = pmd_offset(init_pud, 0); 63 59 init_pte = pte_offset_map(init_pmd, 0); 64 60 set_pte_ext(new_pte, *init_pte, 0); 65 61 pte_unmap(init_pte); ··· 72 66 no_pte: 73 67 pmd_free(mm, new_pmd); 74 68 no_pmd: 69 + pud_free(mm, new_pud); 70 + no_pud: 75 71 free_pages((unsigned long)new_pgd, 2); 76 72 no_pgd: 77 73 return NULL; ··· 82 74 void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) 83 75 { 84 76 pgd_t *pgd; 77 + pud_t *pud; 85 78 pmd_t *pmd; 86 79 pgtable_t pte; 87 80 ··· 93 84 if (pgd_none_or_clear_bad(pgd)) 94 85 goto no_pgd; 95 86 96 - pmd = pmd_offset(pgd, 0); 87 + pud = pud_offset(pgd, 0); 88 + if (pud_none_or_clear_bad(pud)) 89 + goto no_pud; 90 + 91 + pmd = pmd_offset(pud, 0); 97 92 if (pmd_none_or_clear_bad(pmd)) 98 93 goto no_pmd; 99 94 ··· 105 92 pmd_clear(pmd); 106 93 pte_free(mm, pte); 107 94 no_pmd: 108 - pgd_clear(pgd); 95 + pud_clear(pud); 109 96 pmd_free(mm, pmd); 97 + no_pud: 98 + pgd_clear(pgd); 99 + pud_free(mm, pud); 110 100 no_pgd: 111 101 free_pages((unsigned long) pgd_base, 2); 112 102 }