Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Add partial 32-bit huge page support

This adds initial support for huge pages to 32-bit MIPS systems.
Systems with extended addressing enabled (EVA, XPA, Alchemy/Netlogic)
are not yet supported.
With huge pages enabled, this implementation will increase page table
memory overhead to match that of a 64-bit MIPS system. However, the
cache-friendliness of page table walks is not affected significantly.

Signed-off-by: Daniel Silsby <dansilsby@gmail.com>
Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: James Hogan <jhogan@kernel.org>
Cc: od@zcrc.me
Cc: linux-mips@vger.kernel.org
Cc: linux-kernel@vger.kernel.org

authored by

Daniel Silsby and committed by
Paul Burton
35476311 171543e7

+73 -7
+51 -5
arch/mips/include/asm/pgtable-32.h
··· 23 23 #include <asm/highmem.h> 24 24 #endif 25 25 26 + /* 27 + * Regarding 32-bit MIPS huge page support (and the tradeoff it entails): 28 + * 29 + * We use the same huge page sizes as 64-bit MIPS. Assuming a 4KB page size, 30 + * our 2-level table layout would normally have a PGD entry cover a contiguous 31 + * 4MB virtual address region (pointing to a 4KB PTE page of 1,024 32-bit pte_t 32 + * pointers, each pointing to a 4KB physical page). The problem is that 4MB, 33 + * spanning both halves of a TLB EntryLo0,1 pair, requires 2MB hardware page 34 + * support, not one of the standard supported sizes (1MB,4MB,16MB,...). 35 + * To correct for this, when huge pages are enabled, we halve the number of 36 + * pointers a PTE page holds, making its last half go to waste. Correspondingly, 37 + * we double the number of PGD pages. Overall, page table memory overhead 38 + * increases to match 64-bit MIPS, but PTE lookups remain CPU cache-friendly. 39 + * 40 + * NOTE: We don't yet support huge pages if extended-addressing is enabled 41 + * (i.e. EVA, XPA, 36-bit Alchemy/Netlogic). 42 + */ 43 + 26 44 extern int temp_tlb_entry; 27 45 28 46 /* ··· 62 44 */ 63 45 64 46 /* PGDIR_SHIFT determines what a third-level page table entry can map */ 65 - #define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2) 47 + #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) 48 + # define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1) 49 + #else 50 + # define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2) 51 + #endif 52 + 66 53 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 67 54 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 68 55 ··· 75 52 * Entries per page directory level: we use two-level, so 76 53 * we don't really have any PUD/PMD directory physically. 
77 54 */ 78 - #define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2) 55 + #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) 56 + # define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1) 57 + #else 58 + # define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2) 59 + #endif 60 + 79 61 #define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0) 80 62 #define PUD_ORDER aieeee_attempt_to_allocate_pud 81 63 #define PMD_ORDER 1 82 64 #define PTE_ORDER 0 83 65 84 66 #define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2) 85 - #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) 67 + #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) 68 + # define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2) 69 + #else 70 + # define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) 71 + #endif 86 72 87 73 #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) 88 74 #define FIRST_USER_ADDRESS 0UL ··· 119 87 120 88 extern void load_pgd(unsigned long pg_dir); 121 89 122 - extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)]; 90 + extern pte_t invalid_pte_table[PTRS_PER_PTE]; 123 91 124 92 /* 125 93 * Empty pgd/pmd entries point to the invalid_pte_table. 
··· 129 97 return pmd_val(pmd) == (unsigned long) invalid_pte_table; 130 98 } 131 99 132 - #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) 100 + static inline int pmd_bad(pmd_t pmd) 101 + { 102 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 103 + /* pmd_huge(pmd) but inline */ 104 + if (unlikely(pmd_val(pmd) & _PAGE_HUGE)) 105 + return 0; 106 + #endif 107 + 108 + if (unlikely(pmd_val(pmd) & ~PAGE_MASK)) 109 + return 1; 110 + 111 + return 0; 112 + } 133 113 134 114 static inline int pmd_present(pmd_t pmd) 135 115 { ··· 190 146 #else 191 147 #define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT)) 192 148 #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot)) 149 + #define pfn_pmd(pfn, prot) __pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot)) 193 150 #endif 194 151 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */ 195 152 ··· 204 159 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 205 160 206 161 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 162 + #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) 207 163 208 164 /* to find an entry in a page-table-directory */ 209 165 #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+2 -2
arch/mips/include/asm/pgtable-bits.h
··· 110 110 _PAGE_WRITE_SHIFT, 111 111 _PAGE_ACCESSED_SHIFT, 112 112 _PAGE_MODIFIED_SHIFT, 113 - #if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) 113 + #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) 114 114 _PAGE_HUGE_SHIFT, 115 115 #endif 116 116 ··· 132 132 #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) 133 133 #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) 134 134 #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 135 - #if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) 135 + #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) 136 136 # define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) 137 137 #endif 138 138
+20
arch/mips/mm/pgtable-32.c
··· 12 12 #include <asm/fixmap.h> 13 13 #include <asm/pgtable.h> 14 14 #include <asm/pgalloc.h> 15 + #include <asm/tlbflush.h> 15 16 16 17 void pgd_init(unsigned long page) 17 18 { ··· 30 29 p[i + 7] = (unsigned long) invalid_pte_table; 31 30 } 32 31 } 32 + 33 + #if defined(CONFIG_TRANSPARENT_HUGEPAGE) 34 + pmd_t mk_pmd(struct page *page, pgprot_t prot) 35 + { 36 + pmd_t pmd; 37 + 38 + pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot); 39 + 40 + return pmd; 41 + } 42 + 43 + 44 + void set_pmd_at(struct mm_struct *mm, unsigned long addr, 45 + pmd_t *pmdp, pmd_t pmd) 46 + { 47 + *pmdp = pmd; 48 + flush_tlb_all(); 49 + } 50 + #endif /* defined(CONFIG_TRANSPARENT_HUGEPAGE) */ 33 51 34 52 void __init pagetable_init(void) 35 53 {