Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Implement Read Inhibit/eXecute Inhibit

The SmartMIPS ASE specifies how Read Inhibit (RI) and eXecute Inhibit
(XI) bits in the page tables work. The upper two bits of EntryLo{0,1}
are RI and XI when the feature is enabled in the PageGrain register.
SmartMIPS only covers 32-bit systems. Cavium Octeon+ extends this to
64-bit systems by continuing to place the RI and XI bits in the top of
EntryLo even when EntryLo is 64-bits wide.

Because we need to carry the RI and XI bits in the PTE, the layout of
the PTE is changed. There is a two instruction overhead in the TLB
refill hot path to get the EntryLo bits into the proper position.
Also the TLB load exception has to probe the TLB to check if RI or XI
caused the exception.

Also of note is that the layout of the PTE bits is determined at compile
time and run time rather than statically. In the 32-bit case this allows for
the same number of PFN bits as before the patch, since _PAGE_HUGE is
not supported in 32-bit kernels (we have _PAGE_NO_EXEC and
_PAGE_NO_READ instead of _PAGE_READ and _PAGE_HUGE).

The patch is tested on Cavium Octeon+, but should also work on 32-bit
systems with the SmartMIPS ASE.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/952/
Patchwork: http://patchwork.linux-mips.org/patch/956/
Patchwork: http://patchwork.linux-mips.org/patch/962/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by

David Daney and committed by
Ralf Baechle
6dd9344c 32546f38

+349 -80
+3
arch/mips/include/asm/cpu-features.h
··· 95 95 #ifndef cpu_has_smartmips 96 96 #define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS) 97 97 #endif 98 + #ifndef kernel_uses_smartmips_rixi 99 + #define kernel_uses_smartmips_rixi 0 100 + #endif 98 101 #ifndef cpu_has_vtag_icache 99 102 #define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG) 100 103 #endif
+2 -2
arch/mips/include/asm/pgtable-32.h
··· 127 127 #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2))) 128 128 #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot)) 129 129 #else 130 - #define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT)) 131 - #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 130 + #define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT)) 131 + #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot)) 132 132 #endif 133 133 #endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */ 134 134
+2 -2
arch/mips/include/asm/pgtable-64.h
··· 211 211 #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2))) 212 212 #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot)) 213 213 #else 214 - #define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT)) 215 - #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) 214 + #define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT)) 215 + #define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) 216 216 #endif 217 217 218 218 #define __pgd_offset(address) pgd_index(address)
+106 -16
arch/mips/include/asm/pgtable-bits.h
··· 50 50 #define _CACHE_SHIFT 3 51 51 #define _CACHE_MASK (7<<3) 52 52 53 - #else 53 + #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 54 54 55 55 #define _PAGE_PRESENT (1<<0) /* implemented in software */ 56 56 #define _PAGE_READ (1<<1) /* implemented in software */ ··· 58 58 #define _PAGE_ACCESSED (1<<3) /* implemented in software */ 59 59 #define _PAGE_MODIFIED (1<<4) /* implemented in software */ 60 60 #define _PAGE_FILE (1<<4) /* set:pagecache unset:swap */ 61 - 62 - #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 63 61 64 62 #define _PAGE_GLOBAL (1<<8) 65 63 #define _PAGE_VALID (1<<9) ··· 67 69 #define _CACHE_UNCACHED (1<<11) 68 70 #define _CACHE_MASK (1<<11) 69 71 72 + #else /* 'Normal' r4K case */ 73 + /* 74 + * When using the RI/XI bit support, we have 13 bits of flags below 75 + * the physical address. The RI/XI bits are placed such that a SRL 5 76 + * can strip off the software bits, then a ROTR 2 can move the RI/XI 77 + * into bits [63:62]. This also limits physical address to 56 bits, 78 + * which is more than we need right now. 79 + */ 80 + 81 + /* implemented in software */ 82 + #define _PAGE_PRESENT_SHIFT (0) 83 + #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) 84 + /* implemented in software, should be unused if kernel_uses_smartmips_rixi. */ 85 + #define _PAGE_READ_SHIFT (kernel_uses_smartmips_rixi ? 
_PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1) 86 + #define _PAGE_READ ({if (kernel_uses_smartmips_rixi) BUG(); 1 << _PAGE_READ_SHIFT; }) 87 + /* implemented in software */ 88 + #define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1) 89 + #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) 90 + /* implemented in software */ 91 + #define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1) 92 + #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) 93 + /* implemented in software */ 94 + #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) 95 + #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 96 + /* set:pagecache unset:swap */ 97 + #define _PAGE_FILE (_PAGE_MODIFIED) 98 + 99 + #ifdef CONFIG_HUGETLB_PAGE 100 + /* huge tlb page */ 101 + #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1) 102 + #define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) 70 103 #else 71 - 72 - #define _PAGE_R4KBUG (1<<5) /* workaround for r4k bug */ 73 - #define _PAGE_HUGE (1<<5) /* huge tlb page */ 74 - #define _PAGE_GLOBAL (1<<6) 75 - #define _PAGE_VALID (1<<7) 76 - #define _PAGE_SILENT_READ (1<<7) /* synonym */ 77 - #define _PAGE_DIRTY (1<<8) /* The MIPS dirty bit */ 78 - #define _PAGE_SILENT_WRITE (1<<8) 79 - #define _CACHE_SHIFT 9 80 - #define _CACHE_MASK (7<<9) 81 - 104 + #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT) 105 + #define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */ 82 106 #endif 107 + 108 + /* Page cannot be executed */ 109 + #define _PAGE_NO_EXEC_SHIFT (kernel_uses_smartmips_rixi ? _PAGE_HUGE_SHIFT + 1 : _PAGE_HUGE_SHIFT) 110 + #define _PAGE_NO_EXEC ({if (!kernel_uses_smartmips_rixi) BUG(); 1 << _PAGE_NO_EXEC_SHIFT; }) 111 + 112 + /* Page cannot be read */ 113 + #define _PAGE_NO_READ_SHIFT (kernel_uses_smartmips_rixi ? 
_PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT) 114 + #define _PAGE_NO_READ ({if (!kernel_uses_smartmips_rixi) BUG(); 1 << _PAGE_NO_READ_SHIFT; }) 115 + 116 + #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) 117 + #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) 118 + 119 + #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) 120 + #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) 121 + /* synonym */ 122 + #define _PAGE_SILENT_READ (_PAGE_VALID) 123 + 124 + /* The MIPS dirty bit */ 125 + #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) 126 + #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) 127 + #define _PAGE_SILENT_WRITE (_PAGE_DIRTY) 128 + 129 + #define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1) 130 + #define _CACHE_MASK (7 << _CACHE_SHIFT) 131 + 132 + #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) 133 + 83 134 #endif /* defined(CONFIG_64BIT_PHYS_ADDR && defined(CONFIG_CPU_MIPS32) */ 84 135 136 + #ifndef _PFN_SHIFT 137 + #define _PFN_SHIFT PAGE_SHIFT 138 + #endif 139 + #define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1)) 140 + 141 + #ifndef _PAGE_NO_READ 142 + #define _PAGE_NO_READ ({BUG(); 0; }) 143 + #define _PAGE_NO_READ_SHIFT ({BUG(); 0; }) 144 + #endif 145 + #ifndef _PAGE_NO_EXEC 146 + #define _PAGE_NO_EXEC ({BUG(); 0; }) 147 + #endif 148 + #ifndef _PAGE_GLOBAL_SHIFT 149 + #define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL) 150 + #endif 151 + 152 + 153 + #ifndef __ASSEMBLY__ 154 + /* 155 + * pte_to_entrylo converts a page table entry (PTE) into a Mips 156 + * entrylo0/1 value. 157 + */ 158 + static inline uint64_t pte_to_entrylo(unsigned long pte_val) 159 + { 160 + if (kernel_uses_smartmips_rixi) { 161 + int sa; 162 + #ifdef CONFIG_32BIT 163 + sa = 31 - _PAGE_NO_READ_SHIFT; 164 + #else 165 + sa = 63 - _PAGE_NO_READ_SHIFT; 166 + #endif 167 + /* 168 + * C has no way to express that this is a DSRL 169 + * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2. 
Luckily 170 + * in the fast path this is done in assembly 171 + */ 172 + return (pte_val >> _PAGE_GLOBAL_SHIFT) | 173 + ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa); 174 + } 175 + 176 + return pte_val >> _PAGE_GLOBAL_SHIFT; 177 + } 178 + #endif 85 179 86 180 /* 87 181 * Cache attributes ··· 220 130 221 131 #endif 222 132 223 - #define __READABLE (_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED) 133 + #define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (kernel_uses_smartmips_rixi ? 0 : _PAGE_READ)) 224 134 #define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED) 225 135 226 - #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK) 136 + #define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK) 227 137 228 138 #endif /* _ASM_PGTABLE_BITS_H */
+16 -10
arch/mips/include/asm/pgtable.h
··· 22 22 struct vm_area_struct; 23 23 24 24 #define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT) 25 - #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 25 + #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | (kernel_uses_smartmips_rixi ? 0 : _PAGE_READ) | \ 26 26 _page_cachable_default) 27 - #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | \ 28 - _page_cachable_default) 29 - #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \ 27 + #define PAGE_COPY __pgprot(_PAGE_PRESENT | (kernel_uses_smartmips_rixi ? 0 : _PAGE_READ) | \ 28 + (kernel_uses_smartmips_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default) 29 + #define PAGE_READONLY __pgprot(_PAGE_PRESENT | (kernel_uses_smartmips_rixi ? 0 : _PAGE_READ) | \ 30 30 _page_cachable_default) 31 31 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ 32 32 _PAGE_GLOBAL | _page_cachable_default) 33 - #define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 33 + #define PAGE_USERIO __pgprot(_PAGE_PRESENT | (kernel_uses_smartmips_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \ 34 34 _page_cachable_default) 35 35 #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ 36 36 __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED) 37 37 38 38 /* 39 - * MIPS can't do page protection for execute, and considers that the same like 40 - * read. Also, write permissions imply read permissions. This is the closest 41 - * we can get by reasonable means.. 39 + * If _PAGE_NO_EXEC is not defined, we can't do page protection for 40 + * execute, and consider it to be the same as read. Also, write 41 + * permissions imply read permissions. This is the closest we can get 42 + * by reasonable means.. 
42 43 */ 43 44 44 45 /* ··· 299 298 static inline pte_t pte_mkyoung(pte_t pte) 300 299 { 301 300 pte_val(pte) |= _PAGE_ACCESSED; 302 - if (pte_val(pte) & _PAGE_READ) 303 - pte_val(pte) |= _PAGE_SILENT_READ; 301 + if (kernel_uses_smartmips_rixi) { 302 + if (!(pte_val(pte) & _PAGE_NO_READ)) 303 + pte_val(pte) |= _PAGE_SILENT_READ; 304 + } else { 305 + if (pte_val(pte) & _PAGE_READ) 306 + pte_val(pte) |= _PAGE_SILENT_READ; 307 + } 304 308 return pte; 305 309 } 306 310
+37 -16
arch/mips/mm/cache.c
··· 137 137 138 138 static inline void setup_protection_map(void) 139 139 { 140 - protection_map[0] = PAGE_NONE; 141 - protection_map[1] = PAGE_READONLY; 142 - protection_map[2] = PAGE_COPY; 143 - protection_map[3] = PAGE_COPY; 144 - protection_map[4] = PAGE_READONLY; 145 - protection_map[5] = PAGE_READONLY; 146 - protection_map[6] = PAGE_COPY; 147 - protection_map[7] = PAGE_COPY; 148 - protection_map[8] = PAGE_NONE; 149 - protection_map[9] = PAGE_READONLY; 150 - protection_map[10] = PAGE_SHARED; 151 - protection_map[11] = PAGE_SHARED; 152 - protection_map[12] = PAGE_READONLY; 153 - protection_map[13] = PAGE_READONLY; 154 - protection_map[14] = PAGE_SHARED; 155 - protection_map[15] = PAGE_SHARED; 140 + if (kernel_uses_smartmips_rixi) { 141 + protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); 142 + protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 143 + protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); 144 + protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 145 + protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 146 + protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 147 + protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 148 + protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 149 + 150 + protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); 151 + protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 152 + protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); 153 + protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); 154 + protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | 
_PAGE_NO_READ); 155 + protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 156 + protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ); 157 + protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); 158 + 159 + } else { 160 + protection_map[0] = PAGE_NONE; 161 + protection_map[1] = PAGE_READONLY; 162 + protection_map[2] = PAGE_COPY; 163 + protection_map[3] = PAGE_COPY; 164 + protection_map[4] = PAGE_READONLY; 165 + protection_map[5] = PAGE_READONLY; 166 + protection_map[6] = PAGE_COPY; 167 + protection_map[7] = PAGE_COPY; 168 + protection_map[8] = PAGE_NONE; 169 + protection_map[9] = PAGE_READONLY; 170 + protection_map[10] = PAGE_SHARED; 171 + protection_map[11] = PAGE_SHARED; 172 + protection_map[12] = PAGE_READONLY; 173 + protection_map[13] = PAGE_READONLY; 174 + protection_map[14] = PAGE_SHARED; 175 + protection_map[15] = PAGE_SHARED; 176 + } 156 177 } 157 178 158 179 void __cpuinit cpu_cache_init(void)
+25 -2
arch/mips/mm/fault.c
··· 99 99 if (!(vma->vm_flags & VM_WRITE)) 100 100 goto bad_area; 101 101 } else { 102 - if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) 103 - goto bad_area; 102 + if (kernel_uses_smartmips_rixi) { 103 + if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) { 104 + #if 0 105 + pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n", 106 + raw_smp_processor_id(), 107 + current->comm, current->pid, 108 + field, address, write, 109 + field, regs->cp0_epc); 110 + #endif 111 + goto bad_area; 112 + } 113 + if (!(vma->vm_flags & VM_READ)) { 114 + #if 0 115 + pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n", 116 + raw_smp_processor_id(), 117 + current->comm, current->pid, 118 + field, address, write, 119 + field, regs->cp0_epc); 120 + #endif 121 + goto bad_area; 122 + } 123 + } else { 124 + if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) 125 + goto bad_area; 126 + } 104 127 } 105 128 106 129 /*
+1 -1
arch/mips/mm/init.c
··· 143 143 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 144 144 entrylo = pte.pte_high; 145 145 #else 146 - entrylo = pte_val(pte) >> 6; 146 + entrylo = pte_to_entrylo(pte_val(pte)); 147 147 #endif 148 148 149 149 ENTER_CRITICAL(flags);
+16 -3
arch/mips/mm/tlb-r4k.c
··· 303 303 unsigned long lo; 304 304 write_c0_pagemask(PM_HUGE_MASK); 305 305 ptep = (pte_t *)pmdp; 306 - lo = pte_val(*ptep) >> 6; 306 + lo = pte_to_entrylo(pte_val(*ptep)); 307 307 write_c0_entrylo0(lo); 308 308 write_c0_entrylo1(lo + (HPAGE_SIZE >> 7)); 309 309 ··· 323 323 ptep++; 324 324 write_c0_entrylo1(ptep->pte_high); 325 325 #else 326 - write_c0_entrylo0(pte_val(*ptep++) >> 6); 327 - write_c0_entrylo1(pte_val(*ptep) >> 6); 326 + write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++))); 327 + write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep))); 328 328 #endif 329 329 mtc0_tlbw_hazard(); 330 330 if (idx < 0) ··· 437 437 current_cpu_type() == CPU_R12000 || 438 438 current_cpu_type() == CPU_R14000) 439 439 write_c0_framemask(0); 440 + 441 + if (kernel_uses_smartmips_rixi) { 442 + /* 443 + * Enable the no read, no exec bits, and enable large virtual 444 + * address. 445 + */ 446 + u32 pg = PG_RIE | PG_XIE; 447 + #ifdef CONFIG_64BIT 448 + pg |= PG_ELPA; 449 + #endif 450 + write_c0_pagegrain(pg); 451 + } 452 + 440 453 temp_tlb_entry = current_cpu_data.tlbsize - 1; 441 454 442 455 /* From this point on the ARC firmware is dead. */
+141 -28
arch/mips/mm/tlbex.c
··· 76 76 label_vmalloc_done, 77 77 label_tlbw_hazard, 78 78 label_split, 79 + label_tlbl_goaround1, 80 + label_tlbl_goaround2, 79 81 label_nopage_tlbl, 80 82 label_nopage_tlbs, 81 83 label_nopage_tlbm, ··· 94 92 UASM_L_LA(_vmalloc_done) 95 93 UASM_L_LA(_tlbw_hazard) 96 94 UASM_L_LA(_split) 95 + UASM_L_LA(_tlbl_goaround1) 96 + UASM_L_LA(_tlbl_goaround2) 97 97 UASM_L_LA(_nopage_tlbl) 98 98 UASM_L_LA(_nopage_tlbs) 99 99 UASM_L_LA(_nopage_tlbm) ··· 400 396 } 401 397 } 402 398 399 + static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p, 400 + unsigned int reg) 401 + { 402 + if (kernel_uses_smartmips_rixi) { 403 + UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC)); 404 + UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); 405 + } else { 406 + #ifdef CONFIG_64BIT_PHYS_ADDR 407 + uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_GLOBAL)); 408 + #else 409 + UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL)); 410 + #endif 411 + } 412 + } 413 + 403 414 #ifdef CONFIG_HUGETLB_PAGE 415 + 416 + static __cpuinit void build_restore_pagemask(u32 **p, 417 + struct uasm_reloc **r, 418 + unsigned int tmp, 419 + enum label_id lid) 420 + { 421 + /* Reset default page size */ 422 + if (PM_DEFAULT_MASK >> 16) { 423 + uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); 424 + uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); 425 + uasm_il_b(p, r, lid); 426 + uasm_i_mtc0(p, tmp, C0_PAGEMASK); 427 + } else if (PM_DEFAULT_MASK) { 428 + uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); 429 + uasm_il_b(p, r, lid); 430 + uasm_i_mtc0(p, tmp, C0_PAGEMASK); 431 + } else { 432 + uasm_il_b(p, r, lid); 433 + uasm_i_mtc0(p, 0, C0_PAGEMASK); 434 + } 435 + } 436 + 404 437 static __cpuinit void build_huge_tlb_write_entry(u32 **p, 405 438 struct uasm_label **l, 406 439 struct uasm_reloc **r, ··· 451 410 452 411 build_tlb_write_entry(p, l, r, wmode); 453 412 454 - /* Reset default page size */ 455 - if (PM_DEFAULT_MASK >> 16) { 456 - uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); 457 - uasm_i_ori(p, tmp, tmp, 
PM_DEFAULT_MASK & 0xffff); 458 - uasm_il_b(p, r, label_leave); 459 - uasm_i_mtc0(p, tmp, C0_PAGEMASK); 460 - } else if (PM_DEFAULT_MASK) { 461 - uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); 462 - uasm_il_b(p, r, label_leave); 463 - uasm_i_mtc0(p, tmp, C0_PAGEMASK); 464 - } else { 465 - uasm_il_b(p, r, label_leave); 466 - uasm_i_mtc0(p, 0, C0_PAGEMASK); 467 - } 413 + build_restore_pagemask(p, r, tmp, label_leave); 468 414 } 469 415 470 416 /* ··· 487 459 if (!small_sequence) 488 460 uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); 489 461 490 - UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */ 462 + build_convert_pte_to_entrylo(p, pte); 491 463 UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */ 492 464 /* convert to entrylo1 */ 493 465 if (small_sequence) ··· 713 685 if (cpu_has_64bits) { 714 686 uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ 715 687 uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 716 - uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */ 717 - UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 718 - uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */ 688 + if (kernel_uses_smartmips_rixi) { 689 + UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC)); 690 + UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC)); 691 + UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); 692 + UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 693 + UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); 694 + } else { 695 + uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ 696 + UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 697 + uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ 698 + } 719 699 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 720 700 } else { 721 701 int pte_off_even = sizeof(pte_t) / 2; ··· 740 704 UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 741 705 if (r45k_bvahwbug()) 742 706 build_tlb_probe_entry(p); 743 - UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */ 744 
- if (r4k_250MHZhwbug()) 745 - UASM_i_MTC0(p, 0, C0_ENTRYLO0); 746 - UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 747 - UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */ 748 - if (r45k_bvahwbug()) 749 - uasm_i_mfc0(p, tmp, C0_INDEX); 707 + if (kernel_uses_smartmips_rixi) { 708 + UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC)); 709 + UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC)); 710 + UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); 711 + if (r4k_250MHZhwbug()) 712 + UASM_i_MTC0(p, 0, C0_ENTRYLO0); 713 + UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 714 + UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); 715 + } else { 716 + UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ 717 + if (r4k_250MHZhwbug()) 718 + UASM_i_MTC0(p, 0, C0_ENTRYLO0); 719 + UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 720 + UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ 721 + if (r45k_bvahwbug()) 722 + uasm_i_mfc0(p, tmp, C0_INDEX); 723 + } 750 724 if (r4k_250MHZhwbug()) 751 725 UASM_i_MTC0(p, 0, C0_ENTRYLO1); 752 726 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ ··· 1032 986 build_pte_present(u32 **p, struct uasm_reloc **r, 1033 987 unsigned int pte, unsigned int ptr, enum label_id lid) 1034 988 { 1035 - uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); 1036 - uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); 1037 - uasm_il_bnez(p, r, pte, lid); 989 + if (kernel_uses_smartmips_rixi) { 990 + uasm_i_andi(p, pte, pte, _PAGE_PRESENT); 991 + uasm_il_beqz(p, r, pte, lid); 992 + } else { 993 + uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); 994 + uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); 995 + uasm_il_bnez(p, r, pte, lid); 996 + } 1038 997 iPTE_LW(p, pte, ptr); 1039 998 } 1040 999 ··· 1324 1273 build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); 1325 1274 if (m4kc_tlbp_war()) 1326 1275 build_tlb_probe_entry(&p); 1276 + 1277 + if (kernel_uses_smartmips_rixi) { 1278 
+ /* 1279 + * If the page is not _PAGE_VALID, RI or XI could not 1280 + * have triggered it. Skip the expensive test.. 1281 + */ 1282 + uasm_i_andi(&p, K0, K0, _PAGE_VALID); 1283 + uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1); 1284 + uasm_i_nop(&p); 1285 + 1286 + uasm_i_tlbr(&p); 1287 + /* Examine entrylo 0 or 1 based on ptr. */ 1288 + uasm_i_andi(&p, K0, K1, sizeof(pte_t)); 1289 + uasm_i_beqz(&p, K0, 8); 1290 + 1291 + UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/ 1292 + UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */ 1293 + /* 1294 + * If the entryLo (now in K0) is valid (bit 1), RI or 1295 + * XI must have triggered it. 1296 + */ 1297 + uasm_i_andi(&p, K0, K0, 2); 1298 + uasm_il_bnez(&p, &r, K0, label_nopage_tlbl); 1299 + 1300 + uasm_l_tlbl_goaround1(&l, p); 1301 + /* Reload the PTE value */ 1302 + iPTE_LW(&p, K0, K1); 1303 + } 1327 1304 build_make_valid(&p, &r, K0, K1); 1328 1305 build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); 1329 1306 ··· 1364 1285 iPTE_LW(&p, K0, K1); 1365 1286 build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); 1366 1287 build_tlb_probe_entry(&p); 1288 + 1289 + if (kernel_uses_smartmips_rixi) { 1290 + /* 1291 + * If the page is not _PAGE_VALID, RI or XI could not 1292 + * have triggered it. Skip the expensive test.. 1293 + */ 1294 + uasm_i_andi(&p, K0, K0, _PAGE_VALID); 1295 + uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2); 1296 + uasm_i_nop(&p); 1297 + 1298 + uasm_i_tlbr(&p); 1299 + /* Examine entrylo 0 or 1 based on ptr. */ 1300 + uasm_i_andi(&p, K0, K1, sizeof(pte_t)); 1301 + uasm_i_beqz(&p, K0, 8); 1302 + 1303 + UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/ 1304 + UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */ 1305 + /* 1306 + * If the entryLo (now in K0) is valid (bit 1), RI or 1307 + * XI must have triggered it. 
1308 + */ 1309 + uasm_i_andi(&p, K0, K0, 2); 1310 + uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2); 1311 + /* Reload the PTE value */ 1312 + iPTE_LW(&p, K0, K1); 1313 + 1314 + /* 1315 + * We clobbered C0_PAGEMASK, restore it. On the other branch 1316 + * it is restored in build_huge_tlb_write_entry. 1317 + */ 1318 + build_restore_pagemask(&p, &r, K0, label_nopage_tlbl); 1319 + 1320 + uasm_l_tlbl_goaround2(&l, p); 1321 + } 1367 1322 uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID)); 1368 1323 build_huge_handler_tail(&p, &r, &l, K0, K1); 1369 1324 #endif