Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: mm: Fix MIPS32 36b physical addressing (alchemy, netlogic)

There are 2 distinct cases in which a kernel for a MIPS32 CPU
(CONFIG_CPU_MIPS32=y) may use 64 bit physical addresses
(CONFIG_PHYS_ADDR_T_64BIT=y):

- 36 bit physical addressing as used by RMI Alchemy & Netlogic XLP/XLR
CPUs.

- MIPS32r5 eXtended Physical Addressing (XPA).

These 2 cases are distinct in that they require different behaviour from
the kernel - the EntryLo registers have different formats. Until Linux
v4.1 we only supported the first case, with code conditional upon the 2
aforementioned Kconfig variables being set. Commit c5b367835cfc ("MIPS:
Add support for XPA.") added support for the second case, but did so by
modifying the code that existed for the first case rather than treating
the 2 cases as distinct. Since the EntryLo registers have different
formats this breaks the 36 bit Alchemy/XLP/XLR case. Fix this by
splitting the 2 cases, with XPA cases now being conditional upon
CONFIG_XPA and the non-XPA case matching the code as it existed prior to
commit c5b367835cfc ("MIPS: Add support for XPA.").

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Reported-by: Manuel Lauss <manuel.lauss@gmail.com>
Tested-by: Manuel Lauss <manuel.lauss@gmail.com>
Fixes: c5b367835cfc ("MIPS: Add support for XPA.")
Cc: James Hogan <james.hogan@imgtec.com>
Cc: David Daney <david.daney@cavium.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: Maciej W. Rozycki <macro@linux-mips.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Alex Smith <alex.smith@imgtec.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: stable@vger.kernel.org # v4.1+
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13119/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by

Paul Burton and committed by
Ralf Baechle
7b2cb64f 745f3558

+125 -27
+25 -2
arch/mips/include/asm/pgtable-32.h
··· 103 103 pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); 104 104 } 105 105 106 - #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 106 + #if defined(CONFIG_XPA) 107 107 108 108 #define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT)) 109 109 static inline pte_t ··· 115 115 (pgprot_val(prot) & ~_PFNX_MASK); 116 116 pte.pte_high = (pfn << _PFN_SHIFT) | 117 117 (pgprot_val(prot) & ~_PFN_MASK); 118 + return pte; 119 + } 120 + 121 + #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 122 + 123 + #define pte_pfn(x) ((unsigned long)((x).pte_high >> 6)) 124 + 125 + static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) 126 + { 127 + pte_t pte; 128 + 129 + pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f); 130 + pte.pte_low = pgprot_val(prot); 131 + 118 132 return pte; 119 133 } 120 134 ··· 180 166 181 167 #else 182 168 183 - #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 169 + #if defined(CONFIG_XPA) 184 170 185 171 /* Swap entries must have VALID and GLOBAL bits cleared. */ 186 172 #define __swp_type(x) (((x).val >> 4) & 0x1f) 187 173 #define __swp_offset(x) ((x).val >> 9) 188 174 #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 9) }) 175 + #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high }) 176 + #define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val }) 177 + 178 + #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 179 + 180 + /* Swap entries must have VALID and GLOBAL bits cleared. */ 181 + #define __swp_type(x) (((x).val >> 2) & 0x1f) 182 + #define __swp_offset(x) ((x).val >> 7) 183 + #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) }) 189 184 #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high }) 190 185 #define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val }) 191 186
+25 -4
arch/mips/include/asm/pgtable-bits.h
··· 32 32 * unpredictable things. The code (when it is written) to deal with 33 33 * this problem will be in the update_mmu_cache() code for the r4k. 34 34 */ 35 - #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 35 + #if defined(CONFIG_XPA) 36 36 37 37 /* 38 - * Page table bit offsets used for 64 bit physical addressing on MIPS32, 39 - * for example with Alchemy, Netlogic XLP/XLR or XPA. 38 + * Page table bit offsets used for 64 bit physical addressing on 39 + * MIPS32r5 with XPA. 40 40 */ 41 41 enum pgtable_bits { 42 42 /* Used by TLB hardware (placed in EntryLo*) */ ··· 58 58 * Bits for extended EntryLo0/EntryLo1 registers 59 59 */ 60 60 #define _PFNX_MASK 0xffffff 61 + 62 + #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 63 + 64 + /* 65 + * Page table bit offsets used for 36 bit physical addressing on MIPS32, 66 + * for example with Alchemy or Netlogic XLP/XLR. 67 + */ 68 + enum pgtable_bits { 69 + /* Used by TLB hardware (placed in EntryLo*) */ 70 + _PAGE_GLOBAL_SHIFT, 71 + _PAGE_VALID_SHIFT, 72 + _PAGE_DIRTY_SHIFT, 73 + _CACHE_SHIFT, 74 + 75 + /* Used only by software (masked out before writing EntryLo*) */ 76 + _PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3, 77 + _PAGE_NO_READ_SHIFT, 78 + _PAGE_WRITE_SHIFT, 79 + _PAGE_ACCESSED_SHIFT, 80 + _PAGE_MODIFIED_SHIFT, 81 + }; 61 82 62 83 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 63 84 ··· 137 116 #endif 138 117 139 118 /* Used by TLB hardware (placed in EntryLo*) */ 140 - #if (defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)) 119 + #if defined(CONFIG_XPA) 141 120 # define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT) 142 121 #elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 143 122 # define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
+49 -8
arch/mips/include/asm/pgtable.h
··· 133 133 134 134 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 135 135 136 - #define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL)) 136 + #ifdef CONFIG_XPA 137 + # define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL)) 138 + #else 139 + # define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) 140 + #endif 141 + 137 142 #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) 138 143 #define pte_no_exec(pte) ((pte).pte_low & _PAGE_NO_EXEC) 139 144 ··· 148 143 smp_wmb(); 149 144 ptep->pte_low = pte.pte_low; 150 145 146 + #ifdef CONFIG_XPA 151 147 if (pte.pte_high & _PAGE_GLOBAL) { 148 + #else 149 + if (pte.pte_low & _PAGE_GLOBAL) { 150 + #endif 152 151 pte_t *buddy = ptep_buddy(ptep); 153 152 /* 154 153 * Make sure the buddy is global too (if it's !none, 155 154 * it better already be global) 156 155 */ 157 - if (pte_none(*buddy)) 156 + if (pte_none(*buddy)) { 157 + if (!config_enabled(CONFIG_XPA)) 158 + buddy->pte_low |= _PAGE_GLOBAL; 158 159 buddy->pte_high |= _PAGE_GLOBAL; 160 + } 159 161 } 160 162 } 161 163 ··· 172 160 173 161 htw_stop(); 174 162 /* Preserve global status for the pair */ 175 - if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL) 176 - null.pte_high = _PAGE_GLOBAL; 163 + if (config_enabled(CONFIG_XPA)) { 164 + if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL) 165 + null.pte_high = _PAGE_GLOBAL; 166 + } else { 167 + if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) 168 + null.pte_low = null.pte_high = _PAGE_GLOBAL; 169 + } 177 170 178 171 set_pte_at(mm, addr, ptep, null); 179 172 htw_start(); ··· 319 302 static inline pte_t pte_wrprotect(pte_t pte) 320 303 { 321 304 pte.pte_low &= ~_PAGE_WRITE; 305 + if (!config_enabled(CONFIG_XPA)) 306 + pte.pte_low &= ~_PAGE_SILENT_WRITE; 322 307 pte.pte_high &= ~_PAGE_SILENT_WRITE; 323 308 return pte; 324 309 } ··· 328 309 static inline pte_t pte_mkclean(pte_t pte) 329 310 { 330 311 pte.pte_low &= ~_PAGE_MODIFIED; 312 + if (!config_enabled(CONFIG_XPA)) 313 + pte.pte_low &= ~_PAGE_SILENT_WRITE; 331 314 pte.pte_high &= ~_PAGE_SILENT_WRITE; 332 315 return pte; 333 316 } ··· 337 316 static inline pte_t pte_mkold(pte_t pte) 338 317 { 339 318 pte.pte_low &= ~_PAGE_ACCESSED; 319 + if (!config_enabled(CONFIG_XPA)) 320 + pte.pte_low &= ~_PAGE_SILENT_READ; 340 321 pte.pte_high &= ~_PAGE_SILENT_READ; 341 322 return pte; 342 323 } ··· 346 323 static inline pte_t pte_mkwrite(pte_t pte) 347 324 { 348 325 pte.pte_low |= _PAGE_WRITE; 349 - if (pte.pte_low & _PAGE_MODIFIED) 326 + if (pte.pte_low & _PAGE_MODIFIED) { 327 + if (!config_enabled(CONFIG_XPA)) 328 + pte.pte_low |= _PAGE_SILENT_WRITE; 350 329 pte.pte_high |= _PAGE_SILENT_WRITE; 330 + } 351 331 return pte; 352 332 } 353 333 354 334 static inline pte_t pte_mkdirty(pte_t pte) 355 335 { 356 336 pte.pte_low |= _PAGE_MODIFIED; 357 - if (pte.pte_low & _PAGE_WRITE) 337 + if (pte.pte_low & _PAGE_WRITE) { 338 + if (!config_enabled(CONFIG_XPA)) 339 + pte.pte_low |= _PAGE_SILENT_WRITE; 358 340 pte.pte_high |= _PAGE_SILENT_WRITE; 341 + } 359 342 return pte; 360 343 } 361 344 362 345 static inline pte_t pte_mkyoung(pte_t pte) 363 346 { 364 347 pte.pte_low |= _PAGE_ACCESSED; 365 - if (!(pte.pte_low & _PAGE_NO_READ)) 348 + if (!(pte.pte_low & _PAGE_NO_READ)) { 349 + if (!config_enabled(CONFIG_XPA)) 350 + pte.pte_low |= _PAGE_SILENT_READ; 366 351 pte.pte_high |= _PAGE_SILENT_READ; 352 + } 367 353 return pte; 368 354 } 369 355 #else ··· 470 438 */ 471 439 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 472 440 473 - #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 441 + #if defined(CONFIG_XPA) 474 442 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 475 443 { 476 444 pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK); 477 445 pte.pte_high &= (_PFN_MASK | _CACHE_MASK); 478 446 pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK; 479 447 pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK; 448 + return pte; 449 + } 450 + #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 451 + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 452 + { 453 + pte.pte_low &= _PAGE_CHG_MASK; 454 + pte.pte_high &= (_PFN_MASK | _CACHE_MASK); 455 + pte.pte_low |= pgprot_val(newprot); 456 + pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK); 480 457 return pte; 481 458 } 482 459 #else
+3 -1
arch/mips/mm/init.c
··· 98 98 idx += in_interrupt() ? FIX_N_COLOURS : 0; 99 99 vaddr = __fix_to_virt(FIX_CMAP_END - idx); 100 100 pte = mk_pte(page, prot); 101 - #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 101 + #if defined(CONFIG_XPA) 102 102 entrylo = pte_to_entrylo(pte.pte_high); 103 + #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 104 + entrylo = pte.pte_high; 103 105 #else 104 106 entrylo = pte_to_entrylo(pte_val(pte)); 105 107 #endif
+23 -12
arch/mips/mm/tlbex.c
··· 1011 1011 1012 1012 static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) 1013 1013 { 1014 - /* 1015 - * 64bit address support (36bit on a 32bit CPU) in a 32bit 1016 - * Kernel is a special case. Only a few CPUs use it. 1017 - */ 1018 - if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) { 1014 + if (config_enabled(CONFIG_XPA)) { 1019 1015 int pte_off_even = sizeof(pte_t) / 2; 1020 1016 int pte_off_odd = pte_off_even + sizeof(pte_t); 1021 - #ifdef CONFIG_XPA 1022 1017 const int scratch = 1; /* Our extra working register */ 1023 1018 1024 1019 uasm_i_addu(p, scratch, 0, ptep); 1025 - #endif 1020 + 1026 1021 uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */ 1027 - uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */ 1028 1022 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1029 - UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); 1030 1023 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); 1024 + 1025 + uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */ 1026 + UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); 1031 1027 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); 1032 - #ifdef CONFIG_XPA 1028 + 1033 1029 uasm_i_lw(p, tmp, 0, scratch); 1034 1030 uasm_i_lw(p, ptep, sizeof(pte_t), scratch); 1035 1031 uasm_i_lui(p, scratch, 0xff); ··· 1034 1038 uasm_i_and(p, ptep, scratch, ptep); 1035 1039 uasm_i_mthc0(p, tmp, C0_ENTRYLO0); 1036 1040 uasm_i_mthc0(p, ptep, C0_ENTRYLO1); 1037 - #endif 1041 + return; 1042 + } 1043 + 1044 + /* 1045 + * 64bit address support (36bit on a 32bit CPU) in a 32bit 1046 + * Kernel is a special case. Only a few CPUs use it. 1047 + */ 1048 + if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) { 1049 + int pte_off_even = sizeof(pte_t) / 2; 1050 + int pte_off_odd = pte_off_even + sizeof(pte_t); 1051 + 1052 + uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */ 1053 + UASM_i_MTC0(p, tmp, C0_ENTRYLO0); 1054 + 1055 + uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */ 1056 + UASM_i_MTC0(p, ptep, C0_ENTRYLO1); 1038 1057 return; 1039 1058 } 1040 1059 ··· 1648 1637 #ifdef CONFIG_PHYS_ADDR_T_64BIT 1649 1638 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); 1650 1639 1651 - if (!cpu_has_64bits) { 1640 + if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) { 1652 1641 const int scratch = 1; /* Our extra working register */ 1653 1642 1654 1643 uasm_i_lui(p, scratch, (mode >> 16));