Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Add support for XPA.

Add support for extended physical addressing (XPA) so that
32-bit platforms can access physical addresses equal to or
greater than 40 bits in size.

NOTE:
1) XPA and EVA are not the same and cannot be used
simultaneously.
2) If you configure your kernel for XPA, the PTEs
and all address sizes become 64-bit.
3) Your platform MUST have working HIGHMEM support.

Signed-off-by: Steven J. Hill <Steven.Hill@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9355/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Authored by Steven J. Hill; committed by Ralf Baechle.
c5b36783 be0c37c9

+173 -44
+35
arch/mips/Kconfig
··· 377 377 select SYS_HAS_CPU_MIPS32_R1 378 378 select SYS_HAS_CPU_MIPS32_R2 379 379 select SYS_HAS_CPU_MIPS32_R3_5 380 + select SYS_HAS_CPU_MIPS32_R5 380 381 select SYS_HAS_CPU_MIPS32_R6 381 382 select SYS_HAS_CPU_MIPS64_R1 382 383 select SYS_HAS_CPU_MIPS64_R2 ··· 387 386 select SYS_SUPPORTS_32BIT_KERNEL 388 387 select SYS_SUPPORTS_64BIT_KERNEL 389 388 select SYS_SUPPORTS_BIG_ENDIAN 389 + select SYS_SUPPORTS_HIGHMEM 390 390 select SYS_SUPPORTS_LITTLE_ENDIAN 391 391 select SYS_SUPPORTS_MICROMIPS 392 392 select SYS_SUPPORTS_MIPS_CMP ··· 1598 1596 One of its primary benefits is an increase in the maximum size 1599 1597 of lowmem (up to 3GB). If unsure, say 'N' here. 1600 1598 1599 + config CPU_MIPS32_R5_FEATURES 1600 + bool "MIPS32 Release 5 Features" 1601 + depends on SYS_HAS_CPU_MIPS32_R5 1602 + depends on CPU_MIPS32_R2 1603 + help 1604 + Choose this option to build a kernel for release 2 or later of the 1605 + MIPS32 architecture including features from release 5 such as 1606 + support for Extended Physical Addressing (XPA). 1607 + 1608 + config CPU_MIPS32_R5_XPA 1609 + bool "Extended Physical Addressing (XPA)" 1610 + depends on CPU_MIPS32_R5_FEATURES 1611 + depends on !EVA 1612 + depends on !PAGE_SIZE_4KB 1613 + depends on SYS_SUPPORTS_HIGHMEM 1614 + select XPA 1615 + select HIGHMEM 1616 + select ARCH_PHYS_ADDR_T_64BIT 1617 + default n 1618 + help 1619 + Choose this option if you want to enable the Extended Physical 1620 + Addressing (XPA) on your MIPS32 core (such as P5600 series). The 1621 + benefit is to increase physical addressing equal to or greater 1622 + than 40 bits. Note that this has the side effect of turning on 1623 + 64-bit addressing which in turn makes the PTEs 64-bit in size. 1624 + If unsure, say 'N' here. 
1625 + 1601 1626 if CPU_LOONGSON2F 1602 1627 config CPU_NOP_WORKAROUNDS 1603 1628 bool ··· 1726 1697 bool 1727 1698 1728 1699 config SYS_HAS_CPU_MIPS32_R3_5 1700 + bool 1701 + 1702 + config SYS_HAS_CPU_MIPS32_R5 1729 1703 bool 1730 1704 1731 1705 config SYS_HAS_CPU_MIPS32_R6 ··· 1866 1834 select MIPS_SPRAM 1867 1835 1868 1836 config EVA 1837 + bool 1838 + 1839 + config XPA 1869 1840 bool 1870 1841 1871 1842 config SYS_SUPPORTS_32BIT_KERNEL
+3
arch/mips/include/asm/cpu-features.h
··· 139 139 # endif 140 140 #endif 141 141 142 + #ifndef cpu_has_xpa 143 + #define cpu_has_xpa (cpu_data[0].options & MIPS_CPU_XPA) 144 + #endif 142 145 #ifndef cpu_has_vtag_icache 143 146 #define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG) 144 147 #endif
+1
arch/mips/include/asm/cpu.h
··· 377 377 #define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ 378 378 #define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */ 379 379 #define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */ 380 + #define MIPS_CPU_XPA 0x2000000000ull /* CPU supports Extended Physical Addressing */ 380 381 381 382 /* 382 383 * CPU ASE encodings
+9 -6
arch/mips/include/asm/pgtable-32.h
··· 105 105 106 106 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 107 107 #define pte_page(x) pfn_to_page(pte_pfn(x)) 108 - #define pte_pfn(x) ((unsigned long)((x).pte_high >> 6)) 108 + #define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT)) 109 109 static inline pte_t 110 110 pfn_pte(unsigned long pfn, pgprot_t prot) 111 111 { 112 112 pte_t pte; 113 - pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f); 114 - pte.pte_low = pgprot_val(prot); 113 + 114 + pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) | 115 + (pgprot_val(prot) & ~_PFNX_MASK); 116 + pte.pte_high = (pfn << _PFN_SHIFT) | 117 + (pgprot_val(prot) & ~_PFN_MASK); 115 118 return pte; 116 119 } 117 120 ··· 169 166 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 170 167 171 168 /* Swap entries must have VALID and GLOBAL bits cleared. */ 172 - #define __swp_type(x) (((x).val >> 2) & 0x1f) 173 - #define __swp_offset(x) ((x).val >> 7) 174 - #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) }) 169 + #define __swp_type(x) (((x).val >> 4) & 0x1f) 170 + #define __swp_offset(x) ((x).val >> 9) 171 + #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 9) }) 175 172 #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high }) 176 173 #define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val }) 177 174
+11 -2
arch/mips/include/asm/pgtable-bits.h
··· 37 37 /* 38 38 * The following bits are implemented by the TLB hardware 39 39 */ 40 - #define _PAGE_GLOBAL_SHIFT 0 40 + #define _PAGE_NO_EXEC_SHIFT 0 41 + #define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT) 42 + #define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1) 43 + #define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT) 44 + #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) 41 45 #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) 42 46 #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) 43 47 #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) ··· 53 49 /* 54 50 * The following bits are implemented in software 55 51 */ 56 - #define _PAGE_PRESENT_SHIFT (_CACHE_SHIFT + 3) 52 + #define _PAGE_PRESENT_SHIFT (24) 57 53 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) 58 54 #define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1) 59 55 #define _PAGE_READ (1 << _PAGE_READ_SHIFT) ··· 65 61 #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 66 62 67 63 #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) 64 + 65 + /* 66 + * Bits for extended EntryLo0/EntryLo1 registers 67 + */ 68 + #define _PFNX_MASK 0xffffff 68 69 69 70 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 70 71
+14 -22
arch/mips/include/asm/pgtable.h
··· 133 133 134 134 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 135 135 136 - #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) 136 + #define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL)) 137 137 #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) 138 138 139 139 static inline void set_pte(pte_t *ptep, pte_t pte) ··· 142 142 smp_wmb(); 143 143 ptep->pte_low = pte.pte_low; 144 144 145 - if (pte.pte_low & _PAGE_GLOBAL) { 145 + if (pte.pte_high & _PAGE_GLOBAL) { 146 146 pte_t *buddy = ptep_buddy(ptep); 147 147 /* 148 148 * Make sure the buddy is global too (if it's !none, 149 149 * it better already be global) 150 150 */ 151 - if (pte_none(*buddy)) { 152 - buddy->pte_low |= _PAGE_GLOBAL; 151 + if (pte_none(*buddy)) 153 152 buddy->pte_high |= _PAGE_GLOBAL; 154 - } 155 153 } 156 154 } 157 155 ··· 159 161 160 162 htw_stop(); 161 163 /* Preserve global status for the pair */ 162 - if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) 163 - null.pte_low = null.pte_high = _PAGE_GLOBAL; 164 + if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL) 165 + null.pte_high = _PAGE_GLOBAL; 164 166 165 167 set_pte_at(mm, addr, ptep, null); 166 168 htw_start(); ··· 240 242 241 243 static inline pte_t pte_wrprotect(pte_t pte) 242 244 { 243 - pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); 245 + pte.pte_low &= ~_PAGE_WRITE; 244 246 pte.pte_high &= ~_PAGE_SILENT_WRITE; 245 247 return pte; 246 248 } 247 249 248 250 static inline pte_t pte_mkclean(pte_t pte) 249 251 { 250 - pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); 252 + pte.pte_low &= ~_PAGE_MODIFIED; 251 253 pte.pte_high &= ~_PAGE_SILENT_WRITE; 252 254 return pte; 253 255 } 254 256 255 257 static inline pte_t pte_mkold(pte_t pte) 256 258 { 257 - pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ); 259 + pte.pte_low &= ~_PAGE_ACCESSED; 258 260 pte.pte_high &= ~_PAGE_SILENT_READ; 259 261 return pte; 260 262 } ··· 262 264 static inline pte_t pte_mkwrite(pte_t pte) 263 265 { 264 
266 pte.pte_low |= _PAGE_WRITE; 265 - if (pte.pte_low & _PAGE_MODIFIED) { 266 - pte.pte_low |= _PAGE_SILENT_WRITE; 267 + if (pte.pte_low & _PAGE_MODIFIED) 267 268 pte.pte_high |= _PAGE_SILENT_WRITE; 268 - } 269 269 return pte; 270 270 } 271 271 272 272 static inline pte_t pte_mkdirty(pte_t pte) 273 273 { 274 274 pte.pte_low |= _PAGE_MODIFIED; 275 - if (pte.pte_low & _PAGE_WRITE) { 276 - pte.pte_low |= _PAGE_SILENT_WRITE; 275 + if (pte.pte_low & _PAGE_WRITE) 277 276 pte.pte_high |= _PAGE_SILENT_WRITE; 278 - } 279 277 return pte; 280 278 } 281 279 282 280 static inline pte_t pte_mkyoung(pte_t pte) 283 281 { 284 282 pte.pte_low |= _PAGE_ACCESSED; 285 - if (pte.pte_low & _PAGE_READ) { 286 - pte.pte_low |= _PAGE_SILENT_READ; 283 + if (pte.pte_low & _PAGE_READ) 287 284 pte.pte_high |= _PAGE_SILENT_READ; 288 - } 289 285 return pte; 290 286 } 291 287 #else ··· 383 391 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 384 392 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 385 393 { 386 - pte.pte_low &= _PAGE_CHG_MASK; 394 + pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK); 387 395 pte.pte_high &= (_PFN_MASK | _CACHE_MASK); 388 - pte.pte_low |= pgprot_val(newprot); 389 - pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK); 396 + pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK; 397 + pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK; 390 398 return pte; 391 399 } 392 400 #else
+4
arch/mips/kernel/cpu-probe.c
··· 516 516 c->options |= MIPS_CPU_MAAR; 517 517 if (config5 & MIPS_CONF5_LLB) 518 518 c->options |= MIPS_CPU_RW_LLB; 519 + #ifdef CONFIG_XPA 520 + if (config5 & MIPS_CONF5_MVH) 521 + c->options |= MIPS_CPU_XPA; 522 + #endif 519 523 520 524 return config5 & MIPS_CONF_M; 521 525 }
+1
arch/mips/kernel/proc.c
··· 120 120 if (cpu_has_msa) seq_printf(m, "%s", " msa"); 121 121 if (cpu_has_eva) seq_printf(m, "%s", " eva"); 122 122 if (cpu_has_htw) seq_printf(m, "%s", " htw"); 123 + if (cpu_has_xpa) seq_printf(m, "%s", " xpa"); 123 124 seq_printf(m, "\n"); 124 125 125 126 if (cpu_has_mmips) {
+6 -1
arch/mips/mm/init.c
··· 96 96 vaddr = __fix_to_virt(FIX_CMAP_END - idx); 97 97 pte = mk_pte(page, prot); 98 98 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 99 - entrylo = pte.pte_high; 99 + entrylo = pte_to_entrylo(pte.pte_high); 100 100 #else 101 101 entrylo = pte_to_entrylo(pte_val(pte)); 102 102 #endif ··· 106 106 write_c0_entryhi(vaddr & (PAGE_MASK << 1)); 107 107 write_c0_entrylo0(entrylo); 108 108 write_c0_entrylo1(entrylo); 109 + #ifdef CONFIG_XPA 110 + entrylo = (pte.pte_low & _PFNX_MASK); 111 + writex_c0_entrylo0(entrylo); 112 + writex_c0_entrylo1(entrylo); 113 + #endif 109 114 tlbidx = read_c0_wired(); 110 115 write_c0_wired(tlbidx + 1); 111 116 write_c0_index(tlbidx);
+12
arch/mips/mm/tlb-r4k.c
··· 333 333 ptep = pte_offset_map(pmdp, address); 334 334 335 335 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 336 + #ifdef CONFIG_XPA 337 + write_c0_entrylo0(pte_to_entrylo(ptep->pte_high)); 338 + writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK); 339 + ptep++; 340 + write_c0_entrylo1(pte_to_entrylo(ptep->pte_high)); 341 + writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK); 342 + #else 336 343 write_c0_entrylo0(ptep->pte_high); 337 344 ptep++; 338 345 write_c0_entrylo1(ptep->pte_high); 346 + #endif 339 347 #else 340 348 write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++))); 341 349 write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep))); ··· 363 355 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, 364 356 unsigned long entryhi, unsigned long pagemask) 365 357 { 358 + #ifdef CONFIG_XPA 359 + panic("Broken for XPA kernels"); 360 + #else 366 361 unsigned long flags; 367 362 unsigned long wired; 368 363 unsigned long old_pagemask; ··· 394 383 write_c0_pagemask(old_pagemask); 395 384 local_flush_tlb_all(); 396 385 local_irq_restore(flags); 386 + #endif 397 387 } 398 388 399 389 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+77 -13
arch/mips/mm/tlbex.c
··· 35 35 #include <asm/uasm.h> 36 36 #include <asm/setup.h> 37 37 38 + static int __cpuinitdata mips_xpa_disabled; 39 + 40 + static int __init xpa_disable(char *s) 41 + { 42 + mips_xpa_disabled = 1; 43 + 44 + return 1; 45 + } 46 + 47 + __setup("noxpa", xpa_disable); 48 + 38 49 /* 39 50 * TLB load/store/modify handlers. 40 51 * ··· 1038 1027 } else { 1039 1028 int pte_off_even = sizeof(pte_t) / 2; 1040 1029 int pte_off_odd = pte_off_even + sizeof(pte_t); 1030 + #ifdef CONFIG_XPA 1031 + const int scratch = 1; /* Our extra working register */ 1041 1032 1042 - /* The pte entries are pre-shifted */ 1043 - uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */ 1044 - UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ 1045 - uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ 1046 - UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ 1033 + uasm_i_addu(p, scratch, 0, ptep); 1034 + #endif 1035 + uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */ 1036 + uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */ 1037 + UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1038 + UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); 1039 + UASM_i_MTC0(p, tmp, C0_ENTRYLO0); 1040 + UASM_i_MTC0(p, ptep, C0_ENTRYLO1); 1041 + #ifdef CONFIG_XPA 1042 + uasm_i_lw(p, tmp, 0, scratch); 1043 + uasm_i_lw(p, ptep, sizeof(pte_t), scratch); 1044 + uasm_i_lui(p, scratch, 0xff); 1045 + uasm_i_ori(p, scratch, scratch, 0xffff); 1046 + uasm_i_and(p, tmp, scratch, tmp); 1047 + uasm_i_and(p, ptep, scratch, ptep); 1048 + uasm_i_mthc0(p, tmp, C0_ENTRYLO0); 1049 + uasm_i_mthc0(p, ptep, C0_ENTRYLO1); 1050 + #endif 1047 1051 } 1048 1052 #else 1049 1053 UASM_i_LW(p, tmp, 0, ptep); /* get even pte */ ··· 1559 1533 { 1560 1534 #ifdef CONFIG_PHYS_ADDR_T_64BIT 1561 1535 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); 1562 - #endif 1563 1536 1537 + if (!cpu_has_64bits) { 1538 + const int scratch = 1; /* Our extra working register */ 1539 + 1540 + uasm_i_lui(p, scratch, (mode >> 16)); 1541 + uasm_i_or(p, pte, 
pte, scratch); 1542 + } else 1543 + #endif 1564 1544 uasm_i_ori(p, pte, pte, mode); 1565 1545 #ifdef CONFIG_SMP 1566 1546 # ifdef CONFIG_PHYS_ADDR_T_64BIT ··· 1630 1598 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); 1631 1599 uasm_i_nop(p); 1632 1600 } else { 1633 - uasm_i_andi(p, t, pte, _PAGE_PRESENT); 1601 + uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT); 1602 + uasm_i_andi(p, t, t, 1); 1634 1603 uasm_il_beqz(p, r, t, lid); 1635 1604 if (pte == t) 1636 1605 /* You lose the SMP race :-(*/ 1637 1606 iPTE_LW(p, pte, ptr); 1638 1607 } 1639 1608 } else { 1640 - uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ); 1641 - uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ); 1609 + uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT); 1610 + uasm_i_andi(p, t, t, 3); 1611 + uasm_i_xori(p, t, t, 3); 1642 1612 uasm_il_bnez(p, r, t, lid); 1643 1613 if (pte == t) 1644 1614 /* You lose the SMP race :-(*/ ··· 1669 1635 { 1670 1636 int t = scratch >= 0 ? scratch : pte; 1671 1637 1672 - uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE); 1673 - uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE); 1638 + uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT); 1639 + uasm_i_andi(p, t, t, 5); 1640 + uasm_i_xori(p, t, t, 5); 1674 1641 uasm_il_bnez(p, r, t, lid); 1675 1642 if (pte == t) 1676 1643 /* You lose the SMP race :-(*/ ··· 1707 1672 uasm_i_nop(p); 1708 1673 } else { 1709 1674 int t = scratch >= 0 ? scratch : pte; 1710 - uasm_i_andi(p, t, pte, _PAGE_WRITE); 1675 + uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT); 1676 + uasm_i_andi(p, t, t, 1); 1711 1677 uasm_il_beqz(p, r, t, lid); 1712 1678 if (pte == t) 1713 1679 /* You lose the SMP race :-(*/ ··· 2321 2285 2322 2286 pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT; 2323 2287 pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT; 2288 + 2289 + /* If XPA has been enabled, PTEs are 64-bit in size. 
*/ 2290 + if (read_c0_pagegrain() & PG_ELPA) 2291 + pwsize |= 1; 2292 + 2324 2293 write_c0_pwsize(pwsize); 2325 2294 2326 2295 /* Make sure everything is set before we enable the HTW */ ··· 2337 2296 pr_info("Hardware Page Table Walker enabled\n"); 2338 2297 2339 2298 print_htw_config(); 2299 + } 2300 + 2301 + static void config_xpa_params(void) 2302 + { 2303 + #ifdef CONFIG_XPA 2304 + unsigned int pagegrain; 2305 + 2306 + if (mips_xpa_disabled) { 2307 + pr_info("Extended Physical Addressing (XPA) disabled\n"); 2308 + return; 2309 + } 2310 + 2311 + pagegrain = read_c0_pagegrain(); 2312 + write_c0_pagegrain(pagegrain | PG_ELPA); 2313 + back_to_back_c0_hazard(); 2314 + pagegrain = read_c0_pagegrain(); 2315 + 2316 + if (pagegrain & PG_ELPA) 2317 + pr_info("Extended Physical Addressing (XPA) enabled\n"); 2318 + else 2319 + panic("Extended Physical Addressing (XPA) disabled"); 2320 + #endif 2340 2321 } 2341 2322 2342 2323 void build_tlb_refill_handler(void) ··· 2425 2362 } 2426 2363 if (cpu_has_local_ebase) 2427 2364 build_r4000_tlb_refill_handler(); 2365 + if (cpu_has_xpa) 2366 + config_xpa_params(); 2428 2367 if (cpu_has_htw) 2429 2368 config_htw_params(); 2430 - 2431 2369 } 2432 2370 }