Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/mm/radix: Use STD_MMU_64 to properly isolate hash related code

We also use MMU_FTR_RADIX to branch out from code paths specific to
hash.

No functionality change.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

Authored by Aneesh Kumar K.V and committed by Michael Ellerman
caca285e a8ed87c9

+52 -16
+5 -2
arch/powerpc/kernel/entry_64.S
··· 529 529 std r6,PACACURRENT(r13) /* Set new 'current' */ 530 530 531 531 ld r8,KSP(r4) /* new stack pointer */ 532 - #ifdef CONFIG_PPC_BOOK3S 532 + #ifdef CONFIG_PPC_STD_MMU_64 533 + BEGIN_MMU_FTR_SECTION 534 + b 2f 535 + END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX) 533 536 BEGIN_FTR_SECTION 534 537 clrrdi r6,r8,28 /* get its ESID */ 535 538 clrrdi r9,r1,28 /* get current sp ESID */ ··· 578 575 slbmte r7,r0 579 576 isync 580 577 2: 581 - #endif /* !CONFIG_PPC_BOOK3S */ 578 + #endif /* CONFIG_PPC_STD_MMU_64 */ 582 579 583 580 CURRENT_THREAD_INFO(r7, r8) /* base of new stack */ 584 581 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
+23 -5
arch/powerpc/kernel/exceptions-64s.S
··· 939 939 ld r3,PACA_EXGEN+EX_DAR(r13) 940 940 lwz r4,PACA_EXGEN+EX_DSISR(r13) 941 941 li r5,0x300 942 + std r3,_DAR(r1) 943 + std r4,_DSISR(r1) 944 + BEGIN_MMU_FTR_SECTION 942 945 b do_hash_page /* Try to handle as hpte fault */ 946 + MMU_FTR_SECTION_ELSE 947 + b handle_page_fault 948 + ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX) 943 949 944 950 .align 7 945 951 .globl h_data_storage_common ··· 970 964 ld r3,_NIP(r1) 971 965 andis. r4,r12,0x5820 972 966 li r5,0x400 967 + std r3,_DAR(r1) 968 + std r4,_DSISR(r1) 969 + BEGIN_MMU_FTR_SECTION 973 970 b do_hash_page /* Try to handle as hpte fault */ 971 + MMU_FTR_SECTION_ELSE 972 + b handle_page_fault 973 + ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX) 974 974 975 975 STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception) 976 976 ··· 1387 1375 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1388 1376 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 1389 1377 1378 + #ifdef CONFIG_PPC_STD_MMU_64 1379 + BEGIN_MMU_FTR_SECTION 1390 1380 bl slb_allocate_realmode 1391 - 1381 + END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX) 1382 + #endif 1392 1383 /* All done -- return from exception. */ 1393 1384 1394 1385 ld r10,PACA_EXSLB+EX_LR(r13) ··· 1399 1384 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1400 1385 1401 1386 mtlr r10 1402 - 1387 + BEGIN_MMU_FTR_SECTION 1388 + b 2f 1389 + END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX) 1403 1390 andi. r10,r12,MSR_RI /* check for unrecoverable exception */ 1404 1391 beq- 2f 1405 1392 ··· 1452 1435 */ 1453 1436 .align 7 1454 1437 do_hash_page: 1455 - std r3,_DAR(r1) 1456 - std r4,_DSISR(r1) 1457 - 1438 + #ifdef CONFIG_PPC_STD_MMU_64 1458 1439 andis. r0,r4,0xa410 /* weird error? */ 1459 1440 bne- handle_page_fault /* if not, try to insert a HPTE */ 1460 1441 andis. r0,r4,DSISR_DABRMATCH@h ··· 1480 1465 1481 1466 /* Error */ 1482 1467 blt- 13f 1468 + #endif /* CONFIG_PPC_STD_MMU_64 */ 1483 1469 1484 1470 /* Here we have a page fault that hash_page can't handle. 
*/ 1485 1471 handle_page_fault: ··· 1507 1491 12: b ret_from_except_lite 1508 1492 1509 1493 1494 + #ifdef CONFIG_PPC_STD_MMU_64 1510 1495 /* We have a page fault that hash_page could handle but HV refused 1511 1496 * the PTE insertion 1512 1497 */ ··· 1517 1500 ld r4,_DAR(r1) 1518 1501 bl low_hash_fault 1519 1502 b ret_from_except 1503 + #endif 1520 1504 1521 1505 /* 1522 1506 * We come here as a result of a DSI at a point where we don't want
+4 -2
arch/powerpc/kernel/machine_kexec_64.c
··· 76 76 * end of the blocked region (begin >= high). Use the 77 77 * boolean identity !(a || b) === (!a && !b). 78 78 */ 79 + #ifdef CONFIG_PPC_STD_MMU_64 79 80 if (htab_address) { 80 81 low = __pa(htab_address); 81 82 high = low + htab_size_bytes; ··· 89 88 return -ETXTBSY; 90 89 } 91 90 } 91 + #endif /* CONFIG_PPC_STD_MMU_64 */ 92 92 93 93 /* We also should not overwrite the tce tables */ 94 94 for_each_node_by_type(node, "pci") { ··· 383 381 /* NOTREACHED */ 384 382 } 385 383 386 - #ifndef CONFIG_PPC_BOOK3E 384 + #ifdef CONFIG_PPC_STD_MMU_64 387 385 /* Values we need to export to the second kernel via the device tree. */ 388 386 static unsigned long htab_base; 389 387 static unsigned long htab_size; ··· 430 428 return 0; 431 429 } 432 430 late_initcall(export_htab_values); 433 - #endif /* !CONFIG_PPC_BOOK3E */ 431 + #endif /* CONFIG_PPC_STD_MMU_64 */
+10
arch/powerpc/kernel/mce_power.c
··· 80 80 81 81 82 82 /* flush SLBs and reload */ 83 + #ifdef CONFIG_PPC_MMU_STD_64 83 84 static void flush_and_reload_slb(void) 84 85 { 85 86 struct slb_shadow *slb; ··· 114 113 asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); 115 114 } 116 115 } 116 + #endif 117 117 118 118 static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) 119 119 { ··· 125 123 * reset the error bits whenever we handle them so that at the end 126 124 * we can check whether we handled all of them or not. 127 125 * */ 126 + #ifdef CONFIG_PPC_MMU_STD_64 128 127 if (dsisr & slb_error_bits) { 129 128 flush_and_reload_slb(); 130 129 /* reset error bits */ ··· 137 134 /* reset error bits */ 138 135 dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; 139 136 } 137 + #endif 140 138 /* Any other errors we don't understand? */ 141 139 if (dsisr & 0xffffffffUL) 142 140 handled = 0; ··· 157 153 switch (P7_SRR1_MC_IFETCH(srr1)) { 158 154 case 0: 159 155 break; 156 + #ifdef CONFIG_PPC_MMU_STD_64 160 157 case P7_SRR1_MC_IFETCH_SLB_PARITY: 161 158 case P7_SRR1_MC_IFETCH_SLB_MULTIHIT: 162 159 /* flush and reload SLBs for SLB errors. */ ··· 170 165 handled = 1; 171 166 } 172 167 break; 168 + #endif 173 169 default: 174 170 break; 175 171 } ··· 184 178 185 179 handled = mce_handle_common_ierror(srr1); 186 180 181 + #ifdef CONFIG_PPC_MMU_STD_64 187 182 if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) { 188 183 flush_and_reload_slb(); 189 184 handled = 1; 190 185 } 186 + #endif 191 187 return handled; 192 188 } 193 189 ··· 332 324 333 325 handled = mce_handle_common_ierror(srr1); 334 326 327 + #ifdef CONFIG_PPC_MMU_STD_64 335 328 if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) { 336 329 flush_and_reload_slb(); 337 330 handled = 1; 338 331 } 332 + #endif 339 333 return handled; 340 334 } 341 335
+9 -6
arch/powerpc/kernel/process.c
··· 1079 1079 } 1080 1080 #endif /* CONFIG_PPC64 */ 1081 1081 1082 - #ifdef CONFIG_PPC_BOOK3S_64 1082 + #ifdef CONFIG_PPC_STD_MMU_64 1083 1083 batch = this_cpu_ptr(&ppc64_tlb_batch); 1084 1084 if (batch->active) { 1085 1085 current_thread_info()->local_flags |= _TLF_LAZY_MMU; ··· 1087 1087 __flush_tlb_pending(batch); 1088 1088 batch->active = 0; 1089 1089 } 1090 - #endif /* CONFIG_PPC_BOOK3S_64 */ 1090 + #endif /* CONFIG_PPC_STD_MMU_64 */ 1091 1091 1092 1092 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1093 1093 switch_booke_debug_regs(&new->thread.debug); ··· 1133 1133 1134 1134 last = _switch(old_thread, new_thread); 1135 1135 1136 - #ifdef CONFIG_PPC_BOOK3S_64 1136 + #ifdef CONFIG_PPC_STD_MMU_64 1137 1137 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { 1138 1138 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; 1139 1139 batch = this_cpu_ptr(&ppc64_tlb_batch); ··· 1142 1142 1143 1143 if (current_thread_info()->task->thread.regs) 1144 1144 restore_math(current_thread_info()->task->thread.regs); 1145 - 1146 - #endif /* CONFIG_PPC_BOOK3S_64 */ 1145 + #endif /* CONFIG_PPC_STD_MMU_64 */ 1147 1146 1148 1147 return last; 1149 1148 } ··· 1376 1377 #ifdef CONFIG_PPC_STD_MMU_64 1377 1378 unsigned long sp_vsid; 1378 1379 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; 1380 + 1381 + if (radix_enabled()) 1382 + return; 1379 1383 1380 1384 if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) 1381 1385 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) ··· 1928 1926 * the heap, we can put it above 1TB so it is backed by a 1TB 1929 1927 * segment. Otherwise the heap will be in the bottom 1TB 1930 1928 * which always uses 256MB segments and this may result in a 1931 - * performance penalty. 1929 + * performance penalty. We don't need to worry about radix. For 1930 + * radix, mmu_highuser_ssize remains unchanged from 256MB. 1932 1931 */ 1933 1932 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) 1934 1933 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+1 -1
arch/powerpc/xmon/xmon.c
··· 2913 2913 printf("%s", after); 2914 2914 } 2915 2915 2916 - #ifdef CONFIG_PPC_BOOK3S_64 2916 + #ifdef CONFIG_PPC_STD_MMU_64 2917 2917 void dump_segments(void) 2918 2918 { 2919 2919 int i;