Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1 fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'powerpc-4.15-7' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
"One fix for an oops at boot if we take a hotplug interrupt before we
are ready to handle it.

The bulk is patches to implement mitigation for Meltdown, see the
change logs for more details.

Thanks to: Nicholas Piggin, Michael Neuling, Oliver O'Halloran, Jon
Masters, Jose Ricardo Ziviani, David Gibson"

* tag 'powerpc-4.15-7' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/powernv: Check device-tree for RFI flush settings
powerpc/pseries: Query hypervisor for RFI flush settings
powerpc/64s: Support disabling RFI flush with no_rfi_flush and nopti
powerpc/64s: Add support for RFI flush of L1-D cache
powerpc/64s: Convert slb_miss_common to use RFI_TO_USER/KERNEL
powerpc/64: Convert fast_exception_return to use RFI_TO_USER/KERNEL
powerpc/64: Convert the syscall exit path to use RFI_TO_USER/KERNEL
powerpc/64s: Simple RFI macro conversions
powerpc/64: Add macros for annotating the destination of rfid/hrfid
powerpc/pseries: Add H_GET_CPU_CHARACTERISTICS flags & wrapper
powerpc/pseries: Make RAS IRQ explicitly dependent on DLPAR WQ

+561 -36
+6
arch/powerpc/include/asm/exception-64e.h
··· 209 209 ori r3,r3,vector_offset@l; \ 210 210 mtspr SPRN_IVOR##vector_number,r3; 211 211 212 + #define RFI_TO_KERNEL \ 213 + rfi 214 + 215 + #define RFI_TO_USER \ 216 + rfi 217 + 212 218 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */ 213 219
+55 -2
arch/powerpc/include/asm/exception-64s.h
··· 74 74 */ 75 75 #define EX_R3 EX_DAR 76 76 77 + /* 78 + * Macros for annotating the expected destination of (h)rfid 79 + * 80 + * The nop instructions allow us to insert one or more instructions to flush the 81 + * L1-D cache when returning to userspace or a guest. 82 + */ 83 + #define RFI_FLUSH_SLOT \ 84 + RFI_FLUSH_FIXUP_SECTION; \ 85 + nop; \ 86 + nop; \ 87 + nop 88 + 89 + #define RFI_TO_KERNEL \ 90 + rfid 91 + 92 + #define RFI_TO_USER \ 93 + RFI_FLUSH_SLOT; \ 94 + rfid; \ 95 + b rfi_flush_fallback 96 + 97 + #define RFI_TO_USER_OR_KERNEL \ 98 + RFI_FLUSH_SLOT; \ 99 + rfid; \ 100 + b rfi_flush_fallback 101 + 102 + #define RFI_TO_GUEST \ 103 + RFI_FLUSH_SLOT; \ 104 + rfid; \ 105 + b rfi_flush_fallback 106 + 107 + #define HRFI_TO_KERNEL \ 108 + hrfid 109 + 110 + #define HRFI_TO_USER \ 111 + RFI_FLUSH_SLOT; \ 112 + hrfid; \ 113 + b hrfi_flush_fallback 114 + 115 + #define HRFI_TO_USER_OR_KERNEL \ 116 + RFI_FLUSH_SLOT; \ 117 + hrfid; \ 118 + b hrfi_flush_fallback 119 + 120 + #define HRFI_TO_GUEST \ 121 + RFI_FLUSH_SLOT; \ 122 + hrfid; \ 123 + b hrfi_flush_fallback 124 + 125 + #define HRFI_TO_UNKNOWN \ 126 + RFI_FLUSH_SLOT; \ 127 + hrfid; \ 128 + b hrfi_flush_fallback 129 + 77 130 #ifdef CONFIG_RELOCATABLE 78 131 #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ 79 132 mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ ··· 271 218 mtspr SPRN_##h##SRR0,r12; \ 272 219 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 273 220 mtspr SPRN_##h##SRR1,r10; \ 274 - h##rfid; \ 221 + h##RFI_TO_KERNEL; \ 275 222 b . /* prevent speculative execution */ 276 223 #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ 277 224 __EXCEPTION_PROLOG_PSERIES_1(label, h) ··· 285 232 mtspr SPRN_##h##SRR0,r12; \ 286 233 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 287 234 mtspr SPRN_##h##SRR1,r10; \ 288 - h##rfid; \ 235 + h##RFI_TO_KERNEL; \ 289 236 b . /* prevent speculative execution */ 290 237 291 238 #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
+13
arch/powerpc/include/asm/feature-fixups.h
··· 187 187 FTR_ENTRY_OFFSET label##1b-label##3b; \ 188 188 .popsection; 189 189 190 + #define RFI_FLUSH_FIXUP_SECTION \ 191 + 951: \ 192 + .pushsection __rfi_flush_fixup,"a"; \ 193 + .align 2; \ 194 + 952: \ 195 + FTR_ENTRY_OFFSET 951b-952b; \ 196 + .popsection; 197 + 198 + 190 199 #ifndef __ASSEMBLY__ 200 + #include <linux/types.h> 201 + 202 + extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; 203 + 191 204 void apply_feature_fixups(void); 192 205 void setup_feature_keys(void); 193 206 #endif
+17
arch/powerpc/include/asm/hvcall.h
··· 241 241 #define H_GET_HCA_INFO 0x1B8 242 242 #define H_GET_PERF_COUNT 0x1BC 243 243 #define H_MANAGE_TRACE 0x1C0 244 + #define H_GET_CPU_CHARACTERISTICS 0x1C8 244 245 #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 245 246 #define H_QUERY_INT_STATE 0x1E4 246 247 #define H_POLL_PENDING 0x1D8 ··· 330 329 #define H_SIGNAL_SYS_RESET_ALL -1 331 330 #define H_SIGNAL_SYS_RESET_ALL_OTHERS -2 332 331 /* >= 0 values are CPU number */ 332 + 333 + /* H_GET_CPU_CHARACTERISTICS return values */ 334 + #define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0 335 + #define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1 336 + #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2 337 + #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3 338 + #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4 339 + 340 + #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 341 + #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 342 + #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 333 343 334 344 /* Flag values used in H_REGISTER_PROC_TBL hcall */ 335 345 #define PROC_TABLE_OP_MASK 0x18 ··· 447 435 return 1; 448 436 } 449 437 } 438 + 439 + struct h_cpu_char_result { 440 + u64 character; 441 + u64 behaviour; 442 + }; 450 443 451 444 #endif /* __ASSEMBLY__ */ 452 445 #endif /* __KERNEL__ */
+10
arch/powerpc/include/asm/paca.h
··· 232 232 struct sibling_subcore_state *sibling_subcore_state; 233 233 #endif 234 234 #endif 235 + #ifdef CONFIG_PPC_BOOK3S_64 236 + /* 237 + * rfi fallback flush must be in its own cacheline to prevent 238 + * other paca data leaking into the L1d 239 + */ 240 + u64 exrfi[EX_SIZE] __aligned(0x80); 241 + void *rfi_flush_fallback_area; 242 + u64 l1d_flush_congruence; 243 + u64 l1d_flush_sets; 244 + #endif 235 245 }; 236 246 237 247 extern void copy_mm_to_paca(struct mm_struct *mm);
+14
arch/powerpc/include/asm/plpar_wrappers.h
··· 326 326 return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); 327 327 } 328 328 329 + static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) 330 + { 331 + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 332 + long rc; 333 + 334 + rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf); 335 + if (rc == H_SUCCESS) { 336 + p->character = retbuf[0]; 337 + p->behaviour = retbuf[1]; 338 + } 339 + 340 + return rc; 341 + } 342 + 329 343 #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
+13
arch/powerpc/include/asm/setup.h
··· 39 39 static inline void pseries_little_endian_exceptions(void) {} 40 40 #endif /* CONFIG_PPC_PSERIES */ 41 41 42 + void rfi_flush_enable(bool enable); 43 + 44 + /* These are bit flags */ 45 + enum l1d_flush_type { 46 + L1D_FLUSH_NONE = 0x1, 47 + L1D_FLUSH_FALLBACK = 0x2, 48 + L1D_FLUSH_ORI = 0x4, 49 + L1D_FLUSH_MTTRIG = 0x8, 50 + }; 51 + 52 + void __init setup_rfi_flush(enum l1d_flush_type, bool enable); 53 + void do_rfi_flush_fixups(enum l1d_flush_type types); 54 + 42 55 #endif /* !__ASSEMBLY__ */ 43 56 44 57 #endif /* _ASM_POWERPC_SETUP_H */
+5
arch/powerpc/kernel/asm-offsets.c
··· 237 237 OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp); 238 238 OFFSET(PACA_IN_MCE, paca_struct, in_mce); 239 239 OFFSET(PACA_IN_NMI, paca_struct, in_nmi); 240 + OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area); 241 + OFFSET(PACA_EXRFI, paca_struct, exrfi); 242 + OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence); 243 + OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets); 244 + 240 245 #endif 241 246 OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); 242 247 OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
+36 -8
arch/powerpc/kernel/entry_64.S
··· 37 37 #include <asm/tm.h> 38 38 #include <asm/ppc-opcode.h> 39 39 #include <asm/export.h> 40 + #ifdef CONFIG_PPC_BOOK3S 41 + #include <asm/exception-64s.h> 42 + #else 43 + #include <asm/exception-64e.h> 44 + #endif 40 45 41 46 /* 42 47 * System calls. ··· 267 262 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 268 263 269 264 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ 265 + ld r2,GPR2(r1) 266 + ld r1,GPR1(r1) 267 + mtlr r4 268 + mtcr r5 269 + mtspr SPRN_SRR0,r7 270 + mtspr SPRN_SRR1,r8 271 + RFI_TO_USER 272 + b . /* prevent speculative execution */ 273 + 274 + /* exit to kernel */ 270 275 1: ld r2,GPR2(r1) 271 276 ld r1,GPR1(r1) 272 277 mtlr r4 273 278 mtcr r5 274 279 mtspr SPRN_SRR0,r7 275 280 mtspr SPRN_SRR1,r8 276 - RFI 281 + RFI_TO_KERNEL 277 282 b . /* prevent speculative execution */ 278 283 279 284 .Lsyscall_error: ··· 412 397 mtmsrd r10, 1 413 398 mtspr SPRN_SRR0, r11 414 399 mtspr SPRN_SRR1, r12 415 - 416 - rfid 400 + RFI_TO_USER 417 401 b . /* prevent speculative execution */ 418 402 #endif 419 403 _ASM_NOKPROBE_SYMBOL(system_call_common); ··· 892 878 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 893 879 ACCOUNT_CPU_USER_EXIT(r13, r2, r4) 894 880 REST_GPR(13, r1) 895 - 1: 881 + 896 882 mtspr SPRN_SRR1,r3 897 883 898 884 ld r2,_CCR(r1) ··· 905 891 ld r3,GPR3(r1) 906 892 ld r4,GPR4(r1) 907 893 ld r1,GPR1(r1) 894 + RFI_TO_USER 895 + b . /* prevent speculative execution */ 908 896 909 - rfid 897 + 1: mtspr SPRN_SRR1,r3 898 + 899 + ld r2,_CCR(r1) 900 + mtcrf 0xFF,r2 901 + ld r2,_NIP(r1) 902 + mtspr SPRN_SRR0,r2 903 + 904 + ld r0,GPR0(r1) 905 + ld r2,GPR2(r1) 906 + ld r3,GPR3(r1) 907 + ld r4,GPR4(r1) 908 + ld r1,GPR1(r1) 909 + RFI_TO_KERNEL 910 910 b . /* prevent speculative execution */ 911 911 912 912 #endif /* CONFIG_PPC_BOOK3E */ ··· 1101 1073 1102 1074 mtspr SPRN_SRR0,r5 1103 1075 mtspr SPRN_SRR1,r6 1104 - rfid 1076 + RFI_TO_KERNEL 1105 1077 b . 
/* prevent speculative execution */ 1106 1078 1107 1079 rtas_return_loc: ··· 1126 1098 1127 1099 mtspr SPRN_SRR0,r3 1128 1100 mtspr SPRN_SRR1,r4 1129 - rfid 1101 + RFI_TO_KERNEL 1130 1102 b . /* prevent speculative execution */ 1131 1103 _ASM_NOKPROBE_SYMBOL(__enter_rtas) 1132 1104 _ASM_NOKPROBE_SYMBOL(rtas_return_loc) ··· 1199 1171 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) 1200 1172 andc r11,r11,r12 1201 1173 mtsrr1 r11 1202 - rfid 1174 + RFI_TO_KERNEL 1203 1175 #endif /* CONFIG_PPC_BOOK3E */ 1204 1176 1205 1177 1: /* Return from OF */
+124 -13
arch/powerpc/kernel/exceptions-64s.S
··· 256 256 LOAD_HANDLER(r12, machine_check_handle_early) 257 257 1: mtspr SPRN_SRR0,r12 258 258 mtspr SPRN_SRR1,r11 259 - rfid 259 + RFI_TO_KERNEL 260 260 b . /* prevent speculative execution */ 261 261 2: 262 262 /* Stack overflow. Stay on emergency stack and panic. ··· 445 445 li r3,MSR_ME 446 446 andc r10,r10,r3 /* Turn off MSR_ME */ 447 447 mtspr SPRN_SRR1,r10 448 - rfid 448 + RFI_TO_KERNEL 449 449 b . 450 450 2: 451 451 /* ··· 463 463 */ 464 464 bl machine_check_queue_event 465 465 MACHINE_CHECK_HANDLER_WINDUP 466 - rfid 466 + RFI_TO_USER_OR_KERNEL 467 467 9: 468 468 /* Deliver the machine check to host kernel in V mode. */ 469 469 MACHINE_CHECK_HANDLER_WINDUP ··· 598 598 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 599 599 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 600 600 601 + andi. r9,r11,MSR_PR // Check for exception from userspace 602 + cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later 603 + 601 604 /* 602 605 * Test MSR_RI before calling slb_allocate_realmode, because the 603 606 * MSR in r11 gets clobbered. However we still want to allocate ··· 627 624 628 625 /* All done -- return from exception. */ 629 626 627 + bne cr4,1f /* returning to kernel */ 628 + 630 629 .machine push 631 630 .machine "power4" 632 631 mtcrf 0x80,r9 632 + mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ 633 633 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ 634 634 mtcrf 0x02,r9 /* I/D indication is in cr6 */ 635 635 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ ··· 646 640 ld r11,PACA_EXSLB+EX_R11(r13) 647 641 ld r12,PACA_EXSLB+EX_R12(r13) 648 642 ld r13,PACA_EXSLB+EX_R13(r13) 649 - rfid 643 + RFI_TO_USER 650 644 b . 
/* prevent speculative execution */ 645 + 1: 646 + .machine push 647 + .machine "power4" 648 + mtcrf 0x80,r9 649 + mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ 650 + mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ 651 + mtcrf 0x02,r9 /* I/D indication is in cr6 */ 652 + mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ 653 + .machine pop 654 + 655 + RESTORE_CTR(r9, PACA_EXSLB) 656 + RESTORE_PPR_PACA(PACA_EXSLB, r9) 657 + mr r3,r12 658 + ld r9,PACA_EXSLB+EX_R9(r13) 659 + ld r10,PACA_EXSLB+EX_R10(r13) 660 + ld r11,PACA_EXSLB+EX_R11(r13) 661 + ld r12,PACA_EXSLB+EX_R12(r13) 662 + ld r13,PACA_EXSLB+EX_R13(r13) 663 + RFI_TO_KERNEL 664 + b . /* prevent speculative execution */ 665 + 651 666 652 667 2: std r3,PACA_EXSLB+EX_DAR(r13) 653 668 mr r3,r12 ··· 678 651 mtspr SPRN_SRR0,r10 679 652 ld r10,PACAKMSR(r13) 680 653 mtspr SPRN_SRR1,r10 681 - rfid 654 + RFI_TO_KERNEL 682 655 b . 683 656 684 657 8: std r3,PACA_EXSLB+EX_DAR(r13) ··· 689 662 mtspr SPRN_SRR0,r10 690 663 ld r10,PACAKMSR(r13) 691 664 mtspr SPRN_SRR1,r10 692 - rfid 665 + RFI_TO_KERNEL 693 666 b . 694 667 695 668 EXC_COMMON_BEGIN(unrecov_slb) ··· 928 901 mtspr SPRN_SRR0,r10 ; \ 929 902 ld r10,PACAKMSR(r13) ; \ 930 903 mtspr SPRN_SRR1,r10 ; \ 931 - rfid ; \ 904 + RFI_TO_KERNEL ; \ 932 905 b . ; /* prevent speculative execution */ 933 906 934 907 #ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH ··· 944 917 xori r12,r12,MSR_LE ; \ 945 918 mtspr SPRN_SRR1,r12 ; \ 946 919 mr r13,r9 ; \ 947 - rfid ; /* return to userspace */ \ 920 + RFI_TO_USER ; /* return to userspace */ \ 948 921 b . ; /* prevent speculative execution */ 949 922 #else 950 923 #define SYSCALL_FASTENDIAN_TEST ··· 1090 1063 mtcr r11 1091 1064 REST_GPR(11, r1) 1092 1065 ld r1,GPR1(r1) 1093 - hrfid 1066 + HRFI_TO_USER_OR_KERNEL 1094 1067 1095 1068 1: mtcr r11 1096 1069 REST_GPR(11, r1) ··· 1341 1314 ld r11,PACA_EXGEN+EX_R11(r13) 1342 1315 ld r12,PACA_EXGEN+EX_R12(r13) 1343 1316 ld r13,PACA_EXGEN+EX_R13(r13) 1344 - HRFID 1317 + HRFI_TO_UNKNOWN 1345 1318 b . 
1346 1319 #endif 1347 1320 ··· 1445 1418 ld r10,PACA_EXGEN+EX_R10(r13); \ 1446 1419 ld r11,PACA_EXGEN+EX_R11(r13); \ 1447 1420 /* returns to kernel where r13 must be set up, so don't restore it */ \ 1448 - ##_H##rfid; \ 1421 + ##_H##RFI_TO_KERNEL; \ 1449 1422 b .; \ 1450 1423 MASKED_DEC_HANDLER(_H) 1424 + 1425 + TRAMP_REAL_BEGIN(rfi_flush_fallback) 1426 + SET_SCRATCH0(r13); 1427 + GET_PACA(r13); 1428 + std r9,PACA_EXRFI+EX_R9(r13) 1429 + std r10,PACA_EXRFI+EX_R10(r13) 1430 + std r11,PACA_EXRFI+EX_R11(r13) 1431 + std r12,PACA_EXRFI+EX_R12(r13) 1432 + std r8,PACA_EXRFI+EX_R13(r13) 1433 + mfctr r9 1434 + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 1435 + ld r11,PACA_L1D_FLUSH_SETS(r13) 1436 + ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) 1437 + /* 1438 + * The load addresses are at staggered offsets within cachelines, 1439 + * which suits some pipelines better (on others it should not 1440 + * hurt). 1441 + */ 1442 + addi r12,r12,8 1443 + mtctr r11 1444 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 1445 + 1446 + /* order ld/st prior to dcbt stop all streams with flushing */ 1447 + sync 1448 + 1: li r8,0 1449 + .rept 8 /* 8-way set associative */ 1450 + ldx r11,r10,r8 1451 + add r8,r8,r12 1452 + xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not 1453 + add r8,r8,r11 // Add 0, this creates a dependency on the ldx 1454 + .endr 1455 + addi r10,r10,128 /* 128 byte cache line */ 1456 + bdnz 1b 1457 + 1458 + mtctr r9 1459 + ld r9,PACA_EXRFI+EX_R9(r13) 1460 + ld r10,PACA_EXRFI+EX_R10(r13) 1461 + ld r11,PACA_EXRFI+EX_R11(r13) 1462 + ld r12,PACA_EXRFI+EX_R12(r13) 1463 + ld r8,PACA_EXRFI+EX_R13(r13) 1464 + GET_SCRATCH0(r13); 1465 + rfid 1466 + 1467 + TRAMP_REAL_BEGIN(hrfi_flush_fallback) 1468 + SET_SCRATCH0(r13); 1469 + GET_PACA(r13); 1470 + std r9,PACA_EXRFI+EX_R9(r13) 1471 + std r10,PACA_EXRFI+EX_R10(r13) 1472 + std r11,PACA_EXRFI+EX_R11(r13) 1473 + std r12,PACA_EXRFI+EX_R12(r13) 1474 + std r8,PACA_EXRFI+EX_R13(r13) 1475 + mfctr r9 1476 + ld
r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 1477 + ld r11,PACA_L1D_FLUSH_SETS(r13) 1478 + ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) 1479 + /* 1480 + * The load addresses are at staggered offsets within cachelines, 1481 + * which suits some pipelines better (on others it should not 1482 + * hurt). 1483 + */ 1484 + addi r12,r12,8 1485 + mtctr r11 1486 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 1487 + 1488 + /* order ld/st prior to dcbt stop all streams with flushing */ 1489 + sync 1490 + 1: li r8,0 1491 + .rept 8 /* 8-way set associative */ 1492 + ldx r11,r10,r8 1493 + add r8,r8,r12 1494 + xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not 1495 + add r8,r8,r11 // Add 0, this creates a dependency on the ldx 1496 + .endr 1497 + addi r10,r10,128 /* 128 byte cache line */ 1498 + bdnz 1b 1499 + 1500 + mtctr r9 1501 + ld r9,PACA_EXRFI+EX_R9(r13) 1502 + ld r10,PACA_EXRFI+EX_R10(r13) 1503 + ld r11,PACA_EXRFI+EX_R11(r13) 1504 + ld r12,PACA_EXRFI+EX_R12(r13) 1505 + ld r8,PACA_EXRFI+EX_R13(r13) 1506 + GET_SCRATCH0(r13); 1507 + hrfid 1451 1508 1452 1509 /* 1453 1510 * Real mode exceptions actually use this too, but alternate 1454 ··· 1552 1441 addi r13, r13, 4 1553 1442 mtspr SPRN_SRR0, r13 1554 1443 GET_SCRATCH0(r13) 1555 - rfid 1444 + RFI_TO_KERNEL 1556 1445 b . 1557 1446 1558 1447 TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) 1559 ··· 1564 1453 addi r13, r13, 4 1565 1454 mtspr SPRN_HSRR0, r13 1566 1455 GET_SCRATCH0(r13) 1567 - hrfid 1456 + HRFI_TO_KERNEL 1568 1457 b . 1569 1458 #endif 1570 1459
+101
arch/powerpc/kernel/setup_64.c
··· 801 801 return 0; 802 802 } 803 803 early_initcall(disable_hardlockup_detector); 804 + 805 + #ifdef CONFIG_PPC_BOOK3S_64 806 + static enum l1d_flush_type enabled_flush_types; 807 + static void *l1d_flush_fallback_area; 808 + static bool no_rfi_flush; 809 + bool rfi_flush; 810 + 811 + static int __init handle_no_rfi_flush(char *p) 812 + { 813 + pr_info("rfi-flush: disabled on command line."); 814 + no_rfi_flush = true; 815 + return 0; 816 + } 817 + early_param("no_rfi_flush", handle_no_rfi_flush); 818 + 819 + /* 820 + * The RFI flush is not KPTI, but because users will see doco that says to use 821 + * nopti we hijack that option here to also disable the RFI flush. 822 + */ 823 + static int __init handle_no_pti(char *p) 824 + { 825 + pr_info("rfi-flush: disabling due to 'nopti' on command line.\n"); 826 + handle_no_rfi_flush(NULL); 827 + return 0; 828 + } 829 + early_param("nopti", handle_no_pti); 830 + 831 + static void do_nothing(void *unused) 832 + { 833 + /* 834 + * We don't need to do the flush explicitly, just enter+exit kernel is 835 + * sufficient, the RFI exit handlers will do the right thing. 836 + */ 837 + } 838 + 839 + void rfi_flush_enable(bool enable) 840 + { 841 + if (rfi_flush == enable) 842 + return; 843 + 844 + if (enable) { 845 + do_rfi_flush_fixups(enabled_flush_types); 846 + on_each_cpu(do_nothing, NULL, 1); 847 + } else 848 + do_rfi_flush_fixups(L1D_FLUSH_NONE); 849 + 850 + rfi_flush = enable; 851 + } 852 + 853 + static void init_fallback_flush(void) 854 + { 855 + u64 l1d_size, limit; 856 + int cpu; 857 + 858 + l1d_size = ppc64_caches.l1d.size; 859 + limit = min(safe_stack_limit(), ppc64_rma_size); 860 + 861 + /* 862 + * Align to L1d size, and size it at 2x L1d size, to catch possible 863 + * hardware prefetch runoff. We don't have a recipe for load patterns to 864 + * reliably avoid the prefetcher. 
865 + */ 866 + l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit)); 867 + memset(l1d_flush_fallback_area, 0, l1d_size * 2); 868 + 869 + for_each_possible_cpu(cpu) { 870 + /* 871 + * The fallback flush is currently coded for 8-way 872 + * associativity. Different associativity is possible, but it 873 + * will be treated as 8-way and may not evict the lines as 874 + * effectively. 875 + * 876 + * 128 byte lines are mandatory. 877 + */ 878 + u64 c = l1d_size / 8; 879 + 880 + paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area; 881 + paca[cpu].l1d_flush_congruence = c; 882 + paca[cpu].l1d_flush_sets = c / 128; 883 + } 884 + } 885 + 886 + void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) 887 + { 888 + if (types & L1D_FLUSH_FALLBACK) { 889 + pr_info("rfi-flush: Using fallback displacement flush\n"); 890 + init_fallback_flush(); 891 + } 892 + 893 + if (types & L1D_FLUSH_ORI) 894 + pr_info("rfi-flush: Using ori type flush\n"); 895 + 896 + if (types & L1D_FLUSH_MTTRIG) 897 + pr_info("rfi-flush: Using mttrig type flush\n"); 898 + 899 + enabled_flush_types = types; 900 + 901 + if (!no_rfi_flush) 902 + rfi_flush_enable(enable); 903 + } 904 + #endif /* CONFIG_PPC_BOOK3S_64 */
+9
arch/powerpc/kernel/vmlinux.lds.S
··· 132 132 /* Read-only data */ 133 133 RO_DATA(PAGE_SIZE) 134 134 135 + #ifdef CONFIG_PPC64 136 + . = ALIGN(8); 137 + __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { 138 + __start___rfi_flush_fixup = .; 139 + *(__rfi_flush_fixup) 140 + __stop___rfi_flush_fixup = .; 141 + } 142 + #endif 143 + 135 144 EXCEPTION_TABLE(0) 136 145 137 146 NOTES :kernel :notes
+4 -5
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 79 79 mtmsrd r0,1 /* clear RI in MSR */ 80 80 mtsrr0 r5 81 81 mtsrr1 r6 82 - RFI 82 + RFI_TO_KERNEL 83 83 84 84 kvmppc_call_hv_entry: 85 85 BEGIN_FTR_SECTION ··· 199 199 mtmsrd r6, 1 /* Clear RI in MSR */ 200 200 mtsrr0 r8 201 201 mtsrr1 r7 202 - RFI 202 + RFI_TO_KERNEL 203 203 204 204 /* Virtual-mode return */ 205 205 .Lvirt_return: ··· 1167 1167 1168 1168 ld r0, VCPU_GPR(R0)(r4) 1169 1169 ld r4, VCPU_GPR(R4)(r4) 1170 - 1171 - hrfid 1170 + HRFI_TO_GUEST 1172 1171 b . 1173 1172 1174 1173 secondary_too_late: ··· 3319 3320 ld r4, PACAKMSR(r13) 3320 3321 mtspr SPRN_SRR0, r3 3321 3322 mtspr SPRN_SRR1, r4 3322 - rfid 3323 + RFI_TO_KERNEL 3323 3324 9: addi r3, r1, STACK_FRAME_OVERHEAD 3324 3325 bl kvmppc_bad_interrupt 3325 3326 b 9b
+5 -2
arch/powerpc/kvm/book3s_rmhandlers.S
··· 46 46 47 47 #define FUNC(name) name 48 48 49 + #define RFI_TO_KERNEL RFI 50 + #define RFI_TO_GUEST RFI 51 + 49 52 .macro INTERRUPT_TRAMPOLINE intno 50 53 51 54 .global kvmppc_trampoline_\intno ··· 144 141 GET_SCRATCH0(r13) 145 142 146 143 /* And get back into the code */ 147 - RFI 144 + RFI_TO_KERNEL 148 145 #endif 149 146 150 147 /* ··· 167 164 ori r5, r5, MSR_EE 168 165 mtsrr0 r7 169 166 mtsrr1 r6 170 - RFI 167 + RFI_TO_KERNEL 171 168 172 169 #include "book3s_segment.S"
+2 -2
arch/powerpc/kvm/book3s_segment.S
··· 156 156 PPC_LL r9, SVCPU_R9(r3) 157 157 PPC_LL r3, (SVCPU_R3)(r3) 158 158 159 - RFI 159 + RFI_TO_GUEST 160 160 kvmppc_handler_trampoline_enter_end: 161 161 162 162 ··· 407 407 cmpwi r12, BOOK3S_INTERRUPT_DOORBELL 408 408 beqa BOOK3S_INTERRUPT_DOORBELL 409 409 410 - RFI 410 + RFI_TO_KERNEL 411 411 kvmppc_handler_trampoline_exit_end:
+41
arch/powerpc/lib/feature-fixups.c
··· 116 116 } 117 117 } 118 118 119 + #ifdef CONFIG_PPC_BOOK3S_64 120 + void do_rfi_flush_fixups(enum l1d_flush_type types) 121 + { 122 + unsigned int instrs[3], *dest; 123 + long *start, *end; 124 + int i; 125 + 126 + start = PTRRELOC(&__start___rfi_flush_fixup), 127 + end = PTRRELOC(&__stop___rfi_flush_fixup); 128 + 129 + instrs[0] = 0x60000000; /* nop */ 130 + instrs[1] = 0x60000000; /* nop */ 131 + instrs[2] = 0x60000000; /* nop */ 132 + 133 + if (types & L1D_FLUSH_FALLBACK) 134 + /* b .+16 to fallback flush */ 135 + instrs[0] = 0x48000010; 136 + 137 + i = 0; 138 + if (types & L1D_FLUSH_ORI) { 139 + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ 140 + instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ 141 + } 142 + 143 + if (types & L1D_FLUSH_MTTRIG) 144 + instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ 145 + 146 + for (i = 0; start < end; start++, i++) { 147 + dest = (void *)start + *start; 148 + 149 + pr_devel("patching dest %lx\n", (unsigned long)dest); 150 + 151 + patch_instruction(dest, instrs[0]); 152 + patch_instruction(dest + 1, instrs[1]); 153 + patch_instruction(dest + 2, instrs[2]); 154 + } 155 + 156 + printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i); 157 + } 158 + #endif /* CONFIG_PPC_BOOK3S_64 */ 159 + 119 160 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) 120 161 { 121 162 long *start, *end;
+49
arch/powerpc/platforms/powernv/setup.c
··· 37 37 #include <asm/kexec.h> 38 38 #include <asm/smp.h> 39 39 #include <asm/tm.h> 40 + #include <asm/setup.h> 40 41 41 42 #include "powernv.h" 43 + 44 + static void pnv_setup_rfi_flush(void) 45 + { 46 + struct device_node *np, *fw_features; 47 + enum l1d_flush_type type; 48 + int enable; 49 + 50 + /* Default to fallback in case fw-features are not available */ 51 + type = L1D_FLUSH_FALLBACK; 52 + enable = 1; 53 + 54 + np = of_find_node_by_name(NULL, "ibm,opal"); 55 + fw_features = of_get_child_by_name(np, "fw-features"); 56 + of_node_put(np); 57 + 58 + if (fw_features) { 59 + np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2"); 60 + if (np && of_property_read_bool(np, "enabled")) 61 + type = L1D_FLUSH_MTTRIG; 62 + 63 + of_node_put(np); 64 + 65 + np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0"); 66 + if (np && of_property_read_bool(np, "enabled")) 67 + type = L1D_FLUSH_ORI; 68 + 69 + of_node_put(np); 70 + 71 + /* Enable unless firmware says NOT to */ 72 + enable = 2; 73 + np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0"); 74 + if (np && of_property_read_bool(np, "disabled")) 75 + enable--; 76 + 77 + of_node_put(np); 78 + 79 + np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1"); 80 + if (np && of_property_read_bool(np, "disabled")) 81 + enable--; 82 + 83 + of_node_put(np); 84 + of_node_put(fw_features); 85 + } 86 + 87 + setup_rfi_flush(type, enable > 0); 88 + } 42 89 43 90 static void __init pnv_setup_arch(void) 44 91 { 45 92 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 93 + 94 + pnv_setup_rfi_flush(); 46 95 47 96 /* Initialize SMP */ 48 97 pnv_smp_init();
+18 -3
arch/powerpc/platforms/pseries/dlpar.c
··· 574 574 575 575 static CLASS_ATTR_RW(dlpar); 576 576 577 - static int __init pseries_dlpar_init(void) 577 + int __init dlpar_workqueue_init(void) 578 578 { 579 + if (pseries_hp_wq) 580 + return 0; 581 + 579 582 pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", 580 - WQ_UNBOUND, 1); 583 + WQ_UNBOUND, 1); 584 + 585 + return pseries_hp_wq ? 0 : -ENOMEM; 586 + } 587 + 588 + static int __init dlpar_sysfs_init(void) 589 + { 590 + int rc; 591 + 592 + rc = dlpar_workqueue_init(); 593 + if (rc) 594 + return rc; 595 + 581 596 return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); 582 597 } 583 - machine_device_initcall(pseries, pseries_dlpar_init); 598 + machine_device_initcall(pseries, dlpar_sysfs_init); 584 599
+2
arch/powerpc/platforms/pseries/pseries.h
··· 98 98 return CMO_PageSize; 99 99 } 100 100 101 + int dlpar_workqueue_init(void); 102 + 101 103 #endif /* _PSERIES_PSERIES_H */
+2 -1
arch/powerpc/platforms/pseries/ras.c
··· 69 69 /* Hotplug Events */ 70 70 np = of_find_node_by_path("/event-sources/hot-plug-events"); 71 71 if (np != NULL) { 72 - request_event_sources_irqs(np, ras_hotplug_interrupt, 72 + if (dlpar_workqueue_init() == 0) 73 + request_event_sources_irqs(np, ras_hotplug_interrupt, 73 74 "RAS_HOTPLUG"); 74 75 of_node_put(np); 75 76 }
+35
arch/powerpc/platforms/pseries/setup.c
··· 459 459 of_pci_check_probe_only(); 460 460 } 461 461 462 + static void pseries_setup_rfi_flush(void) 463 + { 464 + struct h_cpu_char_result result; 465 + enum l1d_flush_type types; 466 + bool enable; 467 + long rc; 468 + 469 + /* Enable by default */ 470 + enable = true; 471 + 472 + rc = plpar_get_cpu_characteristics(&result); 473 + if (rc == H_SUCCESS) { 474 + types = L1D_FLUSH_NONE; 475 + 476 + if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2) 477 + types |= L1D_FLUSH_MTTRIG; 478 + if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30) 479 + types |= L1D_FLUSH_ORI; 480 + 481 + /* Use fallback if nothing set in hcall */ 482 + if (types == L1D_FLUSH_NONE) 483 + types = L1D_FLUSH_FALLBACK; 484 + 485 + if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) 486 + enable = false; 487 + } else { 488 + /* Default to fallback if case hcall is not available */ 489 + types = L1D_FLUSH_FALLBACK; 490 + } 491 + 492 + setup_rfi_flush(types, enable); 493 + } 494 + 462 495 static void __init pSeries_setup_arch(void) 463 496 { 464 497 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); ··· 508 475 loops_per_jiffy = 50000000; 509 476 510 477 fwnmi_init(); 478 + 479 + pseries_setup_rfi_flush(); 511 480 512 481 /* By default, only probe PCI (can be overridden by rtas_pci) */ 513 482 pci_add_flags(PCI_PROBE_ONLY);