Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'x86_bugs_post_ibpb' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 IBPB fixes from Borislav Petkov:
"This fixes the IBPB implementation of older AMDs (< gen4) that do not
flush the RSB (Return Address Stack) so you can still do some leaking
when using a "=ibpb" mitigation for Retbleed or SRSO. Fix it by doing
the flushing in software on those generations.

IBPB is not the default setting so this is not likely to affect
anybody in practice"

* tag 'x86_bugs_post_ibpb' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/bugs: Do not use UNTRAIN_RET with IBPB on entry
x86/bugs: Skip RSB fill at VMEXIT
x86/entry: Have entry_ibpb() invalidate return predictions
x86/cpufeatures: Add a IBPB_NO_RET BUG flag
x86/cpufeatures: Define X86_FEATURE_AMD_IBPB_RET

+43 -1
+5
arch/x86/entry/entry.S
@@ -9,5 +9,7 @@
 #include <asm/unwind_hints.h>
 #include <asm/segment.h>
 #include <asm/cache.h>
+#include <asm/cpufeatures.h>
+#include <asm/nospec-branch.h>
 
 #include "calling.h"
@@ -19,4 +21,7 @@ SYM_FUNC_START(entry_ibpb)
 	movl	$PRED_CMD_IBPB, %eax
 	xorl	%edx, %edx
 	wrmsr
+
+	/* Make sure IBPB clears return stack predictions too. */
+	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
 	RET
 SYM_FUNC_END(entry_ibpb)
 /* For KVM */
+3 -1
arch/x86/include/asm/cpufeatures.h
@@ -215,7 +215,7 @@
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
 #define X86_FEATURE_LS_CFG_SSBD		( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */
 #define X86_FEATURE_IBRS		( 7*32+25) /* "ibrs" Indirect Branch Restricted Speculation */
-#define X86_FEATURE_IBPB		( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_IBPB		( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
 #define X86_FEATURE_STIBP		( 7*32+27) /* "stibp" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ZEN			( 7*32+28) /* Generic flag for all Zen and newer */
 #define X86_FEATURE_L1TF_PTEINV		( 7*32+29) /* L1TF workaround PTE inversion */
@@ -348,6 +348,7 @@
 #define X86_FEATURE_CPPC		(13*32+27) /* "cppc" Collaborative Processor Performance Control */
 #define X86_FEATURE_AMD_PSFD		(13*32+28) /* Predictive Store Forwarding Disable */
 #define X86_FEATURE_BTC_NO		(13*32+29) /* Not vulnerable to Branch Type Confusion */
+#define X86_FEATURE_AMD_IBPB_RET	(13*32+30) /* IBPB clears return address predictor */
 #define X86_FEATURE_BRS			(13*32+31) /* "brs" Branch Sampling available */
@@ -523,4 +524,5 @@
 #define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */
 #define X86_BUG_RFDS			X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
 #define X86_BUG_BHI			X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
+#define X86_BUG_IBPB_NO_RET		X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
 #endif /* _ASM_X86_CPUFEATURES_H */
+32
arch/x86/kernel/cpu/bugs.c
@@ -1115,8 +1115,25 @@
 	case RETBLEED_MITIGATION_IBPB:
 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+
+		/*
+		 * IBPB on entry already obviates the need for
+		 * software-based untraining so clear those in case some
+		 * other mitigation like SRSO has selected them.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_UNRET);
+		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+
 		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
 		mitigate_smt = true;
+
+		/*
+		 * There is no need for RSB filling: entry_ibpb() ensures
+		 * all predictions, including the RSB, are invalidated,
+		 * regardless of IBPB implementation.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+
 		break;
 
 	case RETBLEED_MITIGATION_STUFF:
@@ -2627,6 +2644,14 @@
 		if (has_microcode) {
 			setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
 			srso_mitigation = SRSO_MITIGATION_IBPB;
+
+			/*
+			 * IBPB on entry already obviates the need for
+			 * software-based untraining so clear those in case some
+			 * other mitigation like Retbleed has selected them.
+			 */
+			setup_clear_cpu_cap(X86_FEATURE_UNRET);
+			setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
 		}
 	} else {
 		pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
@@ -2638,6 +2663,13 @@
 		if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
 			setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
 			srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
+
+			/*
+			 * There is no need for RSB filling: entry_ibpb() ensures
+			 * all predictions, including the RSB, are invalidated,
+			 * regardless of IBPB implementation.
+			 */
+			setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
 		}
 	} else {
 		pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
+3
arch/x86/kernel/cpu/common.c
@@ -1443,6 +1443,9 @@
 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
 		setup_force_cpu_bug(X86_BUG_BHI);
 
+	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
+		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
+
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
 