Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 pti updates from Thomas Gleixner:
"Three small commits updating the SSB mitigation to take the updated
AMD mitigation variants into account"

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/bugs: Switch the selection of mitigation from CPU vendor to CPU features
x86/bugs: Add AMD's SPEC_CTRL MSR usage
x86/bugs: Add AMD's variant of SSB_NO

+28 -14
+2
arch/x86/include/asm/cpufeatures.h
@@ -282,6 +282,8 @@
 #define X86_FEATURE_AMD_IBPB		(13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS		(13*32+14) /* "" Indirect Branch Restricted Speculation */
 #define X86_FEATURE_AMD_STIBP		(13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_SSBD		(13*32+24) /* "" Speculative Store Bypass Disable */
 #define X86_FEATURE_VIRT_SSBD		(13*32+25) /* Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSB_NO		(13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */
+5 -8
arch/x86/kernel/cpu/bugs.c
@@ -529,18 +529,15 @@
 	if (mode == SPEC_STORE_BYPASS_DISABLE) {
 		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
 		/*
-		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
-		 * a completely different MSR and bit dependent on family.
+		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
+		 * use a completely different MSR and bit dependent on family.
 		 */
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_INTEL:
+		if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+			x86_amd_ssb_disable();
+		else {
 			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
 			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
 			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-			break;
-		case X86_VENDOR_AMD:
-			x86_amd_ssb_disable();
-			break;
 		}
 	}
 
+8 -1
arch/x86/kernel/cpu/common.c
@@ -803,6 +803,12 @@
 		set_cpu_cap(c, X86_FEATURE_STIBP);
 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
+
+	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
+	}
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
@@ -998,7 +992,8 @@
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
 	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-	    !(ia32_cap & ARCH_CAP_SSB_NO))
+	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
 	if (x86_match_cpu(cpu_no_meltdown))
+8 -2
arch/x86/kvm/cpuid.c
@@ -379,7 +379,8 @@
 
 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-		F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
+		F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
+		F(AMD_SSB_NO);
 
 	/* cpuid 0xC0000001.edx */
 	const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -665,7 +664,12 @@
 			entry->ebx |= F(VIRT_SSBD);
 		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
 		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
-		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+		/*
+		 * The preference is to use SPEC CTRL MSR instead of the
+		 * VIRT_SPEC MSR.
+		 */
+		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+		    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
 			entry->ebx |= F(VIRT_SSBD);
 		break;
 	}
+5 -3
arch/x86/kvm/svm.c
@@ -4115,7 +4115,8 @@
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
 			return 1;
 
 		msr_info->data = svm->spec_ctrl;
@@ -4218,11 +4217,12 @@
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
 			return 1;
 
 		/* The STIBP bit doesn't fault even if it's not advertised */
-		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
 			return 1;
 
 		svm->spec_ctrl = data;