Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/vmscape: Enable the mitigation

Enable the previously added mitigation for VMscape. Add the command-line
parameter vmscape={off|ibpb|force} and sysfs reporting.

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>

authored by

Pawan Gupta and committed by
Dave Hansen
556c1ad6 2f8f1734

+115
+1
Documentation/ABI/testing/sysfs-devices-system-cpu
··· 586 586 /sys/devices/system/cpu/vulnerabilities/srbds 587 587 /sys/devices/system/cpu/vulnerabilities/tsa 588 588 /sys/devices/system/cpu/vulnerabilities/tsx_async_abort 589 + /sys/devices/system/cpu/vulnerabilities/vmscape 589 590 Date: January 2018 590 591 Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> 591 592 Description: Information about CPU vulnerabilities
+11
Documentation/admin-guide/kernel-parameters.txt
··· 3829 3829 srbds=off [X86,INTEL] 3830 3830 ssbd=force-off [ARM64] 3831 3831 tsx_async_abort=off [X86] 3832 + vmscape=off [X86] 3832 3833 3833 3834 Exceptions: 3834 3835 This does not have any effect on ··· 8041 8040 8042 8041 vmpoff= [KNL,S390] Perform z/VM CP command after power off. 8043 8042 Format: <command> 8043 + 8044 + vmscape= [X86] Controls mitigation for VMscape attacks. 8045 + VMscape attacks can leak information from a userspace 8046 + hypervisor to a guest via speculative side-channels. 8047 + 8048 + off - disable the mitigation 8049 + ibpb - use Indirect Branch Prediction Barrier 8050 + (IBPB) mitigation (default) 8051 + force - force vulnerability detection even on 8052 + unaffected processors 8044 8053 8045 8054 vsyscall= [X86-64,EARLY] 8046 8055 Controls the behavior of vsyscalls (i.e. calls to
+9
arch/x86/Kconfig
··· 2701 2701 security vulnerability on AMD CPUs which can lead to forwarding of 2702 2702 invalid info to subsequent instructions and thus can affect their 2703 2703 timing and thereby cause a leakage. 2704 + 2705 + config MITIGATION_VMSCAPE 2706 + bool "Mitigate VMSCAPE" 2707 + depends on KVM 2708 + default y 2709 + help 2710 + Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security 2711 + vulnerability on Intel and AMD CPUs that may allow a guest to do 2712 + Spectre v2 style attacks on userspace hypervisor. 2704 2713 endif 2705 2714 2706 2715 config ARCH_HAS_ADD_PAGES
+90
arch/x86/kernel/cpu/bugs.c
··· 96 96 static void __init its_apply_mitigation(void); 97 97 static void __init tsa_select_mitigation(void); 98 98 static void __init tsa_apply_mitigation(void); 99 + static void __init vmscape_select_mitigation(void); 100 + static void __init vmscape_update_mitigation(void); 101 + static void __init vmscape_apply_mitigation(void); 99 102 100 103 /* The base value of the SPEC_CTRL MSR without task-specific bits set */ 101 104 u64 x86_spec_ctrl_base; ··· 273 270 its_select_mitigation(); 274 271 bhi_select_mitigation(); 275 272 tsa_select_mitigation(); 273 + vmscape_select_mitigation(); 276 274 277 275 /* 278 276 * After mitigations are selected, some may need to update their ··· 305 301 bhi_update_mitigation(); 306 302 /* srso_update_mitigation() depends on retbleed_update_mitigation(). */ 307 303 srso_update_mitigation(); 304 + vmscape_update_mitigation(); 308 305 309 306 spectre_v1_apply_mitigation(); 310 307 spectre_v2_apply_mitigation(); ··· 323 318 its_apply_mitigation(); 324 319 bhi_apply_mitigation(); 325 320 tsa_apply_mitigation(); 321 + vmscape_apply_mitigation(); 326 322 } 327 323 328 324 /* ··· 3329 3323 } 3330 3324 3331 3325 #undef pr_fmt 3326 + #define pr_fmt(fmt) "VMSCAPE: " fmt 3327 + 3328 + enum vmscape_mitigations { 3329 + VMSCAPE_MITIGATION_NONE, 3330 + VMSCAPE_MITIGATION_AUTO, 3331 + VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER, 3332 + VMSCAPE_MITIGATION_IBPB_ON_VMEXIT, 3333 + }; 3334 + 3335 + static const char * const vmscape_strings[] = { 3336 + [VMSCAPE_MITIGATION_NONE] = "Vulnerable", 3337 + /* [VMSCAPE_MITIGATION_AUTO] */ 3338 + [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace", 3339 + [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT", 3340 + }; 3341 + 3342 + static enum vmscape_mitigations vmscape_mitigation __ro_after_init = 3343 + IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? 
VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE; 3344 + 3345 + static int __init vmscape_parse_cmdline(char *str) 3346 + { 3347 + if (!str) 3348 + return -EINVAL; 3349 + 3350 + if (!strcmp(str, "off")) { 3351 + vmscape_mitigation = VMSCAPE_MITIGATION_NONE; 3352 + } else if (!strcmp(str, "ibpb")) { 3353 + vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER; 3354 + } else if (!strcmp(str, "force")) { 3355 + setup_force_cpu_bug(X86_BUG_VMSCAPE); 3356 + vmscape_mitigation = VMSCAPE_MITIGATION_AUTO; 3357 + } else { 3358 + pr_err("Ignoring unknown vmscape=%s option.\n", str); 3359 + } 3360 + 3361 + return 0; 3362 + } 3363 + early_param("vmscape", vmscape_parse_cmdline); 3364 + 3365 + static void __init vmscape_select_mitigation(void) 3366 + { 3367 + if (cpu_mitigations_off() || 3368 + !boot_cpu_has_bug(X86_BUG_VMSCAPE) || 3369 + !boot_cpu_has(X86_FEATURE_IBPB)) { 3370 + vmscape_mitigation = VMSCAPE_MITIGATION_NONE; 3371 + return; 3372 + } 3373 + 3374 + if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO) 3375 + vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER; 3376 + } 3377 + 3378 + static void __init vmscape_update_mitigation(void) 3379 + { 3380 + if (!boot_cpu_has_bug(X86_BUG_VMSCAPE)) 3381 + return; 3382 + 3383 + if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB || 3384 + srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT) 3385 + vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT; 3386 + 3387 + pr_info("%s\n", vmscape_strings[vmscape_mitigation]); 3388 + } 3389 + 3390 + static void __init vmscape_apply_mitigation(void) 3391 + { 3392 + if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER) 3393 + setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER); 3394 + } 3395 + 3396 + #undef pr_fmt 3332 3397 #define pr_fmt(fmt) fmt 3333 3398 3334 3399 #ifdef CONFIG_SYSFS ··· 3647 3570 return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); 3648 3571 } 3649 3572 3573 + static ssize_t vmscape_show_state(char *buf) 3574 + { 3575 + return sysfs_emit(buf, 
"%s\n", vmscape_strings[vmscape_mitigation]); 3576 + } 3577 + 3650 3578 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 3651 3579 char *buf, unsigned int bug) 3652 3580 { ··· 3717 3635 3718 3636 case X86_BUG_TSA: 3719 3637 return tsa_show_state(buf); 3638 + 3639 + case X86_BUG_VMSCAPE: 3640 + return vmscape_show_state(buf); 3720 3641 3721 3642 default: 3722 3643 break; ··· 3811 3726 ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf) 3812 3727 { 3813 3728 return cpu_show_common(dev, attr, buf, X86_BUG_TSA); 3729 + } 3730 + 3731 + ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf) 3732 + { 3733 + return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE); 3814 3734 } 3815 3735 #endif 3816 3736
+3
drivers/base/cpu.c
··· 603 603 CPU_SHOW_VULN_FALLBACK(old_microcode); 604 604 CPU_SHOW_VULN_FALLBACK(indirect_target_selection); 605 605 CPU_SHOW_VULN_FALLBACK(tsa); 606 + CPU_SHOW_VULN_FALLBACK(vmscape); 606 607 607 608 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); 608 609 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); ··· 623 622 static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL); 624 623 static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); 625 624 static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL); 625 + static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL); 626 626 627 627 static struct attribute *cpu_root_vulnerabilities_attrs[] = { 628 628 &dev_attr_meltdown.attr, ··· 644 642 &dev_attr_old_microcode.attr, 645 643 &dev_attr_indirect_target_selection.attr, 646 644 &dev_attr_tsa.attr, 645 + &dev_attr_vmscape.attr, 647 646 NULL 648 647 }; 649 648
+1
include/linux/cpu.h
··· 83 83 extern ssize_t cpu_show_indirect_target_selection(struct device *dev, 84 84 struct device_attribute *attr, char *buf); 85 85 extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf); 86 + extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf); 86 87 87 88 extern __printf(4, 5) 88 89 struct device *cpu_device_create(struct device *parent, void *drvdata,