Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/bugs: Add a Transient Scheduler Attacks mitigation

Add the required feature detection glue to bugs.c et al. in order to
support the TSA mitigation.

Co-developed-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>

+232 -7
+1
Documentation/ABI/testing/sysfs-devices-system-cpu
··· 584 584 /sys/devices/system/cpu/vulnerabilities/spectre_v1 585 585 /sys/devices/system/cpu/vulnerabilities/spectre_v2 586 586 /sys/devices/system/cpu/vulnerabilities/srbds 587 + /sys/devices/system/cpu/vulnerabilities/tsa 587 588 /sys/devices/system/cpu/vulnerabilities/tsx_async_abort 588 589 Date: January 2018 589 590 Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+13
Documentation/admin-guide/kernel-parameters.txt
··· 7488 7488 having this key zero'ed is acceptable. E.g. in testing 7489 7489 scenarios. 7490 7490 7491 + tsa= [X86] Control mitigation for Transient Scheduler 7492 + Attacks on AMD CPUs. Search the following in your 7493 + favourite search engine for more details: 7494 + 7495 + "Technical guidance for mitigating transient scheduler 7496 + attacks". 7497 + 7498 + off - disable the mitigation 7499 + on - enable the mitigation (default) 7500 + user - mitigate only user/kernel transitions 7501 + vm - mitigate only guest/host transitions 7502 + 7503 + 7491 7504 tsc= Disable clocksource stability checks for TSC. 7492 7505 Format: <string> 7493 7506 [x86] reliable: mark tsc clocksource as reliable, this
+9
arch/x86/Kconfig
··· 2695 2695 disabled, mitigation cannot be enabled via cmdline. 2696 2696 See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst> 2697 2697 2698 + config MITIGATION_TSA 2699 + bool "Mitigate Transient Scheduler Attacks" 2700 + depends on CPU_SUP_AMD 2701 + default y 2702 + help 2703 + Enable mitigation for Transient Scheduler Attacks. TSA is a hardware 2704 + security vulnerability on AMD CPUs which can lead to forwarding of 2705 + invalid info to subsequent instructions and thus can affect their 2706 + timing and thereby cause a leakage. 2698 2707 endif 2699 2708 2700 2709 config ARCH_HAS_ADD_PAGES
+5 -1
arch/x86/include/asm/cpufeatures.h
··· 456 456 #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */ 457 457 #define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */ 458 458 #define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */ 459 + #define X86_FEATURE_VERW_CLEAR (20*32+ 5) /* The memory form of VERW mitigates TSA */ 459 460 #define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */ 460 461 #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */ 461 462 #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */ ··· 488 487 #define X86_FEATURE_PREFER_YMM (21*32+ 8) /* Avoid ZMM registers due to downclocking */ 489 488 #define X86_FEATURE_APX (21*32+ 9) /* Advanced Performance Extensions */ 490 489 #define X86_FEATURE_INDIRECT_THUNK_ITS (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */ 490 + #define X86_FEATURE_TSA_SQ_NO (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */ 491 + #define X86_FEATURE_TSA_L1_NO (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */ 492 + #define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */ 491 493 492 494 /* 493 495 * BUG word(s) ··· 546 542 #define X86_BUG_OLD_MICROCODE X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */ 547 543 #define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */ 548 544 #define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */ 549 - 545 + #define X86_BUG_TSA X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */ 550 546 #endif /* _ASM_X86_CPUFEATURES_H */
+1 -1
arch/x86/include/asm/mwait.h
··· 80 80 */ 81 81 static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx) 82 82 { 83 - /* No MDS buffer clear as this is AMD/HYGON only */ 83 + /* No need for TSA buffer clearing on AMD */ 84 84 85 85 /* "mwaitx %eax, %ebx, %ecx" */ 86 86 asm volatile(".byte 0x0f, 0x01, 0xfb"
+10 -4
arch/x86/include/asm/nospec-branch.h
··· 308 308 * CFLAGS.ZF. 309 309 * Note: Only the memory operand variant of VERW clears the CPU buffers. 310 310 */ 311 - .macro CLEAR_CPU_BUFFERS 311 + .macro __CLEAR_CPU_BUFFERS feature 312 312 #ifdef CONFIG_X86_64 313 - ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF 313 + ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature 314 314 #else 315 315 /* 316 316 * In 32bit mode, the memory operand must be a %cs reference. The data 317 317 * segments may not be usable (vm86 mode), and the stack segment may not 318 318 * be flat (ESPFIX32). 319 319 */ 320 - ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF 320 + ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature 321 321 #endif 322 322 .endm 323 + 324 + #define CLEAR_CPU_BUFFERS \ 325 + __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF 326 + 327 + #define VM_CLEAR_CPU_BUFFERS \ 328 + __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM 323 329 324 330 #ifdef CONFIG_X86_64 325 331 .macro CLEAR_BRANCH_HISTORY ··· 608 602 609 603 /** 610 604 * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS 611 - * vulnerability 605 + * and TSA vulnerabilities. 612 606 * 613 607 * Clear CPU buffers if the corresponding static key is enabled 614 608 */
+44
arch/x86/kernel/cpu/amd.c
··· 377 377 #endif 378 378 } 379 379 380 + #define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode) \ 381 + X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \ 382 + step, step, ucode) 383 + 384 + static const struct x86_cpu_id amd_tsa_microcode[] = { 385 + ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7), 386 + ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b), 387 + ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d), 388 + ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c), 389 + ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c), 390 + ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109), 391 + ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e), 392 + ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211), 393 + ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108), 394 + ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012), 395 + ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a), 396 + ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108), 397 + ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208), 398 + ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008), 399 + ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008), 400 + ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216), 401 + {}, 402 + }; 403 + 404 + static void tsa_init(struct cpuinfo_x86 *c) 405 + { 406 + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) 407 + return; 408 + 409 + if (cpu_has(c, X86_FEATURE_ZEN3) || 410 + cpu_has(c, X86_FEATURE_ZEN4)) { 411 + if (x86_match_min_microcode_rev(amd_tsa_microcode)) 412 + setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR); 413 + else 414 + pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode); 415 + } else { 416 + setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO); 417 + setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO); 418 + } 419 + } 420 + 380 421 static void bsp_init_amd(struct cpuinfo_x86 *c) 381 422 { 382 423 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { ··· 530 489 } 531 490 532 491 bsp_determine_snp(c); 492 + 493 + tsa_init(c); 494 + 533 495 return; 534 496 535 497 warn:
+124
arch/x86/kernel/cpu/bugs.c
··· 94 94 static void __init its_select_mitigation(void); 95 95 static void __init its_update_mitigation(void); 96 96 static void __init its_apply_mitigation(void); 97 + static void __init tsa_select_mitigation(void); 98 + static void __init tsa_apply_mitigation(void); 97 99 98 100 /* The base value of the SPEC_CTRL MSR without task-specific bits set */ 99 101 u64 x86_spec_ctrl_base; ··· 227 225 gds_select_mitigation(); 228 226 its_select_mitigation(); 229 227 bhi_select_mitigation(); 228 + tsa_select_mitigation(); 230 229 231 230 /* 232 231 * After mitigations are selected, some may need to update their ··· 275 272 gds_apply_mitigation(); 276 273 its_apply_mitigation(); 277 274 bhi_apply_mitigation(); 275 + tsa_apply_mitigation(); 278 276 } 279 277 280 278 /* ··· 1492 1488 } 1493 1489 1494 1490 #undef pr_fmt 1491 + #define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt 1492 + 1493 + enum tsa_mitigations { 1494 + TSA_MITIGATION_NONE, 1495 + TSA_MITIGATION_AUTO, 1496 + TSA_MITIGATION_UCODE_NEEDED, 1497 + TSA_MITIGATION_USER_KERNEL, 1498 + TSA_MITIGATION_VM, 1499 + TSA_MITIGATION_FULL, 1500 + }; 1501 + 1502 + static const char * const tsa_strings[] = { 1503 + [TSA_MITIGATION_NONE] = "Vulnerable", 1504 + [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 1505 + [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary", 1506 + [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM", 1507 + [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", 1508 + }; 1509 + 1510 + static enum tsa_mitigations tsa_mitigation __ro_after_init = 1511 + IS_ENABLED(CONFIG_MITIGATION_TSA) ? 
TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE; 1512 + 1513 + static int __init tsa_parse_cmdline(char *str) 1514 + { 1515 + if (!str) 1516 + return -EINVAL; 1517 + 1518 + if (!strcmp(str, "off")) 1519 + tsa_mitigation = TSA_MITIGATION_NONE; 1520 + else if (!strcmp(str, "on")) 1521 + tsa_mitigation = TSA_MITIGATION_FULL; 1522 + else if (!strcmp(str, "user")) 1523 + tsa_mitigation = TSA_MITIGATION_USER_KERNEL; 1524 + else if (!strcmp(str, "vm")) 1525 + tsa_mitigation = TSA_MITIGATION_VM; 1526 + else 1527 + pr_err("Ignoring unknown tsa=%s option.\n", str); 1528 + 1529 + return 0; 1530 + } 1531 + early_param("tsa", tsa_parse_cmdline); 1532 + 1533 + static void __init tsa_select_mitigation(void) 1534 + { 1535 + if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) { 1536 + tsa_mitigation = TSA_MITIGATION_NONE; 1537 + return; 1538 + } 1539 + 1540 + if (tsa_mitigation == TSA_MITIGATION_NONE) 1541 + return; 1542 + 1543 + if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) { 1544 + tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED; 1545 + goto out; 1546 + } 1547 + 1548 + if (tsa_mitigation == TSA_MITIGATION_AUTO) 1549 + tsa_mitigation = TSA_MITIGATION_FULL; 1550 + 1551 + /* 1552 + * No need to set verw_clear_cpu_buf_mitigation_selected - it 1553 + * doesn't fit all cases here and it is not needed because this 1554 + * is the only VERW-based mitigation on AMD. 
1555 + */ 1556 + out: 1557 + pr_info("%s\n", tsa_strings[tsa_mitigation]); 1558 + } 1559 + 1560 + static void __init tsa_apply_mitigation(void) 1561 + { 1562 + switch (tsa_mitigation) { 1563 + case TSA_MITIGATION_USER_KERNEL: 1564 + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 1565 + break; 1566 + case TSA_MITIGATION_VM: 1567 + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); 1568 + break; 1569 + case TSA_MITIGATION_FULL: 1570 + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 1571 + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); 1572 + break; 1573 + default: 1574 + break; 1575 + } 1576 + } 1577 + 1578 + #undef pr_fmt 1495 1579 #define pr_fmt(fmt) "Spectre V2 : " fmt 1496 1580 1497 1581 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = ··· 2405 2313 pr_warn_once(MMIO_MSG_SMT); 2406 2314 break; 2407 2315 case MMIO_MITIGATION_OFF: 2316 + break; 2317 + } 2318 + 2319 + switch (tsa_mitigation) { 2320 + case TSA_MITIGATION_USER_KERNEL: 2321 + case TSA_MITIGATION_VM: 2322 + case TSA_MITIGATION_AUTO: 2323 + case TSA_MITIGATION_FULL: 2324 + /* 2325 + * TSA-SQ can potentially lead to info leakage between 2326 + * SMT threads. 
2327 + */ 2328 + if (sched_smt_active()) 2329 + static_branch_enable(&cpu_buf_idle_clear); 2330 + else 2331 + static_branch_disable(&cpu_buf_idle_clear); 2332 + break; 2333 + case TSA_MITIGATION_NONE: 2334 + case TSA_MITIGATION_UCODE_NEEDED: 2408 2335 break; 2409 2336 } 2410 2337 ··· 3376 3265 return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); 3377 3266 } 3378 3267 3268 + static ssize_t tsa_show_state(char *buf) 3269 + { 3270 + return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); 3271 + } 3272 + 3379 3273 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 3380 3274 char *buf, unsigned int bug) 3381 3275 { ··· 3443 3327 3444 3328 case X86_BUG_ITS: 3445 3329 return its_show_state(buf); 3330 + 3331 + case X86_BUG_TSA: 3332 + return tsa_show_state(buf); 3446 3333 3447 3334 default: 3448 3335 break; ··· 3532 3413 ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf) 3533 3414 { 3534 3415 return cpu_show_common(dev, attr, buf, X86_BUG_ITS); 3416 + } 3417 + 3418 + ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf) 3419 + { 3420 + return cpu_show_common(dev, attr, buf, X86_BUG_TSA); 3535 3421 } 3536 3422 #endif 3537 3423
+13 -1
arch/x86/kernel/cpu/common.c
··· 1233 1233 #define ITS BIT(8) 1234 1234 /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */ 1235 1235 #define ITS_NATIVE_ONLY BIT(9) 1236 + /* CPU is affected by Transient Scheduler Attacks */ 1237 + #define TSA BIT(10) 1236 1238 1237 1239 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { 1238 1240 VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE, X86_STEP_MAX, SRBDS), ··· 1282 1280 VULNBL_AMD(0x16, RETBLEED), 1283 1281 VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), 1284 1282 VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO), 1285 - VULNBL_AMD(0x19, SRSO), 1283 + VULNBL_AMD(0x19, SRSO | TSA), 1286 1284 VULNBL_AMD(0x1a, SRSO), 1287 1285 {} 1288 1286 }; ··· 1530 1528 setup_force_cpu_bug(X86_BUG_ITS); 1531 1529 if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY)) 1532 1530 setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY); 1531 + } 1532 + 1533 + if (c->x86_vendor == X86_VENDOR_AMD) { 1534 + if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) || 1535 + !cpu_has(c, X86_FEATURE_TSA_L1_NO)) { 1536 + if (cpu_matches(cpu_vuln_blacklist, TSA) || 1537 + /* Enable bug on Zen guests to allow for live migration. */ 1538 + (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN))) 1539 + setup_force_cpu_bug(X86_BUG_TSA); 1540 + } 1533 1541 } 1534 1542 1535 1543 if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+2
arch/x86/kernel/cpu/scattered.c
··· 50 50 { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, 51 51 { X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 }, 52 52 { X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 }, 53 + { X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 }, 54 + { X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 }, 53 55 { X86_FEATURE_AMD_WORKLOAD_CLASS, CPUID_EAX, 22, 0x80000021, 0 }, 54 56 { X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 }, 55 57 { X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
+6
arch/x86/kvm/svm/vmenter.S
··· 169 169 #endif 170 170 mov VCPU_RDI(%_ASM_DI), %_ASM_DI 171 171 172 + /* Clobbers EFLAGS.ZF */ 173 + VM_CLEAR_CPU_BUFFERS 174 + 172 175 /* Enter guest mode */ 173 176 3: vmrun %_ASM_AX 174 177 4: ··· 337 334 /* Get svm->current_vmcb->pa into RAX. */ 338 335 mov SVM_current_vmcb(%rdi), %rax 339 336 mov KVM_VMCB_pa(%rax), %rax 337 + 338 + /* Clobbers EFLAGS.ZF */ 339 + VM_CLEAR_CPU_BUFFERS 340 340 341 341 /* Enter guest mode */ 342 342 1: vmrun %rax
+3
drivers/base/cpu.c
··· 602 602 CPU_SHOW_VULN_FALLBACK(ghostwrite); 603 603 CPU_SHOW_VULN_FALLBACK(old_microcode); 604 604 CPU_SHOW_VULN_FALLBACK(indirect_target_selection); 605 + CPU_SHOW_VULN_FALLBACK(tsa); 605 606 606 607 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); 607 608 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); ··· 621 620 static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL); 622 621 static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL); 623 622 static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); 623 + static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL); 624 624 625 625 static struct attribute *cpu_root_vulnerabilities_attrs[] = { 626 626 &dev_attr_meltdown.attr, ··· 641 639 &dev_attr_ghostwrite.attr, 642 640 &dev_attr_old_microcode.attr, 643 641 &dev_attr_indirect_target_selection.attr, 642 + &dev_attr_tsa.attr, 644 643 NULL 645 644 }; 646 645
+1
include/linux/cpu.h
··· 82 82 struct device_attribute *attr, char *buf); 83 83 extern ssize_t cpu_show_indirect_target_selection(struct device *dev, 84 84 struct device_attribute *attr, char *buf); 85 + extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf); 85 86 86 87 extern __printf(4, 5) 87 88 struct device *cpu_device_create(struct device *parent, void *drvdata,