Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'x86_bugs_for_v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 speculation mitigation updates from Borislav Petkov:

- Some preparatory work to convert the mitigations machinery to
mitigating attack vectors instead of single vulnerabilities

- Untangle and remove a now unneeded X86_FEATURE_USE_IBPB flag

- Add support for a Zen5-specific SRSO mitigation

- Cleanups and minor improvements

* tag 'x86_bugs_for_v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/bugs: Make spectre user default depend on MITIGATION_SPECTRE_V2
x86/bugs: Use the cpu_smt_possible() helper instead of open-coded code
x86/bugs: Add AUTO mitigations for mds/taa/mmio/rfds
x86/bugs: Relocate mds/taa/mmio/rfds defines
x86/bugs: Add X86_BUG_SPECTRE_V2_USER
x86/bugs: Remove X86_FEATURE_USE_IBPB
KVM: nVMX: Always use IBPB to properly virtualize IBRS
x86/bugs: Use a static branch to guard IBPB on vCPU switch
x86/bugs: Remove the X86_FEATURE_USE_IBPB check in ib_prctl_set()
x86/mm: Remove X86_FEATURE_USE_IBPB checks in cond_mitigation()
x86/bugs: Move the X86_FEATURE_USE_IBPB check into callers
x86/bugs: KVM: Add support for SRSO_MSR_FIX

+118 -52
+13
Documentation/admin-guide/hw-vuln/srso.rst
··· 104 104 105 105 (spec_rstack_overflow=ibpb-vmexit) 106 106 107 + * 'Mitigation: Reduced Speculation': 107 108 109 + This mitigation gets automatically enabled when the above one "IBPB on 110 + VMEXIT" has been selected and the CPU supports the BpSpecReduce bit. 111 + 112 + It gets automatically enabled on machines which have the 113 + SRSO_USER_KERNEL_NO=1 CPUID bit. In that case, the code logic is to switch 114 + to the above =ibpb-vmexit mitigation because the user/kernel boundary is 115 + not affected anymore and thus "safe RET" is not needed. 116 + 117 + After enabling the IBPB on VMEXIT mitigation option, the BpSpecReduce bit 118 + is detected (functionality present on all such machines) and that 119 + practically overrides IBPB on VMEXIT as it has a lot less performance 120 + impact and takes care of the guest->host attack vector too. 108 121 109 122 In order to exploit vulnerability, an attacker needs to: 110 123
+2
Documentation/admin-guide/kernel-parameters.txt
··· 6585 6585 6586 6586 Selecting 'on' will also enable the mitigation 6587 6587 against user space to user space task attacks. 6588 + Selecting a specific mitigation does not force-enable 6589 + user mitigations. 6588 6590 6589 6591 Selecting 'off' will disable both the kernel and 6590 6592 the user space protections.
+5 -1
arch/x86/include/asm/cpufeatures.h
··· 202 202 #define X86_FEATURE_MBA ( 7*32+18) /* "mba" Memory Bandwidth Allocation */ 203 203 #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ 204 204 #define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* "perfmon_v2" AMD Performance Monitoring Version 2 */ 205 - #define X86_FEATURE_USE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled */ 206 205 #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* Use IBRS during runtime firmware calls */ 207 206 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */ 208 207 #define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */ ··· 460 461 #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */ 461 462 #define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */ 462 463 #define X86_FEATURE_SRSO_USER_KERNEL_NO (20*32+30) /* CPU is not affected by SRSO across user/kernel boundaries */ 464 + #define X86_FEATURE_SRSO_BP_SPEC_REDUCE (20*32+31) /* 465 + * BP_CFG[BpSpecReduce] can be used to mitigate SRSO for VMs. 466 + * (SRSO_MSR_FIX in the official doc). 467 + */ 463 468 464 469 /* 465 470 * Extended auxiliary flags: Linux defined - for features scattered in various ··· 530 527 #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */ 531 528 #define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */ 532 529 #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ 530 + #define X86_BUG_SPECTRE_V2_USER X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */ 533 531 #endif /* _ASM_X86_CPUFEATURES_H */
+1
arch/x86/include/asm/msr-index.h
··· 723 723 724 724 /* Zen4 */ 725 725 #define MSR_ZEN4_BP_CFG 0xc001102e 726 + #define MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT 4 726 727 #define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5 727 728 728 729 /* Fam 19h MSRs */
+3 -1
arch/x86/include/asm/nospec-branch.h
··· 522 522 523 523 static inline void indirect_branch_prediction_barrier(void) 524 524 { 525 - alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB); 525 + alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_IBPB); 526 526 } 527 527 528 528 /* The Intel SPEC CTRL MSR base value cache */ ··· 558 558 DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp); 559 559 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); 560 560 DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); 561 + 562 + DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb); 561 563 562 564 DECLARE_STATIC_KEY_FALSE(mds_idle_clear); 563 565
+1
arch/x86/include/asm/processor.h
··· 745 745 746 746 enum mds_mitigations { 747 747 MDS_MITIGATION_OFF, 748 + MDS_MITIGATION_AUTO, 748 749 MDS_MITIGATION_FULL, 749 750 MDS_MITIGATION_VMWERV, 750 751 };
+77 -44
arch/x86/kernel/cpu/bugs.c
··· 113 113 /* Control unconditional IBPB in switch_mm() */ 114 114 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); 115 115 116 + /* Control IBPB on vCPU load */ 117 + DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb); 118 + EXPORT_SYMBOL_GPL(switch_vcpu_ibpb); 119 + 116 120 /* Control MDS CPU buffer clear before idling (halt, mwait) */ 117 121 DEFINE_STATIC_KEY_FALSE(mds_idle_clear); 118 122 EXPORT_SYMBOL_GPL(mds_idle_clear); ··· 238 234 239 235 /* Default mitigation for MDS-affected CPUs */ 240 236 static enum mds_mitigations mds_mitigation __ro_after_init = 241 - IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_FULL : MDS_MITIGATION_OFF; 237 + IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF; 242 238 static bool mds_nosmt __ro_after_init = false; 243 239 244 240 static const char * const mds_strings[] = { ··· 247 243 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", 248 244 }; 249 245 246 + enum taa_mitigations { 247 + TAA_MITIGATION_OFF, 248 + TAA_MITIGATION_AUTO, 249 + TAA_MITIGATION_UCODE_NEEDED, 250 + TAA_MITIGATION_VERW, 251 + TAA_MITIGATION_TSX_DISABLED, 252 + }; 253 + 254 + /* Default mitigation for TAA-affected CPUs */ 255 + static enum taa_mitigations taa_mitigation __ro_after_init = 256 + IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF; 257 + 258 + enum mmio_mitigations { 259 + MMIO_MITIGATION_OFF, 260 + MMIO_MITIGATION_AUTO, 261 + MMIO_MITIGATION_UCODE_NEEDED, 262 + MMIO_MITIGATION_VERW, 263 + }; 264 + 265 + /* Default mitigation for Processor MMIO Stale Data vulnerabilities */ 266 + static enum mmio_mitigations mmio_mitigation __ro_after_init = 267 + IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? 
MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF; 268 + 269 + enum rfds_mitigations { 270 + RFDS_MITIGATION_OFF, 271 + RFDS_MITIGATION_AUTO, 272 + RFDS_MITIGATION_VERW, 273 + RFDS_MITIGATION_UCODE_NEEDED, 274 + }; 275 + 276 + /* Default mitigation for Register File Data Sampling */ 277 + static enum rfds_mitigations rfds_mitigation __ro_after_init = 278 + IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF; 279 + 250 280 static void __init mds_select_mitigation(void) 251 281 { 252 282 if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) { 253 283 mds_mitigation = MDS_MITIGATION_OFF; 254 284 return; 255 285 } 286 + 287 + if (mds_mitigation == MDS_MITIGATION_AUTO) 288 + mds_mitigation = MDS_MITIGATION_FULL; 256 289 257 290 if (mds_mitigation == MDS_MITIGATION_FULL) { 258 291 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) ··· 327 286 #undef pr_fmt 328 287 #define pr_fmt(fmt) "TAA: " fmt 329 288 330 - enum taa_mitigations { 331 - TAA_MITIGATION_OFF, 332 - TAA_MITIGATION_UCODE_NEEDED, 333 - TAA_MITIGATION_VERW, 334 - TAA_MITIGATION_TSX_DISABLED, 335 - }; 336 - 337 - /* Default mitigation for TAA-affected CPUs */ 338 - static enum taa_mitigations taa_mitigation __ro_after_init = 339 - IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_VERW : TAA_MITIGATION_OFF; 340 289 static bool taa_nosmt __ro_after_init; 341 290 342 291 static const char * const taa_strings[] = { ··· 417 386 #undef pr_fmt 418 387 #define pr_fmt(fmt) "MMIO Stale Data: " fmt 419 388 420 - enum mmio_mitigations { 421 - MMIO_MITIGATION_OFF, 422 - MMIO_MITIGATION_UCODE_NEEDED, 423 - MMIO_MITIGATION_VERW, 424 - }; 425 - 426 - /* Default mitigation for Processor MMIO Stale Data vulnerabilities */ 427 - static enum mmio_mitigations mmio_mitigation __ro_after_init = 428 - IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? 
MMIO_MITIGATION_VERW : MMIO_MITIGATION_OFF; 429 389 static bool mmio_nosmt __ro_after_init = false; 430 390 431 391 static const char * const mmio_strings[] = { ··· 505 483 #undef pr_fmt 506 484 #define pr_fmt(fmt) "Register File Data Sampling: " fmt 507 485 508 - enum rfds_mitigations { 509 - RFDS_MITIGATION_OFF, 510 - RFDS_MITIGATION_VERW, 511 - RFDS_MITIGATION_UCODE_NEEDED, 512 - }; 513 - 514 - /* Default mitigation for Register File Data Sampling */ 515 - static enum rfds_mitigations rfds_mitigation __ro_after_init = 516 - IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF; 517 - 518 486 static const char * const rfds_strings[] = { 519 487 [RFDS_MITIGATION_OFF] = "Vulnerable", 520 488 [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File", ··· 519 507 } 520 508 if (rfds_mitigation == RFDS_MITIGATION_OFF) 521 509 return; 510 + 511 + if (rfds_mitigation == RFDS_MITIGATION_AUTO) 512 + rfds_mitigation = RFDS_MITIGATION_VERW; 522 513 523 514 if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR) 524 515 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); ··· 1308 1293 static enum spectre_v2_user_cmd __init 1309 1294 spectre_v2_parse_user_cmdline(void) 1310 1295 { 1296 + enum spectre_v2_user_cmd mode; 1311 1297 char arg[20]; 1312 1298 int ret, i; 1299 + 1300 + mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? 1301 + SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE; 1313 1302 1314 1303 switch (spectre_v2_cmd) { 1315 1304 case SPECTRE_V2_CMD_NONE: ··· 1327 1308 ret = cmdline_find_option(boot_command_line, "spectre_v2_user", 1328 1309 arg, sizeof(arg)); 1329 1310 if (ret < 0) 1330 - return SPECTRE_V2_USER_CMD_AUTO; 1311 + return mode; 1331 1312 1332 1313 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { 1333 1314 if (match_option(arg, ret, v2_user_options[i].option)) { ··· 1337 1318 } 1338 1319 } 1339 1320 1340 - pr_err("Unknown user space protection option (%s). 
Switching to AUTO select\n", arg); 1341 - return SPECTRE_V2_USER_CMD_AUTO; 1321 + pr_err("Unknown user space protection option (%s). Switching to default\n", arg); 1322 + return mode; 1342 1323 } 1343 1324 1344 1325 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) ··· 1350 1331 spectre_v2_user_select_mitigation(void) 1351 1332 { 1352 1333 enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; 1353 - bool smt_possible = IS_ENABLED(CONFIG_SMP); 1354 1334 enum spectre_v2_user_cmd cmd; 1355 1335 1356 1336 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) 1357 1337 return; 1358 - 1359 - if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || 1360 - cpu_smt_control == CPU_SMT_NOT_SUPPORTED) 1361 - smt_possible = false; 1362 1338 1363 1339 cmd = spectre_v2_parse_user_cmdline(); 1364 1340 switch (cmd) { ··· 1378 1364 1379 1365 /* Initialize Indirect Branch Prediction Barrier */ 1380 1366 if (boot_cpu_has(X86_FEATURE_IBPB)) { 1381 - setup_force_cpu_cap(X86_FEATURE_USE_IBPB); 1367 + static_branch_enable(&switch_vcpu_ibpb); 1382 1368 1383 1369 spectre_v2_user_ibpb = mode; 1384 1370 switch (cmd) { ··· 1415 1401 * so allow for STIBP to be selected in those cases. 
1416 1402 */ 1417 1403 if (!boot_cpu_has(X86_FEATURE_STIBP) || 1418 - !smt_possible || 1404 + !cpu_smt_possible() || 1419 1405 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 1420 1406 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) 1421 1407 return; ··· 1987 1973 1988 1974 switch (mds_mitigation) { 1989 1975 case MDS_MITIGATION_FULL: 1976 + case MDS_MITIGATION_AUTO: 1990 1977 case MDS_MITIGATION_VMWERV: 1991 1978 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) 1992 1979 pr_warn_once(MDS_MSG_SMT); ··· 1999 1984 2000 1985 switch (taa_mitigation) { 2001 1986 case TAA_MITIGATION_VERW: 1987 + case TAA_MITIGATION_AUTO: 2002 1988 case TAA_MITIGATION_UCODE_NEEDED: 2003 1989 if (sched_smt_active()) 2004 1990 pr_warn_once(TAA_MSG_SMT); ··· 2011 1995 2012 1996 switch (mmio_mitigation) { 2013 1997 case MMIO_MITIGATION_VERW: 1998 + case MMIO_MITIGATION_AUTO: 2014 1999 case MMIO_MITIGATION_UCODE_NEEDED: 2015 2000 if (sched_smt_active()) 2016 2001 pr_warn_once(MMIO_MSG_SMT); ··· 2539 2522 SRSO_MITIGATION_SAFE_RET, 2540 2523 SRSO_MITIGATION_IBPB, 2541 2524 SRSO_MITIGATION_IBPB_ON_VMEXIT, 2525 + SRSO_MITIGATION_BP_SPEC_REDUCE, 2542 2526 }; 2543 2527 2544 2528 enum srso_mitigation_cmd { ··· 2557 2539 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET", 2558 2540 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET", 2559 2541 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", 2560 - [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only" 2542 + [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only", 2543 + [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation" 2561 2544 }; 2562 2545 2563 2546 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; ··· 2597 2578 srso_cmd == SRSO_CMD_OFF) { 2598 2579 if (boot_cpu_has(X86_FEATURE_SBPB)) 2599 2580 x86_pred_cmd = PRED_CMD_SBPB; 2600 - return; 2581 + goto out; 2601 2582 } 2602 2583 2603 2584 if (has_microcode) { ··· 2609 2590 */ 2610 2591 if 
(boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { 2611 2592 setup_force_cpu_cap(X86_FEATURE_SRSO_NO); 2612 - return; 2593 + goto out; 2613 2594 } 2614 2595 2615 2596 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { ··· 2689 2670 2690 2671 ibpb_on_vmexit: 2691 2672 case SRSO_CMD_IBPB_ON_VMEXIT: 2673 + if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) { 2674 + pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n"); 2675 + srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE; 2676 + break; 2677 + } 2678 + 2692 2679 if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { 2693 2680 if (has_microcode) { 2694 2681 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); ··· 2716 2691 } 2717 2692 2718 2693 out: 2719 - pr_info("%s\n", srso_strings[srso_mitigation]); 2694 + /* 2695 + * Clear the feature flag if this mitigation is not selected as that 2696 + * feature flag controls the BpSpecReduce MSR bit toggling in KVM. 2697 + */ 2698 + if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE) 2699 + setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE); 2700 + 2701 + if (srso_mitigation != SRSO_MITIGATION_NONE) 2702 + pr_info("%s\n", srso_strings[srso_mitigation]); 2720 2703 } 2721 2704 2722 2705 #undef pr_fmt
+3 -1
arch/x86/kernel/cpu/common.c
··· 1332 1332 1333 1333 setup_force_cpu_bug(X86_BUG_SPECTRE_V1); 1334 1334 1335 - if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2)) 1335 + if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2)) { 1336 1336 setup_force_cpu_bug(X86_BUG_SPECTRE_V2); 1337 + setup_force_cpu_bug(X86_BUG_SPECTRE_V2_USER); 1338 + } 1337 1339 1338 1340 if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) && 1339 1341 !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
+8 -1
arch/x86/kvm/svm/svm.c
··· 607 607 kvm_cpu_svm_disable(); 608 608 609 609 amd_pmu_disable_virt(); 610 + 611 + if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) 612 + msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT); 610 613 } 611 614 612 615 static int svm_enable_virtualization_cpu(void) ··· 686 683 687 684 rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi); 688 685 } 686 + 687 + if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) 688 + msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT); 689 689 690 690 return 0; 691 691 } ··· 1565 1559 if (sd->current_vmcb != svm->vmcb) { 1566 1560 sd->current_vmcb = svm->vmcb; 1567 1561 1568 - if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT)) 1562 + if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT) && 1563 + static_branch_likely(&switch_vcpu_ibpb)) 1569 1564 indirect_branch_prediction_barrier(); 1570 1565 } 1571 1566 if (kvm_vcpu_apicv_active(vcpu))
+2 -1
arch/x86/kvm/vmx/vmx.c
··· 1477 1477 * performs IBPB on nested VM-Exit (a single nested transition 1478 1478 * may switch the active VMCS multiple times). 1479 1479 */ 1480 - if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)) 1480 + if (static_branch_likely(&switch_vcpu_ibpb) && 1481 + (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))) 1481 1482 indirect_branch_prediction_barrier(); 1482 1483 } 1483 1484
+2
arch/x86/lib/msr.c
··· 103 103 { 104 104 return __flip_bit(msr, bit, true); 105 105 } 106 + EXPORT_SYMBOL_GPL(msr_set_bit); 106 107 107 108 /** 108 109 * msr_clear_bit - Clear @bit in a MSR @msr. ··· 119 118 { 120 119 return __flip_bit(msr, bit, false); 121 120 } 121 + EXPORT_SYMBOL_GPL(msr_clear_bit); 122 122 123 123 #ifdef CONFIG_TRACEPOINTS 124 124 void do_trace_write_msr(unsigned int msr, u64 val, int failed)
+1 -2
arch/x86/mm/tlb.c
··· 725 725 * different context than the user space task which ran 726 726 * last on this CPU. 727 727 */ 728 - if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) != 729 - (unsigned long)next->mm) 728 + if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) != (unsigned long)next->mm) 730 729 indirect_branch_prediction_barrier(); 731 730 } 732 731
-1
tools/arch/x86/include/asm/cpufeatures.h
··· 202 202 #define X86_FEATURE_MBA ( 7*32+18) /* "mba" Memory Bandwidth Allocation */ 203 203 #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ 204 204 #define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* "perfmon_v2" AMD Performance Monitoring Version 2 */ 205 - #define X86_FEATURE_USE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled */ 206 205 #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* Use IBRS during runtime firmware calls */ 207 206 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */ 208 207 #define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */