@@ -157,9 +157,7 @@
 combination with a microcode update. The microcode clears the affected CPU
 buffers when the VERW instruction is executed.
 
-Kernel reuses the MDS function to invoke the buffer clearing:
-
-   mds_clear_cpu_buffers()
+Kernel does the buffer clearing with x86_clear_cpu_buffers().
 
 On MDS affected CPUs, the kernel already invokes CPU buffer clear on
 kernel/userspace, hypervisor/guest and C-state (idle) transitions. No
+4-4
Documentation/arch/x86/mds.rst
@@ -93,7 +93,7 @@
 
 The kernel provides a function to invoke the buffer clearing:
 
-   mds_clear_cpu_buffers()
+   x86_clear_cpu_buffers()
 
 Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
 Other than CFLAGS.ZF, this macro doesn't clobber any registers.
@@ -185,9 +185,9 @@
   idle clearing would be a window dressing exercise and is therefore not
   activated.
 
-  The invocation is controlled by the static key mds_idle_clear which is
-  switched depending on the chosen mitigation mode and the SMT state of
-  the system.
+  The invocation is controlled by the static key cpu_buf_idle_clear which is
+  switched depending on the chosen mitigation mode and the SMT state of the
+  system.
 
   The buffer clear is only invoked before entering the C-State to prevent
   that stale data from the idling CPU from spilling to the Hyper-Thread
+4-4
arch/x86/entry/entry.S
@@ -36,20 +36,20 @@
 
 /*
  * Define the VERW operand that is disguised as entry code so that
- * it can be referenced with KPTI enabled. This ensure VERW can be
+ * it can be referenced with KPTI enabled. This ensures VERW can be
  * used late in exit-to-user path after page tables are switched.
  */
 .pushsection .entry.text, "ax"
 
 .align L1_CACHE_BYTES, 0xcc
-SYM_CODE_START_NOALIGN(mds_verw_sel)
+SYM_CODE_START_NOALIGN(x86_verw_sel)
 	UNWIND_HINT_UNDEFINED
 	ANNOTATE_NOENDBR
 	.word __KERNEL_DS
 .align L1_CACHE_BYTES, 0xcc
-SYM_CODE_END(mds_verw_sel);
+SYM_CODE_END(x86_verw_sel);
 /* For KVM */
-EXPORT_SYMBOL_GPL(mds_verw_sel);
+EXPORT_SYMBOL_GPL(x86_verw_sel);
 
 .popsection
arch/x86/include/asm/mwait.h  [file header lost in extraction — inferred from the __mwait()/__sti_mwait() helpers; verify against the original patch]
@@ -43,7 +43,7 @@
 
 static __always_inline void __mwait(u32 eax, u32 ecx)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
 
 	/*
 	 * Use the instruction mnemonic with implicit operands, as the LLVM
@@ -98,7 +98,7 @@
  */
 static __always_inline void __sti_mwait(u32 eax, u32 ecx)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
 
 	asm volatile("sti; mwait" :: "a" (eax), "c" (ecx));
 }
+15-14
arch/x86/include/asm/nospec-branch.h
@@ -302,22 +302,22 @@
 .endm
 
 /*
- * Macro to execute VERW instruction that mitigate transient data sampling
- * attacks such as MDS. On affected systems a microcode update overloaded VERW
- * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
- *
+ * Macro to execute VERW insns that mitigate transient data sampling
+ * attacks such as MDS or TSA. On affected systems a microcode update
+ * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
+ * CFLAGS.ZF.
  * Note: Only the memory operand variant of VERW clears the CPU buffers.
  */
 .macro CLEAR_CPU_BUFFERS
 #ifdef CONFIG_X86_64
-	ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
+	ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
 #else
 	/*
 	 * In 32bit mode, the memory operand must be a %cs reference. The data
 	 * segments may not be usable (vm86 mode), and the stack segment may not
 	 * be flat (ESPFIX32).
 	 */
-	ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
+	ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
 #endif
 .endm
@@ -567,24 +567,24 @@
 
 DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
 
-DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
 
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
 DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
 
-extern u16 mds_verw_sel;
+extern u16 x86_verw_sel;
 
 #include <asm/segment.h>
 
 /**
- * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
+ * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
  *
  * This uses the otherwise unused and obsolete VERW instruction in
  * combination with microcode which triggers a CPU buffer flush when the
  * instruction is executed.
  */
-static __always_inline void mds_clear_cpu_buffers(void)
+static __always_inline void x86_clear_cpu_buffers(void)
 {
 	static const u16 ds = __KERNEL_DS;
@@ -601,14 +601,15 @@
 }
 
 /**
- * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
+ * vulnerability
  *
  * Clear CPU buffers if the corresponding static key is enabled
  */
-static __always_inline void mds_idle_clear_cpu_buffers(void)
+static __always_inline void x86_idle_clear_cpu_buffers(void)
 {
-	if (static_branch_likely(&mds_idle_clear))
-		mds_clear_cpu_buffers();
+	if (static_branch_likely(&cpu_buf_idle_clear))
+		x86_clear_cpu_buffers();
 }
 
 #endif /* __ASSEMBLER__ */
+6-6
arch/x86/kernel/cpu/bugs.c
@@ -169,9 +169,9 @@
 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
 EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
 
-/* Control MDS CPU buffer clear before idling (halt, mwait) */
-DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
-EXPORT_SYMBOL_GPL(mds_idle_clear);
+/* Control CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
+EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
 
 /*
  * Controls whether l1d flush based mitigations are enabled,
@@ -637,7 +637,7 @@
 	 * is required irrespective of SMT state.
 	 */
 	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
-		static_branch_enable(&mds_idle_clear);
+		static_branch_enable(&cpu_buf_idle_clear);
 
 	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
 		cpu_smt_disable(false);
@@ -2249,9 +2249,9 @@
 		return;
 
 	if (sched_smt_active()) {
-		static_branch_enable(&mds_idle_clear);
+		static_branch_enable(&cpu_buf_idle_clear);
 	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
 		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
-		static_branch_disable(&mds_idle_clear);
+		static_branch_disable(&cpu_buf_idle_clear);
 	}
 }
+1-1
arch/x86/kvm/vmx/vmx.c
@@ -7291,7 +7291,7 @@
 		vmx_l1d_flush(vcpu);
 	else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
 		 kvm_arch_has_assigned_device(vcpu->kvm))
-		mds_clear_cpu_buffers();
+		x86_clear_cpu_buffers();
 
 	vmx_disable_fb_clear(vmx);