Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/bugs: Rename MDS machinery to something more generic

The renamed buffer-clearing machinery will be used by other x86 mitigations.

No functional changes.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>

+35 -36
+1 -3
Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
··· 157 157 combination with a microcode update. The microcode clears the affected CPU 158 158 buffers when the VERW instruction is executed. 159 159 160 - Kernel reuses the MDS function to invoke the buffer clearing: 161 - 162 - mds_clear_cpu_buffers() 160 + Kernel does the buffer clearing with x86_clear_cpu_buffers(). 163 161 164 162 On MDS affected CPUs, the kernel already invokes CPU buffer clear on 165 163 kernel/userspace, hypervisor/guest and C-state (idle) transitions. No
+4 -4
Documentation/arch/x86/mds.rst
··· 93 93 94 94 The kernel provides a function to invoke the buffer clearing: 95 95 96 - mds_clear_cpu_buffers() 96 + x86_clear_cpu_buffers() 97 97 98 98 Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path. 99 99 Other than CFLAGS.ZF, this macro doesn't clobber any registers. ··· 185 185 idle clearing would be a window dressing exercise and is therefore not 186 186 activated. 187 187 188 - The invocation is controlled by the static key mds_idle_clear which is 189 - switched depending on the chosen mitigation mode and the SMT state of 190 - the system. 188 + The invocation is controlled by the static key cpu_buf_idle_clear which is 189 + switched depending on the chosen mitigation mode and the SMT state of the 190 + system. 191 191 192 192 The buffer clear is only invoked before entering the C-State to prevent 193 193 that stale data from the idling CPU from spilling to the Hyper-Thread
+4 -4
arch/x86/entry/entry.S
··· 36 36 37 37 /* 38 38 * Define the VERW operand that is disguised as entry code so that 39 - * it can be referenced with KPTI enabled. This ensure VERW can be 39 + * it can be referenced with KPTI enabled. This ensures VERW can be 40 40 * used late in exit-to-user path after page tables are switched. 41 41 */ 42 42 .pushsection .entry.text, "ax" 43 43 44 44 .align L1_CACHE_BYTES, 0xcc 45 - SYM_CODE_START_NOALIGN(mds_verw_sel) 45 + SYM_CODE_START_NOALIGN(x86_verw_sel) 46 46 UNWIND_HINT_UNDEFINED 47 47 ANNOTATE_NOENDBR 48 48 .word __KERNEL_DS 49 49 .align L1_CACHE_BYTES, 0xcc 50 - SYM_CODE_END(mds_verw_sel); 50 + SYM_CODE_END(x86_verw_sel); 51 51 /* For KVM */ 52 - EXPORT_SYMBOL_GPL(mds_verw_sel); 52 + EXPORT_SYMBOL_GPL(x86_verw_sel); 53 53 54 54 .popsection 55 55
+2 -2
arch/x86/include/asm/irqflags.h
··· 44 44 45 45 static __always_inline void native_safe_halt(void) 46 46 { 47 - mds_idle_clear_cpu_buffers(); 47 + x86_idle_clear_cpu_buffers(); 48 48 asm volatile("sti; hlt": : :"memory"); 49 49 } 50 50 51 51 static __always_inline void native_halt(void) 52 52 { 53 - mds_idle_clear_cpu_buffers(); 53 + x86_idle_clear_cpu_buffers(); 54 54 asm volatile("hlt": : :"memory"); 55 55 } 56 56
+2 -2
arch/x86/include/asm/mwait.h
··· 43 43 44 44 static __always_inline void __mwait(u32 eax, u32 ecx) 45 45 { 46 - mds_idle_clear_cpu_buffers(); 46 + x86_idle_clear_cpu_buffers(); 47 47 48 48 /* 49 49 * Use the instruction mnemonic with implicit operands, as the LLVM ··· 98 98 */ 99 99 static __always_inline void __sti_mwait(u32 eax, u32 ecx) 100 100 { 101 - mds_idle_clear_cpu_buffers(); 101 + x86_idle_clear_cpu_buffers(); 102 102 103 103 asm volatile("sti; mwait" :: "a" (eax), "c" (ecx)); 104 104 }
+15 -14
arch/x86/include/asm/nospec-branch.h
··· 302 302 .endm 303 303 304 304 /* 305 - * Macro to execute VERW instruction that mitigate transient data sampling 306 - * attacks such as MDS. On affected systems a microcode update overloaded VERW 307 - * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF. 308 - * 305 + * Macro to execute VERW insns that mitigate transient data sampling 306 + * attacks such as MDS or TSA. On affected systems a microcode update 307 + * overloaded VERW insns to also clear the CPU buffers. VERW clobbers 308 + * CFLAGS.ZF. 309 309 * Note: Only the memory operand variant of VERW clears the CPU buffers. 310 310 */ 311 311 .macro CLEAR_CPU_BUFFERS 312 312 #ifdef CONFIG_X86_64 313 - ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF 313 + ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF 314 314 #else 315 315 /* 316 316 * In 32bit mode, the memory operand must be a %cs reference. The data 317 317 * segments may not be usable (vm86 mode), and the stack segment may not 318 318 * be flat (ESPFIX32). 
319 319 */ 320 - ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF 320 + ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF 321 321 #endif 322 322 .endm 323 323 ··· 567 567 568 568 DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb); 569 569 570 - DECLARE_STATIC_KEY_FALSE(mds_idle_clear); 570 + DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear); 571 571 572 572 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); 573 573 574 574 DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear); 575 575 576 - extern u16 mds_verw_sel; 576 + extern u16 x86_verw_sel; 577 577 578 578 #include <asm/segment.h> 579 579 580 580 /** 581 - * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability 581 + * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns 582 582 * 583 583 * This uses the otherwise unused and obsolete VERW instruction in 584 584 * combination with microcode which triggers a CPU buffer flush when the 585 585 * instruction is executed. 586 586 */ 587 - static __always_inline void mds_clear_cpu_buffers(void) 587 + static __always_inline void x86_clear_cpu_buffers(void) 588 588 { 589 589 static const u16 ds = __KERNEL_DS; 590 590 ··· 601 601 } 602 602 603 603 /** 604 - * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability 604 + * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS 605 + * vulnerability 605 606 * 606 607 * Clear CPU buffers if the corresponding static key is enabled 607 608 */ 608 - static __always_inline void mds_idle_clear_cpu_buffers(void) 609 + static __always_inline void x86_idle_clear_cpu_buffers(void) 609 610 { 610 - if (static_branch_likely(&mds_idle_clear)) 611 - mds_clear_cpu_buffers(); 611 + if (static_branch_likely(&cpu_buf_idle_clear)) 612 + x86_clear_cpu_buffers(); 612 613 } 613 614 614 615 #endif /* __ASSEMBLER__ */
+6 -6
arch/x86/kernel/cpu/bugs.c
··· 169 169 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb); 170 170 EXPORT_SYMBOL_GPL(switch_vcpu_ibpb); 171 171 172 - /* Control MDS CPU buffer clear before idling (halt, mwait) */ 173 - DEFINE_STATIC_KEY_FALSE(mds_idle_clear); 174 - EXPORT_SYMBOL_GPL(mds_idle_clear); 172 + /* Control CPU buffer clear before idling (halt, mwait) */ 173 + DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear); 174 + EXPORT_SYMBOL_GPL(cpu_buf_idle_clear); 175 175 176 176 /* 177 177 * Controls whether l1d flush based mitigations are enabled, ··· 637 637 * is required irrespective of SMT state. 638 638 */ 639 639 if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) 640 - static_branch_enable(&mds_idle_clear); 640 + static_branch_enable(&cpu_buf_idle_clear); 641 641 642 642 if (mmio_nosmt || cpu_mitigations_auto_nosmt()) 643 643 cpu_smt_disable(false); ··· 2249 2249 return; 2250 2250 2251 2251 if (sched_smt_active()) { 2252 - static_branch_enable(&mds_idle_clear); 2252 + static_branch_enable(&cpu_buf_idle_clear); 2253 2253 } else if (mmio_mitigation == MMIO_MITIGATION_OFF || 2254 2254 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { 2255 - static_branch_disable(&mds_idle_clear); 2255 + static_branch_disable(&cpu_buf_idle_clear); 2256 2256 } 2257 2257 } 2258 2258
+1 -1
arch/x86/kvm/vmx/vmx.c
··· 7291 7291 vmx_l1d_flush(vcpu); 7292 7292 else if (static_branch_unlikely(&cpu_buf_vm_clear) && 7293 7293 kvm_arch_has_assigned_device(vcpu->kvm)) 7294 - mds_clear_cpu_buffers(); 7294 + x86_clear_cpu_buffers(); 7295 7295 7296 7296 vmx_disable_fb_clear(vmx); 7297 7297