Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/msr: Replace wrmsr(msr, low, 0) with wrmsrq(msr, low)

The third argument in wrmsr(msr, low, 0) is unnecessary. Instead, use
wrmsrq(msr, low), which automatically sets the higher 32 bits of the
MSR value to 0.

Signed-off-by: Xin Li (Intel) <xin@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Uros Bizjak <ubizjak@gmail.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://lore.kernel.org/r/20250427092027.1598740-15-xin@zytor.com

authored by Xin Li (Intel) and committed by Ingo Molnar
444b46a1 0c2678ef

+16 -16
+3 -3
arch/x86/hyperv/hv_apic.c
··· 76 76 { 77 77 switch (reg) { 78 78 case APIC_EOI: 79 - wrmsr(HV_X64_MSR_EOI, val, 0); 79 + wrmsrq(HV_X64_MSR_EOI, val); 80 80 break; 81 81 case APIC_TASKPRI: 82 - wrmsr(HV_X64_MSR_TPR, val, 0); 82 + wrmsrq(HV_X64_MSR_TPR, val); 83 83 break; 84 84 default: 85 85 native_apic_mem_write(reg, val); ··· 93 93 if (hvp && (xchg(&hvp->apic_assist, 0) & 0x1)) 94 94 return; 95 95 96 - wrmsr(HV_X64_MSR_EOI, APIC_EOI_ACK, 0); 96 + wrmsrq(HV_X64_MSR_EOI, APIC_EOI_ACK); 97 97 } 98 98 99 99 static bool cpu_is_self(int cpu)
+1 -1
arch/x86/include/asm/apic.h
··· 209 209 reg == APIC_LVR) 210 210 return; 211 211 212 - wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0); 212 + wrmsrq(APIC_BASE_MSR + (reg >> 4), v); 213 213 } 214 214 215 215 static inline void native_apic_msr_eoi(void)
+1 -1
arch/x86/include/asm/switch_to.h
··· 61 61 return; 62 62 63 63 this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs); 64 - wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); 64 + wrmsrq(MSR_IA32_SYSENTER_CS, thread->sysenter_cs); 65 65 } 66 66 #endif 67 67
+1 -1
arch/x86/kernel/cpu/amd.c
··· 1207 1207 if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask) 1208 1208 return; 1209 1209 1210 - wrmsr(amd_msr_dr_addr_masks[dr], mask, 0); 1210 + wrmsrq(amd_msr_dr_addr_masks[dr], mask); 1211 1211 per_cpu(amd_dr_addr_mask, cpu)[dr] = mask; 1212 1212 } 1213 1213
+4 -4
arch/x86/kernel/cpu/common.c
··· 1982 1982 */ 1983 1983 1984 1984 tss->x86_tss.ss1 = __KERNEL_CS; 1985 - wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); 1986 - wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); 1987 - wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); 1985 + wrmsrq(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1); 1986 + wrmsrq(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1)); 1987 + wrmsrq(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32); 1988 1988 1989 1989 put_cpu(); 1990 1990 } ··· 2198 2198 struct desc_struct d = { }; 2199 2199 2200 2200 if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID)) 2201 - wrmsr(MSR_TSC_AUX, cpudata, 0); 2201 + wrmsrq(MSR_TSC_AUX, cpudata); 2202 2202 2203 2203 /* Store CPU and node number in limit. */ 2204 2204 d.limit0 = cpudata;
+2 -2
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
··· 905 905 * Disable hardware prefetchers. 906 906 */ 907 907 rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); 908 - wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); 908 + wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits); 909 909 mem_r = READ_ONCE(plr->kmem); 910 910 /* 911 911 * Dummy execute of the time measurement to load the needed ··· 1001 1001 * Disable hardware prefetchers. 1002 1002 */ 1003 1003 rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); 1004 - wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); 1004 + wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits); 1005 1005 1006 1006 /* Initialize rest of local variables */ 1007 1007 /*
+1 -1
arch/x86/kernel/cpu/resctrl/rdtgroup.c
··· 1708 1708 pr_warn_once("Invalid event id %d\n", config_info->evtid); 1709 1709 return; 1710 1710 } 1711 - wrmsr(MSR_IA32_EVT_CFG_BASE + index, config_info->mon_config, 0); 1711 + wrmsrq(MSR_IA32_EVT_CFG_BASE + index, config_info->mon_config); 1712 1712 } 1713 1713 1714 1714 static void mbm_config_write_domain(struct rdt_resource *r,
+2 -2
arch/x86/kernel/cpu/umwait.c
··· 33 33 static void umwait_update_control_msr(void * unused) 34 34 { 35 35 lockdep_assert_irqs_disabled(); 36 - wrmsr(MSR_IA32_UMWAIT_CONTROL, READ_ONCE(umwait_control_cached), 0); 36 + wrmsrq(MSR_IA32_UMWAIT_CONTROL, READ_ONCE(umwait_control_cached)); 37 37 } 38 38 39 39 /* ··· 71 71 * the original control MSR value in umwait_init(). So there 72 72 * is no race condition here. 73 73 */ 74 - wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0); 74 + wrmsrq(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached); 75 75 76 76 return 0; 77 77 }
+1 -1
arch/x86/kernel/kvm.c
··· 400 400 if (!has_steal_clock) 401 401 return; 402 402 403 - wrmsr(MSR_KVM_STEAL_TIME, 0, 0); 403 + wrmsrq(MSR_KVM_STEAL_TIME, 0); 404 404 } 405 405 406 406 static u64 kvm_steal_clock(int cpu)