
x86/msr: Rename 'wrmsrl_safe()' to 'wrmsrq_safe()'

Suggested-by: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Xin Li <xin@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>

21 files changed, +45 -45
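The rename itself is mechanical: every wrmsrl_safe() call site becomes wrmsrq_safe() with identical arguments and return-value handling, matching the already-renamed rdmsrq_safe() seen in the surrounding context (the 'q' suffix stands for quadword, replacing the ambiguous 'l' for 'long'). A minimal before/after sketch of a call site, with an illustrative MSR constant and error handling not taken from this patch:

	u64 val = 0;

	/* old spelling, removed by this patch: */
	if (wrmsrl_safe(MSR_IA32_TSC_AUX, val))
		pr_warn("MSR write faulted\n");

	/* new spelling: */
	if (wrmsrq_safe(MSR_IA32_TSC_AUX, val))
		pr_warn("MSR write faulted\n");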
+1 -1
arch/x86/events/core.c
@@ -317,7 +317,7 @@
 	if (rdmsrq_safe(reg, &val))
 		goto msr_fail;
 	val ^= 0xffffUL;
-	ret = wrmsrl_safe(reg, val);
+	ret = wrmsrq_safe(reg, val);
 	ret |= rdmsrq_safe(reg, &val_new);
 	if (ret || val != val_new)
 		goto msr_fail;
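For context, this hunk is part of perf's MSR sanity probe: read the counter MSR, flip a bit pattern, write it back with the faulting-safe variant, and read it again to confirm the write stuck. A condensed sketch of that probe logic, using a hypothetical helper name (probe_counter_msr() is not a function in this patch):

	static bool probe_counter_msr(u32 reg)
	{
		u64 val, val_new;
		int ret;

		/* A non-zero return from the _safe variants means the access faulted. */
		if (rdmsrq_safe(reg, &val))
			return false;
		val ^= 0xffffUL;		/* flip the low 16 bits */
		ret = wrmsrq_safe(reg, val);
		ret |= rdmsrq_safe(reg, &val_new);
		return !ret && val == val_new;	/* the written value must stick */
	}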
+4 -4
arch/x86/events/intel/core.c
@@ -2976,13 +2976,13 @@
 	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
 
 	for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) {
-		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
-		wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
+		wrmsrq_safe(x86_pmu_config_addr(idx), 0ull);
+		wrmsrq_safe(x86_pmu_event_addr(idx), 0ull);
 	}
 	for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
 		if (fixed_counter_disabled(idx, cpuc->pmu))
 			continue;
-		wrmsrl_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
+		wrmsrq_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
 	}
 
 	if (ds)
@@ -5621,7 +5621,7 @@
 	if (is_lbr_from(msr))
 		val_tmp = lbr_from_signext_quirk_wr(val_tmp);
 
-	if (wrmsrl_safe(msr, val_tmp) ||
+	if (wrmsrq_safe(msr, val_tmp) ||
 	    rdmsrq_safe(msr, &val_new))
 		return false;
 
+2 -2
arch/x86/events/intel/knc.c
@@ -182,7 +182,7 @@
 	val = hwc->config;
 	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 
-	(void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
+	(void)wrmsrq_safe(hwc->config_base + hwc->idx, val);
 }
 
 static void knc_pmu_enable_event(struct perf_event *event)
@@ -193,7 +193,7 @@
 	val = hwc->config;
 	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-	(void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
+	(void)wrmsrq_safe(hwc->config_base + hwc->idx, val);
 }
 
 static inline u64 knc_pmu_get_status(void)
+1 -1
arch/x86/events/intel/lbr.c
@@ -1602,7 +1602,7 @@
 		goto clear_arch_lbr;
 
 	/* Apply the max depth of Arch LBR */
-	if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
+	if (wrmsrq_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
 		goto clear_arch_lbr;
 
 	x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;
+8 -8
arch/x86/events/intel/p4.c
@@ -897,8 +897,8 @@
 	 * So at moment let leave metrics turned on forever -- it's
 	 * ok for now but need to be revisited!
 	 *
-	 * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, 0);
-	 * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
+	 * (void)wrmsrq_safe(MSR_IA32_PEBS_ENABLE, 0);
+	 * (void)wrmsrq_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
 	 */
 }
 
@@ -911,7 +911,7 @@
 	 * state we need to clear P4_CCCR_OVF, otherwise interrupt get
 	 * asserted again and again
 	 */
-	(void)wrmsrl_safe(hwc->config_base,
+	(void)wrmsrq_safe(hwc->config_base,
 		p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
 }
 
@@ -944,8 +944,8 @@
 
 	bind = &p4_pebs_bind_map[idx];
 
-	(void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
-	(void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
+	(void)wrmsrq_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
+	(void)wrmsrq_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
 }
 
 static void __p4_pmu_enable_event(struct perf_event *event)
@@ -979,8 +979,8 @@
 	 */
 	p4_pmu_enable_pebs(hwc->config);
 
-	(void)wrmsrl_safe(escr_addr, escr_conf);
-	(void)wrmsrl_safe(hwc->config_base,
+	(void)wrmsrq_safe(escr_addr, escr_conf);
+	(void)wrmsrq_safe(hwc->config_base,
 			(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
 }
 
@@ -1398,7 +1398,7 @@
 	 */
 	for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
 		reg = x86_pmu_config_addr(i);
-		wrmsrl_safe(reg, 0ULL);
+		wrmsrq_safe(reg, 0ULL);
 	}
 
 	return 0;
+2 -2
arch/x86/events/intel/p6.c
@@ -163,7 +163,7 @@
 	struct hw_perf_event *hwc = &event->hw;
 	u64 val = P6_NOP_EVENT;
 
-	(void)wrmsrl_safe(hwc->config_base, val);
+	(void)wrmsrq_safe(hwc->config_base, val);
 }
 
 static void p6_pmu_enable_event(struct perf_event *event)
@@ -180,7 +180,7 @@
 	 * to actually enable the events.
 	 */
 
-	(void)wrmsrl_safe(hwc->config_base, val);
+	(void)wrmsrq_safe(hwc->config_base, val);
 }
 
 PMU_FORMAT_ATTR(event, "config:0-7" );
+2 -2
arch/x86/include/asm/msr.h
@@ -316,7 +316,7 @@
 /*
  * 64-bit version of wrmsr_safe():
  */
-static inline int wrmsrl_safe(u32 msr, u64 val)
+static inline int wrmsrq_safe(u32 msr, u64 val)
 {
 	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
 }
@@ -385,7 +385,7 @@
 }
 static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 {
-	return wrmsrl_safe(msr_no, q);
+	return wrmsrq_safe(msr_no, q);
 }
 static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
 {
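The header hunk also shows why the 'q' (quadword) suffix fits: wrmsrq_safe() takes a single u64 and splits it into the low/high 32-bit halves that the underlying wrmsr_safe() (and ultimately the EDX:EAX register pair) expects. A standalone illustration of that split, compilable as plain userspace C with a made-up value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t val = 0x1122334455667788ULL;	/* example 64-bit MSR value */
		uint32_t lo = (uint32_t)val;		/* low half  -> EAX */
		uint32_t hi = (uint32_t)(val >> 32);	/* high half -> EDX */

		/* wrmsrq_safe(msr, val) forwards as wrmsr_safe(msr, lo, hi) */
		printf("lo=0x%08x hi=0x%08x\n", lo, hi);
		return 0;
	}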
+3 -3
arch/x86/kernel/cpu/amd.c
@@ -612,7 +612,7 @@
 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
 		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
 			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
-		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
+		else if (c->x86 >= 0x19 && !wrmsrq_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
 			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
 			setup_force_cpu_cap(X86_FEATURE_SBPB);
 		}
@@ -790,7 +790,7 @@
 	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
 		if (!rdmsrq_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
 			value |= 0x1E;
-			wrmsrl_safe(MSR_F15H_IC_CFG, value);
+			wrmsrq_safe(MSR_F15H_IC_CFG, value);
 		}
 	}
 
@@ -840,7 +840,7 @@
 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
 		if (!rdmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
 			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
-			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
+			wrmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
 		}
 	}
 #endif
+1 -1
arch/x86/kernel/cpu/bus_lock.c
@@ -101,7 +101,7 @@
 		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
 	else
 		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
-	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
+	if (wrmsrq_safe(MSR_TEST_CTRL, ctrl))
 		return false;
 	rdmsrq(MSR_TEST_CTRL, tmp);
 	return ctrl == tmp;
+7 -7
arch/x86/kernel/cpu/common.c
@@ -158,7 +158,7 @@
 
 	/* If PPIN is disabled, try to enable */
 	if (!(val & 2UL)) {
-		wrmsrl_safe(info->msr_ppin_ctl, val | 2UL);
+		wrmsrq_safe(info->msr_ppin_ctl, val | 2UL);
 		rdmsrq_safe(info->msr_ppin_ctl, &val);
 	}
 
@@ -2114,15 +2114,15 @@
 		 * This does not cause SYSENTER to jump to the wrong location, because
 		 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
 		 */
-		wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-		wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
+		wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+		wrmsrq_safe(MSR_IA32_SYSENTER_ESP,
 			    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
-		wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
+		wrmsrq_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 	} else {
 		wrmsrl_cstar((unsigned long)entry_SYSCALL32_ignore);
-		wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
-		wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
-		wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
+		wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
+		wrmsrq_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+		wrmsrq_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
 	}
 
 	/*
+2 -2
arch/x86/kernel/cpu/mce/inject.c
@@ -747,9 +747,9 @@
 
 	toggle_hw_mce_inject(cpu, true);
 
-	wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status);
+	wrmsrq_safe(mca_msr_reg(bank, MCA_STATUS), status);
 	rdmsrq_safe(mca_msr_reg(bank, MCA_STATUS), &status);
-	wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), 0);
+	wrmsrq_safe(mca_msr_reg(bank, MCA_STATUS), 0);
 
 	if (!status) {
 		hw_injection_possible = false;
+1 -1
arch/x86/kernel/cpu/mce/intel.c
@@ -463,7 +463,7 @@
 		if (rdmsrq_safe(MSR_ERROR_CONTROL, &error_control))
 			return;
 		error_control |= 2;
-		wrmsrl_safe(MSR_ERROR_CONTROL, error_control);
+		wrmsrq_safe(MSR_ERROR_CONTROL, error_control);
 		break;
 	}
 }
+1 -1
arch/x86/kernel/cpu/resctrl/core.c
@@ -145,7 +145,7 @@
 	struct rdt_resource *r = &hw_res->r_resctrl;
 	u64 max_cbm = BIT_ULL_MASK(20) - 1, l3_cbm_0;
 
-	if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
+	if (wrmsrq_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
 		return;
 
 	rdmsrq(MSR_IA32_L3_CBM_BASE, l3_cbm_0);
+1 -1
arch/x86/kvm/svm/sev.c
@@ -3119,7 +3119,7 @@
 	 * back to WBINVD if this faults so as not to make any problems worse
 	 * by leaving stale encrypted data in the cache.
 	 */
-	if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
+	if (WARN_ON_ONCE(wrmsrq_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
 		goto do_wbinvd;
 
 	return;
+3 -3
arch/x86/kvm/x86.c
@@ -593,7 +593,7 @@
 	ret = rdmsrq_safe(msr, &val);
 	if (ret)
 		goto out;
-	ret = wrmsrl_safe(msr, val);
+	ret = wrmsrq_safe(msr, val);
 out:
 	preempt_enable();
 	return ret;
@@ -644,7 +644,7 @@
 	value = (value & mask) | (msrs->values[slot].host & ~mask);
 	if (value == msrs->values[slot].curr)
 		return 0;
-	err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
+	err = wrmsrq_safe(kvm_uret_msrs_list[slot], value);
 	if (err)
 		return 1;
 
@@ -13654,7 +13654,7 @@
 
 	if (rdmsrq_safe(MSR_IA32_SPEC_CTRL, &saved_value))
 		ret = 1;
-	else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
+	else if (wrmsrq_safe(MSR_IA32_SPEC_CTRL, value))
 		ret = 1;
 	else
 		wrmsrq(MSR_IA32_SPEC_CTRL, saved_value);
+1 -1
arch/x86/lib/msr.c
@@ -58,7 +58,7 @@
  */
 static int msr_write(u32 msr, struct msr *m)
 {
-	return wrmsrl_safe(msr, m->q);
+	return wrmsrq_safe(msr, m->q);
 }
 
 static inline int __flip_bit(u32 msr, u8 bit, bool set)
+1 -1
drivers/cpufreq/intel_pstate.c
@@ -1894,7 +1894,7 @@
 		return;
 
 ack_intr:
-	wrmsrl_safe(MSR_HWP_STATUS, 0);
+	wrmsrq_safe(MSR_HWP_STATUS, 0);
 	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 }
 
+1 -1
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -211,7 +211,7 @@
 		hash_for_each_possible(isst_hash, sst_cmd, hnode,
 				       punit_msr_white_list[i]) {
 			if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
-				wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
+				wrmsrq_safe(sst_cmd->cmd, sst_cmd->data);
 		}
 	}
 	mutex_unlock(&isst_hash_lock);
+1 -1
drivers/platform/x86/intel/turbo_max_3.c
@@ -41,7 +41,7 @@
 	value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
 	/* Set the busy bit to indicate OS is trying to issue command */
 	value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
-	ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
+	ret = wrmsrq_safe(MSR_OC_MAILBOX, value);
 	if (ret) {
 		pr_debug("cpu %d OC mailbox write failed\n", cpu);
 		return ret;
+1 -1
drivers/powercap/intel_rapl_msr.c
@@ -123,7 +123,7 @@
 	val &= ~ra->mask;
 	val |= ra->value;
 
-	ra->err = wrmsrl_safe(ra->reg.msr, val);
+	ra->err = wrmsrq_safe(ra->reg.msr, val);
 }
 
 static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
+1 -1
drivers/thermal/intel/therm_throt.c
@@ -643,7 +643,7 @@
 
 void __weak notify_hwp_interrupt(void)
 {
-	wrmsrl_safe(MSR_HWP_STATUS, 0);
+	wrmsrq_safe(MSR_HWP_STATUS, 0);
 }
 
 /* Thermal transition interrupt handler */