Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

x86/msr: Rename 'rdmsrl_safe()' to 'rdmsrq_safe()'

Suggested-by: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Xin Li <xin@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
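
The rename is purely mechanical: rdmsrq_safe() keeps the exact semantics of the old rdmsrl_safe(), reading a 64-bit MSR under exception protection so that an unimplemented MSR returns an error instead of faulting; the 'q' (quad word) suffix matches the u64 the function actually fills in. A minimal caller sketch (illustrative only, not part of this diff; MSR_IA32_APICBASE is borrowed from the apic.h hunk below):

        u64 val;

        /* returns 0 on success, non-zero if the RDMSR faulted */
        if (rdmsrq_safe(MSR_IA32_APICBASE, &val))
                pr_warn("APICBASE MSR not readable\n");
        else
                pr_info("APICBASE = 0x%llx\n", val);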

+80 -80
+1 -1
arch/x86/events/amd/power.c
···
 
         cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
 
-        if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
+        if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
                 pr_err("Failed to read max compute unit power accumulator MSR\n");
                 return -ENODEV;
         }
+4 -4
arch/x86/events/core.c
···
          */
         for_each_set_bit(i, cntr_mask, X86_PMC_IDX_MAX) {
                 reg = x86_pmu_config_addr(i);
-                ret = rdmsrl_safe(reg, &val);
+                ret = rdmsrq_safe(reg, &val);
                 if (ret)
                         goto msr_fail;
                 if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
···
 
         if (*(u64 *)fixed_cntr_mask) {
                 reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-                ret = rdmsrl_safe(reg, &val);
+                ret = rdmsrq_safe(reg, &val);
                 if (ret)
                         goto msr_fail;
                 for_each_set_bit(i, fixed_cntr_mask, X86_PMC_IDX_MAX) {
···
          * (qemu/kvm) that don't trap on the MSR access and always return 0s.
          */
         reg = x86_pmu_event_addr(reg_safe);
-        if (rdmsrl_safe(reg, &val))
+        if (rdmsrq_safe(reg, &val))
                 goto msr_fail;
         val ^= 0xffffUL;
         ret = wrmsrl_safe(reg, val);
-        ret |= rdmsrl_safe(reg, &val_new);
+        ret |= rdmsrq_safe(reg, &val_new);
         if (ret || val != val_new)
                 goto msr_fail;
 
+2 -2
arch/x86/events/intel/core.c
···
          * matches, this is needed to detect certain hardware emulators
          * (qemu/kvm) that don't trap on the MSR access and always return 0s.
          */
-        if (rdmsrl_safe(msr, &val_old))
+        if (rdmsrq_safe(msr, &val_old))
                 return false;
 
         /*
···
         val_tmp = lbr_from_signext_quirk_wr(val_tmp);
 
         if (wrmsrl_safe(msr, val_tmp) ||
-            rdmsrl_safe(msr, &val_new))
+            rdmsrq_safe(msr, &val_new))
                 return false;
 
         /*
+1 -1
arch/x86/events/probe.c
···
                 if (msr[bit].test && !msr[bit].test(bit, data))
                         continue;
                 /* Virt sucks; you cannot tell if a R/O MSR is present :/ */
-                if (rdmsrl_safe(msr[bit].msr, &val))
+                if (rdmsrq_safe(msr[bit].msr, &val))
                         continue;
 
                 mask = msr[bit].mask;
+1 -1
arch/x86/events/rapl.c
···
         int i;
 
         /* protect rdmsrq() to handle virtualization */
-        if (rdmsrl_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits))
+        if (rdmsrq_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits))
                 return -1;
         for (i = 0; i < NR_RAPL_PKG_DOMAINS; i++)
                 rapl_pkg_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
+1 -1
arch/x86/include/asm/apic.h
···
 {
         u64 msr;
 
-        if (rdmsrl_safe(MSR_IA32_APICBASE, &msr))
+        if (rdmsrq_safe(MSR_IA32_APICBASE, &msr))
                 return false;
         return msr & X2APIC_ENABLE;
 }
+2 -2
arch/x86/include/asm/msr.h
···
         __err;                                                  \
 })
 
-static inline int rdmsrl_safe(u32 msr, u64 *p)
+static inline int rdmsrq_safe(u32 msr, u64 *p)
 {
         int err;
 
···
 }
 static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
 {
-        return rdmsrl_safe(msr_no, q);
+        return rdmsrq_safe(msr_no, q);
 }
 static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 {
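Note that in the stubs above, rdmsrl_safe_on_cpu() keeps its old 'l' name and now simply forwards to the renamed rdmsrq_safe(); the *_on_cpu wrappers are left untouched by this patch, presumably to be renamed separately in the same series.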
+1 -1
arch/x86/include/asm/paravirt.h
···
         _err;                                                   \
 })
 
-static inline int rdmsrl_safe(unsigned msr, u64 *p)
+static inline int rdmsrq_safe(unsigned msr, u64 *p)
 {
         int err;
 
+1 -1
arch/x86/kernel/amd_nb.c
···
 
         /* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
         if (boot_cpu_data.x86 < 0x10 ||
-            rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
+            rdmsrq_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
                 return NULL;
 
         /* mmconfig is not enabled */
+3 -3
arch/x86/kernel/cpu/amd.c
···
                  * Try to cache the base value so further operations can
                  * avoid RMW. If that faults, do not enable SSBD.
                  */
-                if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+                if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
                         setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
                         setup_force_cpu_cap(X86_FEATURE_SSBD);
                         x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
···
          * Disable it on the affected CPUs.
          */
         if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
-                if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
+                if (!rdmsrq_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
                         value |= 0x1E;
                         wrmsrl_safe(MSR_F15H_IC_CFG, value);
                 }
···
          * suppresses non-branch predictions.
          */
         if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
-                if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
+                if (!rdmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
                         value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
                         wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
                 }
+10 -10
arch/x86/kernel/cpu/aperfmperf.c
···
         u64 misc_en;
         int err;
 
-        err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_en);
+        err = rdmsrq_safe(MSR_IA32_MISC_ENABLE, &misc_en);
         if (err)
                 return false;
 
···
 {
         int err;
 
-        err = rdmsrl_safe(MSR_ATOM_CORE_RATIOS, base_freq);
+        err = rdmsrq_safe(MSR_ATOM_CORE_RATIOS, base_freq);
         if (err)
                 return false;
 
-        err = rdmsrl_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
+        err = rdmsrq_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
         if (err)
                 return false;
 
···
         int err, i;
         u64 msr;
 
-        err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
+        err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
         if (err)
                 return false;
 
         *base_freq = (*base_freq >> 8) & 0xFF;      /* max P state */
 
-        err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
+        err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &msr);
         if (err)
                 return false;
 
···
         u32 group_size;
         int err, i;
 
-        err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
+        err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
         if (err)
                 return false;
 
         *base_freq = (*base_freq >> 8) & 0xFF;      /* max P state */
 
-        err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
+        err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
         if (err)
                 return false;
 
-        err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
+        err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
         if (err)
                 return false;
 
···
         u64 msr;
         int err;
 
-        err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
+        err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
         if (err)
                 return false;
 
-        err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
+        err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &msr);
         if (err)
                 return false;
 
+1 -1
arch/x86/kernel/cpu/bus_lock.c
···
 {
         u64 ctrl, tmp;
 
-        if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
+        if (rdmsrq_safe(MSR_TEST_CTRL, &ctrl))
                 return false;
         if (on)
                 ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+2 -2
arch/x86/kernel/cpu/common.c
···
          */
         info = (struct ppin_info *)id->driver_data;
 
-        if (rdmsrl_safe(info->msr_ppin_ctl, &val))
+        if (rdmsrq_safe(info->msr_ppin_ctl, &val))
                 goto clear_ppin;
 
         if ((val & 3UL) == 1UL) {
···
         /* If PPIN is disabled, try to enable */
         if (!(val & 2UL)) {
                 wrmsrl_safe(info->msr_ppin_ctl, val | 2UL);
-                rdmsrl_safe(info->msr_ppin_ctl, &val);
+                rdmsrq_safe(info->msr_ppin_ctl, &val);
         }
 
         /* Is the enable bit set? */
+1 -1
arch/x86/kernel/cpu/feat_ctl.c
···
         bool enable_vmx;
         u64 msr;
 
-        if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) {
+        if (rdmsrq_safe(MSR_IA32_FEAT_CTL, &msr)) {
                 clear_cpu_cap(c, X86_FEATURE_VMX);
                 clear_cpu_cap(c, X86_FEATURE_SGX);
                 return;
+1 -1
arch/x86/kernel/cpu/hygon.c
···
          * Try to cache the base value so further operations can
          * avoid RMW. If that faults, do not enable SSBD.
          */
-        if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+        if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
                 setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
                 setup_force_cpu_cap(X86_FEATURE_SSBD);
                 x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
+2 -2
arch/x86/kernel/cpu/intel.c
···
 {
         u64 msr;
 
-        if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
+        if (!rdmsrq_safe(MSR_PLATFORM_INFO, &msr)) {
                 if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
                         set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
         }
···
 {
         u64 msr;
 
-        if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
+        if (rdmsrq_safe(MSR_MISC_FEATURES_ENABLES, &msr))
                 return;
 
         /* Clear all MISC features */
+1 -1
arch/x86/kernel/cpu/mce/inject.c
···
         toggle_hw_mce_inject(cpu, true);
 
         wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status);
-        rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status);
+        rdmsrq_safe(mca_msr_reg(bank, MCA_STATUS), &status);
         wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), 0);
 
         if (!status) {
+1 -1
arch/x86/kernel/cpu/mce/intel.c
···
         case INTEL_SANDYBRIDGE_X:
         case INTEL_IVYBRIDGE_X:
         case INTEL_HASWELL_X:
-                if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control))
+                if (rdmsrq_safe(MSR_ERROR_CONTROL, &error_control))
                         return;
                 error_control |= 2;
                 wrmsrl_safe(MSR_ERROR_CONTROL, error_control);
+1 -1
arch/x86/kvm/vmx/sgx.c
···
          * MSRs exist but are read-only (locked and not writable).
          */
         if (!enable_sgx || boot_cpu_has(X86_FEATURE_SGX_LC) ||
-            rdmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) {
+            rdmsrq_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) {
                 sgx_pubkey_hash[0] = 0xa6053e051270b7acULL;
                 sgx_pubkey_hash[1] = 0x6cfbe8ba8b3b413dULL;
                 sgx_pubkey_hash[2] = 0xc4916d99f2b3735dULL;
+1 -1
arch/x86/kvm/vmx/vmx.c
···
 
 fault:
         WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
-                  rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
+                  rdmsrq_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
         cr4_clear_bits(X86_CR4_VMXE);
 
         return -EFAULT;
+6 -6
arch/x86/kvm/x86.c
···
         int ret;
 
         preempt_disable();
-        ret = rdmsrl_safe(msr, &val);
+        ret = rdmsrq_safe(msr, &val);
         if (ret)
                 goto out;
         ret = wrmsrl_safe(msr, val);
···
         int i;
 
         for (i = 0; i < kvm_nr_uret_msrs; ++i) {
-                rdmsrl_safe(kvm_uret_msrs_list[i], &value);
+                rdmsrq_safe(kvm_uret_msrs_list[i], &value);
                 msrs->values[i].host = value;
                 msrs->values[i].curr = value;
         }
···
                 *data = MSR_PLATFORM_INFO_CPUID_FAULT;
                 break;
         case MSR_IA32_UCODE_REV:
-                rdmsrl_safe(index, data);
+                rdmsrq_safe(index, data);
                 break;
         default:
                 return kvm_x86_call(get_feature_msr)(index, data);
···
          * with an exception. PAT[0] is set to WB on RESET and also by the
          * kernel, i.e. failure indicates a kernel bug or broken firmware.
          */
-        if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) ||
+        if (rdmsrq_safe(MSR_IA32_CR_PAT, &host_pat) ||
             (host_pat & GENMASK(2, 0)) != 6) {
                 pr_err("host PAT[0] is not WB\n");
                 return -EIO;
···
                 kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0;
         }
 
-        rdmsrl_safe(MSR_EFER, &kvm_host.efer);
+        rdmsrq_safe(MSR_EFER, &kvm_host.efer);
 
         if (boot_cpu_has(X86_FEATURE_XSAVES))
                 rdmsrq(MSR_IA32_XSS, kvm_host.xss);
···
 
         local_irq_save(flags);
 
-        if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
+        if (rdmsrq_safe(MSR_IA32_SPEC_CTRL, &saved_value))
                 ret = 1;
         else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
                 ret = 1;
+1 -1
arch/x86/lib/msr.c
···
         int err;
         u64 val;
 
-        err = rdmsrl_safe(msr, &val);
+        err = rdmsrq_safe(msr, &val);
         if (!err)
                 m->q = val;
 
+2 -2
arch/x86/power/cpu.c
···
         ctxt->cr2 = read_cr2();
         ctxt->cr3 = __read_cr3();
         ctxt->cr4 = __read_cr4();
-        ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
+        ctxt->misc_enable_saved = !rdmsrq_safe(MSR_IA32_MISC_ENABLE,
                                                &ctxt->misc_enable);
         msr_save_context(ctxt);
 }
···
                 u64 dummy;
 
                 msr_array[i].info.msr_no        = msr_id[j];
-                msr_array[i].valid              = !rdmsrl_safe(msr_id[j], &dummy);
+                msr_array[i].valid              = !rdmsrq_safe(msr_id[j], &dummy);
                 msr_array[i].info.reg.q         = 0;
         }
         saved_msrs->num = total_num;
+1 -1
drivers/acpi/acpi_extlog.c
···
         u64 cap;
         int rc;
 
-        if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
+        if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap) ||
             !(cap & MCG_ELOG_P) ||
             !extlog_get_l1addr())
                 return -ENODEV;
+1 -1
drivers/acpi/acpi_lpit.c
···
                 return 0;
         }
 
-        err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
+        err = rdmsrq_safe(residency_info_ffh.gaddr.address, counter);
         if (!err) {
                 u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
                                        residency_info_ffh.gaddr.bit_width - 1,
+2 -2
drivers/cpufreq/amd-pstate-ut.c
···
         if (get_shared_mem())
                 return 0;
 
-        ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
+        ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
         if (ret) {
-                pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
+                pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
                 return ret;
         }
 
+1 -1
drivers/cpufreq/amd_freq_sensitivity.c
···
                 pci_dev_put(pcidev);
         }
 
-        if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+        if (rdmsrq_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
                 return -ENODEV;
 
         if (!(val >> CLASS_CODE_SHIFT))
+1 -1
drivers/cpufreq/intel_pstate.c
···
         if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
                 status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;
 
-        rdmsrl_safe(MSR_HWP_STATUS, &value);
+        rdmsrq_safe(MSR_HWP_STATUS, &value);
         if (!(value & status_mask))
                 return;
 
+2 -2
drivers/gpu/drm/i915/selftests/librapl.c
···
         unsigned long long power;
         u32 units;
 
-        if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+        if (rdmsrq_safe(MSR_RAPL_POWER_UNIT, &power))
                 return 0;
 
         units = (power & 0x1f00) >> 8;
 
-        if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+        if (rdmsrq_safe(MSR_PP1_ENERGY_STATUS, &power))
                 return 0;
 
         return (1000000 * power) >> units; /* convert to uJ */
+3 -3
drivers/hwmon/fam15h_power.c
···
          */
         cu = topology_core_id(smp_processor_id());
 
-        rdmsrl_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
-        rdmsrl_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
+        rdmsrq_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
+        rdmsrq_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
 
         data->cu_on[cu] = 1;
 }
···
          */
         data->cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
 
-        if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
+        if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
                 pr_err("Failed to read max compute unit power accumulator MSR\n");
                 return -ENODEV;
         }
+2 -2
drivers/platform/x86/intel/ifs/core.c
···
         if (!m)
                 return -ENODEV;
 
-        if (rdmsrl_safe(MSR_IA32_CORE_CAPS, &msrval))
+        if (rdmsrq_safe(MSR_IA32_CORE_CAPS, &msrval))
                 return -ENODEV;
 
         if (!(msrval & MSR_IA32_CORE_CAPS_INTEGRITY_CAPS))
                 return -ENODEV;
 
-        if (rdmsrl_safe(MSR_INTEGRITY_CAPS, &msrval))
+        if (rdmsrq_safe(MSR_INTEGRITY_CAPS, &msrval))
                 return -ENODEV;
 
         ifs_pkg_auth = kmalloc_array(topology_max_packages(), sizeof(bool), GFP_KERNEL);
+4 -4
drivers/platform/x86/intel/pmc/core.c
···
         unsigned int index;
 
         for (index = 0; map[index].name ; index++) {
-                if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
+                if (rdmsrq_safe(map[index].bit_mask, &pcstate_count))
                         continue;
 
                 pcstate_count *= 1000;
···
 
         /* Save PKGC residency for checking later */
         for (i = 0; i < pmcdev->num_of_pkgc; i++) {
-                if (rdmsrl_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
+                if (rdmsrq_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
                         return -EIO;
         }
 
···
         u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask;
         u64 deepest_pkgc_residency;
 
-        if (rdmsrl_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
+        if (rdmsrq_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
                 return false;
 
         if (deepest_pkgc_residency == pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1])
···
         for (i = 0; i < pmcdev->num_of_pkgc; i++) {
                 u64 pc_cnt;
 
-                if (!rdmsrl_safe(msr_map[i].bit_mask, &pc_cnt)) {
+                if (!rdmsrq_safe(msr_map[i].bit_mask, &pc_cnt)) {
                         dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n",
                                  msr_map[i].name, pmcdev->pkgc_res_cnt[i],
                                  msr_map[i].name, pc_cnt);
+5 -5
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
···
 
         isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
 
-        ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
+        ret = rdmsrq_safe(MSR_CPU_BUS_NUMBER, &data);
         if (ret) {
                 /* This is not a fatal error on MSR mailbox only I/F */
                 isst_cpu_info[cpu].bus_info[0] = -1;
···
 
         if (isst_hpm_support) {
 
-                ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+                ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data);
                 if (!ret)
                         goto set_punit_id;
         }
 
-        ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
+        ret = rdmsrq_safe(MSR_THREAD_ID_INFO, &data);
         if (ret) {
                 isst_cpu_info[cpu].punit_cpu_id = -1;
                 return ret;
···
                 u64 data;
 
                 /* Can fail only on some Skylake-X generations */
-                if (rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
-                    rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data))
+                if (rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
+                    rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data))
                         return -ENODEV;
         }
 
+2 -2
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
···
                 return -ENODEV;
 
         /* Check presence of mailbox MSRs */
-        ret = rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data);
+        ret = rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data);
         if (ret)
                 return ret;
 
-        ret = rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data);
+        ret = rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data);
         if (ret)
                 return ret;
 
+2 -2
drivers/platform/x86/intel/tpmi_power_domains.c
···
         u64 data;
         int ret;
 
-        ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+        ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data);
         if (ret)
                 return ret;
 
···
                 return -ENODEV;
 
         /* Check for MSR 0x54 presence */
-        ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+        ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data);
         if (ret)
                 return ret;
 
+1 -1
drivers/platform/x86/intel/turbo_max_3.c
···
         }
 
         for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
-                ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
+                ret = rdmsrq_safe(MSR_OC_MAILBOX, &value);
                 if (ret) {
                         pr_debug("cpu %d OC mailbox read failed\n", cpu);
                         break;
+1 -1
drivers/powercap/intel_rapl_msr.c
···
         struct reg_action *ra = info;
         u64 val;
 
-        ra->err = rdmsrl_safe(ra->reg.msr, &val);
+        ra->err = rdmsrq_safe(ra->reg.msr, &val);
         if (ra->err)
                 return;
 
+1 -1
drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
···
         u64 val;
         int err;
 
-        err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
+        err = rdmsrq_safe(MSR_PLATFORM_INFO, &val);
         if (err)
                 return err;
 
+2 -2
drivers/thermal/intel/intel_powerclamp.c
···
 
         /* check if any one of the counter msrs exists */
         while (info->msr_index) {
-                if (!rdmsrl_safe(info->msr_index, &val))
+                if (!rdmsrq_safe(info->msr_index, &val))
                         return true;
                 info++;
         }
···
 
         while (info->msr_index) {
                 if (!info->skip) {
-                        if (!rdmsrl_safe(info->msr_index, &val))
+                        if (!rdmsrq_safe(info->msr_index, &val))
                                 count += val;
                         else
                                 info->skip = true;
+2 -2
drivers/thermal/intel/intel_tcc_cooling.c
···
         if (!id)
                 return -ENODEV;
 
-        err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
+        err = rdmsrq_safe(MSR_PLATFORM_INFO, &val);
         if (err)
                 return err;
 
         if (!(val & TCC_PROGRAMMABLE))
                 return -ENODEV;
 
-        err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+        err = rdmsrq_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
         if (err)
                 return err;
 