
x86/msr: Rename 'wrmsrl_on_cpu()' to 'wrmsrq_on_cpu()'

Suggested-by: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Xin Li <xin@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
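
The signature of the helper is unchanged by the rename: only the legacy 'l' suffix becomes 'q' (quadword, i.e. a single 64-bit write), matching the already-renamed rdmsrq_on_cpu() and wrmsrq() visible in the context lines below. A minimal before/after sketch of a call site; MSR_EXAMPLE and example_set() are invented for illustration:

#include <asm/msr.h>

/* Hypothetical MSR index, for illustration only. */
#define MSR_EXAMPLE	0xc0000123

static int example_set(unsigned int cpu, u64 val)
{
	/* Before this patch: return wrmsrl_on_cpu(cpu, MSR_EXAMPLE, val); */
	return wrmsrq_on_cpu(cpu, MSR_EXAMPLE, val);
}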

6 files changed, 27 insertions(+), 27 deletions(-)
+2 -2
arch/x86/include/asm/msr.h
@@ -330,7 +330,7 @@
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
-int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
+int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
@@ -355,7 +355,7 @@
 	rdmsrq(msr_no, *q);
 	return 0;
 }
-static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 {
 	wrmsrq(msr_no, q);
 	return 0;
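
The second hunk is the !CONFIG_SMP stub, where the cross-CPU variant degenerates to a plain wrmsrq() on the only CPU. Both hunks also keep the read and write halves of the API symmetric, which is what callers rely on for read-modify-write updates. A sketch of that pattern, using only the two declarations from this header; the MSR index and bit mask are invented:

#include <linux/bits.h>
#include <asm/msr.h>

/* Illustrative only: MSR index and bit mask are made-up values. */
#define MSR_DEMO		0xc0000456
#define DEMO_ENABLE_BIT		BIT_ULL(0)

static int demo_enable_on(unsigned int cpu)
{
	u64 val;
	int ret;

	ret = rdmsrq_on_cpu(cpu, MSR_DEMO, &val);
	if (ret)
		return ret;

	/* Same read-modify-write shape as the intel_epb.c hunk below. */
	return wrmsrq_on_cpu(cpu, MSR_DEMO, val | DEMO_ENABLE_BIT);
}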
+1 -1
arch/x86/kernel/cpu/intel_epb.c
@@ -161,7 +161,7 @@
 	if (ret < 0)
 		return ret;
 
-	ret = wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS,
+	ret = wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS,
 			    (epb & ~EPB_MASK) | val);
 	if (ret < 0)
 		return ret;
+2 -2
arch/x86/lib/msr-smp.c
@@ -78,7 +78,7 @@
 }
 EXPORT_SYMBOL(wrmsr_on_cpu);
 
-int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 {
 	int err;
 	struct msr_info rv;
@@ -92,7 +92,7 @@
 
 	return err;
 }
-EXPORT_SYMBOL(wrmsrl_on_cpu);
+EXPORT_SYMBOL(wrmsrq_on_cpu);
 
 static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
 			    struct msr __percpu *msrs,
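
The function body elided between the two hunks is untouched by this patch: it packs the MSR number and the 64-bit value into the msr_info argument and issues the actual write on the target CPU via a cross-CPU function call, which is where the IPI cost referenced by the intel_pstate comments below comes from. A sketch of that general pattern, not a copy of the kernel's implementation; all names here are invented:

#include <linux/smp.h>
#include <asm/msr.h>

struct demo_msr_info {
	u32 msr_no;
	u64 val;
};

/* Runs on the target CPU, typically delivered via an IPI. */
static void demo_remote_write(void *info)
{
	struct demo_msr_info *rv = info;

	wrmsrq(rv->msr_no, rv->val);
}

static int demo_wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 val)
{
	struct demo_msr_info rv = { .msr_no = msr_no, .val = val };

	/* wait=1: block until the remote write has completed. */
	return smp_call_function_single(cpu, demo_remote_write, &rv, 1);
}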
+3 -3
drivers/cpufreq/amd-pstate.c
@@ -261,7 +261,7 @@
 		wrmsrq(MSR_AMD_CPPC_REQ, value);
 		return 0;
 	} else {
-		int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+		int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
 
 		if (ret)
 			return ret;
@@ -309,7 +309,7 @@
 	if (value == prev)
 		return 0;
 
-	ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+	ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
 	if (ret) {
 		pr_err("failed to set energy perf value (%d)\n", ret);
 		return ret;
@@ -788,7 +788,7 @@
 
 static void amd_perf_ctl_reset(unsigned int cpu)
 {
-	wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+	wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
 }
 
 /*
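
The amd-pstate hunks show the cost model behind the two entry points: a write targeting the current CPU can use wrmsrq() directly, a remote write goes through the cross-call in wrmsrq_on_cpu(), and callers compare against a cached value first to skip the write entirely when nothing changed. A sketch of that caching pattern; the struct and function names are invented:

#include <linux/smp.h>
#include <asm/msr.h>

struct demo_cpudata {
	unsigned int	cpu;
	u64		cached_req;
};

static int demo_update_req(struct demo_cpudata *data, u64 value)
{
	/* Skip the (potentially IPI-backed) write if nothing changed. */
	if (value == data->cached_req)
		return 0;

	/* Local fast path; assumes the caller excludes migration here. */
	if (data->cpu == smp_processor_id()) {
		wrmsrq(MSR_AMD_CPPC_REQ, value);
	} else {
		int ret = wrmsrq_on_cpu(data->cpu, MSR_AMD_CPPC_REQ, value);

		if (ret)
			return ret;
	}

	data->cached_req = value;
	return 0;
}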
+17 -17
drivers/cpufreq/intel_pstate.c
@@ -664,7 +664,7 @@
 		return ret;
 
 	epb = (epb & ~0x0f) | pref;
-	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
+	wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
 
 	return 0;
 }
@@ -762,7 +762,7 @@
 	 * function, so it cannot run in parallel with the update below.
 	 */
 	WRITE_ONCE(cpu->hwp_req_cached, value);
-	ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+	ret = wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
 	if (!ret)
 		cpu->epp_cached = epp;
 
@@ -1209,7 +1209,7 @@
 	}
 skip_epp:
 	WRITE_ONCE(cpu_data->hwp_req_cached, value);
-	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+	wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
 }
 
 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
@@ -1256,7 +1256,7 @@
 	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
 		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
 
-	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+	wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
 
 	mutex_lock(&hybrid_capacity_lock);
 
@@ -1302,7 +1302,7 @@
 static void intel_pstate_hwp_reenable(struct cpudata *cpu)
 {
 	intel_pstate_hwp_enable(cpu);
-	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
+	wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
 }
 
 static int intel_pstate_suspend(struct cpufreq_policy *policy)
@@ -1855,7 +1855,7 @@
 		hybrid_update_capacity(cpudata);
 	}
 
-	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+	wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }
 
 static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
@@ -1905,8 +1905,8 @@
 	if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
 		return;
 
-	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
-	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+	/* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+	wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
 	raw_spin_lock_irq(&hwp_notify_lock);
 	cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
@@ -1933,8 +1933,8 @@
 		if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
 			interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
 
-		/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
-		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
-		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+		/* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+		wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
+		wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 	}
 }
@@ -1974,9 +1974,9 @@
 {
 	/* First disable HWP notification interrupt till we activate again */
 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
-		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+		wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
-	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
+	wrmsrq_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 
 	intel_pstate_enable_hwp_interrupt(cpudata);
 
@@ -2244,7 +2244,7 @@
 	 * the CPU being updated, so force the register update to run on the
 	 * right CPU.
 	 */
-	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+	wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
 		      pstate_funcs.get_val(cpu, pstate));
 }
 
@@ -3102,7 +3102,7 @@
 	if (fast_switch)
 		wrmsrq(MSR_HWP_REQUEST, value);
 	else
-		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+		wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
 }
 
 static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
@@ -3112,7 +3112,7 @@
 		wrmsrq(MSR_IA32_PERF_CTL,
 		       pstate_funcs.get_val(cpu, target_pstate));
 	else
-		wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+		wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
 			      pstate_funcs.get_val(cpu, target_pstate));
 }
 
@@ -3323,7 +3323,7 @@
 	 * written by it may not be suitable.
 	 */
 	value &= ~HWP_DESIRED_PERF(~0L);
-	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+	wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
 	WRITE_ONCE(cpu->hwp_req_cached, value);
 }
 
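
The intel_pstate comments above record the constraint that survives the rename: wrmsrq_on_cpu() may send an IPI and wait for the target CPU, so it must be called before taking a raw spinlock, never under one. A sketch of that ordering; demo_lock and demo_disable_notify() are invented names:

#include <linux/spinlock.h>
#include <asm/msr.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_disable_notify(unsigned int cpu)
{
	/* MSR write first: may IPI the target CPU and block. */
	wrmsrq_on_cpu(cpu, MSR_HWP_INTERRUPT, 0x00);

	raw_spin_lock_irq(&demo_lock);
	/* ... bookkeeping now that the interrupt source is masked ... */
	raw_spin_unlock_irq(&demo_lock);
}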
+2 -2
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -88,7 +88,7 @@
 		cap |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
 	}
 
-	ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
+	ret = wrmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
 	if (ret)
 		return ret;
 
@@ -207,7 +207,7 @@
 		if (!data || !data->valid || !data->stored_uncore_data)
 			return 0;
 
-		wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT,
+		wrmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT,
 			      data->stored_uncore_data);
 	}
 	break;