Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cpufreq: amd-pstate: Introduce the support for the processors with shared memory solution

Some Zen2 and Zen3 based processors use the shared memory that is
exposed by the ACPI SBIOS. These processors have no MSR support, so we
add the ACPI CPPC function as the backend for them.

A module parameter (shared_mem) is used to enable the related processors
manually. We will enable this by default once we address the performance
issue with this solution.

Signed-off-by: Jinzhou Su <Jinzhou.Su@amd.com>
Signed-off-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

authored by

Huang Rui and committed by
Rafael J. Wysocki
e059c184 1d215f03

+97 -8
+97 -8
drivers/cpufreq/amd-pstate.c
··· 48 48 #define AMD_PSTATE_TRANSITION_LATENCY 0x20000 49 49 #define AMD_PSTATE_TRANSITION_DELAY 500 50 50 51 + /* 52 + * TODO: We need more time to fine tune processors with shared memory solution 53 + * with community together. 54 + * 55 + * There are some performance drops on the CPU benchmarks which reports from 56 + * Suse. We are co-working with them to fine tune the shared memory solution. So 57 + * we disable it by default to go acpi-cpufreq on these processors and add a 58 + * module parameter to be able to enable it manually for debugging. 59 + */ 60 + static bool shared_mem = false; 61 + module_param(shared_mem, bool, 0444); 62 + MODULE_PARM_DESC(shared_mem, 63 + "enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)"); 64 + 51 65 static struct cpufreq_driver amd_pstate_driver; 52 66 53 67 /** ··· 99 85 u32 lowest_nonlinear_freq; 100 86 }; 101 87 102 - static inline int amd_pstate_enable(bool enable) 88 + static inline int pstate_enable(bool enable) 103 89 { 104 90 return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable); 105 91 } 106 92 107 - static int amd_pstate_init_perf(struct amd_cpudata *cpudata) 93 + static int cppc_enable(bool enable) 94 + { 95 + int cpu, ret = 0; 96 + 97 + for_each_present_cpu(cpu) { 98 + ret = cppc_set_enable(cpu, enable); 99 + if (ret) 100 + return ret; 101 + } 102 + 103 + return ret; 104 + } 105 + 106 + DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable); 107 + 108 + static inline int amd_pstate_enable(bool enable) 109 + { 110 + return static_call(amd_pstate_enable)(enable); 111 + } 112 + 113 + static int pstate_init_perf(struct amd_cpudata *cpudata) 108 114 { 109 115 u64 cap1; 110 116 ··· 147 113 return 0; 148 114 } 149 115 150 - static void amd_pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, 151 - u32 des_perf, u32 max_perf, bool fast_switch) 116 + static int cppc_init_perf(struct amd_cpudata *cpudata) 117 + { 118 + struct cppc_perf_caps cppc_perf; 119 + 120 + int 
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 121 + if (ret) 122 + return ret; 123 + 124 + WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); 125 + 126 + WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); 127 + WRITE_ONCE(cpudata->lowest_nonlinear_perf, 128 + cppc_perf.lowest_nonlinear_perf); 129 + WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); 130 + 131 + return 0; 132 + } 133 + 134 + DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf); 135 + 136 + static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) 137 + { 138 + return static_call(amd_pstate_init_perf)(cpudata); 139 + } 140 + 141 + static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, 142 + u32 des_perf, u32 max_perf, bool fast_switch) 152 143 { 153 144 if (fast_switch) 154 145 wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached)); 155 146 else 156 147 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, 157 148 READ_ONCE(cpudata->cppc_req_cached)); 149 + } 150 + 151 + static void cppc_update_perf(struct amd_cpudata *cpudata, 152 + u32 min_perf, u32 des_perf, 153 + u32 max_perf, bool fast_switch) 154 + { 155 + struct cppc_perf_ctrls perf_ctrls; 156 + 157 + perf_ctrls.max_perf = max_perf; 158 + perf_ctrls.min_perf = min_perf; 159 + perf_ctrls.desired_perf = des_perf; 160 + 161 + cppc_set_perf(cpudata->cpu, &perf_ctrls); 162 + } 163 + 164 + DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); 165 + 166 + static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, 167 + u32 min_perf, u32 des_perf, 168 + u32 max_perf, bool fast_switch) 169 + { 170 + static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, 171 + max_perf, fast_switch); 158 172 } 159 173 160 174 static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, ··· 414 332 /* It will be updated by governor */ 415 333 policy->cur = policy->cpuinfo.min_freq; 416 334 417 - policy->fast_switch_possible = true; 335 + if 
(boot_cpu_has(X86_FEATURE_CPPC)) 336 + policy->fast_switch_possible = true; 418 337 419 338 /* Initial processor data capability frequencies */ 420 339 cpudata->max_freq = max_freq; ··· 447 364 .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, 448 365 .verify = amd_pstate_verify, 449 366 .target = amd_pstate_target, 450 - .adjust_perf = amd_pstate_adjust_perf, 451 367 .init = amd_pstate_cpu_init, 452 368 .exit = amd_pstate_cpu_exit, 453 369 .name = "amd-pstate", ··· 469 387 return -EEXIST; 470 388 471 389 /* capability check */ 472 - if (!boot_cpu_has(X86_FEATURE_CPPC)) { 473 - pr_debug("AMD CPPC MSR based functionality is not supported\n"); 390 + if (boot_cpu_has(X86_FEATURE_CPPC)) { 391 + pr_debug("AMD CPPC MSR based functionality is supported\n"); 392 + amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf; 393 + } else if (shared_mem) { 394 + static_call_update(amd_pstate_enable, cppc_enable); 395 + static_call_update(amd_pstate_init_perf, cppc_init_perf); 396 + static_call_update(amd_pstate_update_perf, cppc_update_perf); 397 + } else { 398 + pr_info("This processor supports shared memory solution, you can enable it with amd_pstate.shared_mem=1\n"); 474 399 return -ENODEV; 475 400 } 476 401