Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cpufreq/amd-pstate: Fix scaling_min_freq and scaling_max_freq update

When amd_pstate is running, writing to scaling_min_freq and
scaling_max_freq has no effect. These values are only passed to the
policy level, but not to the platform level. This means that the
platform does not know about the frequency limits set by the user.

To fix this, update the min_perf and max_perf values at the platform
level whenever the user changes the scaling_min_freq and scaling_max_freq
values.

Fixes: ffa5096a7c33 ("cpufreq: amd-pstate: implement Pstate EPP support for the AMD processors")
Acked-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Wyes Karny <wyes.karny@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

Authored by Wyes Karny; committed by Rafael J. Wysocki.
febab20c bb87be26

+51 -13
+47 -13
drivers/cpufreq/amd-pstate.c
··· 307 307 highest_perf = AMD_CPPC_HIGHEST_PERF(cap1); 308 308 309 309 WRITE_ONCE(cpudata->highest_perf, highest_perf); 310 - 310 + WRITE_ONCE(cpudata->max_limit_perf, highest_perf); 311 311 WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1)); 312 312 WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1)); 313 313 WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1)); 314 - 314 + WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1)); 315 315 return 0; 316 316 } 317 317 ··· 329 329 highest_perf = cppc_perf.highest_perf; 330 330 331 331 WRITE_ONCE(cpudata->highest_perf, highest_perf); 332 - 332 + WRITE_ONCE(cpudata->max_limit_perf, highest_perf); 333 333 WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); 334 334 WRITE_ONCE(cpudata->lowest_nonlinear_perf, 335 335 cppc_perf.lowest_nonlinear_perf); 336 336 WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); 337 + WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf); 337 338 338 339 if (cppc_state == AMD_PSTATE_ACTIVE) 339 340 return 0; ··· 433 432 u64 prev = READ_ONCE(cpudata->cppc_req_cached); 434 433 u64 value = prev; 435 434 435 + min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, 436 + cpudata->max_limit_perf); 437 + max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf, 438 + cpudata->max_limit_perf); 436 439 des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); 437 440 438 441 if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) { ··· 475 470 return 0; 476 471 } 477 472 473 + static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy) 474 + { 475 + u32 max_limit_perf, min_limit_perf; 476 + struct amd_cpudata *cpudata = policy->driver_data; 477 + 478 + max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq); 479 + min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq); 480 + 481 + 
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf); 482 + WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf); 483 + WRITE_ONCE(cpudata->max_limit_freq, policy->max); 484 + WRITE_ONCE(cpudata->min_limit_freq, policy->min); 485 + 486 + return 0; 487 + } 488 + 478 489 static int amd_pstate_update_freq(struct cpufreq_policy *policy, 479 490 unsigned int target_freq, bool fast_switch) 480 491 { ··· 500 479 501 480 if (!cpudata->max_freq) 502 481 return -ENODEV; 482 + 483 + if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) 484 + amd_pstate_update_min_max_limit(policy); 503 485 504 486 cap_perf = READ_ONCE(cpudata->highest_perf); 505 487 min_perf = READ_ONCE(cpudata->lowest_perf); ··· 557 533 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 558 534 struct amd_cpudata *cpudata = policy->driver_data; 559 535 unsigned int target_freq; 536 + 537 + if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) 538 + amd_pstate_update_min_max_limit(policy); 539 + 560 540 561 541 cap_perf = READ_ONCE(cpudata->highest_perf); 562 542 lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); ··· 775 747 /* Initial processor data capability frequencies */ 776 748 cpudata->max_freq = max_freq; 777 749 cpudata->min_freq = min_freq; 750 + cpudata->max_limit_freq = max_freq; 751 + cpudata->min_limit_freq = min_freq; 778 752 cpudata->nominal_freq = nominal_freq; 779 753 cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq; 780 754 ··· 1215 1185 return 0; 1216 1186 } 1217 1187 1218 - static void amd_pstate_epp_init(unsigned int cpu) 1188 + static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy) 1219 1189 { 1220 - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 1221 1190 struct amd_cpudata *cpudata = policy->driver_data; 1222 - u32 max_perf, min_perf; 1191 + u32 max_perf, min_perf, min_limit_perf, max_limit_perf; 1223 1192 u64 value; 1224 1193 s16 epp; 1225 1194 1226 1195 max_perf = 
READ_ONCE(cpudata->highest_perf); 1227 1196 min_perf = READ_ONCE(cpudata->lowest_perf); 1197 + max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq); 1198 + min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq); 1199 + 1200 + max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf, 1201 + cpudata->max_limit_perf); 1202 + min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, 1203 + cpudata->max_limit_perf); 1204 + 1205 + WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf); 1206 + WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf); 1228 1207 1229 1208 value = READ_ONCE(cpudata->cppc_req_cached); 1230 1209 ··· 1251 1212 value &= ~AMD_CPPC_DES_PERF(~0L); 1252 1213 value |= AMD_CPPC_DES_PERF(0); 1253 1214 1254 - if (cpudata->epp_policy == cpudata->policy) 1255 - goto skip_epp; 1256 - 1257 1215 cpudata->epp_policy = cpudata->policy; 1258 1216 1259 1217 /* Get BIOS pre-defined epp value */ ··· 1260 1224 * This return value can only be negative for shared_memory 1261 1225 * systems where EPP register read/write not supported. 1262 1226 */ 1263 - goto skip_epp; 1227 + return; 1264 1228 } 1265 1229 1266 1230 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) ··· 1274 1238 1275 1239 WRITE_ONCE(cpudata->cppc_req_cached, value); 1276 1240 amd_pstate_set_epp(cpudata, epp); 1277 - skip_epp: 1278 - cpufreq_cpu_put(policy); 1279 1241 } 1280 1242 1281 1243 static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy) ··· 1288 1254 1289 1255 cpudata->policy = policy->policy; 1290 1256 1291 - amd_pstate_epp_init(policy->cpu); 1257 + amd_pstate_epp_update_limit(policy); 1292 1258 1293 1259 return 0; 1294 1260 }
+4
include/linux/amd-pstate.h
··· 70 70 u32 nominal_perf; 71 71 u32 lowest_nonlinear_perf; 72 72 u32 lowest_perf; 73 + u32 min_limit_perf; 74 + u32 max_limit_perf; 75 + u32 min_limit_freq; 76 + u32 max_limit_freq; 73 77 74 78 u32 max_freq; 75 79 u32 min_freq;