Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'pm-5.10-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
"Make the intel_pstate driver behave as expected when it operates in
the passive mode with HWP enabled and the 'powersave' governor on top
of it"

* tag 'pm-5.10-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
cpufreq: intel_pstate: Take CPUFREQ_GOV_STRICT_TARGET into account
cpufreq: Add strict_target to struct cpufreq_policy
cpufreq: Introduce CPUFREQ_GOV_STRICT_TARGET
cpufreq: Introduce governor flags

+32 -12
+3 -1
drivers/cpufreq/cpufreq.c
··· 2254 2254 return -EINVAL; 2255 2255 2256 2256 /* Platform doesn't want dynamic frequency switching ? */ 2257 - if (policy->governor->dynamic_switching && 2257 + if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING && 2258 2258 cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) { 2259 2259 struct cpufreq_governor *gov = cpufreq_fallback_governor(); 2260 2260 ··· 2279 2279 return ret; 2280 2280 } 2281 2281 } 2282 + 2283 + policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET); 2282 2284 2283 2285 return 0; 2284 2286 }
+1 -1
drivers/cpufreq/cpufreq_governor.h
··· 156 156 #define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \ 157 157 { \ 158 158 .name = _name_, \ 159 - .dynamic_switching = true, \ 159 + .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING, \ 160 160 .owner = THIS_MODULE, \ 161 161 .init = cpufreq_dbs_governor_init, \ 162 162 .exit = cpufreq_dbs_governor_exit, \
+1
drivers/cpufreq/cpufreq_performance.c
··· 20 20 static struct cpufreq_governor cpufreq_gov_performance = { 21 21 .name = "performance", 22 22 .owner = THIS_MODULE, 23 + .flags = CPUFREQ_GOV_STRICT_TARGET, 23 24 .limits = cpufreq_gov_performance_limits, 24 25 }; 25 26
+1
drivers/cpufreq/cpufreq_powersave.c
··· 21 21 .name = "powersave", 22 22 .limits = cpufreq_gov_powersave_limits, 23 23 .owner = THIS_MODULE, 24 + .flags = CPUFREQ_GOV_STRICT_TARGET, 24 25 }; 25 26 26 27 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+9 -7
drivers/cpufreq/intel_pstate.c
··· 2527 2527 } 2528 2528 2529 2529 static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate, 2530 - bool fast_switch) 2530 + bool strict, bool fast_switch) 2531 2531 { 2532 2532 u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; 2533 2533 ··· 2539 2539 * field in it, so opportunistically update the max too if needed. 2540 2540 */ 2541 2541 value &= ~HWP_MAX_PERF(~0L); 2542 - value |= HWP_MAX_PERF(cpu->max_perf_ratio); 2542 + value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio); 2543 2543 2544 2544 if (value == prev) 2545 2545 return; ··· 2562 2562 pstate_funcs.get_val(cpu, target_pstate)); 2563 2563 } 2564 2564 2565 - static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate, 2566 - bool fast_switch) 2565 + static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, 2566 + int target_pstate, bool fast_switch) 2567 2567 { 2568 + struct cpudata *cpu = all_cpu_data[policy->cpu]; 2568 2569 int old_pstate = cpu->pstate.current_pstate; 2569 2570 2570 2571 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2571 2572 if (hwp_active) { 2572 - intel_cpufreq_adjust_hwp(cpu, target_pstate, fast_switch); 2573 + intel_cpufreq_adjust_hwp(cpu, target_pstate, 2574 + policy->strict_target, fast_switch); 2573 2575 cpu->pstate.current_pstate = target_pstate; 2574 2576 } else if (target_pstate != old_pstate) { 2575 2577 intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch); ··· 2611 2609 break; 2612 2610 } 2613 2611 2614 - target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false); 2612 + target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false); 2615 2613 2616 2614 freqs.new = target_pstate * cpu->pstate.scaling; 2617 2615 ··· 2630 2628 2631 2629 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); 2632 2630 2633 - target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true); 2631 + target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); 2634 2632 2635 2633 return target_pstate * cpu->pstate.scaling; 2636 2634 }
+16 -2
include/linux/cpufreq.h
··· 110 110 bool fast_switch_enabled; 111 111 112 112 /* 113 + * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current 114 + * governor. 115 + */ 116 + bool strict_target; 117 + 118 + /* 113 119 * Preferred average time interval between consecutive invocations of 114 120 * the driver to set the frequency for this policy. To be set by the 115 121 * scaling driver (0, which is the default, means no preference). ··· 576 570 char *buf); 577 571 int (*store_setspeed) (struct cpufreq_policy *policy, 578 572 unsigned int freq); 579 - /* For governors which change frequency dynamically by themselves */ 580 - bool dynamic_switching; 581 573 struct list_head governor_list; 582 574 struct module *owner; 575 + u8 flags; 583 576 }; 577 + 578 + /* Governor flags */ 579 + 580 + /* For governors which change frequency dynamically by themselves */ 581 + #define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0) 582 + 583 + /* For governors wanting the target frequency to be set exactly */ 584 + #define CPUFREQ_GOV_STRICT_TARGET BIT(1) 585 + 584 586 585 587 /* Pass a target to the cpufreq driver */ 586 588 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+1 -1
kernel/sched/cpufreq_schedutil.c
··· 881 881 struct cpufreq_governor schedutil_gov = { 882 882 .name = "schedutil", 883 883 .owner = THIS_MODULE, 884 - .dynamic_switching = true, 884 + .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING, 885 885 .init = sugov_init, 886 886 .exit = sugov_exit, 887 887 .start = sugov_start,