Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge back cpufreq material for 5.10.

+69 -51
+1
arch/arm/include/asm/topology.h
··· 9 9 10 10 /* Replace task scheduler's default frequency-invariant accounting */ 11 11 #define arch_scale_freq_capacity topology_get_freq_scale 12 + #define arch_scale_freq_invariant topology_scale_freq_invariant 12 13 13 14 /* Replace task scheduler's default cpu-invariant accounting */ 14 15 #define arch_scale_cpu_capacity topology_get_cpu_scale
+1
arch/arm64/include/asm/topology.h
··· 27 27 28 28 /* Replace task scheduler's default frequency-invariant accounting */ 29 29 #define arch_scale_freq_capacity topology_get_freq_scale 30 + #define arch_scale_freq_invariant topology_scale_freq_invariant 30 31 31 32 /* Replace task scheduler's default cpu-invariant accounting */ 32 33 #define arch_scale_cpu_capacity topology_get_cpu_scale
+8 -1
arch/arm64/kernel/topology.c
··· 246 246 static_branch_enable(&amu_fie_key); 247 247 } 248 248 249 + /* 250 + * If the system is not fully invariant after AMU init, disable 251 + * partial use of counters for frequency invariance. 252 + */ 253 + if (!topology_scale_freq_invariant()) 254 + static_branch_disable(&amu_fie_key); 255 + 249 256 free_valid_mask: 250 257 free_cpumask_var(valid_cpus); 251 258 ··· 260 253 } 261 254 late_initcall_sync(init_amu_fie); 262 255 263 - bool arch_freq_counters_available(struct cpumask *cpus) 256 + bool arch_freq_counters_available(const struct cpumask *cpus) 264 257 { 265 258 return amu_freq_invariant() && 266 259 cpumask_subset(cpus, amu_fie_cpus);
+11 -2
drivers/base/arch_topology.c
··· 21 21 #include <linux/sched.h> 22 22 #include <linux/smp.h> 23 23 24 - __weak bool arch_freq_counters_available(struct cpumask *cpus) 24 + bool topology_scale_freq_invariant(void) 25 + { 26 + return cpufreq_supports_freq_invariance() || 27 + arch_freq_counters_available(cpu_online_mask); 28 + } 29 + 30 + __weak bool arch_freq_counters_available(const struct cpumask *cpus) 25 31 { 26 32 return false; 27 33 } 28 34 DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; 29 35 30 - void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, 36 + void arch_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq, 31 37 unsigned long max_freq) 32 38 { 33 39 unsigned long scale; 34 40 int i; 41 + 42 + if (WARN_ON_ONCE(!cur_freq || !max_freq)) 43 + return; 35 44 36 45 /* 37 46 * If the use of counters for FIE is enabled, just return as we don't
+1 -9
drivers/cpufreq/cpufreq-dt.c
··· 40 40 { 41 41 struct private_data *priv = policy->driver_data; 42 42 unsigned long freq = policy->freq_table[index].frequency; 43 - int ret; 44 43 45 - ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000); 46 - 47 - if (!ret) { 48 - arch_set_freq_scale(policy->related_cpus, freq, 49 - policy->cpuinfo.max_freq); 50 - } 51 - 52 - return ret; 44 + return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000); 53 45 } 54 46 55 47 /*
+31 -4
drivers/cpufreq/cpufreq.c
··· 61 61 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); 62 62 static DEFINE_RWLOCK(cpufreq_driver_lock); 63 63 64 + static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance); 65 + bool cpufreq_supports_freq_invariance(void) 66 + { 67 + return static_branch_likely(&cpufreq_freq_invariance); 68 + } 69 + 64 70 /* Flag to suspend/resume CPUFreq governors */ 65 71 static bool cpufreq_suspended; 66 72 ··· 160 154 } 161 155 EXPORT_SYMBOL_GPL(get_cpu_idle_time); 162 156 163 - __weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, 164 - unsigned long max_freq) 157 + __weak void arch_set_freq_scale(const struct cpumask *cpus, 158 + unsigned long cur_freq, 159 + unsigned long max_freq) 165 160 { 166 161 } 167 162 EXPORT_SYMBOL_GPL(arch_set_freq_scale); ··· 452 445 return; 453 446 454 447 cpufreq_notify_post_transition(policy, freqs, transition_failed); 448 + 449 + arch_set_freq_scale(policy->related_cpus, 450 + policy->cur, 451 + policy->cpuinfo.max_freq); 455 452 456 453 policy->transition_ongoing = false; 457 454 policy->transition_task = NULL; ··· 2067 2056 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, 2068 2057 unsigned int target_freq) 2069 2058 { 2070 - target_freq = clamp_val(target_freq, policy->min, policy->max); 2059 + unsigned int freq; 2071 2060 2072 - return cpufreq_driver->fast_switch(policy, target_freq); 2061 + target_freq = clamp_val(target_freq, policy->min, policy->max); 2062 + freq = cpufreq_driver->fast_switch(policy, target_freq); 2063 + 2064 + arch_set_freq_scale(policy->related_cpus, freq, 2065 + policy->cpuinfo.max_freq); 2066 + 2067 + return freq; 2073 2068 } 2074 2069 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch); 2075 2070 ··· 2727 2710 cpufreq_driver = driver_data; 2728 2711 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2729 2712 2713 + /* 2714 + * Mark support for the scheduler's frequency invariance engine for 2715 + * drivers that implement target(), target_index() or fast_switch(). 2716 */ 2717 + if (!cpufreq_driver->setpolicy) { 2718 + static_branch_enable_cpuslocked(&cpufreq_freq_invariance); 2719 + pr_debug("supports frequency invariance"); 2720 + } 2721 + 2730 2722 if (driver_data->setpolicy) 2731 2723 driver_data->flags |= CPUFREQ_CONST_LOOPS; 2732 2724 ··· 2805 2779 cpus_read_lock(); 2806 2780 subsys_interface_unregister(&cpufreq_interface); 2807 2781 remove_boost_sysfs_file(); 2782 + static_branch_disable_cpuslocked(&cpufreq_freq_invariance); 2808 2783 cpuhp_remove_state_nocalls_cpuslocked(hp_online); 2809 2784 2810 2785 write_lock_irqsave(&cpufreq_driver_lock, flags);
+1 -8
drivers/cpufreq/qcom-cpufreq-hw.c
··· 85 85 if (icc_scaling_enabled) 86 86 qcom_cpufreq_set_bw(policy, freq); 87 87 88 - arch_set_freq_scale(policy->related_cpus, freq, 89 - policy->cpuinfo.max_freq); 90 88 return 0; 91 89 } 92 90 ··· 111 113 { 112 114 void __iomem *perf_state_reg = policy->driver_data; 113 115 unsigned int index; 114 - unsigned long freq; 115 116 116 117 index = policy->cached_resolved_idx; 117 118 writel_relaxed(index, perf_state_reg); 118 119 119 - freq = policy->freq_table[index].frequency; 120 - arch_set_freq_scale(policy->related_cpus, freq, 121 - policy->cpuinfo.max_freq); 122 - 123 - return freq; 120 + return policy->freq_table[index].frequency; 124 121 } 125 122 126 123 static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
+2 -10
drivers/cpufreq/scmi-cpufreq.c
··· 48 48 static int 49 49 scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) 50 50 { 51 - int ret; 52 51 struct scmi_data *priv = policy->driver_data; 53 52 struct scmi_perf_ops *perf_ops = handle->perf_ops; 54 53 u64 freq = policy->freq_table[index].frequency; 55 54 56 - ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false); 57 - if (!ret) 58 - arch_set_freq_scale(policy->related_cpus, freq, 59 - policy->cpuinfo.max_freq); 60 - return ret; 55 + return perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false); 61 56 } 62 57 63 58 static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy, ··· 62 67 struct scmi_perf_ops *perf_ops = handle->perf_ops; 63 68 64 69 if (!perf_ops->freq_set(handle, priv->domain_id, 65 - target_freq * 1000, true)) { 66 - arch_set_freq_scale(policy->related_cpus, target_freq, 67 - policy->cpuinfo.max_freq); 70 + target_freq * 1000, true)) 68 71 return target_freq; 69 - } 70 72 71 73 return 0; 72 74 }
+1 -5
drivers/cpufreq/scpi-cpufreq.c
··· 47 47 static int 48 48 scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) 49 49 { 50 - unsigned long freq = policy->freq_table[index].frequency; 50 + u64 rate = policy->freq_table[index].frequency * 1000; 51 51 struct scpi_data *priv = policy->driver_data; 52 - u64 rate = freq * 1000; 53 52 int ret; 54 53 55 54 ret = clk_set_rate(priv->clk, rate); ··· 58 59 59 60 if (clk_get_rate(priv->clk) != rate) 60 61 return -EIO; 61 - 62 - arch_set_freq_scale(policy->related_cpus, freq, 63 - policy->cpuinfo.max_freq); 64 62 65 63 return 0; 66 64 }
+2 -10
drivers/cpufreq/vexpress-spc-cpufreq.c
··· 182 182 { 183 183 u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster; 184 184 unsigned int freqs_new; 185 - int ret; 186 185 187 186 cur_cluster = cpu_to_cluster(cpu); 188 187 new_cluster = actual_cluster = per_cpu(physical_cluster, cpu); ··· 196 197 new_cluster = A15_CLUSTER; 197 198 } 198 199 199 - ret = ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster, 200 - freqs_new); 201 - 202 - if (!ret) { 203 - arch_set_freq_scale(policy->related_cpus, freqs_new, 204 - policy->cpuinfo.max_freq); 205 - } 206 - 207 - return ret; 200 + return ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster, 201 + freqs_new); 208 202 } 209 203 210 204 static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+3 -1
include/linux/arch_topology.h
··· 30 30 return per_cpu(freq_scale, cpu); 31 31 } 32 32 33 - bool arch_freq_counters_available(struct cpumask *cpus); 33 + bool topology_scale_freq_invariant(void); 34 + 35 + bool arch_freq_counters_available(const struct cpumask *cpus); 34 36 35 37 DECLARE_PER_CPU(unsigned long, thermal_pressure); 36 38
+7 -1
include/linux/cpufreq.h
··· 217 217 void cpufreq_update_policy(unsigned int cpu); 218 218 void cpufreq_update_limits(unsigned int cpu); 219 219 bool have_governor_per_policy(void); 220 + bool cpufreq_supports_freq_invariance(void); 220 221 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); 221 222 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); 222 223 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy); ··· 237 236 static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu) 238 237 { 239 238 return 0; 239 + } 240 + static inline bool cpufreq_supports_freq_invariance(void) 241 + { 242 + return false; 240 243 } 241 244 static inline void disable_cpufreq(void) { } 242 245 #endif ··· 1011 1006 extern void arch_freq_prepare_all(void); 1012 1007 extern unsigned int arch_freq_get_on_cpu(int cpu); 1013 1008 1014 - extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, 1009 + extern void arch_set_freq_scale(const struct cpumask *cpus, 1010 + unsigned long cur_freq, 1015 1011 unsigned long max_freq); 1016 1012 1017 1013 /* the following are really really optional */