Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cpufreq: scmi: Use .register_em() to register with energy model

Set the newly added .register_em() callback to register with the EM
after the cpufreq policy is properly initialized.

Acked-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

+42 -23
drivers/cpufreq/scmi-cpufreq.c
··· 22 22 23 23 struct scmi_data { 24 24 int domain_id; 25 + int nr_opp; 25 26 struct device *cpu_dev; 27 + cpumask_var_t opp_shared_cpus; 26 28 }; 27 29 28 30 static struct scmi_protocol_handle *ph; ··· 125 123 struct device *cpu_dev; 126 124 struct scmi_data *priv; 127 125 struct cpufreq_frequency_table *freq_table; 128 - struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power); 129 - cpumask_var_t opp_shared_cpus; 130 - bool power_scale_mw; 131 126 132 127 cpu_dev = get_cpu_device(policy->cpu); 133 128 if (!cpu_dev) { ··· 132 133 return -ENODEV; 133 134 } 134 135 135 - if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL)) 136 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 137 + if (!priv) 136 138 return -ENOMEM; 139 + 140 + if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) { 141 + ret = -ENOMEM; 142 + goto out_free_priv; 143 + } 137 144 138 145 /* Obtain CPUs that share SCMI performance controls */ 139 146 ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus); ··· 153 148 * The OPP 'sharing cpus' info may come from DT through an empty opp 154 149 * table and opp-shared. 155 150 */ 156 - ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus); 157 - if (ret || !cpumask_weight(opp_shared_cpus)) { 151 + ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus); 152 + if (ret || !cpumask_weight(priv->opp_shared_cpus)) { 158 153 /* 159 154 * Either opp-table is not set or no opp-shared was found. 160 155 * Use the CPU mask from SCMI to designate CPUs sharing an OPP 161 156 * table. 
162 157 */ 163 - cpumask_copy(opp_shared_cpus, policy->cpus); 158 + cpumask_copy(priv->opp_shared_cpus, policy->cpus); 164 159 } 165 160 166 161 /* ··· 185 180 goto out_free_opp; 186 181 } 187 182 188 - ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus); 183 + ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus); 189 184 if (ret) { 190 185 dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", 191 186 __func__, ret); ··· 193 188 goto out_free_opp; 194 189 } 195 190 196 - power_scale_mw = perf_ops->power_scale_mw_get(ph); 197 - em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, 198 - opp_shared_cpus, power_scale_mw); 199 - } 200 - 201 - priv = kzalloc(sizeof(*priv), GFP_KERNEL); 202 - if (!priv) { 203 - ret = -ENOMEM; 204 - goto out_free_opp; 191 + priv->nr_opp = nr_opp; 205 192 } 206 193 207 194 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); 208 195 if (ret) { 209 196 dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); 210 - goto out_free_priv; 197 + goto out_free_opp; 211 198 } 212 199 213 200 priv->cpu_dev = cpu_dev; ··· 220 223 policy->fast_switch_possible = 221 224 perf_ops->fast_switch_possible(ph, cpu_dev); 222 225 223 - free_cpumask_var(opp_shared_cpus); 224 226 return 0; 225 - 226 - out_free_priv: 227 - kfree(priv); 228 227 229 228 out_free_opp: 230 229 dev_pm_opp_remove_all_dynamic(cpu_dev); 231 230 232 231 out_free_cpumask: 233 - free_cpumask_var(opp_shared_cpus); 232 + free_cpumask_var(priv->opp_shared_cpus); 233 + 234 + out_free_priv: 235 + kfree(priv); 234 236 235 237 return ret; 236 238 } ··· 240 244 241 245 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 242 246 dev_pm_opp_remove_all_dynamic(priv->cpu_dev); 247 + free_cpumask_var(priv->opp_shared_cpus); 243 248 kfree(priv); 244 249 245 250 return 0; 251 + } 252 + 253 + static void scmi_cpufreq_register_em(struct cpufreq_policy *policy) 254 + { 255 + struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
256 + bool power_scale_mw = perf_ops->power_scale_mw_get(ph); 257 + struct scmi_data *priv = policy->driver_data; 258 + 259 + /* 260 + * This callback will be called for each policy, but we don't need to 261 + * register with EM every time. Despite not being part of the same 262 + * policy, some CPUs may still share their perf-domains, and a CPU from 263 + * another policy may already have registered with EM on behalf of CPUs 264 + * of this policy. 265 + */ 266 + if (!priv->nr_opp) 267 + return; 268 + 269 + em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp, 270 + &em_cb, priv->opp_shared_cpus, 271 + power_scale_mw); 246 272 } 247 273 248 274 static struct cpufreq_driver scmi_cpufreq_driver = { ··· 279 261 .get = scmi_cpufreq_get_rate, 280 262 .init = scmi_cpufreq_init, 281 263 .exit = scmi_cpufreq_exit, 264 + .register_em = scmi_cpufreq_register_em, 282 265 }; 283 266 284 267 static int scmi_cpufreq_probe(struct scmi_device *sdev)