Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'cpufreq/arm/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm

Pull ARM cpufreq driver changes for v5.15 from Viresh Kumar:

"This contains:

- Update cpufreq-dt blocklist with more platforms (Bjorn Andersson).

- Allow freq changes from any CPU for qcom-hw driver (Taniya Das).

- Add DCVS interrupt support for qcom-hw driver (Thara Gopinath).

- A new callback (->register_em()) to register EM at a more convenient
point of time."

* 'cpufreq/arm/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
cpufreq: qcom-hw: Set dvfs_possible_from_any_cpu cpufreq driver flag
cpufreq: blocklist more Qualcomm platforms in cpufreq-dt-platdev
cpufreq: qcom-cpufreq-hw: Add dcvs interrupt support
cpufreq: scmi: Use .register_em() to register with energy model
cpufreq: vexpress: Use .register_em() to register with energy model
cpufreq: scpi: Use .register_em() to register with energy model
cpufreq: qcom-cpufreq-hw: Use .register_em() to register with energy model
cpufreq: omap: Use .register_em() to register with energy model
cpufreq: mediatek: Use .register_em() to register with energy model
cpufreq: imx6q: Use .register_em() to register with energy model
cpufreq: dt: Use .register_em() to register with energy model
cpufreq: Add callback to register with energy model
cpufreq: vexpress: Set CPUFREQ_IS_COOLING_DEV flag

+233 -54
+2
drivers/base/arch_topology.c
··· 149 149 } 150 150 151 151 DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; 152 + EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale); 152 153 153 154 void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) 154 155 { ··· 166 165 for_each_cpu(cpu, cpus) 167 166 WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); 168 167 } 168 + EXPORT_SYMBOL_GPL(topology_set_thermal_pressure); 169 169 170 170 static ssize_t cpu_capacity_show(struct device *dev, 171 171 struct device_attribute *attr,
+4
drivers/cpufreq/cpufreq-dt-platdev.c
··· 137 137 { .compatible = "qcom,apq8096", }, 138 138 { .compatible = "qcom,msm8996", }, 139 139 { .compatible = "qcom,qcs404", }, 140 + { .compatible = "qcom,sa8155p" }, 140 141 { .compatible = "qcom,sc7180", }, 141 142 { .compatible = "qcom,sc7280", }, 142 143 { .compatible = "qcom,sc8180x", }, 143 144 { .compatible = "qcom,sdm845", }, 145 + { .compatible = "qcom,sm6350", }, 144 146 { .compatible = "qcom,sm8150", }, 147 + { .compatible = "qcom,sm8250", }, 148 + { .compatible = "qcom,sm8350", }, 145 149 146 150 { .compatible = "st,stih407", }, 147 151 { .compatible = "st,stih410", },
+1 -2
drivers/cpufreq/cpufreq-dt.c
··· 143 143 cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; 144 144 } 145 145 146 - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); 147 - 148 146 return 0; 149 147 150 148 out_clk_put: ··· 182 184 .exit = cpufreq_exit, 183 185 .online = cpufreq_online, 184 186 .offline = cpufreq_offline, 187 + .register_em = cpufreq_register_em_with_opp, 185 188 .name = "cpufreq-dt", 186 189 .attr = cpufreq_dt_attr, 187 190 .suspend = cpufreq_generic_suspend,
+13
drivers/cpufreq/cpufreq.c
··· 1491 1491 write_lock_irqsave(&cpufreq_driver_lock, flags); 1492 1492 list_add(&policy->policy_list, &cpufreq_policy_list); 1493 1493 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1494 + 1495 + /* 1496 + * Register with the energy model before 1497 + * sched_cpufreq_governor_change() is called, which will result 1498 + * in rebuilding of the sched domains, which should only be done 1499 + * once the energy model is properly initialized for the policy 1500 + * first. 1501 + * 1502 + * Also, this should be called before the policy is registered 1503 + * with cooling framework. 1504 + */ 1505 + if (cpufreq_driver->register_em) 1506 + cpufreq_driver->register_em(policy); 1494 1507 } 1495 1508 1496 1509 ret = cpufreq_init_policy(policy);
+1 -1
drivers/cpufreq/imx6q-cpufreq.c
··· 192 192 policy->clk = clks[ARM].clk; 193 193 cpufreq_generic_init(policy, freq_table, transition_latency); 194 194 policy->suspend_freq = max_freq; 195 - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); 196 195 197 196 return 0; 198 197 } ··· 203 204 .target_index = imx6q_set_target, 204 205 .get = cpufreq_generic_get, 205 206 .init = imx6q_cpufreq_init, 207 + .register_em = cpufreq_register_em_with_opp, 206 208 .name = "imx6q-cpufreq", 207 209 .attr = cpufreq_generic_attr, 208 210 .suspend = cpufreq_generic_suspend,
+1 -2
drivers/cpufreq/mediatek-cpufreq.c
··· 448 448 policy->driver_data = info; 449 449 policy->clk = info->cpu_clk; 450 450 451 - dev_pm_opp_of_register_em(info->cpu_dev, policy->cpus); 452 - 453 451 return 0; 454 452 } 455 453 ··· 469 471 .get = cpufreq_generic_get, 470 472 .init = mtk_cpufreq_init, 471 473 .exit = mtk_cpufreq_exit, 474 + .register_em = cpufreq_register_em_with_opp, 472 475 .name = "mtk-cpufreq", 473 476 .attr = cpufreq_generic_attr, 474 477 };
+1 -1
drivers/cpufreq/omap-cpufreq.c
··· 131 131 132 132 /* FIXME: what's the actual transition time? */ 133 133 cpufreq_generic_init(policy, freq_table, 300 * 1000); 134 - dev_pm_opp_of_register_em(mpu_dev, policy->cpus); 135 134 136 135 return 0; 137 136 } ··· 149 150 .get = cpufreq_generic_get, 150 151 .init = omap_cpu_init, 151 152 .exit = omap_cpu_exit, 153 + .register_em = cpufreq_register_em_with_opp, 152 154 .name = "omap", 153 155 .attr = cpufreq_generic_attr, 154 156 };
+149 -2
drivers/cpufreq/qcom-cpufreq-hw.c
··· 7 7 #include <linux/cpufreq.h> 8 8 #include <linux/init.h> 9 9 #include <linux/interconnect.h> 10 + #include <linux/interrupt.h> 10 11 #include <linux/kernel.h> 11 12 #include <linux/module.h> 12 13 #include <linux/of_address.h> 13 14 #include <linux/of_platform.h> 14 15 #include <linux/pm_opp.h> 15 16 #include <linux/slab.h> 17 + #include <linux/spinlock.h> 16 18 17 19 #define LUT_MAX_ENTRIES 40U 18 20 #define LUT_SRC GENMASK(31, 30) ··· 24 22 #define CLK_HW_DIV 2 25 23 #define LUT_TURBO_IND 1 26 24 25 + #define HZ_PER_KHZ 1000 26 + 27 27 struct qcom_cpufreq_soc_data { 28 28 u32 reg_enable; 29 29 u32 reg_freq_lut; 30 30 u32 reg_volt_lut; 31 + u32 reg_current_vote; 31 32 u32 reg_perf_state; 32 33 u8 lut_row_size; 33 34 }; ··· 39 34 void __iomem *base; 40 35 struct resource *res; 41 36 const struct qcom_cpufreq_soc_data *soc_data; 37 + 38 + /* 39 + * Mutex to synchronize between de-init sequence and re-starting LMh 40 + * polling/interrupts 41 + */ 42 + struct mutex throttle_lock; 43 + int throttle_irq; 44 + bool cancel_throttle; 45 + struct delayed_work throttle_work; 46 + struct cpufreq_policy *policy; 42 47 }; 43 48 44 49 static unsigned long cpu_hw_rate, xo_rate; ··· 266 251 } 267 252 } 268 253 254 + static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data) 255 + { 256 + unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote); 257 + 258 + return (val & 0x3FF) * 19200; 259 + } 260 + 261 + static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) 262 + { 263 + unsigned long max_capacity, capacity, freq_hz, throttled_freq; 264 + struct cpufreq_policy *policy = data->policy; 265 + int cpu = cpumask_first(policy->cpus); 266 + struct device *dev = get_cpu_device(cpu); 267 + struct dev_pm_opp *opp; 268 + unsigned int freq; 269 + 270 + /* 271 + * Get the h/w throttled frequency, normalize it using the 272 + * registered opp table and use it to calculate thermal pressure. 
273 + */ 274 + freq = qcom_lmh_get_throttle_freq(data); 275 + freq_hz = freq * HZ_PER_KHZ; 276 + 277 + opp = dev_pm_opp_find_freq_floor(dev, &freq_hz); 278 + if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE) 279 + dev_pm_opp_find_freq_ceil(dev, &freq_hz); 280 + 281 + throttled_freq = freq_hz / HZ_PER_KHZ; 282 + 283 + /* Update thermal pressure */ 284 + 285 + max_capacity = arch_scale_cpu_capacity(cpu); 286 + capacity = mult_frac(max_capacity, throttled_freq, policy->cpuinfo.max_freq); 287 + 288 + /* Don't pass boost capacity to scheduler */ 289 + if (capacity > max_capacity) 290 + capacity = max_capacity; 291 + 292 + arch_set_thermal_pressure(policy->cpus, max_capacity - capacity); 293 + 294 + /* 295 + * In the unlikely case policy is unregistered do not enable 296 + * polling or h/w interrupt 297 + */ 298 + mutex_lock(&data->throttle_lock); 299 + if (data->cancel_throttle) 300 + goto out; 301 + 302 + /* 303 + * If h/w throttled frequency is higher than what cpufreq has requested 304 + * for, then stop polling and switch back to interrupt mechanism. 
305 + */ 306 + if (throttled_freq >= qcom_cpufreq_hw_get(cpu)) 307 + enable_irq(data->throttle_irq); 308 + else 309 + mod_delayed_work(system_highpri_wq, &data->throttle_work, 310 + msecs_to_jiffies(10)); 311 + 312 + out: 313 + mutex_unlock(&data->throttle_lock); 314 + } 315 + 316 + static void qcom_lmh_dcvs_poll(struct work_struct *work) 317 + { 318 + struct qcom_cpufreq_data *data; 319 + 320 + data = container_of(work, struct qcom_cpufreq_data, throttle_work.work); 321 + qcom_lmh_dcvs_notify(data); 322 + } 323 + 324 + static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data) 325 + { 326 + struct qcom_cpufreq_data *c_data = data; 327 + 328 + /* Disable interrupt and enable polling */ 329 + disable_irq_nosync(c_data->throttle_irq); 330 + qcom_lmh_dcvs_notify(c_data); 331 + 332 + return 0; 333 + } 334 + 269 335 static const struct qcom_cpufreq_soc_data qcom_soc_data = { 270 336 .reg_enable = 0x0, 271 337 .reg_freq_lut = 0x110, 272 338 .reg_volt_lut = 0x114, 339 + .reg_current_vote = 0x704, 273 340 .reg_perf_state = 0x920, 274 341 .lut_row_size = 32, 275 342 }; ··· 370 273 {} 371 274 }; 372 275 MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match); 276 + 277 + static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index) 278 + { 279 + struct qcom_cpufreq_data *data = policy->driver_data; 280 + struct platform_device *pdev = cpufreq_get_driver_data(); 281 + char irq_name[15]; 282 + int ret; 283 + 284 + /* 285 + * Look for LMh interrupt. If no interrupt line is specified / 286 + * if there is an error, allow cpufreq to be enabled as usual. 287 + */ 288 + data->throttle_irq = platform_get_irq(pdev, index); 289 + if (data->throttle_irq <= 0) 290 + return data->throttle_irq == -EPROBE_DEFER ? 
-EPROBE_DEFER : 0; 291 + 292 + data->cancel_throttle = false; 293 + data->policy = policy; 294 + 295 + mutex_init(&data->throttle_lock); 296 + INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll); 297 + 298 + snprintf(irq_name, sizeof(irq_name), "dcvsh-irq-%u", policy->cpu); 299 + ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq, 300 + IRQF_ONESHOT, irq_name, data); 301 + if (ret) { 302 + dev_err(&pdev->dev, "Error registering %s: %d\n", irq_name, ret); 303 + return 0; 304 + } 305 + 306 + return 0; 307 + } 308 + 309 + static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data) 310 + { 311 + if (data->throttle_irq <= 0) 312 + return; 313 + 314 + mutex_lock(&data->throttle_lock); 315 + data->cancel_throttle = true; 316 + mutex_unlock(&data->throttle_lock); 317 + 318 + cancel_delayed_work_sync(&data->throttle_work); 319 + free_irq(data->throttle_irq, data); 320 + } 373 321 374 322 static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) 375 323 { ··· 490 348 } 491 349 492 350 policy->driver_data = data; 351 + policy->dvfs_possible_from_any_cpu = true; 493 352 494 353 ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy); 495 354 if (ret) { ··· 505 362 goto error; 506 363 } 507 364 508 - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); 509 - 510 365 if (policy_has_boost_freq(policy)) { 511 366 ret = cpufreq_enable_boost_support(); 512 367 if (ret) 513 368 dev_warn(cpu_dev, "failed to enable boost: %d\n", ret); 514 369 } 370 + 371 + ret = qcom_cpufreq_hw_lmh_init(policy, index); 372 + if (ret) 373 + goto error; 515 374 516 375 return 0; 517 376 error: ··· 534 389 535 390 dev_pm_opp_remove_all_dynamic(cpu_dev); 536 391 dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); 392 + qcom_cpufreq_hw_lmh_exit(data); 537 393 kfree(policy->freq_table); 538 394 kfree(data); 539 395 iounmap(base); ··· 558 412 .get = qcom_cpufreq_hw_get, 559 413 .init = qcom_cpufreq_hw_cpu_init, 560 414 .exit = qcom_cpufreq_hw_cpu_exit, 415 + 
.register_em = cpufreq_register_em_with_opp, 561 416 .fast_switch = qcom_cpufreq_hw_fast_switch, 562 417 .name = "qcom-cpufreq-hw", 563 418 .attr = qcom_cpufreq_hw_attr,
+42 -23
drivers/cpufreq/scmi-cpufreq.c
··· 22 22 23 23 struct scmi_data { 24 24 int domain_id; 25 + int nr_opp; 25 26 struct device *cpu_dev; 27 + cpumask_var_t opp_shared_cpus; 26 28 }; 27 29 28 30 static struct scmi_protocol_handle *ph; ··· 125 123 struct device *cpu_dev; 126 124 struct scmi_data *priv; 127 125 struct cpufreq_frequency_table *freq_table; 128 - struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power); 129 - cpumask_var_t opp_shared_cpus; 130 - bool power_scale_mw; 131 126 132 127 cpu_dev = get_cpu_device(policy->cpu); 133 128 if (!cpu_dev) { ··· 132 133 return -ENODEV; 133 134 } 134 135 135 - if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL)) 136 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 137 + if (!priv) 136 138 return -ENOMEM; 139 + 140 + if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) { 141 + ret = -ENOMEM; 142 + goto out_free_priv; 143 + } 137 144 138 145 /* Obtain CPUs that share SCMI performance controls */ 139 146 ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus); ··· 153 148 * The OPP 'sharing cpus' info may come from DT through an empty opp 154 149 * table and opp-shared. 155 150 */ 156 - ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus); 157 - if (ret || !cpumask_weight(opp_shared_cpus)) { 151 + ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus); 152 + if (ret || !cpumask_weight(priv->opp_shared_cpus)) { 158 153 /* 159 154 * Either opp-table is not set or no opp-shared was found. 160 155 * Use the CPU mask from SCMI to designate CPUs sharing an OPP 161 156 * table. 
162 157 */ 163 - cpumask_copy(opp_shared_cpus, policy->cpus); 158 + cpumask_copy(priv->opp_shared_cpus, policy->cpus); 164 159 } 165 160 166 161 /* ··· 185 180 goto out_free_opp; 186 181 } 187 182 188 - ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus); 183 + ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus); 189 184 if (ret) { 190 185 dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", 191 186 __func__, ret); ··· 193 188 goto out_free_opp; 194 189 } 195 190 196 - power_scale_mw = perf_ops->power_scale_mw_get(ph); 197 - em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, 198 - opp_shared_cpus, power_scale_mw); 199 - } 200 - 201 - priv = kzalloc(sizeof(*priv), GFP_KERNEL); 202 - if (!priv) { 203 - ret = -ENOMEM; 204 - goto out_free_opp; 191 + priv->nr_opp = nr_opp; 205 192 } 206 193 207 194 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); 208 195 if (ret) { 209 196 dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); 210 - goto out_free_priv; 197 + goto out_free_opp; 211 198 } 212 199 213 200 priv->cpu_dev = cpu_dev; ··· 220 223 policy->fast_switch_possible = 221 224 perf_ops->fast_switch_possible(ph, cpu_dev); 222 225 223 - free_cpumask_var(opp_shared_cpus); 224 226 return 0; 225 - 226 - out_free_priv: 227 - kfree(priv); 228 227 229 228 out_free_opp: 230 229 dev_pm_opp_remove_all_dynamic(cpu_dev); 231 230 232 231 out_free_cpumask: 233 - free_cpumask_var(opp_shared_cpus); 232 + free_cpumask_var(priv->opp_shared_cpus); 233 + 234 + out_free_priv: 235 + kfree(priv); 234 236 235 237 return ret; 236 238 } ··· 240 244 241 245 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 242 246 dev_pm_opp_remove_all_dynamic(priv->cpu_dev); 247 + free_cpumask_var(priv->opp_shared_cpus); 243 248 kfree(priv); 244 249 245 250 return 0; 251 + } 252 + 253 + static void scmi_cpufreq_register_em(struct cpufreq_policy *policy) 254 + { 255 + struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power); 256 + bool 
power_scale_mw = perf_ops->power_scale_mw_get(ph); 257 + struct scmi_data *priv = policy->driver_data; 258 + 259 + /* 260 + * This callback will be called for each policy, but we don't need to 261 + * register with EM every time. Despite not being part of the same 262 + * policy, some CPUs may still share their perf-domains, and a CPU from 263 + * another policy may already have registered with EM on behalf of CPUs 264 + * of this policy. 265 + */ 266 + if (!priv->nr_opp) 267 + return; 268 + 269 + em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp, 270 + &em_cb, priv->opp_shared_cpus, 271 + power_scale_mw); 246 272 } 247 273 248 274 static struct cpufreq_driver scmi_cpufreq_driver = { ··· 279 261 .get = scmi_cpufreq_get_rate, 280 262 .init = scmi_cpufreq_init, 281 263 .exit = scmi_cpufreq_exit, 264 + .register_em = scmi_cpufreq_register_em, 282 265 }; 283 266 284 267 static int scmi_cpufreq_probe(struct scmi_device *sdev)
+1 -2
drivers/cpufreq/scpi-cpufreq.c
··· 163 163 164 164 policy->fast_switch_possible = false; 165 165 166 - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); 167 - 168 166 return 0; 169 167 170 168 out_free_cpufreq_table: ··· 198 200 .init = scpi_cpufreq_init, 199 201 .exit = scpi_cpufreq_exit, 200 202 .target_index = scpi_cpufreq_set_target, 203 + .register_em = cpufreq_register_em_with_opp, 201 204 }; 202 205 203 206 static int scpi_cpufreq_probe(struct platform_device *pdev)
+4 -21
drivers/cpufreq/vexpress-spc-cpufreq.c
··· 15 15 #include <linux/cpu.h> 16 16 #include <linux/cpufreq.h> 17 17 #include <linux/cpumask.h> 18 - #include <linux/cpu_cooling.h> 19 18 #include <linux/device.h> 20 19 #include <linux/module.h> 21 20 #include <linux/mutex.h> ··· 46 47 #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq) 47 48 #define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq) 48 49 49 - static struct thermal_cooling_device *cdev[MAX_CLUSTERS]; 50 50 static struct clk *clk[MAX_CLUSTERS]; 51 51 static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1]; 52 52 static atomic_t cluster_usage[MAX_CLUSTERS + 1]; ··· 440 442 policy->freq_table = freq_table[cur_cluster]; 441 443 policy->cpuinfo.transition_latency = 1000000; /* 1 ms */ 442 444 443 - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); 444 - 445 445 if (is_bL_switching_enabled()) 446 446 per_cpu(cpu_last_req_freq, policy->cpu) = 447 447 clk_get_cpu_rate(policy->cpu); ··· 453 457 struct device *cpu_dev; 454 458 int cur_cluster = cpu_to_cluster(policy->cpu); 455 459 456 - if (cur_cluster < MAX_CLUSTERS) { 457 - cpufreq_cooling_unregister(cdev[cur_cluster]); 458 - cdev[cur_cluster] = NULL; 459 - } 460 - 461 460 cpu_dev = get_cpu_device(policy->cpu); 462 461 if (!cpu_dev) { 463 462 pr_err("%s: failed to get cpu%d device\n", __func__, ··· 464 473 return 0; 465 474 } 466 475 467 - static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy) 468 - { 469 - int cur_cluster = cpu_to_cluster(policy->cpu); 470 - 471 - /* Do not register a cpu_cooling device if we are in IKS mode */ 472 - if (cur_cluster >= MAX_CLUSTERS) 473 - return; 474 - 475 - cdev[cur_cluster] = of_cpufreq_cooling_register(policy); 476 - } 477 - 478 476 static struct cpufreq_driver ve_spc_cpufreq_driver = { 479 477 .name = "vexpress-spc", 480 478 .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY | ··· 473 493 .get = ve_spc_cpufreq_get_rate, 474 494 .init = ve_spc_cpufreq_init, 475 495 .exit = ve_spc_cpufreq_exit, 476 - .ready 
= ve_spc_cpufreq_ready, 496 + .register_em = cpufreq_register_em_with_opp, 477 497 .attr = cpufreq_generic_attr, 478 498 }; 479 499 ··· 532 552 533 553 for (i = 0; i < MAX_CLUSTERS; i++) 534 554 mutex_init(&cluster_lock[i]); 555 + 556 + if (!is_bL_switching_enabled()) 557 + ve_spc_cpufreq_driver.flags |= CPUFREQ_IS_COOLING_DEV; 535 558 536 559 ret = cpufreq_register_driver(&ve_spc_cpufreq_driver); 537 560 if (ret) {
+14
include/linux/cpufreq.h
··· 9 9 #define _LINUX_CPUFREQ_H 10 10 11 11 #include <linux/clk.h> 12 + #include <linux/cpu.h> 12 13 #include <linux/cpumask.h> 13 14 #include <linux/completion.h> 14 15 #include <linux/kobject.h> 15 16 #include <linux/notifier.h> 17 + #include <linux/pm_opp.h> 16 18 #include <linux/pm_qos.h> 17 19 #include <linux/spinlock.h> 18 20 #include <linux/sysfs.h> ··· 375 373 /* platform specific boost support code */ 376 374 bool boost_enabled; 377 375 int (*set_boost)(struct cpufreq_policy *policy, int state); 376 + 377 + /* 378 + * Set by drivers that want to register with the energy model after the 379 + * policy is properly initialized, but before the governor is started. 380 + */ 381 + void (*register_em)(struct cpufreq_policy *policy); 378 382 }; 379 383 380 384 /* flags */ ··· 1054 1046 void cpufreq_generic_init(struct cpufreq_policy *policy, 1055 1047 struct cpufreq_frequency_table *table, 1056 1048 unsigned int transition_latency); 1049 + 1050 + static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy) 1051 + { 1052 + dev_pm_opp_of_register_em(get_cpu_device(policy->cpu), 1053 + policy->related_cpus); 1054 + } 1057 1055 #endif /* _LINUX_CPUFREQ_H */