Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cpufreq: powerpc_cbe: Switch to QoS requests for freq limits

The cpufreq core now takes the min/max frequency constraints via QoS
requests and the CPUFREQ_ADJUST notifier shall get removed later on.

Switch over to using the QoS request for maximum frequency constraint
for ppc_cbe_cpufreq driver.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
[ rjw: Subject ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

Authored by Viresh Kumar; committed by Rafael J. Wysocki.
afe96907 dce2e3a8

+88 -39
+18 -1
drivers/cpufreq/ppc_cbe_cpufreq.c
··· 110 110 #endif 111 111 112 112 policy->freq_table = cbe_freqs; 113 + cbe_cpufreq_pmi_policy_init(policy); 114 + return 0; 115 + } 116 + 117 + static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) 118 + { 119 + cbe_cpufreq_pmi_policy_exit(policy); 113 120 return 0; 114 121 } 115 122 ··· 136 129 .verify = cpufreq_generic_frequency_table_verify, 137 130 .target_index = cbe_cpufreq_target, 138 131 .init = cbe_cpufreq_cpu_init, 132 + .exit = cbe_cpufreq_cpu_exit, 139 133 .name = "cbe-cpufreq", 140 134 .flags = CPUFREQ_CONST_LOOPS, 141 135 }; ··· 147 139 148 140 static int __init cbe_cpufreq_init(void) 149 141 { 142 + int ret; 143 + 150 144 if (!machine_is(cell)) 151 145 return -ENODEV; 152 146 153 - return cpufreq_register_driver(&cbe_cpufreq_driver); 147 + cbe_cpufreq_pmi_init(); 148 + 149 + ret = cpufreq_register_driver(&cbe_cpufreq_driver); 150 + if (ret) 151 + cbe_cpufreq_pmi_exit(); 152 + 153 + return ret; 154 154 } 155 155 156 156 static void __exit cbe_cpufreq_exit(void) 157 157 { 158 158 cpufreq_unregister_driver(&cbe_cpufreq_driver); 159 + cbe_cpufreq_pmi_exit(); 159 160 } 160 161 161 162 module_init(cbe_cpufreq_init);
+8
drivers/cpufreq/ppc_cbe_cpufreq.h
··· 20 20 21 21 #if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI) 22 22 extern bool cbe_cpufreq_has_pmi; 23 + void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy); 24 + void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy); 25 + void cbe_cpufreq_pmi_init(void); 26 + void cbe_cpufreq_pmi_exit(void); 23 27 #else 24 28 #define cbe_cpufreq_has_pmi (0) 29 + static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {} 30 + static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {} 31 + static inline void cbe_cpufreq_pmi_init(void) {} 32 + static inline void cbe_cpufreq_pmi_exit(void) {} 25 33 #endif
+62 -38
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
··· 12 12 #include <linux/timer.h> 13 13 #include <linux/init.h> 14 14 #include <linux/of_platform.h> 15 + #include <linux/pm_qos.h> 15 16 16 17 #include <asm/processor.h> 17 18 #include <asm/prom.h> ··· 24 23 #endif 25 24 26 25 #include "ppc_cbe_cpufreq.h" 27 - 28 - static u8 pmi_slow_mode_limit[MAX_CBE]; 29 26 30 27 bool cbe_cpufreq_has_pmi = false; 31 28 EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi); ··· 64 65 65 66 static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) 66 67 { 68 + struct cpufreq_policy *policy; 69 + struct dev_pm_qos_request *req; 67 70 u8 node, slow_mode; 71 + int cpu, ret; 68 72 69 73 BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE); 70 74 71 75 node = pmi_msg.data1; 72 76 slow_mode = pmi_msg.data2; 73 77 74 - pmi_slow_mode_limit[node] = slow_mode; 78 + cpu = cbe_node_to_cpu(node); 75 79 76 80 pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode); 77 - } 78 81 79 - static int pmi_notifier(struct notifier_block *nb, 80 - unsigned long event, void *data) 81 - { 82 - struct cpufreq_policy *policy = data; 83 - struct cpufreq_frequency_table *cbe_freqs = policy->freq_table; 84 - u8 node; 85 - 86 - /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY 87 - * policy events?) 
88 - */ 89 - node = cbe_cpu_to_node(policy->cpu); 90 - 91 - pr_debug("got notified, event=%lu, node=%u\n", event, node); 92 - 93 - if (pmi_slow_mode_limit[node] != 0) { 94 - pr_debug("limiting node %d to slow mode %d\n", 95 - node, pmi_slow_mode_limit[node]); 96 - 97 - cpufreq_verify_within_limits(policy, 0, 98 - 99 - cbe_freqs[pmi_slow_mode_limit[node]].frequency); 82 + policy = cpufreq_cpu_get(cpu); 83 + if (!policy) { 84 + pr_warn("cpufreq policy not found cpu%d\n", cpu); 85 + return; 100 86 } 101 87 102 - return 0; 103 - } 88 + req = policy->driver_data; 104 89 105 - static struct notifier_block pmi_notifier_block = { 106 - .notifier_call = pmi_notifier, 107 - }; 90 + ret = dev_pm_qos_update_request(req, 91 + policy->freq_table[slow_mode].frequency); 92 + if (ret < 0) 93 + pr_warn("Failed to update freq constraint: %d\n", ret); 94 + else 95 + pr_debug("limiting node %d to slow mode %d\n", node, slow_mode); 96 + 97 + cpufreq_cpu_put(policy); 98 + } 108 99 109 100 static struct pmi_handler cbe_pmi_handler = { 110 101 .type = PMI_TYPE_FREQ_CHANGE, 111 102 .handle_pmi_message = cbe_cpufreq_handle_pmi, 112 103 }; 113 104 114 - 115 - 116 - static int __init cbe_cpufreq_pmi_init(void) 105 + void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) 117 106 { 118 - cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0; 107 + struct dev_pm_qos_request *req; 108 + int ret; 119 109 120 110 if (!cbe_cpufreq_has_pmi) 121 - return -ENODEV; 111 + return; 122 112 123 - cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); 113 + req = kzalloc(sizeof(*req), GFP_KERNEL); 114 + if (!req) 115 + return; 124 116 125 - return 0; 117 + ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req, 118 + DEV_PM_QOS_MAX_FREQUENCY, 119 + policy->freq_table[0].frequency); 120 + if (ret < 0) { 121 + pr_err("Failed to add freq constraint (%d)\n", ret); 122 + kfree(req); 123 + return; 124 + } 125 + 126 + policy->driver_data = req; 126 127 } 127 - 
device_initcall(cbe_cpufreq_pmi_init); 128 + EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init); 129 + 130 + void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) 131 + { 132 + struct dev_pm_qos_request *req = policy->driver_data; 133 + 134 + if (cbe_cpufreq_has_pmi) { 135 + dev_pm_qos_remove_request(req); 136 + kfree(req); 137 + } 138 + } 139 + EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit); 140 + 141 + void cbe_cpufreq_pmi_init(void) 142 + { 143 + if (!pmi_register_handler(&cbe_pmi_handler)) 144 + cbe_cpufreq_has_pmi = true; 145 + } 146 + EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init); 147 + 148 + void cbe_cpufreq_pmi_exit(void) 149 + { 150 + pmi_unregister_handler(&cbe_pmi_handler); 151 + cbe_cpufreq_has_pmi = false; 152 + } 153 + EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit);