Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cpufreq: Use per-policy frequency QoS

Replace the CPU device PM QoS used for the management of min and max
frequency constraints in cpufreq (and its users) with per-policy
frequency QoS to avoid problems with cpufreq policies covering
more than one CPU.

Namely, a cpufreq driver is registered with the subsys interface
which calls cpufreq_add_dev() for each CPU, starting from CPU0, so
currently the PM QoS notifiers are added to the first CPU in the
policy (i.e. CPU0 in the majority of cases).

In turn, when the cpufreq driver is unregistered, the subsys interface
doing that calls cpufreq_remove_dev() for each CPU, starting from CPU0,
and the PM QoS notifiers are only removed when cpufreq_remove_dev() is
called for the last CPU in the policy, say CPUx, which as a rule is
not CPU0 if the policy covers more than one CPU. Then, the PM QoS
notifiers cannot be removed, because CPUx does not have them, and
they are still there in the device PM QoS notifiers list of CPU0,
which prevents new PM QoS notifiers from being registered for CPU0
on the next attempt to register the cpufreq driver.

The same issue occurs when the first CPU in the policy goes offline
before unregistering the driver.

After this change it does not matter which CPU is the policy CPU at
the driver registration time and whether or not it is online all the
time, because the frequency QoS is per policy and not per CPU.

Fixes: 67d874c3b2c6 ("cpufreq: Register notifiers with the PM QoS framework")
Reported-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Reported-by: Sudeep Holla <sudeep.holla@arm.com>
Tested-by: Sudeep Holla <sudeep.holla@arm.com>
Diagnosed-by: Viresh Kumar <viresh.kumar@linaro.org>
Link: https://lore.kernel.org/linux-pm/5ad2624194baa2f53acc1f1e627eb7684c577a19.1562210705.git.viresh.kumar@linaro.org/T/#md2d89e95906b8c91c15f582146173dce2e86e99f
Link: https://lore.kernel.org/linux-pm/20191017094612.6tbkwoq4harsjcqv@vireshk-i7/T/#m30d48cc23b9a80467fbaa16e30f90b3828a5a29b
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>

+114 -114
+4 -5
drivers/acpi/processor_driver.c
··· 290 290 unsigned long event, void *data) 291 291 { 292 292 struct cpufreq_policy *policy = data; 293 - int cpu = policy->cpu; 294 293 295 294 if (event == CPUFREQ_CREATE_POLICY) { 296 - acpi_thermal_cpufreq_init(cpu); 297 - acpi_processor_ppc_init(cpu); 295 + acpi_thermal_cpufreq_init(policy); 296 + acpi_processor_ppc_init(policy); 298 297 } else if (event == CPUFREQ_REMOVE_POLICY) { 299 - acpi_processor_ppc_exit(cpu); 300 - acpi_thermal_cpufreq_exit(cpu); 298 + acpi_processor_ppc_exit(policy); 299 + acpi_thermal_cpufreq_exit(policy); 301 300 } 302 301 303 302 return 0;
+9 -9
drivers/acpi/processor_perflib.c
··· 81 81 pr->performance_platform_limit = (int)ppc; 82 82 83 83 if (ppc >= pr->performance->state_count || 84 - unlikely(!dev_pm_qos_request_active(&pr->perflib_req))) 84 + unlikely(!freq_qos_request_active(&pr->perflib_req))) 85 85 return 0; 86 86 87 - ret = dev_pm_qos_update_request(&pr->perflib_req, 87 + ret = freq_qos_update_request(&pr->perflib_req, 88 88 pr->performance->states[ppc].core_frequency * 1000); 89 89 if (ret < 0) { 90 90 pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n", ··· 157 157 ignore_ppc = 0; 158 158 } 159 159 160 - void acpi_processor_ppc_init(int cpu) 160 + void acpi_processor_ppc_init(struct cpufreq_policy *policy) 161 161 { 162 + int cpu = policy->cpu; 162 163 struct acpi_processor *pr = per_cpu(processors, cpu); 163 164 int ret; 164 165 165 166 if (!pr) 166 167 return; 167 168 168 - ret = dev_pm_qos_add_request(get_cpu_device(cpu), 169 - &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY, 170 - INT_MAX); 169 + ret = freq_qos_add_request(&policy->constraints, &pr->perflib_req, 170 + FREQ_QOS_MAX, INT_MAX); 171 171 if (ret < 0) 172 172 pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu, 173 173 ret); 174 174 } 175 175 176 - void acpi_processor_ppc_exit(int cpu) 176 + void acpi_processor_ppc_exit(struct cpufreq_policy *policy) 177 177 { 178 - struct acpi_processor *pr = per_cpu(processors, cpu); 178 + struct acpi_processor *pr = per_cpu(processors, policy->cpu); 179 179 180 180 if (pr) 181 - dev_pm_qos_remove_request(&pr->perflib_req); 181 + freq_qos_remove_request(&pr->perflib_req); 182 182 } 183 183 184 184 static int acpi_processor_get_performance_control(struct acpi_processor *pr)
+9 -9
drivers/acpi/processor_thermal.c
··· 105 105 106 106 pr = per_cpu(processors, i); 107 107 108 - if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req))) 108 + if (unlikely(!freq_qos_request_active(&pr->thermal_req))) 109 109 continue; 110 110 111 111 policy = cpufreq_cpu_get(i); ··· 116 116 117 117 cpufreq_cpu_put(policy); 118 118 119 - ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq); 119 + ret = freq_qos_update_request(&pr->thermal_req, max_freq); 120 120 if (ret < 0) { 121 121 pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n", 122 122 pr->id, ret); ··· 125 125 return 0; 126 126 } 127 127 128 - void acpi_thermal_cpufreq_init(int cpu) 128 + void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy) 129 129 { 130 + int cpu = policy->cpu; 130 131 struct acpi_processor *pr = per_cpu(processors, cpu); 131 132 int ret; 132 133 133 134 if (!pr) 134 135 return; 135 136 136 - ret = dev_pm_qos_add_request(get_cpu_device(cpu), 137 - &pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY, 138 - INT_MAX); 137 + ret = freq_qos_add_request(&policy->constraints, &pr->thermal_req, 138 + FREQ_QOS_MAX, INT_MAX); 139 139 if (ret < 0) 140 140 pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu, 141 141 ret); 142 142 } 143 143 144 - void acpi_thermal_cpufreq_exit(int cpu) 144 + void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy) 145 145 { 146 - struct acpi_processor *pr = per_cpu(processors, cpu); 146 + struct acpi_processor *pr = per_cpu(processors, policy->cpu); 147 147 148 148 if (pr) 149 - dev_pm_qos_remove_request(&pr->thermal_req); 149 + freq_qos_remove_request(&pr->thermal_req); 150 150 } 151 151 #else /* ! CONFIG_CPU_FREQ */ 152 152 static int cpufreq_get_max_state(unsigned int cpu)
+26 -33
drivers/cpufreq/cpufreq.c
··· 720 720 if (ret != 1) \ 721 721 return -EINVAL; \ 722 722 \ 723 - ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\ 723 + ret = freq_qos_update_request(policy->object##_freq_req, val);\ 724 724 return ret >= 0 ? count : ret; \ 725 725 } 726 726 ··· 1202 1202 goto err_free_real_cpus; 1203 1203 } 1204 1204 1205 + freq_constraints_init(&policy->constraints); 1206 + 1205 1207 policy->nb_min.notifier_call = cpufreq_notifier_min; 1206 1208 policy->nb_max.notifier_call = cpufreq_notifier_max; 1207 1209 1208 - ret = dev_pm_qos_add_notifier(dev, &policy->nb_min, 1209 - DEV_PM_QOS_MIN_FREQUENCY); 1210 + ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN, 1211 + &policy->nb_min); 1210 1212 if (ret) { 1211 1213 dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n", 1212 1214 ret, cpumask_pr_args(policy->cpus)); 1213 1215 goto err_kobj_remove; 1214 1216 } 1215 1217 1216 - ret = dev_pm_qos_add_notifier(dev, &policy->nb_max, 1217 - DEV_PM_QOS_MAX_FREQUENCY); 1218 + ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX, 1219 + &policy->nb_max); 1218 1220 if (ret) { 1219 1221 dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n", 1220 1222 ret, cpumask_pr_args(policy->cpus)); ··· 1234 1232 return policy; 1235 1233 1236 1234 err_min_qos_notifier: 1237 - dev_pm_qos_remove_notifier(dev, &policy->nb_min, 1238 - DEV_PM_QOS_MIN_FREQUENCY); 1235 + freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN, 1236 + &policy->nb_min); 1239 1237 err_kobj_remove: 1240 1238 cpufreq_policy_put_kobj(policy); 1241 1239 err_free_real_cpus: ··· 1252 1250 1253 1251 static void cpufreq_policy_free(struct cpufreq_policy *policy) 1254 1252 { 1255 - struct device *dev = get_cpu_device(policy->cpu); 1256 1253 unsigned long flags; 1257 1254 int cpu; 1258 1255 ··· 1263 1262 per_cpu(cpufreq_cpu_data, cpu) = NULL; 1264 1263 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1265 1264 1266 - dev_pm_qos_remove_notifier(dev, &policy->nb_max, 1267 
- DEV_PM_QOS_MAX_FREQUENCY); 1268 - dev_pm_qos_remove_notifier(dev, &policy->nb_min, 1269 - DEV_PM_QOS_MIN_FREQUENCY); 1265 + freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX, 1266 + &policy->nb_max); 1267 + freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN, 1268 + &policy->nb_min); 1270 1269 1271 1270 if (policy->max_freq_req) { 1272 1271 /* ··· 1275 1274 */ 1276 1275 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1277 1276 CPUFREQ_REMOVE_POLICY, policy); 1278 - dev_pm_qos_remove_request(policy->max_freq_req); 1277 + freq_qos_remove_request(policy->max_freq_req); 1279 1278 } 1280 1279 1281 - dev_pm_qos_remove_request(policy->min_freq_req); 1280 + freq_qos_remove_request(policy->min_freq_req); 1282 1281 kfree(policy->min_freq_req); 1283 1282 1284 1283 cpufreq_policy_put_kobj(policy); ··· 1358 1357 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 1359 1358 1360 1359 if (new_policy) { 1361 - struct device *dev = get_cpu_device(cpu); 1362 - 1363 1360 for_each_cpu(j, policy->related_cpus) { 1364 1361 per_cpu(cpufreq_cpu_data, j) = policy; 1365 1362 add_cpu_dev_symlink(policy, j); ··· 1368 1369 if (!policy->min_freq_req) 1369 1370 goto out_destroy_policy; 1370 1371 1371 - ret = dev_pm_qos_add_request(dev, policy->min_freq_req, 1372 - DEV_PM_QOS_MIN_FREQUENCY, 1373 - policy->min); 1372 + ret = freq_qos_add_request(&policy->constraints, 1373 + policy->min_freq_req, FREQ_QOS_MIN, 1374 + policy->min); 1374 1375 if (ret < 0) { 1375 1376 /* 1376 - * So we don't call dev_pm_qos_remove_request() for an 1377 + * So we don't call freq_qos_remove_request() for an 1377 1378 * uninitialized request. 
1378 1379 */ 1379 1380 kfree(policy->min_freq_req); 1380 1381 policy->min_freq_req = NULL; 1381 - 1382 - dev_err(dev, "Failed to add min-freq constraint (%d)\n", 1383 - ret); 1384 1382 goto out_destroy_policy; 1385 1383 } 1386 1384 1387 1385 /* 1388 1386 * This must be initialized right here to avoid calling 1389 - * dev_pm_qos_remove_request() on uninitialized request in case 1387 + * freq_qos_remove_request() on uninitialized request in case 1390 1388 * of errors. 1391 1389 */ 1392 1390 policy->max_freq_req = policy->min_freq_req + 1; 1393 1391 1394 - ret = dev_pm_qos_add_request(dev, policy->max_freq_req, 1395 - DEV_PM_QOS_MAX_FREQUENCY, 1396 - policy->max); 1392 + ret = freq_qos_add_request(&policy->constraints, 1393 + policy->max_freq_req, FREQ_QOS_MAX, 1394 + policy->max); 1397 1395 if (ret < 0) { 1398 1396 policy->max_freq_req = NULL; 1399 - dev_err(dev, "Failed to add max-freq constraint (%d)\n", 1400 - ret); 1401 1397 goto out_destroy_policy; 1402 1398 } 1403 1399 ··· 2368 2374 struct cpufreq_policy *new_policy) 2369 2375 { 2370 2376 struct cpufreq_governor *old_gov; 2371 - struct device *cpu_dev = get_cpu_device(policy->cpu); 2372 2377 int ret; 2373 2378 2374 2379 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", ··· 2379 2386 * PM QoS framework collects all the requests from users and provide us 2380 2387 * the final aggregated value here. 
2381 2388 */ 2382 - new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY); 2383 - new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY); 2389 + new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN); 2390 + new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX); 2384 2391 2385 2392 /* verify the cpu speed can be set within this limit */ 2386 2393 ret = cpufreq_driver->verify(new_policy); ··· 2511 2518 break; 2512 2519 } 2513 2520 2514 - ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max); 2521 + ret = freq_qos_update_request(policy->max_freq_req, policy->max); 2515 2522 if (ret < 0) 2516 2523 break; 2517 2524 }
+15 -15
drivers/cpufreq/intel_pstate.c
··· 1088 1088 1089 1089 static struct cpufreq_driver intel_pstate; 1090 1090 1091 - static void update_qos_request(enum dev_pm_qos_req_type type) 1091 + static void update_qos_request(enum freq_qos_req_type type) 1092 1092 { 1093 1093 int max_state, turbo_max, freq, i, perf_pct; 1094 - struct dev_pm_qos_request *req; 1094 + struct freq_qos_request *req; 1095 1095 struct cpufreq_policy *policy; 1096 1096 1097 1097 for_each_possible_cpu(i) { ··· 1112 1112 else 1113 1113 turbo_max = cpu->pstate.turbo_pstate; 1114 1114 1115 - if (type == DEV_PM_QOS_MIN_FREQUENCY) { 1115 + if (type == FREQ_QOS_MIN) { 1116 1116 perf_pct = global.min_perf_pct; 1117 1117 } else { 1118 1118 req++; ··· 1122 1122 freq = DIV_ROUND_UP(turbo_max * perf_pct, 100); 1123 1123 freq *= cpu->pstate.scaling; 1124 1124 1125 - if (dev_pm_qos_update_request(req, freq) < 0) 1125 + if (freq_qos_update_request(req, freq) < 0) 1126 1126 pr_warn("Failed to update freq constraint: CPU%d\n", i); 1127 1127 } 1128 1128 } ··· 1153 1153 if (intel_pstate_driver == &intel_pstate) 1154 1154 intel_pstate_update_policies(); 1155 1155 else 1156 - update_qos_request(DEV_PM_QOS_MAX_FREQUENCY); 1156 + update_qos_request(FREQ_QOS_MAX); 1157 1157 1158 1158 mutex_unlock(&intel_pstate_driver_lock); 1159 1159 ··· 1187 1187 if (intel_pstate_driver == &intel_pstate) 1188 1188 intel_pstate_update_policies(); 1189 1189 else 1190 - update_qos_request(DEV_PM_QOS_MIN_FREQUENCY); 1190 + update_qos_request(FREQ_QOS_MIN); 1191 1191 1192 1192 mutex_unlock(&intel_pstate_driver_lock); 1193 1193 ··· 2381 2381 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) 2382 2382 { 2383 2383 int max_state, turbo_max, min_freq, max_freq, ret; 2384 - struct dev_pm_qos_request *req; 2384 + struct freq_qos_request *req; 2385 2385 struct cpudata *cpu; 2386 2386 struct device *dev; 2387 2387 ··· 2416 2416 max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); 2417 2417 max_freq *= cpu->pstate.scaling; 2418 2418 2419 - ret = 
dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY, 2420 - min_freq); 2419 + ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN, 2420 + min_freq); 2421 2421 if (ret < 0) { 2422 2422 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); 2423 2423 goto free_req; 2424 2424 } 2425 2425 2426 - ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY, 2427 - max_freq); 2426 + ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX, 2427 + max_freq); 2428 2428 if (ret < 0) { 2429 2429 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); 2430 2430 goto remove_min_req; ··· 2435 2435 return 0; 2436 2436 2437 2437 remove_min_req: 2438 - dev_pm_qos_remove_request(req); 2438 + freq_qos_remove_request(req); 2439 2439 free_req: 2440 2440 kfree(req); 2441 2441 pstate_exit: ··· 2446 2446 2447 2447 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) 2448 2448 { 2449 - struct dev_pm_qos_request *req; 2449 + struct freq_qos_request *req; 2450 2450 2451 2451 req = policy->driver_data; 2452 2452 2453 - dev_pm_qos_remove_request(req + 1); 2454 - dev_pm_qos_remove_request(req); 2453 + freq_qos_remove_request(req + 1); 2454 + freq_qos_remove_request(req); 2455 2455 kfree(req); 2456 2456 2457 2457 return intel_pstate_cpu_exit(policy);
+7 -8
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
··· 65 65 static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) 66 66 { 67 67 struct cpufreq_policy *policy; 68 - struct dev_pm_qos_request *req; 68 + struct freq_qos_request *req; 69 69 u8 node, slow_mode; 70 70 int cpu, ret; 71 71 ··· 86 86 87 87 req = policy->driver_data; 88 88 89 - ret = dev_pm_qos_update_request(req, 89 + ret = freq_qos_update_request(req, 90 90 policy->freq_table[slow_mode].frequency); 91 91 if (ret < 0) 92 92 pr_warn("Failed to update freq constraint: %d\n", ret); ··· 103 103 104 104 void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) 105 105 { 106 - struct dev_pm_qos_request *req; 106 + struct freq_qos_request *req; 107 107 int ret; 108 108 109 109 if (!cbe_cpufreq_has_pmi) ··· 113 113 if (!req) 114 114 return; 115 115 116 - ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req, 117 - DEV_PM_QOS_MAX_FREQUENCY, 118 - policy->freq_table[0].frequency); 116 + ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX, 117 + policy->freq_table[0].frequency); 119 118 if (ret < 0) { 120 119 pr_err("Failed to add freq constraint (%d)\n", ret); 121 120 kfree(req); ··· 127 128 128 129 void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) 129 130 { 130 - struct dev_pm_qos_request *req = policy->driver_data; 131 + struct freq_qos_request *req = policy->driver_data; 131 132 132 133 if (cbe_cpufreq_has_pmi) { 133 - dev_pm_qos_remove_request(req); 134 + freq_qos_remove_request(req); 134 135 kfree(req); 135 136 } 136 137 }
+22 -16
drivers/macintosh/windfarm_cpufreq_clamp.c
··· 18 18 19 19 static int clamped; 20 20 static struct wf_control *clamp_control; 21 - static struct dev_pm_qos_request qos_req; 21 + static struct freq_qos_request qos_req; 22 22 static unsigned int min_freq, max_freq; 23 23 24 24 static int clamp_set(struct wf_control *ct, s32 value) ··· 35 35 } 36 36 clamped = value; 37 37 38 - return dev_pm_qos_update_request(&qos_req, freq); 38 + return freq_qos_update_request(&qos_req, freq); 39 39 } 40 40 41 41 static int clamp_get(struct wf_control *ct, s32 *value) ··· 77 77 78 78 min_freq = policy->cpuinfo.min_freq; 79 79 max_freq = policy->cpuinfo.max_freq; 80 + 81 + ret = freq_qos_add_request(&policy->constraints, &qos_req, FREQ_QOS_MAX, 82 + max_freq); 83 + 80 84 cpufreq_cpu_put(policy); 85 + 86 + if (ret < 0) { 87 + pr_err("%s: Failed to add freq constraint (%d)\n", __func__, 88 + ret); 89 + return ret; 90 + } 81 91 82 92 dev = get_cpu_device(0); 83 93 if (unlikely(!dev)) { 84 94 pr_warn("%s: No cpu device for cpu0\n", __func__); 85 - return -ENODEV; 95 + ret = -ENODEV; 96 + goto fail; 86 97 } 87 98 88 99 clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL); 89 - if (clamp == NULL) 90 - return -ENOMEM; 91 - 92 - ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY, 93 - max_freq); 94 - if (ret < 0) { 95 - pr_err("%s: Failed to add freq constraint (%d)\n", __func__, 96 - ret); 97 - goto free; 100 + if (clamp == NULL) { 101 + ret = -ENOMEM; 102 + goto fail; 98 103 } 99 104 100 105 clamp->ops = &clamp_ops; 101 106 clamp->name = "cpufreq-clamp"; 102 107 ret = wf_register_control(clamp); 103 108 if (ret) 104 - goto fail; 109 + goto free; 110 + 105 111 clamp_control = clamp; 106 112 return 0; 107 - fail: 108 - dev_pm_qos_remove_request(&qos_req); 109 113 110 114 free: 111 115 kfree(clamp); 116 + fail: 117 + freq_qos_remove_request(&qos_req); 112 118 return ret; 113 119 } 114 120 ··· 122 116 { 123 117 if (clamp_control) { 124 118 wf_unregister_control(clamp_control); 125 - 
dev_pm_qos_remove_request(&qos_req); 119 + freq_qos_remove_request(&qos_req); 126 120 } 127 121 } 128 122
+7 -7
drivers/thermal/cpu_cooling.c
··· 88 88 struct cpufreq_policy *policy; 89 89 struct list_head node; 90 90 struct time_in_idle *idle_time; 91 - struct dev_pm_qos_request qos_req; 91 + struct freq_qos_request qos_req; 92 92 }; 93 93 94 94 static DEFINE_IDA(cpufreq_ida); ··· 331 331 332 332 cpufreq_cdev->cpufreq_state = state; 333 333 334 - return dev_pm_qos_update_request(&cpufreq_cdev->qos_req, 334 + return freq_qos_update_request(&cpufreq_cdev->qos_req, 335 335 cpufreq_cdev->freq_table[state].frequency); 336 336 } 337 337 ··· 615 615 cooling_ops = &cpufreq_cooling_ops; 616 616 } 617 617 618 - ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req, 619 - DEV_PM_QOS_MAX_FREQUENCY, 620 - cpufreq_cdev->freq_table[0].frequency); 618 + ret = freq_qos_add_request(&policy->constraints, 619 + &cpufreq_cdev->qos_req, FREQ_QOS_MAX, 620 + cpufreq_cdev->freq_table[0].frequency); 621 621 if (ret < 0) { 622 622 pr_err("%s: Failed to add freq constraint (%d)\n", __func__, 623 623 ret); ··· 637 637 return cdev; 638 638 639 639 remove_qos_req: 640 - dev_pm_qos_remove_request(&cpufreq_cdev->qos_req); 640 + freq_qos_remove_request(&cpufreq_cdev->qos_req); 641 641 remove_ida: 642 642 ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); 643 643 free_table: ··· 736 736 mutex_unlock(&cooling_list_lock); 737 737 738 738 thermal_cooling_device_unregister(cdev); 739 - dev_pm_qos_remove_request(&cpufreq_cdev->qos_req); 739 + freq_qos_remove_request(&cpufreq_cdev->qos_req); 740 740 ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); 741 741 kfree(cpufreq_cdev->idle_time); 742 742 kfree(cpufreq_cdev->freq_table);
+10 -10
include/acpi/processor.h
··· 232 232 struct acpi_processor_limit limit; 233 233 struct thermal_cooling_device *cdev; 234 234 struct device *dev; /* Processor device. */ 235 - struct dev_pm_qos_request perflib_req; 236 - struct dev_pm_qos_request thermal_req; 235 + struct freq_qos_request perflib_req; 236 + struct freq_qos_request thermal_req; 237 237 }; 238 238 239 239 struct acpi_processor_errata { ··· 302 302 #ifdef CONFIG_CPU_FREQ 303 303 extern bool acpi_processor_cpufreq_init; 304 304 void acpi_processor_ignore_ppc_init(void); 305 - void acpi_processor_ppc_init(int cpu); 306 - void acpi_processor_ppc_exit(int cpu); 305 + void acpi_processor_ppc_init(struct cpufreq_policy *policy); 306 + void acpi_processor_ppc_exit(struct cpufreq_policy *policy); 307 307 void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag); 308 308 extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit); 309 309 #else ··· 311 311 { 312 312 return; 313 313 } 314 - static inline void acpi_processor_ppc_init(int cpu) 314 + static inline void acpi_processor_ppc_init(struct cpufreq_policy *policy) 315 315 { 316 316 return; 317 317 } 318 - static inline void acpi_processor_ppc_exit(int cpu) 318 + static inline void acpi_processor_ppc_exit(struct cpufreq_policy *policy) 319 319 { 320 320 return; 321 321 } ··· 431 431 int acpi_processor_get_limit_info(struct acpi_processor *pr); 432 432 extern const struct thermal_cooling_device_ops processor_cooling_ops; 433 433 #if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ) 434 - void acpi_thermal_cpufreq_init(int cpu); 435 - void acpi_thermal_cpufreq_exit(int cpu); 434 + void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy); 435 + void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy); 436 436 #else 437 - static inline void acpi_thermal_cpufreq_init(int cpu) 437 + static inline void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy) 438 438 { 439 439 return; 440 440 } 441 - static inline void 
acpi_thermal_cpufreq_exit(int cpu) 441 + static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy) 442 442 { 443 443 return; 444 444 }
+5 -2
include/linux/cpufreq.h
··· 13 13 #include <linux/completion.h> 14 14 #include <linux/kobject.h> 15 15 #include <linux/notifier.h> 16 + #include <linux/pm_qos.h> 16 17 #include <linux/spinlock.h> 17 18 #include <linux/sysfs.h> 18 19 ··· 77 76 struct work_struct update; /* if update_policy() needs to be 78 77 * called, but you're in IRQ context */ 79 78 80 - struct dev_pm_qos_request *min_freq_req; 81 - struct dev_pm_qos_request *max_freq_req; 79 + struct freq_constraints constraints; 80 + struct freq_qos_request *min_freq_req; 81 + struct freq_qos_request *max_freq_req; 82 + 82 83 struct cpufreq_frequency_table *freq_table; 83 84 enum cpufreq_table_sorting freq_table_sorted; 84 85