[PATCH] Reorganize the cpufreq cpu hotplug locking to not be totally bizarre

The patch below moves the cpu hotplug locking higher up in the cpufreq
layering; this is needed to avoid taking the cpu hotplug lock
recursively and to otherwise untangle the mess.

The new rules are:
1. you must take lock_cpu_hotplug() around calls to the following functions:
__cpufreq_driver_target
__cpufreq_governor (for CPUFREQ_GOV_LIMITS operation only)
__cpufreq_set_policy
2. governor methods (.governor) must NOT take the lock_cpu_hotplug()
lock in any way; they are called with the lock already taken
3. if your governor spawns a thread that does things, like calling
__cpufreq_driver_target, that thread must honor rule #1 itself
4. the policy lock and other cpufreq-internal locks nest within
the lock_cpu_hotplug() lock (see the sketch below this list).
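
To make rules 2-4 concrete, here is a minimal sketch of a governor that
plays by them (illustrative only, not part of the patch; the example_*
names and the governor-private mutex are made up):

	#include <linux/cpu.h>		/* lock_cpu_hotplug() */
	#include <linux/cpufreq.h>
	#include <linux/mutex.h>

	/* hypothetical governor-private lock */
	static DEFINE_MUTEX(example_mutex);

	/* rule #3: a thread spawned by the governor honors rule #1 itself */
	static void example_worker(struct cpufreq_policy *policy)
	{
		lock_cpu_hotplug();
		/* rule #4: internal locks nest inside the hotplug lock */
		mutex_lock(&example_mutex);
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		mutex_unlock(&example_mutex);
		unlock_cpu_hotplug();
	}

	/* rule #2: the .governor method takes no hotplug lock; for
	 * CPUFREQ_GOV_LIMITS its caller already holds it */
	static int example_governor(struct cpufreq_policy *policy,
				    unsigned int event)
	{
		if (event == CPUFREQ_GOV_LIMITS && policy->cur > policy->max)
			__cpufreq_driver_target(policy, policy->max,
						CPUFREQ_RELATION_H);
		return 0;
	}

The point is the ordering: lock_cpu_hotplug() always comes first, and
any governor-internal lock nests inside it.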

I'm not entirely happy about how the __cpufreq_governor rule ended up
(a conditional locking rule that depends on the argument), but basically
all callers pass the event as a constant, so it's not too horrible.
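
For example, a call site that honors the conditional rule would look
like this (sketch only; "policy" and "ret" are assumed to be in scope):

	/* CPUFREQ_GOV_LIMITS: caller must hold the hotplug lock */
	lock_cpu_hotplug();
	ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	unlock_cpu_hotplug();

	/* other events (e.g. CPUFREQ_GOV_START) take no hotplug lock */
	ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);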

The patch also removes the cpufreq_governor() function, since during the
locking audit it turned out to be entirely unused (so there was no need
to fix it).

The patch works on my testbox, but it could use more testing
(otoh, it can't be much worse than the current code).

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>


---
 drivers/cpufreq/cpufreq.c              | 40 ++++++++++++++++++----------------------
 drivers/cpufreq/cpufreq_conservative.c |  2 --
 drivers/cpufreq/cpufreq_ondemand.c     |  4 ++--
 drivers/cpufreq/cpufreq_userspace.c    |  3 +++
 include/linux/cpufreq.h                |  3 ---
 5 files changed, 23 insertions(+), 29 deletions(-)

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -364,10 +364,12 @@
 	if (ret != 1)						\
 		return -EINVAL;					\
 								\
+	lock_cpu_hotplug();					\
 	mutex_lock(&policy->lock);				\
 	ret = __cpufreq_set_policy(policy, &new_policy);	\
 	policy->user_policy.object = policy->object;		\
 	mutex_unlock(&policy->lock);				\
+	unlock_cpu_hotplug();					\
 								\
 	return ret ? ret : count;				\
 }
@@ -1199,19 +1201,17 @@
  *********************************************************************/
 
 
+/* Must be called with lock_cpu_hotplug held */
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
 {
 	int retval = -EINVAL;
 
-	lock_cpu_hotplug();
 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
 		target_freq, relation);
 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
-
-	unlock_cpu_hotplug();
 
 	return retval;
 }
@@ -1225,17 +1225,23 @@
 	if (!policy)
 		return -EINVAL;
 
+	lock_cpu_hotplug();
 	mutex_lock(&policy->lock);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
 	mutex_unlock(&policy->lock);
+	unlock_cpu_hotplug();
 
 	cpufreq_cpu_put(policy);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
+/*
+ * Locking: Must be called with the lock_cpu_hotplug() lock held
+ * when "event" is CPUFREQ_GOV_LIMITS
+ */
 
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 {
@@ -1261,24 +1267,6 @@
 
 	return ret;
 }
-
-
-int cpufreq_governor(unsigned int cpu, unsigned int event)
-{
-	int ret = 0;
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
-	if (!policy)
-		return -EINVAL;
-
-	mutex_lock(&policy->lock);
-	ret = __cpufreq_governor(policy, event);
-	mutex_unlock(&policy->lock);
-
-	cpufreq_cpu_put(policy);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(cpufreq_governor);
 
 
 int cpufreq_register_governor(struct cpufreq_governor *governor)
@@ -1330,6 +1318,9 @@
 EXPORT_SYMBOL(cpufreq_get_policy);
 
 
+/*
+ * Locking: Must be called with the lock_cpu_hotplug() lock held
+ */
 static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
 {
 	int ret = 0;
@@ -1427,6 +1418,8 @@
 	if (!data)
 		return -EINVAL;
 
+	lock_cpu_hotplug();
+
 	/* lock this CPU */
 	mutex_lock(&data->lock);
 
@@ -1439,6 +1432,8 @@
 	data->user_policy.governor = data->governor;
 
 	mutex_unlock(&data->lock);
+
+	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 
 	return ret;
@@ -1464,6 +1459,7 @@
 	if (!data)
 		return -ENODEV;
 
+	lock_cpu_hotplug();
 	mutex_lock(&data->lock);
 
 	dprintk("updating policy for CPU %u\n", cpu);
@@ -1490,7 +1486,7 @@
 	ret = __cpufreq_set_policy(data, &policy);
 
 	mutex_unlock(&data->lock);
-
+	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 	return ret;
 }
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -525,7 +525,6 @@
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -535,7 +534,6 @@
 				this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
-		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -309,7 +309,9 @@
 	if (!dbs_info->enable)
 		return;
 
+	lock_cpu_hotplug();
 	dbs_check_cpu(dbs_info);
+	unlock_cpu_hotplug();
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 }
@@ -414,7 +416,6 @@
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
@@ -424,7 +425,6 @@
 				policy->min,
 				CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
-		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -18,6 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/cpufreq.h>
+#include <linux/cpu.h>
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
@@ -71,6 +72,7 @@
 
 	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
+	lock_cpu_hotplug();
 	mutex_lock(&userspace_mutex);
 	if (!cpu_is_managed[policy->cpu])
 		goto err;
@@ -94,6 +96,7 @@
 
  err:
 	mutex_unlock(&userspace_mutex);
+	unlock_cpu_hotplug();
 	return ret;
 }
 
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -172,9 +172,6 @@
 				   unsigned int relation);
 
 
-/* pass an event to the cpufreq governor */
-int cpufreq_governor(unsigned int cpu, unsigned int event);
-
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 