[PATCH] Reorganize the cpufreq cpu hotplug locking to not be totally bizarre

The patch below moves the cpu hotplugging higher up in the cpufreq
layering; this is needed to avoid recursive taking of the cpu hotplug
lock and to otherwise detangle the mess.

The new rules are:
1. you must do lock_cpu_hotplug() around the following functions:
__cpufreq_driver_target
__cpufreq_governor (for CPUFREQ_GOV_LIMITS operation only)
__cpufreq_set_policy
2. governor methods (.governor) must NOT take the lock_cpu_hotplug()
lock in any way; they are called with the lock taken already
3. if your governor spawns a thread that does things, like calling
__cpufreq_driver_target, your thread must honor rule #1.
4. the policy lock and other cpufreq internal locks nest within
the lock_cpu_hotplug() lock.

I'm not entirely happy about how the __cpufreq_governor rule ended up
(conditional locking rule depending on the argument) but basically all
callers pass this as a constant so it's not too horrible.

The patch also removes the cpufreq_governor() function since during the
locking audit it turned out to be entirely unused (so no need to fix it)

The patch works on my testbox, but it could use more testing
(otoh... it can't be much worse than the current code)

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by Arjan van de Ven and committed by Linus Torvalds 153d7f3f 44eb1231

+23 -29
+18 -22
drivers/cpufreq/cpufreq.c
··· 364 364 if (ret != 1) \ 365 365 return -EINVAL; \ 366 366 \ 367 + lock_cpu_hotplug(); \ 367 368 mutex_lock(&policy->lock); \ 368 369 ret = __cpufreq_set_policy(policy, &new_policy); \ 369 370 policy->user_policy.object = policy->object; \ 370 371 mutex_unlock(&policy->lock); \ 372 + unlock_cpu_hotplug(); \ 371 373 \ 372 374 return ret ? ret : count; \ 373 375 } ··· 1199 1197 *********************************************************************/ 1200 1198 1201 1199 1200 + /* Must be called with lock_cpu_hotplug held */ 1202 1201 int __cpufreq_driver_target(struct cpufreq_policy *policy, 1203 1202 unsigned int target_freq, 1204 1203 unsigned int relation) 1205 1204 { 1206 1205 int retval = -EINVAL; 1207 1206 1208 - lock_cpu_hotplug(); 1209 1207 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, 1210 1208 target_freq, relation); 1211 1209 if (cpu_online(policy->cpu) && cpufreq_driver->target) 1212 1210 retval = cpufreq_driver->target(policy, target_freq, relation); 1213 - 1214 - unlock_cpu_hotplug(); 1215 1211 1216 1212 return retval; 1217 1213 } ··· 1225 1225 if (!policy) 1226 1226 return -EINVAL; 1227 1227 1228 + lock_cpu_hotplug(); 1228 1229 mutex_lock(&policy->lock); 1229 1230 1230 1231 ret = __cpufreq_driver_target(policy, target_freq, relation); 1231 1232 1232 1233 mutex_unlock(&policy->lock); 1234 + unlock_cpu_hotplug(); 1233 1235 1234 1236 cpufreq_cpu_put(policy); 1235 1237 return ret; 1236 1238 } 1237 1239 EXPORT_SYMBOL_GPL(cpufreq_driver_target); 1238 1240 1241 + /* 1242 + * Locking: Must be called with the lock_cpu_hotplug() lock held 1243 + * when "event" is CPUFREQ_GOV_LIMITS 1244 + */ 1239 1245 1240 1246 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) 1241 1247 { ··· 1261 1255 1262 1256 return ret; 1263 1257 } 1264 - 1265 - 1266 - int cpufreq_governor(unsigned int cpu, unsigned int event) 1267 - { 1268 - int ret = 0; 1269 - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 1270 - 1271 - if (!policy) 
1272 - return -EINVAL; 1273 - 1274 - mutex_lock(&policy->lock); 1275 - ret = __cpufreq_governor(policy, event); 1276 - mutex_unlock(&policy->lock); 1277 - 1278 - cpufreq_cpu_put(policy); 1279 - return ret; 1280 - } 1281 - EXPORT_SYMBOL_GPL(cpufreq_governor); 1282 1258 1283 1259 1284 1260 int cpufreq_register_governor(struct cpufreq_governor *governor) ··· 1330 1342 EXPORT_SYMBOL(cpufreq_get_policy); 1331 1343 1332 1344 1345 + /* 1346 + * Locking: Must be called with the lock_cpu_hotplug() lock held 1347 + */ 1333 1348 static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy) 1334 1349 { 1335 1350 int ret = 0; ··· 1427 1436 if (!data) 1428 1437 return -EINVAL; 1429 1438 1439 + lock_cpu_hotplug(); 1440 + 1430 1441 /* lock this CPU */ 1431 1442 mutex_lock(&data->lock); 1432 1443 ··· 1439 1446 data->user_policy.governor = data->governor; 1440 1447 1441 1448 mutex_unlock(&data->lock); 1449 + 1450 + unlock_cpu_hotplug(); 1442 1451 cpufreq_cpu_put(data); 1443 1452 1444 1453 return ret; ··· 1464 1469 if (!data) 1465 1470 return -ENODEV; 1466 1471 1472 + lock_cpu_hotplug(); 1467 1473 mutex_lock(&data->lock); 1468 1474 1469 1475 dprintk("updating policy for CPU %u\n", cpu); ··· 1490 1494 ret = __cpufreq_set_policy(data, &policy); 1491 1495 1492 1496 mutex_unlock(&data->lock); 1493 - 1497 + unlock_cpu_hotplug(); 1494 1498 cpufreq_cpu_put(data); 1495 1499 return ret; 1496 1500 }
-2
drivers/cpufreq/cpufreq_conservative.c
··· 525 525 break; 526 526 527 527 case CPUFREQ_GOV_LIMITS: 528 - lock_cpu_hotplug(); 529 528 mutex_lock(&dbs_mutex); 530 529 if (policy->max < this_dbs_info->cur_policy->cur) 531 530 __cpufreq_driver_target( ··· 535 536 this_dbs_info->cur_policy, 536 537 policy->min, CPUFREQ_RELATION_L); 537 538 mutex_unlock(&dbs_mutex); 538 - unlock_cpu_hotplug(); 539 539 break; 540 540 } 541 541 return 0;
+2 -2
drivers/cpufreq/cpufreq_ondemand.c
··· 309 309 if (!dbs_info->enable) 310 310 return; 311 311 312 + lock_cpu_hotplug(); 312 313 dbs_check_cpu(dbs_info); 314 + unlock_cpu_hotplug(); 313 315 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, 314 316 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 315 317 } ··· 414 412 break; 415 413 416 414 case CPUFREQ_GOV_LIMITS: 417 - lock_cpu_hotplug(); 418 415 mutex_lock(&dbs_mutex); 419 416 if (policy->max < this_dbs_info->cur_policy->cur) 420 417 __cpufreq_driver_target(this_dbs_info->cur_policy, ··· 424 423 policy->min, 425 424 CPUFREQ_RELATION_L); 426 425 mutex_unlock(&dbs_mutex); 427 - unlock_cpu_hotplug(); 428 426 break; 429 427 } 430 428 return 0;
+3
drivers/cpufreq/cpufreq_userspace.c
··· 18 18 #include <linux/spinlock.h> 19 19 #include <linux/interrupt.h> 20 20 #include <linux/cpufreq.h> 21 + #include <linux/cpu.h> 21 22 #include <linux/types.h> 22 23 #include <linux/fs.h> 23 24 #include <linux/sysfs.h> ··· 71 70 72 71 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); 73 72 73 + lock_cpu_hotplug(); 74 74 mutex_lock(&userspace_mutex); 75 75 if (!cpu_is_managed[policy->cpu]) 76 76 goto err; ··· 94 92 95 93 err: 96 94 mutex_unlock(&userspace_mutex); 95 + unlock_cpu_hotplug(); 97 96 return ret; 98 97 } 99 98
-3
include/linux/cpufreq.h
··· 172 172 unsigned int relation); 173 173 174 174 175 - /* pass an event to the cpufreq governor */ 176 - int cpufreq_governor(unsigned int cpu, unsigned int event); 177 - 178 175 int cpufreq_register_governor(struct cpufreq_governor *governor); 179 176 void cpufreq_unregister_governor(struct cpufreq_governor *governor); 180 177