Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] Make cpufreq suspend code conditional on powerpc.
[CPUFREQ] Fix a kobject reference bug related to managed CPUs
[CPUFREQ] Do not set policy for offline cpus
[CPUFREQ] Fix NULL pointer dereference regression in conservative governor

 drivers/cpufreq/cpufreq.c              | 27 ++++++++++++++++++++++++---
 drivers/cpufreq/cpufreq_conservative.c |  6 ++++++
 2 files changed, 30 insertions(+), 3 deletions(-)
drivers/cpufreq/cpufreq.c
···
 
 		/* Check for existing affected CPUs.
 		 * They may not be aware of it due to CPU Hotplug.
+		 * cpufreq_cpu_put is called when the device is removed
+		 * in __cpufreq_remove_dev()
 		 */
 		managed_policy = cpufreq_cpu_get(j);
 		if (unlikely(managed_policy)) {
···
 			ret = sysfs_create_link(&sys_dev->kobj,
 						&managed_policy->kobj,
 						"cpufreq");
-			if (!ret)
+			if (ret)
 				cpufreq_cpu_put(managed_policy);
 			/*
 			 * Success. We only needed to be added to the mask.
···
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus) {
+		if (!cpu_online(j))
+			continue;
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
···
 
 static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
-	int cpu = sysdev->id;
 	int ret = 0;
+
+#ifdef __powerpc__
+	int cpu = sysdev->id;
 	unsigned int cur_freq = 0;
 	struct cpufreq_policy *cpu_policy;
 
 	dprintk("suspending cpu %u\n", cpu);
+
+	/*
+	 * This whole bogosity is here because Powerbooks are made of fail.
+	 * No sane platform should need any of the code below to be run.
+	 * (it's entirely the wrong thing to do, as driver->get may
+	 *  reenable interrupts on some architectures).
+	 */
 
 	if (!cpu_online(cpu))
 		return 0;
···
 
 out:
 	cpufreq_cpu_put(cpu_policy);
+#endif	/* __powerpc__ */
 	return ret;
 }
 
···
  */
 static int cpufreq_resume(struct sys_device *sysdev)
 {
-	int cpu = sysdev->id;
 	int ret = 0;
+
+#ifdef __powerpc__
+	int cpu = sysdev->id;
 	struct cpufreq_policy *cpu_policy;
 
 	dprintk("resuming cpu %u\n", cpu);
+
+	/* As with the ->suspend method, all the code below is
+	 * only necessary because Powerbooks suck.
+	 * See commit 42d4dc3f4e1e for jokes. */
 
 	if (!cpu_online(cpu))
 		return 0;
···
 		schedule_work(&cpu_policy->update);
 fail:
 	cpufreq_cpu_put(cpu_policy);
+#endif	/* __powerpc__ */
 	return ret;
 }
 
drivers/cpufreq/cpufreq_conservative.c
···
 	unsigned int down_skip;
 	unsigned int requested_freq;
 	int cpu;
+	unsigned int enable:1;
 	/*
 	 * percpu mutex that serializes governor limit change with
 	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
···
 							freq->cpu);
 
 	struct cpufreq_policy *policy;
+
+	if (!this_dbs_info->enable)
+		return 0;
 
 	policy = this_dbs_info->cur_policy;
 
···
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;
 
+	dbs_info->enable = 1;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
 			      delay);
···
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
+	dbs_info->enable = 0;
 	cancel_delayed_work_sync(&dbs_info->work);
 }
 