Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] use max load in conservative governor
  [CPUFREQ] fix a lockdep warning

 drivers/cpufreq/cpufreq.c              | 19 ++++++++++++++-----
 drivers/cpufreq/cpufreq_conservative.c |  8 ++++++--
 2 files changed, 20 insertions(+), 7 deletions(-)
drivers/cpufreq/cpufreq.c
···
 	unsigned int cpu = sys_dev->id;
 	unsigned long flags;
 	struct cpufreq_policy *data;
+	struct kobject *kobj;
+	struct completion *cmp;
 #ifdef CONFIG_SMP
 	struct sys_device *cpu_sys_dev;
 	unsigned int j;
···
 	dprintk("removing link\n");
 	cpumask_clear_cpu(cpu, data->cpus);
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-	sysfs_remove_link(&sys_dev->kobj, "cpufreq");
+	kobj = &sys_dev->kobj;
 	cpufreq_cpu_put(data);
 	cpufreq_debug_enable_ratelimit();
 	unlock_policy_rwsem_write(cpu);
+	sysfs_remove_link(kobj, "cpufreq");
 	return 0;
 }
 #endif
···
 			data->governor->name, CPUFREQ_NAME_LEN);
 #endif
 		cpu_sys_dev = get_cpu_sysdev(j);
-		sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
+		kobj = &cpu_sys_dev->kobj;
+		unlock_policy_rwsem_write(cpu);
+		sysfs_remove_link(kobj, "cpufreq");
+		lock_policy_rwsem_write(cpu);
 		cpufreq_cpu_put(data);
 	}
 }
···
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

-	kobject_put(&data->kobj);
+	kobj = &data->kobj;
+	cmp = &data->kobj_unregister;
+	unlock_policy_rwsem_write(cpu);
+	kobject_put(kobj);

 	/* we need to make sure that the underlying kobj is actually
 	 * not referenced anymore by anybody before we proceed with
 	 * unloading.
 	 */
 	dprintk("waiting for dropping of refcount\n");
-	wait_for_completion(&data->kobj_unregister);
+	wait_for_completion(cmp);
 	dprintk("wait complete\n");

+	lock_policy_rwsem_write(cpu);
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(data);
-
 	unlock_policy_rwsem_write(cpu);

 	free_cpumask_var(data->related_cpus);
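The lockdep fix above rearranges the remove path so that sysfs teardown happens outside the policy rwsem: sysfs_remove_link() and the final kobject_put() can block until every active reference is dropped, and those references can be held by sysfs attribute handlers that themselves take the same rwsem. Hence the kobj/cmp pointers are cached while the lock is held and the blocking calls run after unlock. Below is a minimal userspace sketch of that ordering; struct policy, put_ref() and wait_for_release() are hypothetical pthread-based stand-ins for the kernel's rwsem, kobject refcount and completion, not kernel APIs.

/*
 * remove_policy.c — userspace sketch of the unlock-before-teardown
 * ordering in the fix above.  struct policy, put_ref() and
 * wait_for_release() are hypothetical stand-ins, not kernel APIs.
 * Build with: cc remove_policy.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

struct policy {
	pthread_rwlock_t rwsem;		/* stands in for the policy rwsem */
	pthread_mutex_t ref_mu;
	pthread_cond_t released;	/* stands in for kobj_unregister */
	int refs;			/* stands in for the kobject refcount */
};

static void put_ref(struct policy *p)
{
	pthread_mutex_lock(&p->ref_mu);
	if (--p->refs == 0)
		pthread_cond_broadcast(&p->released);
	pthread_mutex_unlock(&p->ref_mu);
}

static void wait_for_release(struct policy *p)
{
	pthread_mutex_lock(&p->ref_mu);
	while (p->refs > 0)		/* mirrors wait_for_completion() */
		pthread_cond_wait(&p->released, &p->ref_mu);
	pthread_mutex_unlock(&p->ref_mu);
}

static void remove_policy(struct policy *p)
{
	pthread_rwlock_wrlock(&p->rwsem);
	/* unhook the policy from shared state while the write lock is held */
	pthread_rwlock_unlock(&p->rwsem);

	/*
	 * Only block once the rwsem is dropped: the final reference may be
	 * held by a reader (e.g. a sysfs handler) that needs the same lock
	 * before it can finish and release it.
	 */
	put_ref(p);
	wait_for_release(p);
	printf("policy torn down\n");
}

int main(void)
{
	struct policy p = {
		.rwsem = PTHREAD_RWLOCK_INITIALIZER,
		.ref_mu = PTHREAD_MUTEX_INITIALIZER,
		.released = PTHREAD_COND_INITIALIZER,
		.refs = 1,
	};

	remove_policy(&p);
	return 0;
}

The same reasoning explains the drop-and-retake of the rwsem around sysfs_remove_link() in the CONFIG_SMP sibling loop above: removing the link can block on an active attribute handler that wants the lock.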
drivers/cpufreq/cpufreq_conservative.c
···
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
 	unsigned int load = 0;
+	unsigned int max_load = 0;
 	unsigned int freq_target;

 	struct cpufreq_policy *policy;
···
 			continue;

 		load = 100 * (wall_time - idle_time) / wall_time;
+
+		if (load > max_load)
+			max_load = load;
 	}
···
 		return;

 	/* Check for frequency increase */
-	if (load > dbs_tuners_ins.up_threshold) {
+	if (max_load > dbs_tuners_ins.up_threshold) {
 		this_dbs_info->down_skip = 0;

 		/* if we are already at full speed then break out early */
···
 	 * can support the current CPU usage without triggering the up
 	 * policy. To be safe, we focus 10 points under the threshold.
 	 */
-	if (load < (dbs_tuners_ins.down_threshold - 10)) {
+	if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
 		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

 		this_dbs_info->requested_freq -= freq_target;
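Before this change the governor keyed its up/down decision off whatever `load` value was left over from the last CPU the loop visited; the fix tracks the maximum load across all CPUs in the policy, so a single saturated CPU is enough to raise the shared frequency. A standalone sketch of that max-load computation follows, with made-up wall/idle samples and struct cpu_sample standing in for the kernel's per-CPU accounting.

/*
 * max_load.c — standalone sketch of the max-load logic added above.
 * The sample numbers and struct cpu_sample are illustrative only.
 * Build with: cc max_load.c
 */
#include <stdio.h>

struct cpu_sample {
	unsigned int wall_time;		/* jiffies elapsed in the window */
	unsigned int idle_time;		/* idle jiffies in the same window */
};

int main(void)
{
	/* hypothetical samples for a four-CPU policy */
	struct cpu_sample cpus[] = {
		{ 100, 80 }, { 100, 15 }, { 100, 60 }, { 100, 90 },
	};
	unsigned int up_threshold = 80;	/* plays the role of up_threshold */
	unsigned int max_load = 0;
	unsigned int j;

	for (j = 0; j < sizeof(cpus) / sizeof(cpus[0]); j++) {
		unsigned int load;

		/* skip inconsistent samples, like the continue in the hunk above */
		if (!cpus[j].wall_time || cpus[j].wall_time < cpus[j].idle_time)
			continue;

		load = 100 * (cpus[j].wall_time - cpus[j].idle_time) /
			cpus[j].wall_time;
		if (load > max_load)
			max_load = load;
	}

	/*
	 * CPU 1 is 85% busy while the last CPU iterated is only 10% busy:
	 * keying off max_load instead of the final load ramps the policy up.
	 */
	printf("max_load = %u -> %s\n", max_load,
	       max_load > up_threshold ? "increase frequency" : "hold");
	return 0;
}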