[CPUFREQ] Misc cleanups in ondemand.

Misc cleanups in ondemand. Should have zero functional impact.
Also adding Alexey as author.

Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>

Authored by Venkatesh Pallipadi and committed by Dave Jones (ffac80e9, 2f8a835c)

+19 -31
drivers/cpufreq/cpufreq_ondemand.c
···
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ctype.h>
 #include <linux/cpufreq.h>
-#include <linux/sysctl.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/sysfs.h>
 #include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/kmod.h>
-#include <linux/workqueue.h>
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
-#include <linux/percpu.h>
 #include <linux/mutex.h>
 
 /*
···
  * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
  * is recursive for the same process. -Venki
  */
-static DEFINE_MUTEX (dbs_mutex);
-static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
+static DEFINE_MUTEX(dbs_mutex);
 
 static struct workqueue_struct *kondemand_wq;
···
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
···
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
···
 
 	unsigned int j;
 
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 	if ( ret != 1 )
 		return -EINVAL;
 
···
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
-		if ((!cpu_online(cpu)) ||
-		    (!policy->cur))
+		if ((!cpu_online(cpu)) || (!policy->cur))
 			return -EINVAL;
 
 		if (policy->cpuinfo.transition_latency >
···
 		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
-			__cpufreq_driver_target(
-					this_dbs_info->cur_policy,
-					policy->max, CPUFREQ_RELATION_H);
+			__cpufreq_driver_target(this_dbs_info->cur_policy,
+			                        policy->max,
+			                        CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
-			__cpufreq_driver_target(
-					this_dbs_info->cur_policy,
-					policy->min, CPUFREQ_RELATION_L);
+			__cpufreq_driver_target(this_dbs_info->cur_policy,
+			                        policy->min,
+			                        CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
 		unlock_cpu_hotplug();
 		break;
···
 }
 
 static struct cpufreq_governor cpufreq_gov_dbs = {
-	.name = "ondemand",
-	.governor = cpufreq_governor_dbs,
-	.owner = THIS_MODULE,
+	.name			= "ondemand",
+	.governor		= cpufreq_governor_dbs,
+	.owner			= THIS_MODULE,
 };
 
 static int __init cpufreq_gov_dbs_init(void)
···
 }
 
 
-MODULE_AUTHOR ("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
-MODULE_DESCRIPTION ("'cpufreq_ondemand' - A dynamic cpufreq governor for "
-		"Low Latency Frequency Transition capable processors");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
+MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
+MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
+		"Low Latency Frequency Transition capable processors");
+MODULE_LICENSE("GPL");
 
 module_init(cpufreq_gov_dbs_init);
 module_exit(cpufreq_gov_dbs_exit);
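
For readers less familiar with this interface, the sketch below shows in minimal
form how a governor of this era hooks into the cpufreq core: a struct
cpufreq_governor carrying a name and an event callback is handed to
cpufreq_register_governor() at module init and removed with
cpufreq_unregister_governor() at exit, which is the pattern visible at the
bottom of the diff. This is an illustrative sketch, not part of the commit;
the example_* names are hypothetical.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufreq.h>

/* Event callback: the core invokes this with START/STOP/LIMITS events. */
static int example_governor(struct cpufreq_policy *policy, unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_STOP:
	case CPUFREQ_GOV_LIMITS:
	default:
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_example = {
	.name		= "example",
	.governor	= example_governor,
	.owner		= THIS_MODULE,
};

static int __init cpufreq_gov_example_init(void)
{
	/* Hand the governor to the cpufreq core, as cpufreq_gov_dbs_init() does. */
	return cpufreq_register_governor(&cpufreq_gov_example);
}

static void __exit cpufreq_gov_example_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_example);
}

module_init(cpufreq_gov_example_init);
module_exit(cpufreq_gov_example_exit);
MODULE_LICENSE("GPL");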