Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq

* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
Move workqueue exports to where the functions are defined.
[CPUFREQ] Misc cleanups in ondemand.
[CPUFREQ] Make ondemand sampling per CPU and remove the mutex usage in sampling path.
[CPUFREQ] Add queue_delayed_work_on() interface for workqueues.
[CPUFREQ] Remove slowdown from ondemand sampling path.

Total: +127 -194
+91 -169
drivers/cpufreq/cpufreq_ondemand.c
before:
···
#include <linux/kernel.h>
#include <linux/module.h>
- #include <linux/smp.h>
#include <linux/init.h>
- #include <linux/interrupt.h>
- #include <linux/ctype.h>
#include <linux/cpufreq.h>
- #include <linux/sysctl.h>
- #include <linux/types.h>
- #include <linux/fs.h>
- #include <linux/sysfs.h>
#include <linux/cpu.h>
- #include <linux/sched.h>
- #include <linux/kmod.h>
- #include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
- #include <linux/percpu.h>
#include <linux/mutex.h>

/*
···
#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
- #define DEF_SAMPLING_DOWN_FACTOR (1)
- #define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)

static void do_dbs_timer(void *data);

struct cpu_dbs_info_s {
    struct cpufreq_policy *cur_policy;
-     unsigned int prev_cpu_idle_up;
-     unsigned int prev_cpu_idle_down;
    unsigned int enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
···
 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
 * is recursive for the same process. -Venki
 */
- static DEFINE_MUTEX (dbs_mutex);
- static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);

- static struct workqueue_struct *dbs_workq;

struct dbs_tuners {
    unsigned int sampling_rate;
-     unsigned int sampling_down_factor;
    unsigned int up_threshold;
    unsigned int ignore_nice;
};

static struct dbs_tuners dbs_tuners_ins = {
    .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-     .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
    .ignore_nice = 0,
};

- static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
-     return kstat_cpu(cpu).cpustat.idle +
-         kstat_cpu(cpu).cpustat.iowait +
-         ( dbs_tuners_ins.ignore_nice ?
-           kstat_cpu(cpu).cpustat.nice :
-           0);
}

/************************** sysfs interface ************************/
···
    return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
show_one(sampling_rate, sampling_rate);
- show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
-
- static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
-         const char *buf, size_t count)
- {
-     unsigned int input;
-     int ret;
-     ret = sscanf (buf, "%u", &input);
-     if (ret != 1 )
-         return -EINVAL;
-
-     if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
-         return -EINVAL;
-
-     mutex_lock(&dbs_mutex);
-     dbs_tuners_ins.sampling_down_factor = input;
-     mutex_unlock(&dbs_mutex);
-
-     return count;
- }

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
        const char *buf, size_t count)
{
    unsigned int input;
    int ret;
-     ret = sscanf (buf, "%u", &input);

    mutex_lock(&dbs_mutex);
    if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
···
{
    unsigned int input;
    int ret;
-     ret = sscanf (buf, "%u", &input);

    mutex_lock(&dbs_mutex);
    if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
···

    unsigned int j;

-     ret = sscanf (buf, "%u", &input);
    if ( ret != 1 )
        return -EINVAL;
···
    }
    dbs_tuners_ins.ignore_nice = input;

-     /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
    for_each_online_cpu(j) {
-         struct cpu_dbs_info_s *j_dbs_info;
-         j_dbs_info = &per_cpu(cpu_dbs_info, j);
-         j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
-         j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
    }
    mutex_unlock(&dbs_mutex);
···
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
- define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
···
    &sampling_rate_max.attr,
    &sampling_rate_min.attr,
    &sampling_rate.attr,
-     &sampling_down_factor.attr,
    &up_threshold.attr,
    &ignore_nice_load.attr,
    NULL
···

/************************** sysfs end ************************/

- static void dbs_check_cpu(int cpu)
{
-     unsigned int idle_ticks, up_idle_ticks, total_ticks;
-     unsigned int freq_next;
-     unsigned int freq_down_sampling_rate;
-     static int down_skip[NR_CPUS];
-     struct cpu_dbs_info_s *this_dbs_info;

    struct cpufreq_policy *policy;
    unsigned int j;

-     this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
    if (!this_dbs_info->enable)
        return;

    policy = this_dbs_info->cur_policy;
    /*
     * Every sampling_rate, we check, if current idle time is less
     * than 20% (default), then we try to increase frequency
-      * Every sampling_rate*sampling_down_factor, we look for a the lowest
     * frequency which can sustain the load while keeping idle time over
     * 30%. If such a frequency exist, we try to decrease to this frequency.
     *
···
     * 5% (default) of current frequency
     */

-     /* Check for frequency increase */
    idle_ticks = UINT_MAX;
    for_each_cpu_mask(j, policy->cpus) {
-         unsigned int tmp_idle_ticks, total_idle_ticks;
        struct cpu_dbs_info_s *j_dbs_info;

        j_dbs_info = &per_cpu(cpu_dbs_info, j);
        total_idle_ticks = get_cpu_idle_time(j);
-         tmp_idle_ticks = total_idle_ticks -
-             j_dbs_info->prev_cpu_idle_up;
-         j_dbs_info->prev_cpu_idle_up = total_idle_ticks;

        if (tmp_idle_ticks < idle_ticks)
            idle_ticks = tmp_idle_ticks;
    }

-     /* Scale idle ticks by 100 and compare with up and down ticks */
-     idle_ticks *= 100;
-     up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-             usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-
-     if (idle_ticks < up_idle_ticks) {
-         down_skip[cpu] = 0;
-         for_each_cpu_mask(j, policy->cpus) {
-             struct cpu_dbs_info_s *j_dbs_info;
-
-             j_dbs_info = &per_cpu(cpu_dbs_info, j);
-             j_dbs_info->prev_cpu_idle_down =
-                 j_dbs_info->prev_cpu_idle_up;
-         }
        /* if we are already at full speed then break out early */
        if (policy->cur == policy->max)
            return;
···
    }

    /* Check for frequency decrease */
-     down_skip[cpu]++;
-     if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
-         return;
-
-     idle_ticks = UINT_MAX;
-     for_each_cpu_mask(j, policy->cpus) {
-         unsigned int tmp_idle_ticks, total_idle_ticks;
-         struct cpu_dbs_info_s *j_dbs_info;
-
-         j_dbs_info = &per_cpu(cpu_dbs_info, j);
-         /* Check for frequency decrease */
-         total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
-         tmp_idle_ticks = total_idle_ticks -
-             j_dbs_info->prev_cpu_idle_down;
-         j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
-         if (tmp_idle_ticks < idle_ticks)
-             idle_ticks = tmp_idle_ticks;
-     }
-
-     down_skip[cpu] = 0;
    /* if we cannot reduce the frequency anymore, break out early */
    if (policy->cur == policy->min)
        return;
-
-     /* Compute how many ticks there are between two measurements */
-     freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
-         dbs_tuners_ins.sampling_down_factor;
-     total_ticks = usecs_to_jiffies(freq_down_sampling_rate);

    /*
     * The optimal frequency is the frequency that is the lowest that
     * can support the current CPU usage without triggering the up
     * policy. To be safe, we focus 10 points under the threshold.
     */
-     freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
-     freq_next = (freq_next * policy->cur) /
            (dbs_tuners_ins.up_threshold - 10);

-     if (freq_next < policy->min)
-         freq_next = policy->min;
-
-     if (freq_next <= ((policy->cur * 95) / 100))
        __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
}

static void do_dbs_timer(void *data)
{
-     int i;
-     lock_cpu_hotplug();
-     mutex_lock(&dbs_mutex);
-     for_each_online_cpu(i)
-         dbs_check_cpu(i);
-     queue_delayed_work(dbs_workq, &dbs_work,
-             usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-     mutex_unlock(&dbs_mutex);
-     unlock_cpu_hotplug();
}

- static inline void dbs_timer_init(void)
{
-     INIT_WORK(&dbs_work, do_dbs_timer, NULL);
-     if (!dbs_workq)
-         dbs_workq = create_singlethread_workqueue("ondemand");
-     if (!dbs_workq) {
-         printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
-         return;
-     }
-     queue_delayed_work(dbs_workq, &dbs_work,
-             usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
    return;
}

- static inline void dbs_timer_exit(void)
{
-     if (dbs_workq)
-         cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
···

    switch (event) {
    case CPUFREQ_GOV_START:
-         if ((!cpu_online(cpu)) ||
-             (!policy->cur))
            return -EINVAL;

        if (policy->cpuinfo.transition_latency >
···
            break;

        mutex_lock(&dbs_mutex);
        for_each_cpu_mask(j, policy->cpus) {
            struct cpu_dbs_info_s *j_dbs_info;
            j_dbs_info = &per_cpu(cpu_dbs_info, j);
            j_dbs_info->cur_policy = policy;

-             j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
-             j_dbs_info->prev_cpu_idle_down
-                 = j_dbs_info->prev_cpu_idle_up;
        }
        this_dbs_info->enable = 1;
        sysfs_create_group(&policy->kobj, &dbs_attr_group);
-         dbs_enable++;
        /*
         * Start the timerschedule work, when this governor
         * is used for first time
···
                def_sampling_rate = MIN_STAT_SAMPLING_RATE;

            dbs_tuners_ins.sampling_rate = def_sampling_rate;
-             dbs_timer_init();
        }

        mutex_unlock(&dbs_mutex);
        break;

    case CPUFREQ_GOV_STOP:
        mutex_lock(&dbs_mutex);
        this_dbs_info->enable = 0;
        sysfs_remove_group(&policy->kobj, &dbs_attr_group);
        dbs_enable--;
-         /*
-          * Stop the timerschedule work, when this governor
-          * is used for first time
-          */
        if (dbs_enable == 0)
-             dbs_timer_exit();

        mutex_unlock(&dbs_mutex);
···
        lock_cpu_hotplug();
        mutex_lock(&dbs_mutex);
        if (policy->max < this_dbs_info->cur_policy->cur)
-             __cpufreq_driver_target(
-                     this_dbs_info->cur_policy,
-                     policy->max, CPUFREQ_RELATION_H);
        else if (policy->min > this_dbs_info->cur_policy->cur)
-             __cpufreq_driver_target(
-                     this_dbs_info->cur_policy,
-                     policy->min, CPUFREQ_RELATION_L);
        mutex_unlock(&dbs_mutex);
        unlock_cpu_hotplug();
        break;
···
}

static struct cpufreq_governor cpufreq_gov_dbs = {
-     .name = "ondemand",
-     .governor = cpufreq_governor_dbs,
-     .owner = THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
···

static void __exit cpufreq_gov_dbs_exit(void)
{
-     /* Make sure that the scheduled work is indeed not running.
-        Assumes the timer has been cancelled first. */
-     if (dbs_workq) {
-         flush_workqueue(dbs_workq);
-         destroy_workqueue(dbs_workq);
-     }
-
    cpufreq_unregister_governor(&cpufreq_gov_dbs);
}


- MODULE_AUTHOR ("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
- MODULE_DESCRIPTION ("'cpufreq_ondemand' - A dynamic cpufreq governor for "
-         "Low Latency Frequency Transition capable processors");
- MODULE_LICENSE ("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);

after:
···
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

/*
···
#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)

static void do_dbs_timer(void *data);

struct cpu_dbs_info_s {
+     cputime64_t prev_cpu_idle;
+     cputime64_t prev_cpu_wall;
    struct cpufreq_policy *cur_policy;
+     struct work_struct work;
    unsigned int enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
···
 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
 * is recursive for the same process. -Venki
 */
+ static DEFINE_MUTEX(dbs_mutex);

+ static struct workqueue_struct *kondemand_wq;

struct dbs_tuners {
    unsigned int sampling_rate;
    unsigned int up_threshold;
    unsigned int ignore_nice;
};

static struct dbs_tuners dbs_tuners_ins = {
    .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
    .ignore_nice = 0,
};

+ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
+     cputime64_t retval;
+
+     retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
+             kstat_cpu(cpu).cpustat.iowait);
+
+     if (dbs_tuners_ins.ignore_nice)
+         retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+
+     return retval;
}

/************************** sysfs interface ************************/
···
    return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
        const char *buf, size_t count)
{
    unsigned int input;
    int ret;
+     ret = sscanf(buf, "%u", &input);

    mutex_lock(&dbs_mutex);
    if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
···
{
    unsigned int input;
    int ret;
+     ret = sscanf(buf, "%u", &input);

    mutex_lock(&dbs_mutex);
    if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
···

    unsigned int j;

+     ret = sscanf(buf, "%u", &input);
    if ( ret != 1 )
        return -EINVAL;
···
    }
    dbs_tuners_ins.ignore_nice = input;

+     /* we need to re-evaluate prev_cpu_idle */
    for_each_online_cpu(j) {
+         struct cpu_dbs_info_s *dbs_info;
+         dbs_info = &per_cpu(cpu_dbs_info, j);
+         dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
+         dbs_info->prev_cpu_wall = get_jiffies_64();
    }
    mutex_unlock(&dbs_mutex);
···
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
···
    &sampling_rate_max.attr,
    &sampling_rate_min.attr,
    &sampling_rate.attr,
    &up_threshold.attr,
    &ignore_nice_load.attr,
    NULL
···

/************************** sysfs end ************************/

+ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
+     unsigned int idle_ticks, total_ticks;
+     unsigned int load;
+     cputime64_t cur_jiffies;

    struct cpufreq_policy *policy;
    unsigned int j;

    if (!this_dbs_info->enable)
        return;

    policy = this_dbs_info->cur_policy;
+     cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+     total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
+             this_dbs_info->prev_cpu_wall);
+     this_dbs_info->prev_cpu_wall = cur_jiffies;
    /*
     * Every sampling_rate, we check, if current idle time is less
     * than 20% (default), then we try to increase frequency
+      * Every sampling_rate, we look for a the lowest
     * frequency which can sustain the load while keeping idle time over
     * 30%. If such a frequency exist, we try to decrease to this frequency.
     *
···
     * 5% (default) of current frequency
     */

+     /* Get Idle Time */
    idle_ticks = UINT_MAX;
    for_each_cpu_mask(j, policy->cpus) {
+         cputime64_t total_idle_ticks;
+         unsigned int tmp_idle_ticks;
        struct cpu_dbs_info_s *j_dbs_info;

        j_dbs_info = &per_cpu(cpu_dbs_info, j);
        total_idle_ticks = get_cpu_idle_time(j);
+         tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+                 j_dbs_info->prev_cpu_idle);
+         j_dbs_info->prev_cpu_idle = total_idle_ticks;

        if (tmp_idle_ticks < idle_ticks)
            idle_ticks = tmp_idle_ticks;
    }
+     load = (100 * (total_ticks - idle_ticks)) / total_ticks;

+     /* Check for frequency increase */
+     if (load > dbs_tuners_ins.up_threshold) {
        /* if we are already at full speed then break out early */
        if (policy->cur == policy->max)
            return;
···
    }

    /* Check for frequency decrease */
    /* if we cannot reduce the frequency anymore, break out early */
    if (policy->cur == policy->min)
        return;

    /*
     * The optimal frequency is the frequency that is the lowest that
     * can support the current CPU usage without triggering the up
     * policy. To be safe, we focus 10 points under the threshold.
     */
+     if (load < (dbs_tuners_ins.up_threshold - 10)) {
+         unsigned int freq_next;
+         freq_next = (policy->cur * load) /
            (dbs_tuners_ins.up_threshold - 10);

        __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
+     }
}

static void do_dbs_timer(void *data)
{
+     unsigned int cpu = smp_processor_id();
+     struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+     dbs_check_cpu(dbs_info);
+     queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+             usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
}

+ static inline void dbs_timer_init(unsigned int cpu)
{
+     struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+     INIT_WORK(&dbs_info->work, do_dbs_timer, 0);
+     queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+             usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
    return;
}

+ static inline void dbs_timer_exit(unsigned int cpu)
{
+     struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+     cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
···

    switch (event) {
    case CPUFREQ_GOV_START:
+         if ((!cpu_online(cpu)) || (!policy->cur))
            return -EINVAL;

        if (policy->cpuinfo.transition_latency >
···
            break;

        mutex_lock(&dbs_mutex);
+         dbs_enable++;
+         if (dbs_enable == 1) {
+             kondemand_wq = create_workqueue("kondemand");
+             if (!kondemand_wq) {
+                 printk(KERN_ERR "Creation of kondemand failed\n");
+                 dbs_enable--;
+                 mutex_unlock(&dbs_mutex);
+                 return -ENOSPC;
+             }
+         }
        for_each_cpu_mask(j, policy->cpus) {
            struct cpu_dbs_info_s *j_dbs_info;
            j_dbs_info = &per_cpu(cpu_dbs_info, j);
            j_dbs_info->cur_policy = policy;

+             j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
+             j_dbs_info->prev_cpu_wall = get_jiffies_64();
        }
        this_dbs_info->enable = 1;
        sysfs_create_group(&policy->kobj, &dbs_attr_group);
        /*
         * Start the timerschedule work, when this governor
         * is used for first time
···
                def_sampling_rate = MIN_STAT_SAMPLING_RATE;

            dbs_tuners_ins.sampling_rate = def_sampling_rate;
        }
+         dbs_timer_init(policy->cpu);

        mutex_unlock(&dbs_mutex);
        break;

    case CPUFREQ_GOV_STOP:
        mutex_lock(&dbs_mutex);
+         dbs_timer_exit(policy->cpu);
        this_dbs_info->enable = 0;
        sysfs_remove_group(&policy->kobj, &dbs_attr_group);
        dbs_enable--;
        if (dbs_enable == 0)
+             destroy_workqueue(kondemand_wq);

        mutex_unlock(&dbs_mutex);
···
        lock_cpu_hotplug();
        mutex_lock(&dbs_mutex);
        if (policy->max < this_dbs_info->cur_policy->cur)
+             __cpufreq_driver_target(this_dbs_info->cur_policy,
+                     policy->max,
+                     CPUFREQ_RELATION_H);
        else if (policy->min > this_dbs_info->cur_policy->cur)
+             __cpufreq_driver_target(this_dbs_info->cur_policy,
+                     policy->min,
+                     CPUFREQ_RELATION_L);
        mutex_unlock(&dbs_mutex);
        unlock_cpu_hotplug();
        break;
···
}

static struct cpufreq_governor cpufreq_gov_dbs = {
+     .name = "ondemand",
+     .governor = cpufreq_governor_dbs,
+     .owner = THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
···

static void __exit cpufreq_gov_dbs_exit(void)
{
    cpufreq_unregister_governor(&cpufreq_gov_dbs);
}


+ MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
+ MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
+ MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
+         "Low Latency Frequency Transition capable processors");
+ MODULE_LICENSE("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);
+2
include/asm-generic/cputime.h
before:
···
#define cputime64_zero (0ULL)
#define cputime64_add(__a, __b) ((__a) + (__b))
#define cputime64_to_jiffies64(__ct) (__ct)
#define cputime_to_cputime64(__ct) ((u64) __ct)

after:
···
#define cputime64_zero (0ULL)
#define cputime64_add(__a, __b) ((__a) + (__b))
+ #define cputime64_sub(__a, __b) ((__a) - (__b))
#define cputime64_to_jiffies64(__ct) (__ct)
+ #define jiffies64_to_cputime64(__jif) (__jif)
#define cputime_to_cputime64(__ct) ((u64) __ct)
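The two helpers added above are what the reworked ondemand governor uses for its wall-clock bookkeeping. A minimal illustrative sketch of that pattern (not part of the patch; the function name is hypothetical):

#include <linux/jiffies.h>
#include <linux/kernel_stat.h>

/* Illustrative only: elapsed wall-clock ticks since the previous sample,
 * computed with the new cputime64_sub()/jiffies64_to_cputime64() helpers. */
static unsigned int elapsed_wall_ticks(cputime64_t *prev_wall)
{
    cputime64_t cur_wall = jiffies64_to_cputime64(get_jiffies_64());
    unsigned int ticks = (unsigned int) cputime64_sub(cur_wall, *prev_wall);

    *prev_wall = cur_wall;
    return ticks;
}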
+2
include/linux/workqueue.h
before:
···
extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));

extern int FASTCALL(schedule_work(struct work_struct *work));

after:
···
extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+         struct work_struct *work, unsigned long delay);
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));

extern int FASTCALL(schedule_work(struct work_struct *work));
+32 -25
kernel/workqueue.c
before:
···
    put_cpu();
    return ret;
}

static void delayed_work_timer_fn(unsigned long __data)
{
···
    }
    return ret;
}

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
···
        unlock_cpu_hotplug();
    }
}

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
        int cpu)
···
    }
    return wq;
}

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
···
    free_percpu(wq->cpu_wq);
    kfree(wq);
}

static struct workqueue_struct *keventd_wq;
···
{
    return queue_work(keventd_wq, work);
}

int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
    return queue_delayed_work(keventd_wq, work, delay);
}

int schedule_delayed_work_on(int cpu,
        struct work_struct *work, unsigned long delay)
{
-     int ret = 0;
-     struct timer_list *timer = &work->timer;
-
-     if (!test_and_set_bit(0, &work->pending)) {
-         BUG_ON(timer_pending(timer));
-         BUG_ON(!list_empty(&work->entry));
-         /* This stores keventd_wq for the moment, for the timer_fn */
-         work->wq_data = keventd_wq;
-         timer->expires = jiffies + delay;
-         timer->data = (unsigned long)work;
-         timer->function = delayed_work_timer_fn;
-         add_timer_on(timer, cpu);
-         ret = 1;
-     }
-     return ret;
}

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
···
{
    flush_workqueue(keventd_wq);
}

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
···
    BUG_ON(!keventd_wq);
}

- EXPORT_SYMBOL_GPL(__create_workqueue);
- EXPORT_SYMBOL_GPL(queue_work);
- EXPORT_SYMBOL_GPL(queue_delayed_work);
- EXPORT_SYMBOL_GPL(flush_workqueue);
- EXPORT_SYMBOL_GPL(destroy_workqueue);
-
- EXPORT_SYMBOL(schedule_work);
- EXPORT_SYMBOL(schedule_delayed_work);
- EXPORT_SYMBOL(schedule_delayed_work_on);
- EXPORT_SYMBOL(flush_scheduled_work);

after:
···
    put_cpu();
    return ret;
}
+ EXPORT_SYMBOL_GPL(queue_work);

static void delayed_work_timer_fn(unsigned long __data)
{
···
    }
    return ret;
}
+ EXPORT_SYMBOL_GPL(queue_delayed_work);
+
+ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+         struct work_struct *work, unsigned long delay)
+ {
+     int ret = 0;
+     struct timer_list *timer = &work->timer;
+
+     if (!test_and_set_bit(0, &work->pending)) {
+         BUG_ON(timer_pending(timer));
+         BUG_ON(!list_empty(&work->entry));
+
+         /* This stores wq for the moment, for the timer_fn */
+         work->wq_data = wq;
+         timer->expires = jiffies + delay;
+         timer->data = (unsigned long)work;
+         timer->function = delayed_work_timer_fn;
+         add_timer_on(timer, cpu);
+         ret = 1;
+     }
+     return ret;
+ }
+ EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
···
        unlock_cpu_hotplug();
    }
}
+ EXPORT_SYMBOL_GPL(flush_workqueue);

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
        int cpu)
···
    }
    return wq;
}
+ EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
···
    free_percpu(wq->cpu_wq);
    kfree(wq);
}
+ EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;
···
{
    return queue_work(keventd_wq, work);
}
+ EXPORT_SYMBOL(schedule_work);

int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
    return queue_delayed_work(keventd_wq, work, delay);
}
+ EXPORT_SYMBOL(schedule_delayed_work);

int schedule_delayed_work_on(int cpu,
        struct work_struct *work, unsigned long delay)
{
+     return queue_delayed_work_on(cpu, keventd_wq, work, delay);
}
+ EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
···
{
    flush_workqueue(keventd_wq);
}
+ EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
···
    BUG_ON(!keventd_wq);
}
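For reference, a minimal usage sketch of the new queue_delayed_work_on() interface (illustrative only; the my_* names are hypothetical), mirroring how the ondemand governor arms and re-arms its per-CPU sampling work in the diff above:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

/* Runs on the worker thread of the CPU it was queued on; re-arms itself
 * there, as the ondemand do_dbs_timer() does. */
static void my_work_fn(void *data)
{
    queue_delayed_work_on(smp_processor_id(), my_wq, &my_work,
            msecs_to_jiffies(100));
}

static int my_start(int cpu)
{
    my_wq = create_workqueue("my_wq");
    if (!my_wq)
        return -ENOMEM;

    /* three-argument INIT_WORK(work, handler, data), matching this tree */
    INIT_WORK(&my_work, my_work_fn, NULL);
    queue_delayed_work_on(cpu, my_wq, &my_work, msecs_to_jiffies(100));
    return 0;
}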