Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/cpufreq/cpufreq_ondemand.c at v2.6.29-rc8 (688 lines, 19 kB)
/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using an appropriate sampling rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE			\
		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE			\
		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
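
/*
 * Worked example (illustrative numbers, not from this file): for a CPU
 * with a 100 uS transition latency, def_sampling_rate becomes
 * 100 * 1000 = 100000 uS, i.e. the load is re-evaluated every 100 mS.
 * MIN_SAMPLING_RATE is then 100000 / 2 = 50000 uS and MAX_SAMPLING_RATE
 * is 500 * 100000 = 50000000 uS. A driver reporting CPUFREQ_ETERNAL
 * exceeds TRANSITION_LATENCY_LIMIT and is refused by this governor.
 */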

static void do_dbs_timer(struct work_struct *work);

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	int cpu;
	unsigned int enable:1,
		     sample_type:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
 * lock and dbs_mutex. cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
 * is recursive for the same process. -Venki
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *kondemand_wq;

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int powersave_bias;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
				  kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = cur_wall_time;

	return idle_time;
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}

/*
 * Find the right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
				       relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
				       CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
				       CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
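
/*
 * Worked example (illustrative numbers): with powersave_bias = 200
 * (i.e. 20%) and a 2000 MHz request, freq_reduc = 2000 * 200 / 1000 =
 * 400 MHz, so freq_avg = 1600 MHz. If the table only offers 1500 and
 * 2000 MHz, the governor spends (1600 - 1500) / (2000 - 1500) = 1/5 of
 * each sampling window at freq_hi = 2000 MHz and the rest at
 * freq_lo = 1500 MHz, averaging roughly the requested 1600 MHz.
 */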

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
		dbs_info->freq_table = cpufreq_frequency_get_table(i);
		dbs_info->freq_lo = 0;
	}
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_SAMPLING_RATE
			|| input < MIN_SAMPLING_RATE) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if ( ret != 1 )
		return -EINVAL;

	if ( input > 1 )
		input = 1;

	mutex_lock(&dbs_mutex);
	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;

	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	mutex_unlock(&dbs_mutex);

	return count;
}

#define define_one_rw(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);

static struct attribute * dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/
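
/*
 * Usage sketch (assuming the usual cpufreq sysfs layout; the exact path
 * can vary by kernel configuration):
 *
 *	cat /sys/devices/system/cpu/cpu0/cpufreq/ondemand/sampling_rate
 *	echo 95 > /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
 *
 * Writes outside [MIN_FREQUENCY_UP_THRESHOLD, MAX_FREQUENCY_UP_THRESHOLD]
 * are rejected with -EINVAL by store_up_threshold() above.
 */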

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	if (!this_dbs_info->enable)
		return;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate we check: if the current idle time is less
	 * than 20% (default), we try to increase the frequency.
	 * Every sampling_rate we also look for the lowest frequency which
	 * can sustain the load while keeping idle time over 30%. If such
	 * a frequency exists, we try to decrease to it.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of the current frequency.
	 */

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}

	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* if we are already at full speed then break out early */
		if (!dbs_tuners_ins.powersave_bias) {
			if (policy->cur == policy->max)
				return;

			__cpufreq_driver_target(policy, policy->max,
				CPUFREQ_RELATION_H);
		} else {
			int freq = powersave_bias_target(policy, policy->max,
					CPUFREQ_RELATION_H);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
		return;
	}
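
	/*
	 * Worked example (illustrative numbers, not from the kernel):
	 * suppose a CPU was busy for 60000 of the last 100000 uS of wall
	 * time, so load = 100 * 60000 / 100000 = 60. With freq_avg = 1000
	 * (driver units, typically kHz) this gives load_freq = 60 * 1000 =
	 * 60000. At up_threshold = 80 and policy->cur = 1000, the increase
	 * test above compares 60000 against 80 * 1000 = 80000, so no
	 * ramp-up happens.
	 */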

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we stay 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}
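
/*
 * Worked example continued (illustrative): with max_load_freq = 60000,
 * up_threshold = 80 and down_differential = 10, the decrease test
 * compares 60000 against (80 - 10) * 1000 = 70000. Since the load is
 * below that, freq_next = 60000 / 70 = 857, and CPUFREQ_RELATION_L
 * selects the lowest table frequency at or above 857.
 */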

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;

	if (lock_policy_rwsem_write(cpu) < 0)
		return;

	if (!dbs_info->enable) {
		unlock_policy_rwsem_write(cpu);
		return;
	}

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
					dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
	}
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
	unlock_policy_rwsem_write(cpu);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->enable = 1;
	ondemand_powersave_bias_init();
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
			      delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	dbs_info->enable = 0;
	cancel_delayed_work(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);
		dbs_enable++;

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			dbs_enable--;
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		/*
		 * Start the timer/schedule the work when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate = latency *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
				def_sampling_rate = MIN_STAT_SAMPLING_RATE;

			dbs_tuners_ins.sampling_rate = def_sampling_rate;
		}
		dbs_timer_init(this_dbs_info);

		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		dbs_timer_exit(this_dbs_info);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->max,
						CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->min,
						CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency = TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	int err;
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
	}

	kondemand_wq = create_workqueue("kondemand");
	if (!kondemand_wq) {
		printk(KERN_ERR "Creation of kondemand failed\n");
		return -EFAULT;
	}
	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
	if (err)
		destroy_workqueue(kondemand_wq);

	return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	destroy_workqueue(kondemand_wq);
}


MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
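
/*
 * Usage sketch (assuming the standard cpufreq sysfs interface): once the
 * module is available, the governor is selected per policy with e.g.
 *
 *	echo ondemand > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * which makes the cpufreq core invoke cpufreq_governor_dbs() above with
 * CPUFREQ_GOV_START.
 */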