Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drivers/cpufreq/cpufreq_ondemand.c at v2.6.16 (500 lines, 14 kB)
/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 * Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/mutex.h>

/*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep variable names shorter and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in us (microseconds).
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000)

static void do_dbs_timer(void *data);

struct cpu_dbs_info_s {
	struct cpufreq_policy *cur_policy;
	unsigned int prev_cpu_idle_up;
	unsigned int prev_cpu_idle_down;
	unsigned int enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

static DEFINE_MUTEX(dbs_mutex);
static DECLARE_WORK(dbs_work, do_dbs_timer, NULL);

struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int ignore_nice;
};

static struct dbs_tuners dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
};

static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
	return kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait +
		(dbs_tuners_ins.ignore_nice ?
		 kstat_cpu(cpu).cpustat.nice :
		 0);
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_down_factor = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

#define define_one_rw(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(int cpu)
{
	unsigned int idle_ticks, up_idle_ticks, total_ticks;
	unsigned int freq_next;
	unsigned int freq_down_sampling_rate;
	static int down_skip[NR_CPUS];
	struct cpu_dbs_info_s *this_dbs_info;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;
	/*
	 * Every sampling_rate, we check whether current idle time is less
	 * than 20% (default); if it is, we try to increase the frequency.
	 * Every sampling_rate*sampling_down_factor, we look for the lowest
	 * frequency which can sustain the load while keeping idle time over
	 * 30%. If such a frequency exists, we try to decrease to it.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of current frequency.
	 */

	/* Check for frequency increase */
	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks, total_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		total_idle_ticks = get_cpu_idle_time(j);
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_up;
		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
		usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (idle_ticks < up_idle_ticks) {
		down_skip[cpu] = 0;
		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;

			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->prev_cpu_idle_down =
				j_dbs_info->prev_cpu_idle_up;
		}
		/* if we are already at full speed then break out early */
		if (policy->cur == policy->max)
			return;

		__cpufreq_driver_target(policy, policy->max,
			CPUFREQ_RELATION_H);
		return;
	}

	/* Check for frequency decrease */
	down_skip[cpu]++;
	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
		return;

	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks, total_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		/* Check for frequency decrease */
		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_down;
		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}

	down_skip[cpu] = 0;
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/* Compute how many ticks there are between two measurements */
	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
		dbs_tuners_ins.sampling_down_factor;
	total_ticks = usecs_to_jiffies(freq_down_sampling_rate);

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we target 10 points below the threshold.
	 */
	freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
	freq_next = (freq_next * policy->cur) /
		(dbs_tuners_ins.up_threshold - 10);

	if (freq_next <= ((policy->cur * 95) / 100))
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
}

static void do_dbs_timer(void *data)
{
	int i;
	mutex_lock(&dbs_mutex);
	for_each_online_cpu(i)
		dbs_check_cpu(i);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	mutex_unlock(&dbs_mutex);
}

static inline void dbs_timer_init(void)
{
	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	return;
}

static inline void dbs_timer_exit(void)
{
	cancel_delayed_work(&dbs_work);
	return;
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) ||
		    (!policy->cur))
			return -EINVAL;

		if (policy->cpuinfo.transition_latency >
				(TRANSITION_LATENCY_LIMIT * 1000))
			return -EINVAL;
		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);
		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
			j_dbs_info->prev_cpu_idle_down
				= j_dbs_info->prev_cpu_idle_up;
		}
		this_dbs_info->enable = 1;
		sysfs_create_group(&policy->kobj, &dbs_attr_group);
		dbs_enable++;
		/*
		 * Start the timer work when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in ns; convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate = latency *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
				def_sampling_rate = MIN_STAT_SAMPLING_RATE;

			dbs_tuners_ins.sampling_rate = def_sampling_rate;
			dbs_tuners_ins.ignore_nice = 0;

			dbs_timer_init();
		}

		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		/*
		 * Stop the timer work when this governor is
		 * no longer in use on any CPU
		 */
		if (dbs_enable == 0)
			dbs_timer_exit();

		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_dbs = {
	.name		= "ondemand",
	.governor	= cpufreq_governor_dbs,
	.owner		= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_dbs);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	/* Make sure that the scheduled work is indeed not running */
	flush_scheduled_work();

	cpufreq_unregister_governor(&cpufreq_gov_dbs);
}


MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);
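
The decision arithmetic in dbs_check_cpu() is compact but easy to misread. The following is a minimal userspace sketch that replays the same up/down math with made-up sample numbers: up_threshold, total_ticks, idle_ticks, and cur_khz are illustrative stand-ins for the kernel-only quantities (usecs_to_jiffies() of the sampling window, policy->cur), not values taken from the source.

/*
 * Userspace replay of the ondemand up/down decision. All inputs are
 * hypothetical sample values; the formulas mirror dbs_check_cpu() above.
 */
#include <stdio.h>

int main(void)
{
        unsigned int up_threshold = 80;  /* DEF_FREQUENCY_UP_THRESHOLD */
        unsigned int total_ticks = 50;   /* ticks per sampling window (stub) */
        unsigned int idle_ticks = 40;    /* measured idle ticks (stub) */
        unsigned int cur_khz = 2000000;  /* policy->cur, 2 GHz (stub) */

        /* Up check: scaled idle time versus (100 - up_threshold) ticks */
        unsigned int up_idle_ticks = (100 - up_threshold) * total_ticks;
        if (idle_ticks * 100 < up_idle_ticks) {
                printf("busy: jump to policy->max\n");
                return 0;
        }

        /*
         * Down check: lowest frequency that keeps projected load about
         * 10 points below up_threshold, as freq_next is computed above.
         */
        unsigned int load = ((total_ticks - idle_ticks) * 100) / total_ticks;
        unsigned int freq_next = (load * cur_khz) / (up_threshold - 10);

        if (freq_next <= (cur_khz * 95) / 100)
                printf("scale down to %u kHz\n", freq_next);
        else
                printf("stay at %u kHz\n", cur_khz);
        return 0;
}

With these stub inputs (20% load), the sketch prints "scale down to 571428 kHz": the governor picks the lowest frequency at which the observed work would still leave headroom under up_threshold, and only acts because the target is at least 5% below the current frequency.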
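
The sysfs_create_group(&policy->kobj, &dbs_attr_group) call above publishes the tunables as an "ondemand" directory under each policy's cpufreq kobject, which on kernels of this vintage appears at /sys/devices/system/cpu/cpuN/cpufreq/ondemand/. Below is a minimal sketch of driving that interface from userspace C; the sysfs_write() helper is a hypothetical convenience, the cpu0 paths assume the standard cpufreq sysfs layout, and the program must run as root.

/* Hypothetical sketch: select the ondemand governor and tune it via sysfs. */
#include <stdio.h>

/* hypothetical helper: write one string to a sysfs file */
static int sysfs_write(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");
        if (!f) {
                perror(path);
                return -1;
        }
        fprintf(f, "%s", val);
        fclose(f);
        return 0;
}

int main(void)
{
        /* selecting the governor triggers CPUFREQ_GOV_START above */
        sysfs_write("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor",
                    "ondemand");
        /* go to max frequency only above 95% load;
         * store_up_threshold() rejects values outside 11..100 */
        sysfs_write("/sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold",
                    "95");
        /* count nice time as idle; store_ignore_nice_load() clamps to 0/1 */
        sysfs_write("/sys/devices/system/cpu/cpu0/cpufreq/ondemand/ignore_nice_load",
                    "1");
        return 0;
}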