Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq

* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] cpufreq_conservative: keep ignore_nice_load and freq_step values when reselected
[CPUFREQ] powernow: remove private for_each_cpu_mask()
[CPUFREQ] hotplug cpu fix for powernow-k8
[PATCH] cpufreq_ondemand: add range check
[PATCH] cpufreq_ondemand: keep ignore_nice_load value when it is reselected
[PATCH] cpufreq_ondemand: Warn if it cannot run due to too long transition latency
[PATCH] cpufreq_conservative: alternative initialise approach
[PATCH] cpufreq_conservative: make for_each_cpu() safe
[PATCH] cpufreq_conservative: alter default responsiveness
[PATCH] cpufreq_conservative: aligning of codebase with ondemand

+78 -97
+6 -1
arch/i386/kernel/cpu/cpufreq/powernow-k8.c
···
 
 static unsigned int powernowk8_get (unsigned int cpu)
 {
-	struct powernow_k8_data *data = powernow_data[cpu];
+	struct powernow_k8_data *data;
 	cpumask_t oldmask = current->cpus_allowed;
 	unsigned int khz = 0;
+
+	data = powernow_data[first_cpu(cpu_core_map[cpu])];
+
+	if (!data)
+		return -EINVAL;
 
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu) {
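
For illustration only, a minimal standalone C sketch (not the kernel driver) of the lookup pattern in the hunk above: shared per-core-pair state is stored only under the first CPU of each pair, so the getter maps any CPU to that first sibling and bails out if the entry has not been set up, for example after a CPU hotplug event. NCPUS, core_first_sibling[] and pstate_data here are illustrative stand-ins, not kernel APIs.

#include <stdio.h>

#define NCPUS 4

struct pstate_data {
	unsigned int cur_khz;		/* current frequency of the shared domain */
};

/* one slot per CPU, but only the first sibling of each core pair is filled in */
static struct pstate_data *pstate_data[NCPUS];
/* maps a CPU to the first CPU of its core pair (0,1 -> 0; 2,3 -> 2) */
static const int core_first_sibling[NCPUS] = { 0, 0, 2, 2 };

static int get_khz(int cpu, unsigned int *khz)
{
	struct pstate_data *data = pstate_data[core_first_sibling[cpu]];

	if (!data)			/* sibling never initialised (e.g. hotplug) */
		return -1;

	*khz = data->cur_khz;
	return 0;
}

int main(void)
{
	static struct pstate_data d0 = { .cur_khz = 1800000 };
	unsigned int khz;

	pstate_data[0] = &d0;		/* only the first core pair is set up */

	if (get_khz(1, &khz) == 0)	/* CPU 1 resolves to sibling 0 */
		printf("cpu1: %u kHz\n", khz);
	if (get_khz(3, &khz) != 0)	/* CPU 3's pair has no data */
		printf("cpu3: no data\n");
	return 0;
}
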
-4
arch/i386/kernel/cpu/cpufreq/powernow-k8.h
···
 
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
 
-#ifndef for_each_cpu_mask
-#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++)
-#endif
-
 #ifdef CONFIG_SMP
 static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
 {
+64 -89
drivers/cpufreq/cpufreq_conservative.c
···
  */
 
 #define DEF_FREQUENCY_UP_THRESHOLD (80)
-#define MIN_FREQUENCY_UP_THRESHOLD (0)
-#define MAX_FREQUENCY_UP_THRESHOLD (100)
-
 #define DEF_FREQUENCY_DOWN_THRESHOLD (20)
-#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
-#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
 
 /*
  * The polling frequency of this governor depends on the capability of
···
  * All times here are in uS.
  */
 static unsigned int def_sampling_rate;
-#define MIN_SAMPLING_RATE (def_sampling_rate / 2)
+#define MIN_SAMPLING_RATE_RATIO (2)
+/* for correct statistics, we need at least 10 ticks between each measure */
+#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
+#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
 #define MAX_SAMPLING_RATE (500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (100000)
-#define DEF_SAMPLING_DOWN_FACTOR (5)
+#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
+#define DEF_SAMPLING_DOWN_FACTOR (1)
+#define MAX_SAMPLING_DOWN_FACTOR (10)
 #define TRANSITION_LATENCY_LIMIT (10 * 1000)
 
 static void do_dbs_timer(void *data);
···
 	unsigned int prev_cpu_idle_up;
 	unsigned int prev_cpu_idle_down;
 	unsigned int enable;
+	unsigned int down_skip;
+	unsigned int requested_freq;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
···
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+	.ignore_nice = 0,
+	.freq_step = 5,
 };
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
···
 	unsigned int input;
 	int ret;
 	ret = sscanf (buf, "%u", &input);
-	if (ret != 1 )
+	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
 	mutex_lock(&dbs_mutex);
···
 	ret = sscanf (buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
-			input < MIN_FREQUENCY_UP_THRESHOLD ||
+	if (ret != 1 || input > 100 || input < 0 ||
 			input <= dbs_tuners_ins.down_threshold) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
···
 	ret = sscanf (buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
-			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
+	if (ret != 1 || input > 100 || input < 0 ||
 			input >= dbs_tuners_ins.up_threshold) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
···
 static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
+	unsigned int tmp_idle_ticks, total_idle_ticks;
 	unsigned int freq_step;
 	unsigned int freq_down_sampling_rate;
-	static int down_skip[NR_CPUS];
-	static int requested_freq[NR_CPUS];
-	static unsigned short init_flag = 0;
-	struct cpu_dbs_info_s *this_dbs_info;
-	struct cpu_dbs_info_s *dbs_info;
-
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy;
-	unsigned int j;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	if (!this_dbs_info->enable)
 		return;
 
 	policy = this_dbs_info->cur_policy;
 
-	if ( init_flag == 0 ) {
-		for_each_online_cpu(j) {
-			dbs_info = &per_cpu(cpu_dbs_info, j);
-			requested_freq[j] = dbs_info->cur_policy->cur;
-		}
-		init_flag = 1;
-	}
-
 	/*
 	 * The default safe range is 20% to 80%
 	 * Every sampling_rate, we check
···
 	 */
 
 	/* Check for frequency increase */
-
 	idle_ticks = UINT_MAX;
-	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks, total_idle_ticks;
-		struct cpu_dbs_info_s *j_dbs_info;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
-		total_idle_ticks = get_cpu_idle_time(j);
-		tmp_idle_ticks = total_idle_ticks -
-			j_dbs_info->prev_cpu_idle_up;
-		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+	/* Check for frequency increase */
+	total_idle_ticks = get_cpu_idle_time(cpu);
+	tmp_idle_ticks = total_idle_ticks -
+		this_dbs_info->prev_cpu_idle_up;
+	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
 
-		if (tmp_idle_ticks < idle_ticks)
-			idle_ticks = tmp_idle_ticks;
-	}
+	if (tmp_idle_ticks < idle_ticks)
+		idle_ticks = tmp_idle_ticks;
 
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
 	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+		usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
 	if (idle_ticks < up_idle_ticks) {
-		down_skip[cpu] = 0;
-		for_each_cpu_mask(j, policy->cpus) {
-			struct cpu_dbs_info_s *j_dbs_info;
+		this_dbs_info->down_skip = 0;
+		this_dbs_info->prev_cpu_idle_down =
+			this_dbs_info->prev_cpu_idle_up;
 
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
-			j_dbs_info->prev_cpu_idle_down =
-				j_dbs_info->prev_cpu_idle_up;
-		}
 		/* if we are already at full speed then break out early */
-		if (requested_freq[cpu] == policy->max)
+		if (this_dbs_info->requested_freq == policy->max)
 			return;
 
 		freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
···
 		if (unlikely(freq_step == 0))
 			freq_step = 5;
 
-		requested_freq[cpu] += freq_step;
-		if (requested_freq[cpu] > policy->max)
-			requested_freq[cpu] = policy->max;
+		this_dbs_info->requested_freq += freq_step;
+		if (this_dbs_info->requested_freq > policy->max)
+			this_dbs_info->requested_freq = policy->max;
 
-		__cpufreq_driver_target(policy, requested_freq[cpu],
+		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
 			CPUFREQ_RELATION_H);
 		return;
 	}
 
 	/* Check for frequency decrease */
-	down_skip[cpu]++;
-	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
+	this_dbs_info->down_skip++;
+	if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
 		return;
 
-	idle_ticks = UINT_MAX;
-	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks, total_idle_ticks;
-		struct cpu_dbs_info_s *j_dbs_info;
+	/* Check for frequency decrease */
+	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
+	tmp_idle_ticks = total_idle_ticks -
+		this_dbs_info->prev_cpu_idle_down;
+	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
-		tmp_idle_ticks = total_idle_ticks -
-			j_dbs_info->prev_cpu_idle_down;
-		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
-		if (tmp_idle_ticks < idle_ticks)
-			idle_ticks = tmp_idle_ticks;
-	}
+	if (tmp_idle_ticks < idle_ticks)
+		idle_ticks = tmp_idle_ticks;
 
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
-	down_skip[cpu] = 0;
+	this_dbs_info->down_skip = 0;
 
 	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
 		dbs_tuners_ins.sampling_down_factor;
 	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
-			usecs_to_jiffies(freq_down_sampling_rate);
+		usecs_to_jiffies(freq_down_sampling_rate);
 
 	if (idle_ticks > down_idle_ticks) {
-		/* if we are already at the lowest speed then break out early
+		/*
+		 * if we are already at the lowest speed then break out early
 		 * or if we 'cannot' reduce the speed as the user might want
-		 * freq_step to be zero */
-		if (requested_freq[cpu] == policy->min
+		 * freq_step to be zero
+		 */
+		if (this_dbs_info->requested_freq == policy->min
 				|| dbs_tuners_ins.freq_step == 0)
 			return;
 
···
 		if (unlikely(freq_step == 0))
 			freq_step = 5;
 
-		requested_freq[cpu] -= freq_step;
-		if (requested_freq[cpu] < policy->min)
-			requested_freq[cpu] = policy->min;
+		this_dbs_info->requested_freq -= freq_step;
+		if (this_dbs_info->requested_freq < policy->min)
+			this_dbs_info->requested_freq = policy->min;
 
-		__cpufreq_driver_target(policy,
-			requested_freq[cpu],
-			CPUFREQ_RELATION_H);
+		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
+				CPUFREQ_RELATION_H);
 		return;
 	}
 }
···
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
-			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
 			j_dbs_info->prev_cpu_idle_down
 				= j_dbs_info->prev_cpu_idle_up;
 		}
 		this_dbs_info->enable = 1;
+		this_dbs_info->down_skip = 0;
+		this_dbs_info->requested_freq = policy->cur;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable++;
 		/*
···
 		if (dbs_enable == 1) {
 			unsigned int latency;
 			/* policy latency is in nS. Convert it to uS first */
+			latency = policy->cpuinfo.transition_latency / 1000;
+			if (latency == 0)
+				latency = 1;
 
-			latency = policy->cpuinfo.transition_latency;
-			if (latency < 1000)
-				latency = 1000;
-
-			def_sampling_rate = (latency / 1000) *
+			def_sampling_rate = 10 * latency *
 				DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
+
+			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
+				def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
-			dbs_tuners_ins.ignore_nice = 0;
-			dbs_tuners_ins.freq_step = 5;
 
 			dbs_timer_init();
 		}
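
For illustration only, a minimal standalone C sketch (not the kernel governor) of the state change in the cpufreq_conservative hunks above: per-CPU bookkeeping that used to live in file-scope static arrays (down_skip[NR_CPUS], requested_freq[NR_CPUS]) guarded by a lazy init_flag now lives in each CPU's own info struct and is reset explicitly when the governor is started on a policy. NCPUS and everything except the down_skip/requested_freq fields are illustrative stand-ins.

#include <stdio.h>

#define NCPUS 4

struct dbs_info {
	unsigned int enable;
	unsigned int down_skip;		/* was: static int down_skip[NR_CPUS] */
	unsigned int requested_freq;	/* was: static int requested_freq[NR_CPUS] */
};

static struct dbs_info cpu_dbs_info[NCPUS];

/* governor start: state is initialised here, so no init_flag in the timer path */
static void governor_start(int cpu, unsigned int cur_freq)
{
	struct dbs_info *info = &cpu_dbs_info[cpu];

	info->enable = 1;
	info->down_skip = 0;
	info->requested_freq = cur_freq;
}

/* periodic check: touches only its own CPU's struct */
static void check_cpu(int cpu, unsigned int max_freq, unsigned int step)
{
	struct dbs_info *info = &cpu_dbs_info[cpu];

	if (!info->enable)
		return;

	info->down_skip = 0;
	info->requested_freq += step;
	if (info->requested_freq > max_freq)
		info->requested_freq = max_freq;
}

int main(void)
{
	governor_start(0, 1000000);
	check_cpu(0, 2000000, 100000);
	printf("cpu0 requested: %u kHz\n", cpu_dbs_info[0].requested_freq);
	return 0;
}
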
+8 -3
drivers/cpufreq/cpufreq_ondemand.c
···
 static struct dbs_tuners dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+	.ignore_nice = 0,
 };
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
···
 	freq_next = (freq_next * policy->cur) /
 			(dbs_tuners_ins.up_threshold - 10);
 
+	if (freq_next < policy->min)
+		freq_next = policy->min;
+
 	if (freq_next <= ((policy->cur * 95) / 100))
 		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
 }
···
 			return -EINVAL;
 
 		if (policy->cpuinfo.transition_latency >
-				(TRANSITION_LATENCY_LIMIT * 1000))
+				(TRANSITION_LATENCY_LIMIT * 1000)) {
+			printk(KERN_WARNING "ondemand governor failed to load "
+			       "due to too long transition latency\n");
 			return -EINVAL;
+		}
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
···
 				def_sampling_rate = MIN_STAT_SAMPLING_RATE;
 
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
-			dbs_tuners_ins.ignore_nice = 0;
-
 			dbs_timer_init();
 		}
 
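
For illustration only, a minimal standalone C sketch (not the kernel governor, and not the cpufreq API) of the ondemand range check added above: the projected next frequency is clamped to the policy minimum before deciding whether stepping down is worthwhile. The struct, the load value and the thresholds are illustrative stand-ins.

#include <stdio.h>

struct policy {
	unsigned int min;	/* kHz */
	unsigned int cur;	/* kHz */
};

/* returns the frequency to request, or 0 if no downward change is needed */
static unsigned int pick_down_freq(const struct policy *p, unsigned int load,
				   unsigned int up_threshold)
{
	/* scale so the projected load sits just below the up threshold */
	unsigned int freq_next = (load * p->cur) / (up_threshold - 10);

	/* range check: never ask for less than the policy allows */
	if (freq_next < p->min)
		freq_next = p->min;

	/* only step down if it saves at least ~5% */
	if (freq_next <= (p->cur * 95) / 100)
		return freq_next;
	return 0;
}

int main(void)
{
	struct policy p = { .min = 800000, .cur = 2000000 };
	unsigned int f = pick_down_freq(&p, 20, 80);

	printf("next: %u kHz\n", f ? f : p.cur);
	return 0;
}
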