Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'next' of git://github.com/kernelslacker/cpufreq

* 'next' of git://github.com/kernelslacker/cpufreq:
[CPUFREQ] db8500: support all frequencies
[CPUFREQ] db8500: remove unneeded for loop iteration over freq_table
[CPUFREQ] ARM Exynos4210 PM/Suspend compatibility with different bootloaders
[CPUFREQ] ARM: ux500: send cpufreq notification for all cpus
[CPUFREQ] e_powersaver: Allow user to lower maximum voltage
[CPUFREQ] e_powersaver: Check BIOS limit for CPU frequency
[CPUFREQ] e_powersaver: Additional checks
[CPUFREQ] exynos4210: Show list of available frequencies

+268 -32
+20 -16
drivers/cpufreq/db8500-cpufreq.c
··· 18 18 static struct cpufreq_frequency_table freq_table[] = { 19 19 [0] = { 20 20 .index = 0, 21 - .frequency = 300000, 21 + .frequency = 200000, 22 22 }, 23 23 [1] = { 24 24 .index = 1, 25 - .frequency = 600000, 25 + .frequency = 300000, 26 26 }, 27 27 [2] = { 28 - /* Used for MAX_OPP, if available */ 29 28 .index = 2, 30 - .frequency = CPUFREQ_TABLE_END, 29 + .frequency = 600000, 31 30 }, 32 31 [3] = { 32 + /* Used for MAX_OPP, if available */ 33 33 .index = 3, 34 + .frequency = CPUFREQ_TABLE_END, 35 + }, 36 + [4] = { 37 + .index = 4, 34 38 .frequency = CPUFREQ_TABLE_END, 35 39 }, 36 40 }; 37 41 38 42 static enum arm_opp idx2opp[] = { 43 + ARM_EXTCLK, 39 44 ARM_50_OPP, 40 45 ARM_100_OPP, 41 46 ARM_MAX_OPP ··· 77 72 78 73 freqs.old = policy->cur; 79 74 freqs.new = freq_table[idx].frequency; 80 - freqs.cpu = policy->cpu; 81 75 82 76 if (freqs.old == freqs.new) 83 77 return 0; 84 78 85 79 /* pre-change notification */ 86 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 80 + for_each_cpu(freqs.cpu, policy->cpus) 81 + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 87 82 88 83 /* request the PRCM unit for opp change */ 89 84 if (prcmu_set_arm_opp(idx2opp[idx])) { ··· 92 87 } 93 88 94 89 /* post change notification */ 95 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 90 + for_each_cpu(freqs.cpu, policy->cpus) 91 + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 96 92 97 93 return 0; 98 94 } ··· 110 104 static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) 111 105 { 112 106 int res; 113 - int i; 114 107 115 108 BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table)); 116 109 117 - if (cpu_is_u8500v2() && !prcmu_is_u8400()) { 118 - freq_table[0].frequency = 400000; 119 - freq_table[1].frequency = 800000; 110 + if (!prcmu_is_u8400()) { 111 + freq_table[1].frequency = 400000; 112 + freq_table[2].frequency = 800000; 120 113 if (prcmu_has_arm_maxopp()) 121 - freq_table[2].frequency = 1000000; 114 + freq_table[3].frequency = 1000000; 122 115 } 116 + pr_info("db8500-cpufreq : Available frequencies:\n"); 117 + while (freq_table[i].frequency != CPUFREQ_TABLE_END) 118 + pr_info(" %d Mhz\n", freq_table[i++].frequency/1000); 123 119 124 120 /* get policy fields based on the table */ 125 121 res = cpufreq_frequency_table_cpuinfo(policy, freq_table); ··· 135 127 policy->min = policy->cpuinfo.min_freq; 136 128 policy->max = policy->cpuinfo.max_freq; 137 129 policy->cur = db8500_cpufreq_getspeed(policy->cpu); 138 - 139 - for (i = 0; freq_table[i].frequency != policy->cur; i++) 140 - ; 141 - 142 130 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 143 131 144 132 /*
+124 -11
drivers/cpufreq/e_powersaver.c
··· 19 19 #include <asm/msr.h> 20 20 #include <asm/tsc.h> 21 21 22 + #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 23 + #include <linux/acpi.h> 24 + #include <acpi/processor.h> 25 + #endif 26 + 22 27 #define EPS_BRAND_C7M 0 23 28 #define EPS_BRAND_C7 1 24 29 #define EPS_BRAND_EDEN 2 ··· 32 27 33 28 struct eps_cpu_data { 34 29 u32 fsb; 30 + #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 31 + u32 bios_limit; 32 + #endif 35 33 struct cpufreq_frequency_table freq_table[]; 36 34 }; 37 35 38 36 static struct eps_cpu_data *eps_cpu[NR_CPUS]; 39 37 38 + /* Module parameters */ 39 + static int freq_failsafe_off; 40 + static int voltage_failsafe_off; 41 + static int set_max_voltage; 42 + 43 + #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 44 + static int ignore_acpi_limit; 45 + 46 + static struct acpi_processor_performance *eps_acpi_cpu_perf; 47 + 48 + /* Minimum necessary to get acpi_processor_get_bios_limit() working */ 49 + static int eps_acpi_init(void) 50 + { 51 + eps_acpi_cpu_perf = kzalloc(sizeof(struct acpi_processor_performance), 52 + GFP_KERNEL); 53 + if (!eps_acpi_cpu_perf) 54 + return -ENOMEM; 55 + 56 + if (!zalloc_cpumask_var(&eps_acpi_cpu_perf->shared_cpu_map, 57 + GFP_KERNEL)) { 58 + kfree(eps_acpi_cpu_perf); 59 + eps_acpi_cpu_perf = NULL; 60 + return -ENOMEM; 61 + } 62 + 63 + if (acpi_processor_register_performance(eps_acpi_cpu_perf, 0)) { 64 + free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map); 65 + kfree(eps_acpi_cpu_perf); 66 + eps_acpi_cpu_perf = NULL; 67 + return -EIO; 68 + } 69 + return 0; 70 + } 71 + 72 + static int eps_acpi_exit(struct cpufreq_policy *policy) 73 + { 74 + if (eps_acpi_cpu_perf) { 75 + acpi_processor_unregister_performance(eps_acpi_cpu_perf, 0); 76 + free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map); 77 + kfree(eps_acpi_cpu_perf); 78 + eps_acpi_cpu_perf = NULL; 79 + } 80 + return 0; 81 + } 82 + #endif 40 83 41 84 static unsigned int eps_get(unsigned int cpu) 42 85 { ··· 217 164 int k, step, voltage; 218 165 int ret; 219 166 int states; 167 + #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 168 + unsigned int limit; 169 + #endif 220 170 221 171 if (policy->cpu != 0) 222 172 return -ENODEV; ··· 300 244 return -EINVAL; 301 245 if (current_voltage > 0x1f || max_voltage > 0x1f) 302 246 return -EINVAL; 303 - if (max_voltage < min_voltage) 247 + if (max_voltage < min_voltage 248 + || current_voltage < min_voltage 249 + || current_voltage > max_voltage) 304 250 return -EINVAL; 251 + 252 + /* Check for systems using underclocked CPU */ 253 + if (!freq_failsafe_off && max_multiplier != current_multiplier) { 254 + printk(KERN_INFO "eps: Your processor is running at different " 255 + "frequency then its maximum. Aborting.\n"); 256 + printk(KERN_INFO "eps: You can use freq_failsafe_off option " 257 + "to disable this check.\n"); 258 + return -EINVAL; 259 + } 260 + if (!voltage_failsafe_off && max_voltage != current_voltage) { 261 + printk(KERN_INFO "eps: Your processor is running at different " 262 + "voltage then its maximum. Aborting.\n"); 263 + printk(KERN_INFO "eps: You can use voltage_failsafe_off " 264 + "option to disable this check.\n"); 265 + return -EINVAL; 266 + } 305 267 306 268 /* Calc FSB speed */ 307 269 fsb = cpu_khz / current_multiplier; 270 + 271 + #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 272 + /* Check for ACPI processor speed limit */ 273 + if (!ignore_acpi_limit && !eps_acpi_init()) { 274 + if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) { 275 + printk(KERN_INFO "eps: ACPI limit %u.%uGHz\n", 276 + limit/1000000, 277 + (limit%1000000)/10000); 278 + eps_acpi_exit(policy); 279 + /* Check if max_multiplier is in BIOS limits */ 280 + if (limit && max_multiplier * fsb > limit) { 281 + printk(KERN_INFO "eps: Aborting.\n"); 282 + return -EINVAL; 283 + } 284 + } 285 + } 286 + #endif 287 + 288 + /* Allow user to set lower maximum voltage then that reported 289 + * by processor */ 290 + if (brand == EPS_BRAND_C7M && set_max_voltage) { 291 + u32 v; 292 + 293 + /* Change mV to something hardware can use */ 294 + v = (set_max_voltage - 700) / 16; 295 + /* Check if voltage is within limits */ 296 + if (v >= min_voltage && v <= max_voltage) { 297 + printk(KERN_INFO "eps: Setting %dmV as maximum.\n", 298 + v * 16 + 700); 299 + max_voltage = v; 300 + } 301 + } 302 + 308 303 /* Calc number of p-states supported */ 309 304 if (brand == EPS_BRAND_C7M) 310 305 states = max_multiplier - min_multiplier + 1; ··· 372 265 373 266 /* Copy basic values */ 374 267 centaur->fsb = fsb; 268 + #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 269 + centaur->bios_limit = limit; 270 + #endif 375 271 376 272 /* Fill frequency and MSR value table */ 377 273 f_table = &centaur->freq_table[0]; ··· 413 303 static int eps_cpu_exit(struct cpufreq_policy *policy) 414 304 { 415 305 unsigned int cpu = policy->cpu; 416 - struct eps_cpu_data *centaur; 417 - u32 lo, hi; 418 306 419 - if (eps_cpu[cpu] == NULL) 420 - return -ENODEV; 421 - centaur = eps_cpu[cpu]; 422 - 423 - /* Get max frequency */ 424 - rdmsr(MSR_IA32_PERF_STATUS, lo, hi); 425 - /* Set max frequency */ 426 - eps_set_state(centaur, cpu, hi & 0xffff); 427 307 /* Bye */ 428 308 cpufreq_frequency_table_put_attr(policy->cpu); 429 309 kfree(eps_cpu[cpu]); ··· 458 358 { 459 359 cpufreq_unregister_driver(&eps_driver); 460 360 } 361 + 362 + /* Allow user to overclock his machine or to change frequency to higher after 363 + * unloading module */ 364 + module_param(freq_failsafe_off, int, 0644); 365 + MODULE_PARM_DESC(freq_failsafe_off, "Disable current vs max frequency check"); 366 + module_param(voltage_failsafe_off, int, 0644); 367 + MODULE_PARM_DESC(voltage_failsafe_off, "Disable current vs max voltage check"); 368 + #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 369 + module_param(ignore_acpi_limit, int, 0644); 370 + MODULE_PARM_DESC(ignore_acpi_limit, "Don't check ACPI's processor speed limit"); 371 + #endif 372 + module_param(set_max_voltage, int, 0644); 373 + MODULE_PARM_DESC(set_max_voltage, "Set maximum CPU voltage (mV) C7-M only"); 461 374 462 375 MODULE_AUTHOR("Rafal Bilski <rafalbilski@interia.pl>"); 463 376 MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPU's.");
+124 -5
drivers/cpufreq/exynos4210-cpufreq.c
··· 17 17 #include <linux/slab.h> 18 18 #include <linux/regulator/consumer.h> 19 19 #include <linux/cpufreq.h> 20 + #include <linux/notifier.h> 21 + #include <linux/suspend.h> 20 22 21 23 #include <mach/map.h> 22 24 #include <mach/regs-clock.h> ··· 37 35 38 36 static struct cpufreq_freqs freqs; 39 37 static unsigned int memtype; 38 + 39 + static unsigned int locking_frequency; 40 + static bool frequency_locked; 41 + static DEFINE_MUTEX(cpufreq_lock); 40 42 41 43 enum exynos4_memory_type { 42 44 DDR2 = 4, ··· 411 405 { 412 406 unsigned int index, old_index; 413 407 unsigned int arm_volt, int_volt; 408 + int err = -EINVAL; 414 409 415 410 freqs.old = exynos4_getspeed(policy->cpu); 416 411 412 + mutex_lock(&cpufreq_lock); 413 + 414 + if (frequency_locked && target_freq != locking_frequency) { 415 + err = -EAGAIN; 416 + goto out; 417 + } 418 + 417 419 if (cpufreq_frequency_table_target(policy, exynos4_freq_table, 418 420 freqs.old, relation, &old_index)) 419 - return -EINVAL; 421 + goto out; 420 422 421 423 if (cpufreq_frequency_table_target(policy, exynos4_freq_table, 422 424 target_freq, relation, &index)) 423 - return -EINVAL; 425 + goto out; 426 + 427 + err = 0; 424 428 425 429 freqs.new = exynos4_freq_table[index].frequency; 426 430 freqs.cpu = policy->cpu; 427 431 428 432 if (freqs.new == freqs.old) 429 - return 0; 433 + goto out; 430 434 431 435 /* get the voltage value */ 432 436 arm_volt = exynos4_volt_table[index].arm_volt; ··· 463 447 464 448 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 465 449 466 - return 0; 450 + out: 451 + mutex_unlock(&cpufreq_lock); 452 + return err; 467 453 } 468 454 469 455 #ifdef CONFIG_PM 456 + /* 457 + * These suspend/resume are used as syscore_ops, it is already too 458 + * late to set regulator voltages at this stage. 459 + */ 470 460 static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy) 471 461 { 472 462 return 0; ··· 484 462 } 485 463 #endif 486 464 465 + /** 466 + * exynos4_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume 467 + * context 468 + * @notifier 469 + * @pm_event 470 + * @v 471 + * 472 + * While frequency_locked == true, target() ignores every frequency but 473 + * locking_frequency. The locking_frequency value is the initial frequency, 474 + * which is set by the bootloader. In order to eliminate possible 475 + * inconsistency in clock values, we save and restore frequencies during 476 + * suspend and resume and block CPUFREQ activities. Note that the standard 477 + * suspend/resume cannot be used as they are too deep (syscore_ops) for 478 + * regulator actions. 479 + */ 480 + static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier, 481 + unsigned long pm_event, void *v) 482 + { 483 + struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */ 484 + static unsigned int saved_frequency; 485 + unsigned int temp; 486 + 487 + mutex_lock(&cpufreq_lock); 488 + switch (pm_event) { 489 + case PM_SUSPEND_PREPARE: 490 + if (frequency_locked) 491 + goto out; 492 + frequency_locked = true; 493 + 494 + if (locking_frequency) { 495 + saved_frequency = exynos4_getspeed(0); 496 + 497 + mutex_unlock(&cpufreq_lock); 498 + exynos4_target(policy, locking_frequency, 499 + CPUFREQ_RELATION_H); 500 + mutex_lock(&cpufreq_lock); 501 + } 502 + 503 + break; 504 + case PM_POST_SUSPEND: 505 + 506 + if (saved_frequency) { 507 + /* 508 + * While frequency_locked, only locking_frequency 509 + * is valid for target(). In order to use 510 + * saved_frequency while keeping frequency_locked, 511 + * we temporarly overwrite locking_frequency. 512 + */ 513 + temp = locking_frequency; 514 + locking_frequency = saved_frequency; 515 + 516 + mutex_unlock(&cpufreq_lock); 517 + exynos4_target(policy, locking_frequency, 518 + CPUFREQ_RELATION_H); 519 + mutex_lock(&cpufreq_lock); 520 + 521 + locking_frequency = temp; 522 + } 523 + 524 + frequency_locked = false; 525 + break; 526 + } 527 + out: 528 + mutex_unlock(&cpufreq_lock); 529 + 530 + return NOTIFY_OK; 531 + } 532 + 533 + static struct notifier_block exynos4_cpufreq_nb = { 534 + .notifier_call = exynos4_cpufreq_pm_notifier, 535 + }; 536 + 487 537 static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy) 488 538 { 539 + int ret; 540 + 489 541 policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu); 490 542 491 543 cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu); ··· 575 479 */ 576 480 cpumask_setall(policy->cpus); 577 481 578 - return cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table); 482 + ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table); 483 + if (ret) 484 + return ret; 485 + 486 + cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu); 487 + 488 + return 0; 579 489 } 490 + 491 + static int exynos4_cpufreq_cpu_exit(struct cpufreq_policy *policy) 492 + { 493 + cpufreq_frequency_table_put_attr(policy->cpu); 494 + return 0; 495 + } 496 + 497 + static struct freq_attr *exynos4_cpufreq_attr[] = { 498 + &cpufreq_freq_attr_scaling_available_freqs, 499 + NULL, 500 + }; 580 501 581 502 static struct cpufreq_driver exynos4_driver = { 582 503 .flags = CPUFREQ_STICKY, ··· 601 488 .target = exynos4_target, 602 489 .get = exynos4_getspeed, 603 490 .init = exynos4_cpufreq_cpu_init, 491 + .exit = exynos4_cpufreq_cpu_exit, 604 492 .name = "exynos4_cpufreq", 493 + .attr = exynos4_cpufreq_attr, 605 494 #ifdef CONFIG_PM 606 495 .suspend = exynos4_cpufreq_suspend, 607 496 .resume = exynos4_cpufreq_resume, ··· 615 500 cpu_clk = clk_get(NULL, "armclk"); 616 501 if (IS_ERR(cpu_clk)) 617 502 return PTR_ERR(cpu_clk); 503 + 504 + locking_frequency = exynos4_getspeed(0); 618 505 619 506 moutcore = clk_get(NULL, "moutcore"); 620 507 if (IS_ERR(moutcore)) ··· 656 539 } else { 657 540 printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype); 658 541 } 542 + 543 + register_pm_notifier(&exynos4_cpufreq_nb); 659 544 660 545 return cpufreq_register_driver(&exynos4_driver); 661 546