Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.20-rc6 1744 lines 44 kB view raw
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *                Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *                Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>

/* Core-level debug output; routed through cpufreq_debug_printk() and
 * compiled to a no-op body unless CONFIG_CPU_FREQ_DEBUG is set below. */
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
                                                "cpufreq-core", msg)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
static DEFINE_SPINLOCK(cpufreq_driver_lock);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

/* SRCU notifier heads need runtime initialization, so the transition
 * list is set up from an initcall rather than a static initializer. */
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX (cpufreq_governor_mutex);

/**
 * cpufreq_cpu_get - look up the policy for a CPU, taking references
 * @cpu: CPU number
 *
 * On success returns the cpufreq_policy for @cpu with a reference held
 * on both the driver module (try_module_get) and the policy kobject
 * (kobject_get); release both via cpufreq_cpu_put().  Returns NULL if
 * @cpu is out of range, no driver is registered, the module is going
 * away, or no policy exists for the CPU.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *data;
        unsigned long flags;

        if (cpu >= NR_CPUS)
                goto err_out;

        /* get the cpufreq driver */
        spin_lock_irqsave(&cpufreq_driver_lock, flags);

        if (!cpufreq_driver)
                goto err_out_unlock;

        if (!try_module_get(cpufreq_driver->owner))
                goto err_out_unlock;


        /* get the CPU */
        data = cpufreq_cpu_data[cpu];

        if (!data)
                goto err_out_put_module;

        if (!kobject_get(&data->kobj))
                goto err_out_put_module;

        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return data;

err_out_put_module:
        module_put(cpufreq_driver->owner);
err_out_unlock:
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);


/* Drop the kobject and module references taken by cpufreq_cpu_get(). */
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
        kobject_put(&data->kobj);
        module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);


/*********************************************************************
 *                     UNIFIED DEBUG HELPERS                         *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimit'ed using printk_ratelimit? User can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimit'ing enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);

/* Decrement the ratelimit-disable depth count (re-enables ratelimiting
 * once the count drops to zero). */
static void cpufreq_debug_enable_ratelimit(void)
{
        unsigned long flags;

        spin_lock_irqsave(&disable_ratelimit_lock, flags);
        if (disable_ratelimit)
                disable_ratelimit--;
        spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

/* Increment the ratelimit-disable depth count; pairs with
 * cpufreq_debug_enable_ratelimit(). */
static void cpufreq_debug_disable_ratelimit(void)
{
        unsigned long flags;

        spin_lock_irqsave(&disable_ratelimit_lock, flags);
        disable_ratelimit++;
        spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

/* printf-style debug output.  Emits only when @type matches the module
 * parameter "debug" bitmask; optionally ratelimited via
 * printk_ratelimit() unless the disable count is non-zero. */
void cpufreq_debug_printk(unsigned int type, const char *prefix,
                          const char *fmt, ...)
{
        char s[256];
        va_list args;
        unsigned int len;
        unsigned long flags;

        WARN_ON(!prefix);
        if (type & debug) {
                spin_lock_irqsave(&disable_ratelimit_lock, flags);
                if (!disable_ratelimit && debug_ratelimit
                                        && !printk_ratelimit()) {
                        spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

                len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);

                va_start(args, fmt);
                len += vsnprintf(&s[len], (256 - len), fmt, args);
                va_end(args);

                /* NOTE(review): s is passed as the format string; callers
                 * must not feed untrusted '%' sequences through fmt. */
                printk(s);

                WARN_ON(len < 5);
        }
}
EXPORT_SYMBOL(cpufreq_debug_printk);


module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
                        " 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
                        " set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */


/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        /* first call: remember the reference loops_per_jiffy/frequency
         * pair so later transitions can be scaled against it */
        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                dprintk("saving %lu as reference value for loops_per_jiffy;"
                        "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
        }
        /* scale early (PRECHANGE) when speeding up, late (POSTCHANGE)
         * when slowing down, and always on suspend/resume changes */
        if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
            (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                dprintk("scaling loops_per_jiffy to %lu"
                        "for frequency %u kHz\n", loops_per_jiffy, ci->new);
        }
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        return;
}
#endif


/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
254 */ 255void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) 256{ 257 struct cpufreq_policy *policy; 258 259 BUG_ON(irqs_disabled()); 260 261 freqs->flags = cpufreq_driver->flags; 262 dprintk("notification %u of frequency transition to %u kHz\n", 263 state, freqs->new); 264 265 policy = cpufreq_cpu_data[freqs->cpu]; 266 switch (state) { 267 268 case CPUFREQ_PRECHANGE: 269 /* detect if the driver reported a value as "old frequency" 270 * which is not equal to what the cpufreq core thinks is 271 * "old frequency". 272 */ 273 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 274 if ((policy) && (policy->cpu == freqs->cpu) && 275 (policy->cur) && (policy->cur != freqs->old)) { 276 dprintk("Warning: CPU frequency is" 277 " %u, cpufreq assumed %u kHz.\n", 278 freqs->old, policy->cur); 279 freqs->old = policy->cur; 280 } 281 } 282 srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 283 CPUFREQ_PRECHANGE, freqs); 284 adjust_jiffies(CPUFREQ_PRECHANGE, freqs); 285 break; 286 287 case CPUFREQ_POSTCHANGE: 288 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 289 srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 290 CPUFREQ_POSTCHANGE, freqs); 291 if (likely(policy) && likely(policy->cpu == freqs->cpu)) 292 policy->cur = freqs->new; 293 break; 294 } 295} 296EXPORT_SYMBOL_GPL(cpufreq_notify_transition); 297 298 299 300/********************************************************************* 301 * SYSFS INTERFACE * 302 *********************************************************************/ 303 304static struct cpufreq_governor *__find_governor(const char *str_governor) 305{ 306 struct cpufreq_governor *t; 307 308 list_for_each_entry(t, &cpufreq_governor_list, governor_list) 309 if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) 310 return t; 311 312 return NULL; 313} 314 315/** 316 * cpufreq_parse_governor - parse a governor string 317 */ 318static int cpufreq_parse_governor (char *str_governor, unsigned int *policy, 319 struct 
cpufreq_governor **governor) 320{ 321 int err = -EINVAL; 322 323 if (!cpufreq_driver) 324 goto out; 325 326 if (cpufreq_driver->setpolicy) { 327 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 328 *policy = CPUFREQ_POLICY_PERFORMANCE; 329 err = 0; 330 } else if (!strnicmp(str_governor, "powersave", 331 CPUFREQ_NAME_LEN)) { 332 *policy = CPUFREQ_POLICY_POWERSAVE; 333 err = 0; 334 } 335 } else if (cpufreq_driver->target) { 336 struct cpufreq_governor *t; 337 338 mutex_lock(&cpufreq_governor_mutex); 339 340 t = __find_governor(str_governor); 341 342 if (t == NULL) { 343 char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", 344 str_governor); 345 346 if (name) { 347 int ret; 348 349 mutex_unlock(&cpufreq_governor_mutex); 350 ret = request_module(name); 351 mutex_lock(&cpufreq_governor_mutex); 352 353 if (ret == 0) 354 t = __find_governor(str_governor); 355 } 356 357 kfree(name); 358 } 359 360 if (t != NULL) { 361 *governor = t; 362 err = 0; 363 } 364 365 mutex_unlock(&cpufreq_governor_mutex); 366 } 367 out: 368 return err; 369} 370 371 372/* drivers/base/cpu.c */ 373extern struct sysdev_class cpu_sysdev_class; 374 375 376/** 377 * cpufreq_per_cpu_attr_read() / show_##file_name() - 378 * print out cpufreq information 379 * 380 * Write out information from cpufreq_driver->policy[cpu]; object must be 381 * "unsigned int". 
382 */ 383 384#define show_one(file_name, object) \ 385static ssize_t show_##file_name \ 386(struct cpufreq_policy * policy, char *buf) \ 387{ \ 388 return sprintf (buf, "%u\n", policy->object); \ 389} 390 391show_one(cpuinfo_min_freq, cpuinfo.min_freq); 392show_one(cpuinfo_max_freq, cpuinfo.max_freq); 393show_one(scaling_min_freq, min); 394show_one(scaling_max_freq, max); 395show_one(scaling_cur_freq, cur); 396 397static int __cpufreq_set_policy(struct cpufreq_policy *data, 398 struct cpufreq_policy *policy); 399 400/** 401 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access 402 */ 403#define store_one(file_name, object) \ 404static ssize_t store_##file_name \ 405(struct cpufreq_policy * policy, const char *buf, size_t count) \ 406{ \ 407 unsigned int ret = -EINVAL; \ 408 struct cpufreq_policy new_policy; \ 409 \ 410 ret = cpufreq_get_policy(&new_policy, policy->cpu); \ 411 if (ret) \ 412 return -EINVAL; \ 413 \ 414 ret = sscanf (buf, "%u", &new_policy.object); \ 415 if (ret != 1) \ 416 return -EINVAL; \ 417 \ 418 lock_cpu_hotplug(); \ 419 mutex_lock(&policy->lock); \ 420 ret = __cpufreq_set_policy(policy, &new_policy); \ 421 policy->user_policy.object = policy->object; \ 422 mutex_unlock(&policy->lock); \ 423 unlock_cpu_hotplug(); \ 424 \ 425 return ret ? 
ret : count; \ 426} 427 428store_one(scaling_min_freq,min); 429store_one(scaling_max_freq,max); 430 431/** 432 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware 433 */ 434static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, 435 char *buf) 436{ 437 unsigned int cur_freq = cpufreq_get(policy->cpu); 438 if (!cur_freq) 439 return sprintf(buf, "<unknown>"); 440 return sprintf(buf, "%u\n", cur_freq); 441} 442 443 444/** 445 * show_scaling_governor - show the current policy for the specified CPU 446 */ 447static ssize_t show_scaling_governor (struct cpufreq_policy * policy, 448 char *buf) 449{ 450 if(policy->policy == CPUFREQ_POLICY_POWERSAVE) 451 return sprintf(buf, "powersave\n"); 452 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) 453 return sprintf(buf, "performance\n"); 454 else if (policy->governor) 455 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name); 456 return -EINVAL; 457} 458 459 460/** 461 * store_scaling_governor - store policy for the specified CPU 462 */ 463static ssize_t store_scaling_governor (struct cpufreq_policy * policy, 464 const char *buf, size_t count) 465{ 466 unsigned int ret = -EINVAL; 467 char str_governor[16]; 468 struct cpufreq_policy new_policy; 469 470 ret = cpufreq_get_policy(&new_policy, policy->cpu); 471 if (ret) 472 return ret; 473 474 ret = sscanf (buf, "%15s", str_governor); 475 if (ret != 1) 476 return -EINVAL; 477 478 if (cpufreq_parse_governor(str_governor, &new_policy.policy, 479 &new_policy.governor)) 480 return -EINVAL; 481 482 lock_cpu_hotplug(); 483 484 /* Do not use cpufreq_set_policy here or the user_policy.max 485 will be wrongly overridden */ 486 mutex_lock(&policy->lock); 487 ret = __cpufreq_set_policy(policy, &new_policy); 488 489 policy->user_policy.policy = policy->policy; 490 policy->user_policy.governor = policy->governor; 491 mutex_unlock(&policy->lock); 492 493 unlock_cpu_hotplug(); 494 495 if (ret) 496 return ret; 497 else 498 return count; 499} 
500 501/** 502 * show_scaling_driver - show the cpufreq driver currently loaded 503 */ 504static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf) 505{ 506 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name); 507} 508 509/** 510 * show_scaling_available_governors - show the available CPUfreq governors 511 */ 512static ssize_t show_scaling_available_governors (struct cpufreq_policy *policy, 513 char *buf) 514{ 515 ssize_t i = 0; 516 struct cpufreq_governor *t; 517 518 if (!cpufreq_driver->target) { 519 i += sprintf(buf, "performance powersave"); 520 goto out; 521 } 522 523 list_for_each_entry(t, &cpufreq_governor_list, governor_list) { 524 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2))) 525 goto out; 526 i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name); 527 } 528out: 529 i += sprintf(&buf[i], "\n"); 530 return i; 531} 532/** 533 * show_affected_cpus - show the CPUs affected by each transition 534 */ 535static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf) 536{ 537 ssize_t i = 0; 538 unsigned int cpu; 539 540 for_each_cpu_mask(cpu, policy->cpus) { 541 if (i) 542 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); 543 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); 544 if (i >= (PAGE_SIZE - 5)) 545 break; 546 } 547 i += sprintf(&buf[i], "\n"); 548 return i; 549} 550 551 552#define define_one_ro(_name) \ 553static struct freq_attr _name = \ 554__ATTR(_name, 0444, show_##_name, NULL) 555 556#define define_one_ro0400(_name) \ 557static struct freq_attr _name = \ 558__ATTR(_name, 0400, show_##_name, NULL) 559 560#define define_one_rw(_name) \ 561static struct freq_attr _name = \ 562__ATTR(_name, 0644, show_##_name, store_##_name) 563 564define_one_ro0400(cpuinfo_cur_freq); 565define_one_ro(cpuinfo_min_freq); 566define_one_ro(cpuinfo_max_freq); 567define_one_ro(scaling_available_governors); 568define_one_ro(scaling_driver); 569define_one_ro(scaling_cur_freq); 
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);

/* Attributes every policy kobject exposes via sysfs. */
static struct attribute * default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        NULL
};

#define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
#define to_attr(a) container_of(a,struct freq_attr,attr)

/* sysfs read dispatcher: pin the policy, call the attribute's show(). */
static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
{
        struct cpufreq_policy * policy = to_policy(kobj);
        struct freq_attr * fattr = to_attr(attr);
        ssize_t ret;
        /* re-acquire through cpufreq_cpu_get() to hold module + kobj refs */
        policy = cpufreq_cpu_get(policy->cpu);
        if (!policy)
                return -EINVAL;
        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        cpufreq_cpu_put(policy);
        return ret;
}

/* sysfs write dispatcher: pin the policy, call the attribute's store(). */
static ssize_t store(struct kobject * kobj, struct attribute * attr,
                     const char * buf, size_t count)
{
        struct cpufreq_policy * policy = to_policy(kobj);
        struct freq_attr * fattr = to_attr(attr);
        ssize_t ret;
        policy = cpufreq_cpu_get(policy->cpu);
        if (!policy)
                return -EINVAL;
        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        cpufreq_cpu_put(policy);
        return ret;
}

/* kobject release: wake up whoever waits in wait_for_completion() for
 * the last reference to drop (see cpufreq_remove_dev()). */
static void cpufreq_sysfs_release(struct kobject * kobj)
{
        struct cpufreq_policy * policy = to_policy(kobj);
        dprintk("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};


/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 */
static int cpufreq_add_dev (struct sys_device * sys_dev)
{
        unsigned int cpu = sys_dev->id;
        int ret = 0;
        struct cpufreq_policy new_policy;
        struct cpufreq_policy *policy;
        struct freq_attr **drv_attr;
        struct sys_device *cpu_sys_dev;
        unsigned long flags;
        unsigned int j;
#ifdef CONFIG_SMP
        struct cpufreq_policy *managed_policy;
#endif

        if (cpu_is_offline(cpu))
                return 0;

        cpufreq_debug_disable_ratelimit();
        dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
        /* check whether a different CPU already registered this
         * CPU because it is in the same boat. */
        policy = cpufreq_cpu_get(cpu);
        if (unlikely(policy)) {
                cpufreq_cpu_put(policy);
                cpufreq_debug_enable_ratelimit();
                return 0;
        }
#endif

        if (!try_module_get(cpufreq_driver->owner)) {
                ret = -EINVAL;
                goto module_out;
        }

        policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
        if (!policy) {
                ret = -ENOMEM;
                goto nomem_out;
        }

        policy->cpu = cpu;
        policy->cpus = cpumask_of_cpu(cpu);

        /* hold the policy lock across the whole setup sequence so sysfs
         * readers cannot observe a half-initialized policy */
        mutex_init(&policy->lock);
        mutex_lock(&policy->lock);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                dprintk("initialization failed\n");
                mutex_unlock(&policy->lock);
                goto err_out;
        }

#ifdef CONFIG_SMP
        for_each_cpu_mask(j, policy->cpus) {
                if (cpu == j)
                        continue;

                /* check for existing affected CPUs. They may not be aware
                 * of it due to CPU Hotplug.
                 */
                managed_policy = cpufreq_cpu_get(j);
                if (unlikely(managed_policy)) {
                        /* reuse the sibling's policy: just record it and
                         * add a symlink instead of a full kobject */
                        spin_lock_irqsave(&cpufreq_driver_lock, flags);
                        managed_policy->cpus = policy->cpus;
                        cpufreq_cpu_data[cpu] = managed_policy;
                        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

                        dprintk("CPU already managed, adding link\n");
                        sysfs_create_link(&sys_dev->kobj,
                                          &managed_policy->kobj, "cpufreq");

                        cpufreq_debug_enable_ratelimit();
                        mutex_unlock(&policy->lock);
                        ret = 0;
                        goto err_out_driver_exit; /* call driver->exit() */
                }
        }
#endif
        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));

        /* prepare interface data */
        policy->kobj.parent = &sys_dev->kobj;
        policy->kobj.ktype = &ktype_cpufreq;
        strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);

        ret = kobject_register(&policy->kobj);
        if (ret) {
                mutex_unlock(&policy->lock);
                goto err_out_driver_exit;
        }
        /* set up files for this cpu device */
        /* NOTE(review): sysfs_create_file/_link return values are
         * ignored throughout this function -- TODO confirm acceptable */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                drv_attr++;
        }
        if (cpufreq_driver->get)
                sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
        if (cpufreq_driver->target)
                sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);

        spin_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu_mask(j, policy->cpus)
                cpufreq_cpu_data[j] = policy;
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

        /* symlink affected CPUs */
        for_each_cpu_mask(j, policy->cpus) {
                if (j == cpu)
                        continue;
                if (!cpu_online(j))
                        continue;

                dprintk("CPU %u already managed, adding link\n", j);
                /* take an extra reference on behalf of the linked CPU */
                cpufreq_cpu_get(cpu);
                cpu_sys_dev = get_cpu_sysdev(j);
                sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
                                  "cpufreq");
        }

        policy->governor = NULL; /* to assure that the starting sequence is
                                  * run in cpufreq_set_policy */
        mutex_unlock(&policy->lock);

        /* set default policy */
        ret = cpufreq_set_policy(&new_policy);
        if (ret) {
                dprintk("setting policy failed\n");
                goto err_out_unregister;
        }

        module_put(cpufreq_driver->owner);
        dprintk("initialization complete\n");
        cpufreq_debug_enable_ratelimit();

        return 0;


err_out_unregister:
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu_mask(j, policy->cpus)
                cpufreq_cpu_data[j] = NULL;
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

        kobject_unregister(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);

err_out_driver_exit:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);

err_out:
        kfree(policy);

nomem_out:
        module_put(cpufreq_driver->owner);
module_out:
        cpufreq_debug_enable_ratelimit();
        return ret;
}


/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev (struct sys_device * sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long flags;
        struct cpufreq_policy *data;
#ifdef CONFIG_SMP
        struct sys_device *cpu_sys_dev;
        unsigned int j;
#endif

        cpufreq_debug_disable_ratelimit();
        dprintk("unregistering CPU %u\n", cpu);

        spin_lock_irqsave(&cpufreq_driver_lock, flags);
        data = cpufreq_cpu_data[cpu];

        if (!data) {
                spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
                cpufreq_debug_enable_ratelimit();
                return -EINVAL;
        }
        cpufreq_cpu_data[cpu] = NULL;


#ifdef CONFIG_SMP
        /* if this isn't the CPU which is the parent of the kobj, we
         * only need to unlink, put and exit
         */
        if (unlikely(cpu != data->cpu)) {
                dprintk("removing link\n");
                cpu_clear(cpu, data->cpus);
                spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
                sysfs_remove_link(&sys_dev->kobj, "cpufreq");
                /* drop the reference taken for this CPU's symlink */
                cpufreq_cpu_put(data);
                cpufreq_debug_enable_ratelimit();
                return 0;
        }
#endif


        /* pin the kobject while we tear everything down */
        if (!kobject_get(&data->kobj)) {
                spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
                cpufreq_debug_enable_ratelimit();
                return -EFAULT;
        }

#ifdef CONFIG_SMP
        /* if we have other CPUs still registered, we need to unlink them,
         * or else wait_for_completion below will lock up. Clean the
         * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
         * links afterwards.
         */
        if (unlikely(cpus_weight(data->cpus) > 1)) {
                for_each_cpu_mask(j, data->cpus) {
                        if (j == cpu)
                                continue;
                        cpufreq_cpu_data[j] = NULL;
                }
        }

        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

        /* second pass, outside the spinlock: sysfs operations may sleep */
        if (unlikely(cpus_weight(data->cpus) > 1)) {
                for_each_cpu_mask(j, data->cpus) {
                        if (j == cpu)
                                continue;
                        dprintk("removing link for cpu %u\n", j);
                        cpu_sys_dev = get_cpu_sysdev(j);
                        sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
                        cpufreq_cpu_put(data);
                }
        }
#else
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

        mutex_lock(&data->lock);
        if (cpufreq_driver->target)
                __cpufreq_governor(data, CPUFREQ_GOV_STOP);
        mutex_unlock(&data->lock);

        kobject_unregister(&data->kobj);

        /* release the reference taken by kobject_get() above */
        kobject_put(&data->kobj);

        /* we need to make sure that the underlying kobj is actually
         * not referenced anymore by anybody before we proceed with
         * unloading.
         */
        dprintk("waiting for dropping of refcount\n");
        wait_for_completion(&data->kobj_unregister);
        dprintk("wait complete\n");

        if (cpufreq_driver->exit)
                cpufreq_driver->exit(data);

        kfree(data);

        cpufreq_debug_enable_ratelimit();
        return 0;
}


/* Deferred-work handler: re-evaluate the policy for the owning CPU. */
static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;
        dprintk("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later. So either call
 * to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
                                unsigned int new_freq)
{
        struct cpufreq_freqs freqs;

        dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
                "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

        /* issue a synthetic PRE+POST transition so notifiers and
         * policy->cur catch up with the hardware's real frequency */
        freqs.cpu = cpu;
        freqs.old = old_freq;
        freqs.new = new_freq;
        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                mutex_lock(&policy->lock);
                ret_freq = policy->cur;
                mutex_unlock(&policy->lock);
                cpufreq_cpu_put(policy);
        }

        return (ret_freq);
}
EXPORT_SYMBOL(cpufreq_quick_get);


/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (!policy)
                return 0;

        if (!cpufreq_driver->get)
                goto out;

        mutex_lock(&policy->lock);

        ret_freq = cpufreq_driver->get(cpu);

        if (ret_freq && policy->cur &&
                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /* verify no discrepancy between actual and
                   saved value exists */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
                        /* defer the policy re-evaluation to process context */
                        schedule_work(&policy->update);
                }
        }

        mutex_unlock(&policy->lock);

out:
        cpufreq_cpu_put(policy);

        return (ret_freq);
}
EXPORT_SYMBOL(cpufreq_get);


/**
 * cpufreq_suspend - let the low level driver prepare for suspend
 */

static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
{
        int cpu = sysdev->id;
        int ret = 0;
        unsigned int cur_freq = 0;
        struct cpufreq_policy *cpu_policy;

        dprintk("suspending cpu %u\n", cpu);

        if (!cpu_online(cpu))
                return 0;

        /* we may be lax here as interrupts are off. Nonetheless
         * we need to grab the correct cpu policy, as to check
         * whether we really run on this CPU.
         */

        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy)
                return -EINVAL;

        /* only handle each CPU group once */
        if (unlikely(cpu_policy->cpu != cpu)) {
                cpufreq_cpu_put(cpu_policy);
                return 0;
        }

        if (cpufreq_driver->suspend) {
                ret = cpufreq_driver->suspend(cpu_policy, pmsg);
                if (ret) {
                        printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
                                        "step on CPU %u\n", cpu_policy->cpu);
                        cpufreq_cpu_put(cpu_policy);
                        return ret;
                }
        }


        if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
                goto out;

        if (cpufreq_driver->get)
                cur_freq = cpufreq_driver->get(cpu_policy->cpu);

        if (!cur_freq || !cpu_policy->cur) {
                printk(KERN_ERR "cpufreq: suspend failed to assert current "
                       "frequency is what timing core thinks it is.\n");
                goto out;
        }

        if (unlikely(cur_freq != cpu_policy->cur)) {
                struct cpufreq_freqs freqs;

                if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
                        dprintk("Warning: CPU frequency is %u, "
                                "cpufreq assumed %u kHz.\n",
                                cur_freq, cpu_policy->cur);

                freqs.cpu = cpu;
                freqs.old = cpu_policy->cur;
                freqs.new = cur_freq;

                /* notify directly -- the full transition helper is not
                 * used on this path */
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                    CPUFREQ_SUSPENDCHANGE, &freqs);
                adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);

                cpu_policy->cur = cur_freq;
        }

out:
        cpufreq_cpu_put(cpu_policy);
        return 0;
}

/**
 * cpufreq_resume - restore proper CPU frequency handling after resume
 *
 *      1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *      2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
 *      3.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *          restored.
 */
static int cpufreq_resume(struct sys_device * sysdev)
{
        int cpu = sysdev->id;
        int ret = 0;
        struct cpufreq_policy *cpu_policy;

        dprintk("resuming cpu %u\n", cpu);

        if (!cpu_online(cpu))
                return 0;

        /* we may be lax here as interrupts are off. Nonetheless
         * we need to grab the correct cpu policy, as to check
         * whether we really run on this CPU.
         */

        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy)
                return -EINVAL;

        /* only handle each CPU group once */
        if (unlikely(cpu_policy->cpu != cpu)) {
                cpufreq_cpu_put(cpu_policy);
                return 0;
        }

        if (cpufreq_driver->resume) {
                ret = cpufreq_driver->resume(cpu_policy);
                if (ret) {
                        printk(KERN_ERR "cpufreq: resume failed in ->resume "
                                        "step on CPU %u\n", cpu_policy->cpu);
                        cpufreq_cpu_put(cpu_policy);
                        return ret;
                }
        }

        if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                unsigned int cur_freq = 0;

                if (cpufreq_driver->get)
                        cur_freq = cpufreq_driver->get(cpu_policy->cpu);

                if (!cur_freq || !cpu_policy->cur) {
                        printk(KERN_ERR "cpufreq: resume failed to assert "
                                        "current frequency is what timing core "
                                        "thinks it is.\n");
                        goto out;
                }

                if (unlikely(cur_freq != cpu_policy->cur)) {
                        struct cpufreq_freqs freqs;

                        /* NOTE(review): the two adjacent literals below
                         * concatenate without a space ("frequencyis") --
                         * preserved as-is; candidate for a message fix */
                        if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
                                dprintk("Warning: CPU frequency"
                                        "is %u, cpufreq assumed %u kHz.\n",
                                        cur_freq, cpu_policy->cur);

                        freqs.cpu = cpu;
                        freqs.old = cpu_policy->cur;
                        freqs.new = cur_freq;

                        srcu_notifier_call_chain(
                                        &cpufreq_transition_notifier_list,
                                        CPUFREQ_RESUMECHANGE, &freqs);
                        adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);

                        cpu_policy->cur = cur_freq;
                }
        }

out:
        schedule_work(&cpu_policy->update);
        cpufreq_cpu_put(cpu_policy);
        return ret;
}

static struct sysdev_driver cpufreq_sysdev_driver = {
        .add            = cpufreq_add_dev,
        .remove         = cpufreq_remove_dev,
        .suspend        = cpufreq_suspend,
        .resume         = cpufreq_resume,
};


/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
1212 */ 1213int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 1214{ 1215 int ret; 1216 1217 switch (list) { 1218 case CPUFREQ_TRANSITION_NOTIFIER: 1219 ret = srcu_notifier_chain_register( 1220 &cpufreq_transition_notifier_list, nb); 1221 break; 1222 case CPUFREQ_POLICY_NOTIFIER: 1223 ret = blocking_notifier_chain_register( 1224 &cpufreq_policy_notifier_list, nb); 1225 break; 1226 default: 1227 ret = -EINVAL; 1228 } 1229 1230 return ret; 1231} 1232EXPORT_SYMBOL(cpufreq_register_notifier); 1233 1234 1235/** 1236 * cpufreq_unregister_notifier - unregister a driver with cpufreq 1237 * @nb: notifier block to be unregistered 1238 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 1239 * 1240 * Remove a driver from the CPU frequency notifier list. 1241 * 1242 * This function may sleep, and has the same return conditions as 1243 * blocking_notifier_chain_unregister. 1244 */ 1245int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) 1246{ 1247 int ret; 1248 1249 switch (list) { 1250 case CPUFREQ_TRANSITION_NOTIFIER: 1251 ret = srcu_notifier_chain_unregister( 1252 &cpufreq_transition_notifier_list, nb); 1253 break; 1254 case CPUFREQ_POLICY_NOTIFIER: 1255 ret = blocking_notifier_chain_unregister( 1256 &cpufreq_policy_notifier_list, nb); 1257 break; 1258 default: 1259 ret = -EINVAL; 1260 } 1261 1262 return ret; 1263} 1264EXPORT_SYMBOL(cpufreq_unregister_notifier); 1265 1266 1267/********************************************************************* 1268 * GOVERNORS * 1269 *********************************************************************/ 1270 1271 1272/* Must be called with lock_cpu_hotplug held */ 1273int __cpufreq_driver_target(struct cpufreq_policy *policy, 1274 unsigned int target_freq, 1275 unsigned int relation) 1276{ 1277 int retval = -EINVAL; 1278 1279 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, 1280 target_freq, relation); 1281 if (cpu_online(policy->cpu) && cpufreq_driver->target) 
1282 retval = cpufreq_driver->target(policy, target_freq, relation); 1283 1284 return retval; 1285} 1286EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 1287 1288int cpufreq_driver_target(struct cpufreq_policy *policy, 1289 unsigned int target_freq, 1290 unsigned int relation) 1291{ 1292 int ret; 1293 1294 policy = cpufreq_cpu_get(policy->cpu); 1295 if (!policy) 1296 return -EINVAL; 1297 1298 lock_cpu_hotplug(); 1299 mutex_lock(&policy->lock); 1300 1301 ret = __cpufreq_driver_target(policy, target_freq, relation); 1302 1303 mutex_unlock(&policy->lock); 1304 unlock_cpu_hotplug(); 1305 1306 cpufreq_cpu_put(policy); 1307 return ret; 1308} 1309EXPORT_SYMBOL_GPL(cpufreq_driver_target); 1310 1311int cpufreq_driver_getavg(struct cpufreq_policy *policy) 1312{ 1313 int ret = 0; 1314 1315 policy = cpufreq_cpu_get(policy->cpu); 1316 if (!policy) 1317 return -EINVAL; 1318 1319 mutex_lock(&policy->lock); 1320 1321 if (cpu_online(policy->cpu) && cpufreq_driver->getavg) 1322 ret = cpufreq_driver->getavg(policy->cpu); 1323 1324 mutex_unlock(&policy->lock); 1325 1326 cpufreq_cpu_put(policy); 1327 return ret; 1328} 1329EXPORT_SYMBOL_GPL(cpufreq_driver_getavg); 1330 1331/* 1332 * Locking: Must be called with the lock_cpu_hotplug() lock held 1333 * when "event" is CPUFREQ_GOV_LIMITS 1334 */ 1335 1336static int __cpufreq_governor(struct cpufreq_policy *policy, 1337 unsigned int event) 1338{ 1339 int ret; 1340 1341 if (!try_module_get(policy->governor->owner)) 1342 return -EINVAL; 1343 1344 dprintk("__cpufreq_governor for CPU %u, event %u\n", 1345 policy->cpu, event); 1346 ret = policy->governor->governor(policy, event); 1347 1348 /* we keep one module reference alive for 1349 each CPU governed by this CPU */ 1350 if ((event != CPUFREQ_GOV_START) || ret) 1351 module_put(policy->governor->owner); 1352 if ((event == CPUFREQ_GOV_STOP) && !ret) 1353 module_put(policy->governor->owner); 1354 1355 return ret; 1356} 1357 1358 1359int cpufreq_register_governor(struct cpufreq_governor *governor) 
1360{ 1361 int err; 1362 1363 if (!governor) 1364 return -EINVAL; 1365 1366 mutex_lock(&cpufreq_governor_mutex); 1367 1368 err = -EBUSY; 1369 if (__find_governor(governor->name) == NULL) { 1370 err = 0; 1371 list_add(&governor->governor_list, &cpufreq_governor_list); 1372 } 1373 1374 mutex_unlock(&cpufreq_governor_mutex); 1375 return err; 1376} 1377EXPORT_SYMBOL_GPL(cpufreq_register_governor); 1378 1379 1380void cpufreq_unregister_governor(struct cpufreq_governor *governor) 1381{ 1382 if (!governor) 1383 return; 1384 1385 mutex_lock(&cpufreq_governor_mutex); 1386 list_del(&governor->governor_list); 1387 mutex_unlock(&cpufreq_governor_mutex); 1388 return; 1389} 1390EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); 1391 1392 1393 1394/********************************************************************* 1395 * POLICY INTERFACE * 1396 *********************************************************************/ 1397 1398/** 1399 * cpufreq_get_policy - get the current cpufreq_policy 1400 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written 1401 * 1402 * Reads the current cpufreq policy. 1403 */ 1404int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) 1405{ 1406 struct cpufreq_policy *cpu_policy; 1407 if (!policy) 1408 return -EINVAL; 1409 1410 cpu_policy = cpufreq_cpu_get(cpu); 1411 if (!cpu_policy) 1412 return -EINVAL; 1413 1414 mutex_lock(&cpu_policy->lock); 1415 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); 1416 mutex_unlock(&cpu_policy->lock); 1417 1418 cpufreq_cpu_put(cpu_policy); 1419 return 0; 1420} 1421EXPORT_SYMBOL(cpufreq_get_policy); 1422 1423 1424/* 1425 * data : current policy. 1426 * policy : policy to be set. 
1427 * Locking: Must be called with the lock_cpu_hotplug() lock held 1428 */ 1429static int __cpufreq_set_policy(struct cpufreq_policy *data, 1430 struct cpufreq_policy *policy) 1431{ 1432 int ret = 0; 1433 1434 cpufreq_debug_disable_ratelimit(); 1435 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, 1436 policy->min, policy->max); 1437 1438 memcpy(&policy->cpuinfo, &data->cpuinfo, 1439 sizeof(struct cpufreq_cpuinfo)); 1440 1441 if (policy->min > data->min && policy->min > policy->max) { 1442 ret = -EINVAL; 1443 goto error_out; 1444 } 1445 1446 /* verify the cpu speed can be set within this limit */ 1447 ret = cpufreq_driver->verify(policy); 1448 if (ret) 1449 goto error_out; 1450 1451 /* adjust if necessary - all reasons */ 1452 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1453 CPUFREQ_ADJUST, policy); 1454 1455 /* adjust if necessary - hardware incompatibility*/ 1456 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1457 CPUFREQ_INCOMPATIBLE, policy); 1458 1459 /* verify the cpu speed can be set within this limit, 1460 which might be different to the first one */ 1461 ret = cpufreq_driver->verify(policy); 1462 if (ret) 1463 goto error_out; 1464 1465 /* notification of the new policy */ 1466 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1467 CPUFREQ_NOTIFY, policy); 1468 1469 data->min = policy->min; 1470 data->max = policy->max; 1471 1472 dprintk("new min and max freqs are %u - %u kHz\n", 1473 data->min, data->max); 1474 1475 if (cpufreq_driver->setpolicy) { 1476 data->policy = policy->policy; 1477 dprintk("setting range\n"); 1478 ret = cpufreq_driver->setpolicy(policy); 1479 } else { 1480 if (policy->governor != data->governor) { 1481 /* save old, working values */ 1482 struct cpufreq_governor *old_gov = data->governor; 1483 1484 dprintk("governor switch\n"); 1485 1486 /* end old governor */ 1487 if (data->governor) 1488 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 1489 1490 /* start new governor */ 1491 
data->governor = policy->governor; 1492 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { 1493 /* new governor failed, so re-start old one */ 1494 dprintk("starting governor %s failed\n", 1495 data->governor->name); 1496 if (old_gov) { 1497 data->governor = old_gov; 1498 __cpufreq_governor(data, 1499 CPUFREQ_GOV_START); 1500 } 1501 ret = -EINVAL; 1502 goto error_out; 1503 } 1504 /* might be a policy change, too, so fall through */ 1505 } 1506 dprintk("governor: change or update limits\n"); 1507 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); 1508 } 1509 1510error_out: 1511 cpufreq_debug_enable_ratelimit(); 1512 return ret; 1513} 1514 1515/** 1516 * cpufreq_set_policy - set a new CPUFreq policy 1517 * @policy: policy to be set. 1518 * 1519 * Sets a new CPU frequency and voltage scaling policy. 1520 */ 1521int cpufreq_set_policy(struct cpufreq_policy *policy) 1522{ 1523 int ret = 0; 1524 struct cpufreq_policy *data; 1525 1526 if (!policy) 1527 return -EINVAL; 1528 1529 data = cpufreq_cpu_get(policy->cpu); 1530 if (!data) 1531 return -EINVAL; 1532 1533 lock_cpu_hotplug(); 1534 1535 /* lock this CPU */ 1536 mutex_lock(&data->lock); 1537 1538 ret = __cpufreq_set_policy(data, policy); 1539 data->user_policy.min = data->min; 1540 data->user_policy.max = data->max; 1541 data->user_policy.policy = data->policy; 1542 data->user_policy.governor = data->governor; 1543 1544 mutex_unlock(&data->lock); 1545 1546 unlock_cpu_hotplug(); 1547 cpufreq_cpu_put(data); 1548 1549 return ret; 1550} 1551EXPORT_SYMBOL(cpufreq_set_policy); 1552 1553 1554/** 1555 * cpufreq_update_policy - re-evaluate an existing cpufreq policy 1556 * @cpu: CPU which shall be re-evaluated 1557 * 1558 * Usefull for policy notifiers which have different necessities 1559 * at different times. 
1560 */ 1561int cpufreq_update_policy(unsigned int cpu) 1562{ 1563 struct cpufreq_policy *data = cpufreq_cpu_get(cpu); 1564 struct cpufreq_policy policy; 1565 int ret = 0; 1566 1567 if (!data) 1568 return -ENODEV; 1569 1570 lock_cpu_hotplug(); 1571 mutex_lock(&data->lock); 1572 1573 dprintk("updating policy for CPU %u\n", cpu); 1574 memcpy(&policy, data, sizeof(struct cpufreq_policy)); 1575 policy.min = data->user_policy.min; 1576 policy.max = data->user_policy.max; 1577 policy.policy = data->user_policy.policy; 1578 policy.governor = data->user_policy.governor; 1579 1580 /* BIOS might change freq behind our back 1581 -> ask driver for current freq and notify governors about a change */ 1582 if (cpufreq_driver->get) { 1583 policy.cur = cpufreq_driver->get(cpu); 1584 if (!data->cur) { 1585 dprintk("Driver did not initialize current freq"); 1586 data->cur = policy.cur; 1587 } else { 1588 if (data->cur != policy.cur) 1589 cpufreq_out_of_sync(cpu, data->cur, 1590 policy.cur); 1591 } 1592 } 1593 1594 ret = __cpufreq_set_policy(data, &policy); 1595 1596 mutex_unlock(&data->lock); 1597 unlock_cpu_hotplug(); 1598 cpufreq_cpu_put(data); 1599 return ret; 1600} 1601EXPORT_SYMBOL(cpufreq_update_policy); 1602 1603static int cpufreq_cpu_callback(struct notifier_block *nfb, 1604 unsigned long action, void *hcpu) 1605{ 1606 unsigned int cpu = (unsigned long)hcpu; 1607 struct cpufreq_policy *policy; 1608 struct sys_device *sys_dev; 1609 1610 sys_dev = get_cpu_sysdev(cpu); 1611 1612 if (sys_dev) { 1613 switch (action) { 1614 case CPU_ONLINE: 1615 cpufreq_add_dev(sys_dev); 1616 break; 1617 case CPU_DOWN_PREPARE: 1618 /* 1619 * We attempt to put this cpu in lowest frequency 1620 * possible before going down. This will permit 1621 * hardware-managed P-State to switch other related 1622 * threads to min or higher speeds if possible. 
1623 */ 1624 policy = cpufreq_cpu_data[cpu]; 1625 if (policy) { 1626 cpufreq_driver_target(policy, policy->min, 1627 CPUFREQ_RELATION_H); 1628 } 1629 break; 1630 case CPU_DEAD: 1631 cpufreq_remove_dev(sys_dev); 1632 break; 1633 } 1634 } 1635 return NOTIFY_OK; 1636} 1637 1638static struct notifier_block __cpuinitdata cpufreq_cpu_notifier = 1639{ 1640 .notifier_call = cpufreq_cpu_callback, 1641}; 1642 1643/********************************************************************* 1644 * REGISTER / UNREGISTER CPUFREQ DRIVER * 1645 *********************************************************************/ 1646 1647/** 1648 * cpufreq_register_driver - register a CPU Frequency driver 1649 * @driver_data: A struct cpufreq_driver containing the values# 1650 * submitted by the CPU Frequency driver. 1651 * 1652 * Registers a CPU Frequency driver to this core code. This code 1653 * returns zero on success, -EBUSY when another driver got here first 1654 * (and isn't unregistered in the meantime). 1655 * 1656 */ 1657int cpufreq_register_driver(struct cpufreq_driver *driver_data) 1658{ 1659 unsigned long flags; 1660 int ret; 1661 1662 if (!driver_data || !driver_data->verify || !driver_data->init || 1663 ((!driver_data->setpolicy) && (!driver_data->target))) 1664 return -EINVAL; 1665 1666 dprintk("trying to register driver %s\n", driver_data->name); 1667 1668 if (driver_data->setpolicy) 1669 driver_data->flags |= CPUFREQ_CONST_LOOPS; 1670 1671 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1672 if (cpufreq_driver) { 1673 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1674 return -EBUSY; 1675 } 1676 cpufreq_driver = driver_data; 1677 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1678 1679 ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver); 1680 1681 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { 1682 int i; 1683 ret = -ENODEV; 1684 1685 /* check for at least one working CPU */ 1686 for (i=0; i<NR_CPUS; i++) 1687 if (cpufreq_cpu_data[i]) 1688 
ret = 0; 1689 1690 /* if all ->init() calls failed, unregister */ 1691 if (ret) { 1692 dprintk("no CPU initialized for driver %s\n", 1693 driver_data->name); 1694 sysdev_driver_unregister(&cpu_sysdev_class, 1695 &cpufreq_sysdev_driver); 1696 1697 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1698 cpufreq_driver = NULL; 1699 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1700 } 1701 } 1702 1703 if (!ret) { 1704 register_hotcpu_notifier(&cpufreq_cpu_notifier); 1705 dprintk("driver %s up and running\n", driver_data->name); 1706 cpufreq_debug_enable_ratelimit(); 1707 } 1708 1709 return (ret); 1710} 1711EXPORT_SYMBOL_GPL(cpufreq_register_driver); 1712 1713 1714/** 1715 * cpufreq_unregister_driver - unregister the current CPUFreq driver 1716 * 1717 * Unregister the current CPUFreq driver. Only call this if you have 1718 * the right to do so, i.e. if you have succeeded in initialising before! 1719 * Returns zero if successful, and -EINVAL if the cpufreq_driver is 1720 * currently not initialised. 1721 */ 1722int cpufreq_unregister_driver(struct cpufreq_driver *driver) 1723{ 1724 unsigned long flags; 1725 1726 cpufreq_debug_disable_ratelimit(); 1727 1728 if (!cpufreq_driver || (driver != cpufreq_driver)) { 1729 cpufreq_debug_enable_ratelimit(); 1730 return -EINVAL; 1731 } 1732 1733 dprintk("unregistering driver %s\n", driver->name); 1734 1735 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); 1736 unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 1737 1738 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1739 cpufreq_driver = NULL; 1740 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1741 1742 return 0; 1743} 1744EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);