Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] Remove pointless printk from p4-clockmod.
[CPUFREQ] Fix section mismatch for powernow_cpu_init in powernow-k7.c
[CPUFREQ] Fix section mismatch for longhaul_cpu_init.
[CPUFREQ] Fix section mismatch for longrun_cpu_init.
[CPUFREQ] powernow-k8: Fix misleading variable naming
[CPUFREQ] Convert pci_table entries to PCI_VDEVICE (if PCI_ANY_ID is used)
[CPUFREQ] arch/x86/kernel/cpu/cpufreq: use for_each_pci_dev()
[CPUFREQ] fix brace coding style issue.
[CPUFREQ] x86 cpufreq: Make trace_power_frequency cpufreq driver independent
[CPUFREQ] acpi-cpufreq: Fix CPU_ANY CPUFREQ_{PRE,POST}CHANGE notification
[CPUFREQ] ondemand: don't synchronize sample rate unless multiple cpus present
[CPUFREQ] unexport (un)lock_policy_rwsem* functions
[CPUFREQ] ondemand: Refactor frequency increase code
[CPUFREQ] powernow-k8: On load failure, remind the user to enable support in BIOS setup
[CPUFREQ] powernow-k8: Limit Pstate transition latency check
[CPUFREQ] Fix PCC driver error path
[CPUFREQ] fix double freeing in error path of pcc-cpufreq
[CPUFREQ] pcc driver should check for pcch method before calling _OSC
[CPUFREQ] fix memory leak in cpufreq_add_dev
[CPUFREQ] revert "[CPUFREQ] remove rwsem lock from CPUFREQ_GOV_STOP call (second call site)"

Manually fix up non-data merge conflict introduced by new calling
conventions for trace_power_start() in commit 6f4f2723d085 ("x86
cpufreq: Make trace_power_frequency cpufreq driver independent"), which
didn't update the intel_idle native hardware cpuidle driver.
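
The tracepoint change behind the conflict adds a cpu_id argument so each power event records which CPU it fired on (the full tracepoint diff is in include/trace/events/power.h below). A minimal sketch of the resolution as applied to the intel_idle call site, where cpu is a local the driver already holds:

	/* old convention: the event carried no CPU id */
	trace_power_start(POWER_CSTATE, (eax >> 4) + 1);

	/* new convention: the CPU id is passed explicitly; other call
	 * sites in this merge use smp_processor_id() instead of a local */
	trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);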

17 files changed, 88 insertions(+), 105 deletions(-)
-10
Documentation/feature-removal-schedule.txt
@@ -377,16 +377,6 @@
 
 ----------------------------
 
-What:	lock_policy_rwsem_* and unlock_policy_rwsem_* will not be
-	exported interface anymore.
-When:	2.6.33
-Why:	cpu_policy_rwsem has a new cleaner definition making it local to
-	cpufreq core and contained inside cpufreq.c. Other dependent
-	drivers should not use it in order to safely avoid lockdep issues.
-Who:	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
-
-----------------------------
-
 What:	sound-slot/service-* module aliases and related clutters in
 	sound/sound_core.c
 When:	August 2010
+2 -5
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -34,7 +34,6 @@
 #include <linux/compiler.h>
 #include <linux/dmi.h>
 #include <linux/slab.h>
-#include <trace/events/power.h>
 
 #include <linux/acpi.h>
 #include <linux/io.h>
@@ -323,8 +324,6 @@
 		}
 	}
 
-	trace_power_frequency(POWER_PSTATE, data->freq_table[next_state].frequency);
-
 	switch (data->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
@@ -348,7 +351,7 @@
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
-	for_each_cpu(i, cmd.mask) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -364,7 +367,7 @@
 		}
 	}
 
-	for_each_cpu(i, cmd.mask) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
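
Worth noting why the mask changed: cmd.mask is only the set of CPUs that must perform the frequency write, and under CPUFREQ_SHARED_TYPE_ANY coordination it collapses to a single CPU, while the PRE/POSTCHANGE notifications must reach every CPU the policy covers. A rough sketch of how the driver picks cmd.mask (abridged from the same file):

	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;	/* every CPU must do the write */
	else
		cmd.mask = cpumask_of(policy->cpu); /* any one CPU suffices */

Hence the notifications now iterate policy->cpus, independent of who does the write.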
+4 -7
arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
@@ -169,12 +169,9 @@
  * Low Level chipset interface                                   *
  ****************************************************************/
 static struct pci_device_id gx_chipset_tbl[] __initdata = {
-	{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520,
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510,
-	  PCI_ANY_ID, PCI_ANY_ID },
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY), },
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
 	{ 0, },
 };
 
@@ -196,7 +199,7 @@
 	}
 
 	/* detect which companion chip is used */
-	while ((gx_pci = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) {
+	for_each_pci_dev(gx_pci) {
 		if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL)
 			return gx_pci;
 	}
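
Both conversions are mechanical. As defined in include/linux/pci.h of this era, the macros expand to exactly what the open-coded versions said:

	/* match on vendor+device, wildcard subvendor/subdevice, no class */
	#define PCI_VDEVICE(vendor, device)		\
		PCI_VENDOR_ID_##vendor, (device),	\
		PCI_ANY_ID, PCI_ANY_ID, 0, 0

	/* walk every PCI device; pci_get_device() handles the refcounting */
	#define for_each_pci_dev(d) \
		while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)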
+3 -3
arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -426,7 +426,7 @@
 }
 
 
-static int __init longhaul_get_ranges(void)
+static int __cpuinit longhaul_get_ranges(void)
 {
 	unsigned int i, j, k = 0;
 	unsigned int ratio;
@@ -530,7 +530,7 @@
 }
 
 
-static void __init longhaul_setup_voltagescaling(void)
+static void __cpuinit longhaul_setup_voltagescaling(void)
 {
 	union msr_longhaul longhaul;
 	struct mV_pos minvid, maxvid, vid;
@@ -784,7 +784,7 @@
 	return 0;
 }
 
-static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
+static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy)
 {
 	struct cpuinfo_x86 *c = &cpu_data(0);
 	char *cpuname = NULL;
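
The rationale for the three section-mismatch fixes in this batch: a cpufreq driver's ->init callback runs not just at boot but whenever a CPU comes online, by which time .init.text and .init.data have been freed, so the callback and everything it references must be __cpuinit/__cpuinitdata rather than __init/__initdata (the matching data fix is in longhaul.h below). An illustrative sketch of the pattern modpost warns about, with hypothetical names:

	/* lives in .init.text, discarded after boot */
	static int __init boot_only_helper(void)
	{
		return 0;
	}

	/* may run at CPU hotplug time, long after boot */
	static int __cpuinit my_cpu_init(struct cpufreq_policy *policy)
	{
		return boot_only_helper();	/* modpost: section mismatch */
	}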
+13 -13
arch/x86/kernel/cpu/cpufreq/longhaul.h
@@ -56,7 +56,7 @@
 /*
  * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
  */
-static const int __initdata samuel1_mults[16] = {
+static const int __cpuinitdata samuel1_mults[16] = {
 	-1, /* 0000 -> RESERVED */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -75,7 +75,7 @@
 	-1, /* 1111 -> RESERVED */
 };
 
-static const int __initdata samuel1_eblcr[16] = {
+static const int __cpuinitdata samuel1_eblcr[16] = {
 	50, /* 0000 -> RESERVED */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -97,7 +97,7 @@
 /*
  * VIA C3 Samuel2 Stepping 1->15
  */
-static const int __initdata samuel2_eblcr[16] = {
+static const int __cpuinitdata samuel2_eblcr[16] = {
 	50, /* 0000 -> 5.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -119,7 +119,7 @@
 /*
  * VIA C3 Ezra
  */
-static const int __initdata ezra_mults[16] = {
+static const int __cpuinitdata ezra_mults[16] = {
 	100, /* 0000 -> 10.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -138,7 +138,7 @@
 	120, /* 1111 -> 12.0x */
 };
 
-static const int __initdata ezra_eblcr[16] = {
+static const int __cpuinitdata ezra_eblcr[16] = {
 	50, /* 0000 -> 5.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -160,7 +160,7 @@
 /*
  * VIA C3 (Ezra-T) [C5M].
  */
-static const int __initdata ezrat_mults[32] = {
+static const int __cpuinitdata ezrat_mults[32] = {
 	100, /* 0000 -> 10.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -196,7 +196,7 @@
 	-1, /* 1111 -> RESERVED (12.0x) */
 };
 
-static const int __initdata ezrat_eblcr[32] = {
+static const int __cpuinitdata ezrat_eblcr[32] = {
 	50, /* 0000 -> 5.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -235,7 +235,7 @@
 /*
  * VIA C3 Nehemiah */
 
-static const int __initdata nehemiah_mults[32] = {
+static const int __cpuinitdata nehemiah_mults[32] = {
 	100, /* 0000 -> 10.0x */
 	-1, /* 0001 -> 16.0x */
 	40, /* 0010 -> 4.0x */
@@ -270,7 +270,7 @@
 	-1, /* 1111 -> 12.0x */
 };
 
-static const int __initdata nehemiah_eblcr[32] = {
+static const int __cpuinitdata nehemiah_eblcr[32] = {
 	50, /* 0000 -> 5.0x */
 	160, /* 0001 -> 16.0x */
 	40, /* 0010 -> 4.0x */
@@ -315,7 +315,7 @@
 	unsigned short pos;
 };
 
-static const struct mV_pos __initdata vrm85_mV[32] = {
+static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
 	{1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
 	{1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
 	{1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},
@@ -326,14 +326,14 @@
 	{1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
 };
 
-static const unsigned char __initdata mV_vrm85[32] = {
+static const unsigned char __cpuinitdata mV_vrm85[32] = {
 	0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
 	0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
 	0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
 	0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
 };
 
-static const struct mV_pos __initdata mobilevrm_mV[32] = {
+static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
 	{1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
 	{1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
 	{1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
@@ -344,7 +344,7 @@
 	{675, 3}, {650, 2}, {625, 1}, {600, 0}
 };
 
-static const unsigned char __initdata mV_mobilevrm[32] = {
+static const unsigned char __cpuinitdata mV_mobilevrm[32] = {
 	0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
 	0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
 	0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
+3 -3
arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -165,8 +165,8 @@
  * TMTA rules:
  * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
  */
-static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
-						   unsigned int *high_freq)
+static unsigned int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
+						      unsigned int *high_freq)
 {
 	u32 msr_lo, msr_hi;
 	u32 save_lo, save_hi;
@@ -258,7 +258,7 @@
 }
 
 
-static int __init longrun_cpu_init(struct cpufreq_policy *policy)
+static int __cpuinit longrun_cpu_init(struct cpufreq_policy *policy)
 {
 	int result = 0;
 
+1 -6
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -178,13 +178,8 @@
 		}
 	}
 
-	if (c->x86 != 0xF) {
-		if (!cpu_has(c, X86_FEATURE_EST))
-			printk(KERN_WARNING PFX "Unknown CPU. "
-				"Please send an e-mail to "
-				"<cpufreq@vger.kernel.org>\n");
+	if (c->x86 != 0xF)
 		return 0;
-	}
 
 	/* on P-4s, the TSC runs with constant frequency independent whether
 	 * throttling is active or not. */
+4 -4
arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -569,7 +569,7 @@
  * We will then get the same kind of behaviour already tested under
  * the "well-known" other OS.
  */
-static int __init fixup_sgtc(void)
+static int __cpuinit fixup_sgtc(void)
 {
 	unsigned int sgtc;
 	unsigned int m;
@@ -603,7 +603,7 @@
 }
 
 
-static int __init acer_cpufreq_pst(const struct dmi_system_id *d)
+static int __cpuinit acer_cpufreq_pst(const struct dmi_system_id *d)
 {
 	printk(KERN_WARNING PFX
 		"%s laptop with broken PST tables in BIOS detected.\n",
@@ -621,7 +621,7 @@
  * A BIOS update is all that can save them.
  * Mention this, and disable cpufreq.
  */
-static struct dmi_system_id __initdata powernow_dmi_table[] = {
+static struct dmi_system_id __cpuinitdata powernow_dmi_table[] = {
 	{
 		.callback = acer_cpufreq_pst,
 		.ident = "Acer Aspire",
@@ -633,7 +633,7 @@
 	{ }
 };
 
-static int __init powernow_cpu_init(struct cpufreq_policy *policy)
+static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy)
 {
 	union msr_fidvidstatus fidvidstatus;
 	int result;
+4 -2
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -806,5 +806,7 @@
 	 * www.amd.com
 	 */
 	printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n");
+	printk(KERN_ERR PFX "Make sure that your BIOS is up to date"
+		" and Cool'N'Quiet support is enabled in BIOS setup\n");
 	return -ENODEV;
 }
@@ -912,8 +910,8 @@
 {
 	int i;
 	u32 hi = 0, lo = 0;
-	rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
-	data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
+	rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
+	data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
 
 	for (i = 0; i < data->acpi_data.state_count; i++) {
 		u32 index;
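
The rdmsr() change is purely cosmetic: on x86 the macro is rdmsr(msr, low, high), the first output receiving EAX (bits 31:0) and the second EDX (bits 63:32). The old code passed (hi, lo), so the variable named hi actually held the low half that HW_PSTATE_MAX_MASK selects; behavior was correct but the names read like a bug. After the swap the names match the registers:

	u32 lo, hi;
	rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);	/* lo = EAX, hi = EDX */
	/* the current-limit fields live in the low 32 bits */
	data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;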
+4 -4
arch/x86/kernel/process.c
@@ -372,7 +372,7 @@
 void default_idle(void)
 {
 	if (hlt_use_halt()) {
-		trace_power_start(POWER_CSTATE, 1);
+		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
 		 * TS_POLLING-cleared state must be visible before we
@@ -442,7 +442,7 @@
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
-	trace_power_start(POWER_CSTATE, (ax>>4)+1);
+	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
 	if (!need_resched()) {
 		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
@@ -458,7 +458,7 @@
 static void mwait_idle(void)
 {
 	if (!need_resched()) {
-		trace_power_start(POWER_CSTATE, 1);
+		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
 		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 
@@ -479,7 +479,7 @@
  */
 static void poll_idle(void)
 {
-	trace_power_start(POWER_CSTATE, 0);
+	trace_power_start(POWER_CSTATE, 0, smp_processor_id());
 	local_irq_enable();
 	while (!need_resched())
 		cpu_relax();
+9 -9
drivers/cpufreq/cpufreq.c
@@ -29,6 +29,8 @@
 #include <linux/completion.h>
 #include <linux/mutex.h>
 
+#include <trace/events/power.h>
+
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
 		"cpufreq-core", msg)
 
@@ -70,7 +68,7 @@
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu) \
-int lock_policy_rwsem_##mode \
+static int lock_policy_rwsem_##mode \
 (int cpu) \
 { \
 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
@@ -85,26 +83,22 @@
 }
 
 lock_policy_rwsem(read, cpu);
-EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
 
 lock_policy_rwsem(write, cpu);
-EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
 
-void unlock_policy_rwsem_read(int cpu)
+static void unlock_policy_rwsem_read(int cpu)
 {
 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
 	BUG_ON(policy_cpu == -1);
 	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 }
-EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
 
-void unlock_policy_rwsem_write(int cpu)
+static void unlock_policy_rwsem_write(int cpu)
 {
 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
 	BUG_ON(policy_cpu == -1);
 	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
 }
-EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
 
 
 /* internal prototypes */
@@ -352,6 +354,9 @@
 
 	case CPUFREQ_POSTCHANGE:
 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
+		dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
+			(unsigned long)freqs->cpu);
+		trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 				CPUFREQ_POSTCHANGE, freqs);
 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -1876,7 +1875,6 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata cpufreq_cpu_notifier =
-{
+static struct notifier_block __refdata cpufreq_cpu_notifier = {
 	.notifier_call = cpufreq_cpu_callback,
 };
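
Net effect of moving the tracepoint into the core: trace_power_frequency now fires once in the POSTCHANGE path of cpufreq_notify_transition(), so every cpufreq driver gets traced, not just acpi-cpufreq (whose open-coded call was deleted above). Sketched flow, with the hardware step elided:

	/* any scaling driver, during a transition */
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	/* ... program the new frequency ... */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	/* -> core emits trace_power_frequency(POWER_PSTATE,
	 *    freqs->new, freqs->cpu) on behalf of the driver */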
+18 -15
drivers/cpufreq/cpufreq_ondemand.c
@@ -459,6 +459,17 @@
 
 /************************** sysfs end ************************/
 
+static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+{
+	if (dbs_tuners_ins.powersave_bias)
+		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
+	else if (p->cur == p->max)
+		return;
+
+	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
+			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+}
+
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
 	unsigned int max_load_freq;
@@ -562,19 +551,7 @@
 
 	/* Check for frequency increase */
 	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
-		/* if we are already at full speed then break out early */
-		if (!dbs_tuners_ins.powersave_bias) {
-			if (policy->cur == policy->max)
-				return;
-
-			__cpufreq_driver_target(policy, policy->max,
-				CPUFREQ_RELATION_H);
-		} else {
-			int freq = powersave_bias_target(policy, policy->max,
-					CPUFREQ_RELATION_H);
-			__cpufreq_driver_target(policy, freq,
-					CPUFREQ_RELATION_L);
-		}
+		dbs_freq_increase(policy, policy->max);
 		return;
 	}
 
@@ -609,7 +610,9 @@
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
-	delay -= jiffies % delay;
+	if (num_online_cpus() > 1)
+		delay -= jiffies % delay;
+
 	mutex_lock(&dbs_info->timer_mutex);
 
 	/* Common NORMAL_SAMPLE setup */
@@ -636,7 +635,9 @@
 {
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-	delay -= jiffies % delay;
+
+	if (num_online_cpus() > 1)
+		delay -= jiffies % delay;
 
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
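
The jiffy-alignment arithmetic deserves a worked example. delay is the sampling period in jiffies; subtracting jiffies % delay rounds the next expiry down to a multiple of the period, so every CPU's sampling timer lands on the same jiffy. Assuming a 10-jiffy period:

	int delay = 10;			/* sampling period in jiffies */
	/* suppose the jiffies counter currently reads 1003 */
	delay -= 1003 % 10;		/* delay = 10 - 3 = 7 */
	/* the timer fires at jiffies 1010, a multiple of the period,
	 * and every CPU doing this computation picks the same jiffy */

With a single online CPU there is nothing to synchronize, so the adjustment is now skipped and the full sampling interval is always used.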
+1 -1
drivers/cpuidle/cpuidle.c
@@ -95,7 +95,7 @@
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev);
-	trace_power_end(0);
+	trace_power_end(smp_processor_id());
 }
 
 /**
+1 -1
drivers/idle/intel_idle.c
@@ -231,7 +231,7 @@
 
 	stop_critical_timings();
 #ifndef MODULE
-	trace_power_start(POWER_CSTATE, (eax >> 4) + 1);
+	trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
 #endif
 	if (!need_resched()) {
 
-5
include/linux/cpufreq.h
@@ -196,11 +196,6 @@
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 
-int lock_policy_rwsem_read(int cpu);
-int lock_policy_rwsem_write(int cpu);
-void unlock_policy_rwsem_read(int cpu);
-void unlock_policy_rwsem_write(int cpu);
-
 
 /*********************************************************************
  *                      CPUFREQ DRIVER INTERFACE                     *
+15 -12
include/trace/events/power.h
@@ -18,51 +18,54 @@
 
 DECLARE_EVENT_CLASS(power,
 
-	TP_PROTO(unsigned int type, unsigned int state),
+	TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
 
-	TP_ARGS(type, state),
+	TP_ARGS(type, state, cpu_id),
 
 	TP_STRUCT__entry(
 		__field(	u64,		type		)
 		__field(	u64,		state		)
+		__field(	u64,		cpu_id		)
 	),
 
 	TP_fast_assign(
 		__entry->type = type;
 		__entry->state = state;
+		__entry->cpu_id = cpu_id;
 	),
 
-	TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state)
+	TP_printk("type=%lu state=%lu cpu_id=%lu", (unsigned long)__entry->type,
+		(unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
 );
 
 DEFINE_EVENT(power, power_start,
 
-	TP_PROTO(unsigned int type, unsigned int state),
+	TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
 
-	TP_ARGS(type, state)
+	TP_ARGS(type, state, cpu_id)
 );
 
 DEFINE_EVENT(power, power_frequency,
 
-	TP_PROTO(unsigned int type, unsigned int state),
+	TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
 
-	TP_ARGS(type, state)
+	TP_ARGS(type, state, cpu_id)
 );
 
 TRACE_EVENT(power_end,
 
-	TP_PROTO(int dummy),
+	TP_PROTO(unsigned int cpu_id),
 
-	TP_ARGS(dummy),
+	TP_ARGS(cpu_id),
 
 	TP_STRUCT__entry(
-		__field(	u64,		dummy		)
+		__field(	u64,		cpu_id		)
 	),
 
 	TP_fast_assign(
-		__entry->dummy = 0xffff;
+		__entry->cpu_id = cpu_id;
 	),
 
-	TP_printk("dummy=%lu", (unsigned long)__entry->dummy)
+	TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
 
 );
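
Because power_start and power_frequency are DEFINE_EVENT() instances of the shared power class, extending DECLARE_EVENT_CLASS() updates both in one place; power_end is a standalone TRACE_EVENT() and is converted from its old dummy argument separately. The resulting call sites, taken from the diffs in this merge:

	trace_power_start(POWER_CSTATE, 1, smp_processor_id());	/* enter idle */
	trace_power_end(smp_processor_id());				/* leave idle */
	trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);	/* P-state */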
+6 -5
tools/perf/builtin-timechart.c
@@ -300,8 +300,9 @@
 
 struct power_entry {
 	struct trace_entry te;
-	s64 type;
-	s64 value;
+	u64 type;
+	u64 value;
+	u64 cpu_id;
 };
 
 #define TASK_COMM_LEN 16
@@ -499,13 +498,13 @@
 		return 0;
 
 	if (strcmp(event_str, "power:power_start") == 0)
-		c_state_start(data.cpu, data.time, pe->value);
+		c_state_start(pe->cpu_id, data.time, pe->value);
 
 	if (strcmp(event_str, "power:power_end") == 0)
-		c_state_end(data.cpu, data.time);
+		c_state_end(pe->cpu_id, data.time);
 
 	if (strcmp(event_str, "power:power_frequency") == 0)
-		p_state_change(data.cpu, data.time, pe->value);
+		p_state_change(pe->cpu_id, data.time, pe->value);
 
 	if (strcmp(event_str, "sched:sched_wakeup") == 0)
 		sched_wakeup(data.cpu, data.time, data.pid, te);