Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ][2/2] preregister support for powernow-k8
[CPUFREQ][1/2] whitespace fix for powernow-k8
[CPUFREQ] Update MAINTAINERS to reflect new mailing list.
[CPUFREQ] Fix warning in elanfreq
[CPUFREQ] Fix -Wshadow warning in conservative governor.
[CPUFREQ] Remove EXPERIMENTAL annotation from VIA C7 powersaver kconfig.

+89 -54
+1 -1
MAINTAINERS
···
 CPU FREQUENCY DRIVERS
 P: Dave Jones
 M: davej@codemonkey.org.uk
-L: cpufreq@lists.linux.org.uk
+L: cpufreq@vger.kernel.org
 W: http://www.codemonkey.org.uk/projects/cpufreq/
 T: git kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
 S: Maintained
+2 -2
arch/x86/kernel/cpu/cpufreq/Kconfig
···
           If in doubt, say N.
 
 config X86_E_POWERSAVER
-        tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)"
+        tristate "VIA C7 Enhanced PowerSaver"
         select CPU_FREQ_TABLE
-        depends on X86_32 && EXPERIMENTAL
+        depends on X86_32
         help
           This adds the CPUFreq driver for VIA C7 processors.
 
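Note: dropping the "&& EXPERIMENTAL" dependency makes X86_E_POWERSAVER selectable on kernels built without CONFIG_EXPERIMENTAL; the "(EXPERIMENTAL)" tag is removed from the prompt string to match.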
+1 -1
arch/x86/kernel/cpu/cpufreq/elanfreq.c
···
  * It is important that the frequencies
  * are listed in ascending order here!
  */
-struct s_elan_multiplier elan_multiplier[] = {
+static struct s_elan_multiplier elan_multiplier[] = {
         {1000, 0x02, 0x18},
         {2000, 0x02, 0x10},
         {4000, 0x02, 0x08},
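Note: the warning silenced here is presumably the "not declared; should it be static?" class of report from sparse, since elan_multiplier is only referenced inside elanfreq.c. A minimal stand-alone illustration, with hypothetical file and names:

        /* tbl.c: a file-scope definition without `static` has external
         * linkage; with no declaration visible in any header, sparse
         * reports "symbol 'table' was not declared. Should it be static?"
         */
        struct pair { int hz; int val; };

        static struct pair table[] = {  /* internal linkage: warning gone */
                { 1000, 0x18 },
                { 2000, 0x10 },
        };

Giving the table internal linkage also lets the compiler discard it entirely if it ever becomes unused.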
+73 -39
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
···
         return 800 + (fid * 100);
 }
 
-
 /* Return a frequency in KHz, given an input fid */
 static u32 find_khz_freq_from_fid(u32 fid)
 {
···
 {
         return data[pstate].frequency;
 }
-
 
 /* Return the vco fid for an input fid
  *
···
         dprintk("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
         wrmsr(MSR_FIDVID_CTL, lo, hi);
 }
-
 
 /* write the new fid value along with the other control fields to the msr */
 static int write_new_fid(struct powernow_k8_data *data, u32 fid)
···
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
 {
-        if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
+        if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE))
                 return;
 
-        data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
-        data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
-        data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
-        data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
-        data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
-        data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
+        data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK;
+        data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK;
+        data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+        data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
+        data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK);
+        data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK;
+}
+
+
+static struct acpi_processor_performance *acpi_perf_data;
+static int preregister_valid;
+
+static int powernow_k8_cpu_preinit_acpi(void)
+{
+        acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
+        if (!acpi_perf_data)
+                return -ENODEV;
+
+        if (acpi_processor_preregister_performance(acpi_perf_data))
+                return -ENODEV;
+        else
+                preregister_valid = 1;
+        return 0;
 }
 
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 {
         struct cpufreq_frequency_table *powernow_table;
         int ret_val;
+        int cpu = 0;
 
-        if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
+        data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
+        if (acpi_processor_register_performance(data->acpi_data, data->cpu)) {
                 dprintk("register performance failed: bad ACPI data\n");
                 return -EIO;
         }
 
         /* verify the data contained in the ACPI structures */
-        if (data->acpi_data.state_count <= 1) {
+        if (data->acpi_data->state_count <= 1) {
                 dprintk("No ACPI P-States\n");
                 goto err_out;
         }
 
-        if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
-            (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+        if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+            (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
                 dprintk("Invalid control/status registers (%x - %x)\n",
-                        data->acpi_data.control_register.space_id,
-                        data->acpi_data.status_register.space_id);
+                        data->acpi_data->control_register.space_id,
+                        data->acpi_data->status_register.space_id);
                 goto err_out;
         }
 
         /* fill in data->powernow_table */
         powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
-                * (data->acpi_data.state_count + 1)), GFP_KERNEL);
+                * (data->acpi_data->state_count + 1)), GFP_KERNEL);
         if (!powernow_table) {
                 dprintk("powernow_table memory alloc failure\n");
                 goto err_out;
···
         if (ret_val)
                 goto err_out_mem;
 
-        powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
-        powernow_table[data->acpi_data.state_count].index = 0;
+        powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END;
+        powernow_table[data->acpi_data->state_count].index = 0;
         data->powernow_table = powernow_table;
 
         /* fill in data */
-        data->numps = data->acpi_data.state_count;
+        data->numps = data->acpi_data->state_count;
         if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
                 print_basics(data);
         powernow_k8_acpi_pst_values(data, 0);
···
         /* notify BIOS that we exist */
         acpi_processor_notify_smm(THIS_MODULE);
 
+        /* determine affinity, from ACPI if available */
+        if (preregister_valid) {
+                if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) ||
+                    (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY))
+                        data->starting_core_affinity = data->acpi_data->shared_cpu_map;
+                else
+                        data->starting_core_affinity = cpumask_of_cpu(data->cpu);
+        } else {
+                /* best guess from family if not */
+                if (cpu_family == CPU_HW_PSTATE)
+                        data->starting_core_affinity = cpumask_of_cpu(data->cpu);
+                else
+                        data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu);
+        }
+
         return 0;
 
 err_out_mem:
         kfree(powernow_table);
 
 err_out:
-        acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+        acpi_processor_unregister_performance(data->acpi_data, data->cpu);
 
         /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
-        data->acpi_data.state_count = 0;
+        data->acpi_data->state_count = 0;
 
         return -ENODEV;
 }
···
         rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
         data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
 
-        for (i = 0; i < data->acpi_data.state_count; i++) {
+        for (i = 0; i < data->acpi_data->state_count; i++) {
                 u32 index;
 
-                index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
+                index = data->acpi_data->states[i].control & HW_PSTATE_MASK;
                 if (index > data->max_hw_pstate) {
                         printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
                         printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
···
                 powernow_table[i].index = index;
 
-                powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
+                powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000;
         }
         return 0;
 }
···
 {
         int i;
         int cntlofreq = 0;
-        for (i = 0; i < data->acpi_data.state_count; i++) {
+        for (i = 0; i < data->acpi_data->state_count; i++) {
                 u32 fid;
                 u32 vid;
 
                 if (data->exttype) {
-                        fid = data->acpi_data.states[i].status & EXT_FID_MASK;
-                        vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
+                        fid = data->acpi_data->states[i].status & EXT_FID_MASK;
+                        vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK;
                 } else {
-                        fid = data->acpi_data.states[i].control & FID_MASK;
-                        vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
+                        fid = data->acpi_data->states[i].control & FID_MASK;
+                        vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK;
                 }
 
                 dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
···
                         cntlofreq = i;
                 }
 
-                if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
+                if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) {
                         printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
                                 powernow_table[i].frequency,
-                                (unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
+                                (unsigned int) (data->acpi_data->states[i].core_frequency * 1000));
                         powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
                         continue;
                 }
···
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
-        if (data->acpi_data.state_count)
-                acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+        if (data->acpi_data->state_count)
+                acpi_processor_unregister_performance(data->acpi_data, data->cpu);
 }
 
 #else
+static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; }
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
···
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
         struct powernow_k8_data *data;
-        cpumask_t oldmask;
+        cpumask_t oldmask = CPU_MASK_ALL;
         int rc;
 
         if (!cpu_online(pol->cpu))
···
         /* run on any CPU again */
         set_cpus_allowed_ptr(current, &oldmask);
 
-        if (cpu_family == CPU_HW_PSTATE)
-                pol->cpus = cpumask_of_cpu(pol->cpu);
-        else
-                pol->cpus = per_cpu(cpu_core_map, pol->cpu);
+        pol->cpus = data->starting_core_affinity;
         data->available_cores = &(pol->cpus);
 
         /* Take a crude guess here.
···
         }
 
         if (supported_cpus == num_online_cpus()) {
+                powernow_k8_cpu_preinit_acpi();
                 printk(KERN_INFO PFX "Found %d %s "
                        "processors (%d cpu cores) (" VERSION ")\n",
                        num_online_nodes(),
···
         dprintk("exit\n");
 
         cpufreq_unregister_driver(&cpufreq_amd64_driver);
+
+#ifdef CONFIG_X86_POWERNOW_K8_ACPI
+        free_percpu(acpi_perf_data);
+#endif
 }
 
 MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>");
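Note: taken together, the powernow-k8 changes split ACPI setup into a one-shot preregistration pass and the existing per-policy registration. A rough sketch of the resulting call order, using only names from the diff above (error paths omitted):

        /* module load */
        powernowk8_init()
            powernow_k8_cpu_preinit_acpi()
                alloc_percpu(struct acpi_processor_performance)
                acpi_processor_preregister_performance(acpi_perf_data)
                    /* parses _PSD; fills shared_cpu_map / shared_type */
            cpufreq_register_driver(&cpufreq_amd64_driver)

        /* per policy */
        powernowk8_cpu_init()
            powernow_k8_cpu_init_acpi()
                data->acpi_data = percpu_ptr(acpi_perf_data, cpu)
                acpi_processor_register_performance(data->acpi_data, data->cpu)
                /* starting_core_affinity: shared_cpu_map when preregistration
                 * succeeded, otherwise guessed from cpu_family */
            pol->cpus = data->starting_core_affinity

        /* module unload */
        powernowk8_exit()
            cpufreq_unregister_driver(&cpufreq_amd64_driver)
            free_percpu(acpi_perf_data)

When _PSD preregistration succeeds, pol->cpus reflects the BIOS-reported dependency domain instead of the per-family guess the old code hard-coded in powernowk8_cpu_init().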
+2 -1
arch/x86/kernel/cpu/cpufreq/powernow-k8.h
···
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
         /* the acpi table needs to be kept. it's only available if ACPI was
          * used to determine valid frequency/vid/fid states */
-        struct acpi_processor_performance acpi_data;
+        struct acpi_processor_performance *acpi_data;
 #endif
         /* we need to keep track of associated cores, but let cpufreq
          * handle hotplug events - so just point at cpufreq pol->cpus
          * structure */
         cpumask_t *available_cores;
+        cpumask_t starting_core_affinity;
 };
 
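Note: acpi_data changing from an embedded struct to a pointer into the per-cpu area allocated by powernow_k8_cpu_preinit_acpi() is what turns every "data->acpi_data." access in powernow-k8.c above into "data->acpi_data->", which accounts for most of that file's churn; starting_core_affinity caches the affinity computed during ACPI init so powernowk8_cpu_init() can assign it directly to pol->cpus.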
+10 -10
drivers/cpufreq/cpufreq_conservative.c
···
 {
         unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
         unsigned int tmp_idle_ticks, total_idle_ticks;
-        unsigned int freq_step;
+        unsigned int freq_target;
         unsigned int freq_down_sampling_rate;
         struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
         struct cpufreq_policy *policy;
···
         if (this_dbs_info->requested_freq == policy->max)
                 return;
 
-        freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
+        freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
         /* max freq cannot be less than 100. But who knows.... */
-        if (unlikely(freq_step == 0))
-                freq_step = 5;
+        if (unlikely(freq_target == 0))
+                freq_target = 5;
 
-        this_dbs_info->requested_freq += freq_step;
+        this_dbs_info->requested_freq += freq_target;
         if (this_dbs_info->requested_freq > policy->max)
                 this_dbs_info->requested_freq = policy->max;
 
···
         /*
          * if we are already at the lowest speed then break out early
          * or if we 'cannot' reduce the speed as the user might want
-         * freq_step to be zero
+         * freq_target to be zero
          */
         if (this_dbs_info->requested_freq == policy->min
                         || dbs_tuners_ins.freq_step == 0)
                 return;
 
-        freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
+        freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
         /* max freq cannot be less than 100. But who knows.... */
-        if (unlikely(freq_step == 0))
-                freq_step = 5;
+        if (unlikely(freq_target == 0))
+                freq_target = 5;
 
-        this_dbs_info->requested_freq -= freq_step;
+        this_dbs_info->requested_freq -= freq_target;
         if (this_dbs_info->requested_freq < policy->min)
                 this_dbs_info->requested_freq = policy->min;
 
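Note: the rename is mechanical; the sysfs tunable dbs_tuners_ins.freq_step keeps its name, and only the derived local becomes freq_target. A hypothetical stand-alone example of the warning class being fixed, assuming gcc -Wshadow:

        static unsigned int freq_step = 5;      /* file-scope object of the same name */

        static unsigned int next_delta(unsigned int max)
        {
                unsigned int freq_step = max / 100;     /* -Wshadow: declaration of
                                                         * 'freq_step' shadows a
                                                         * global declaration */
                return freq_step;
        }

Renaming the inner variable removes the warning without changing behavior.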