Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge back cpufreq material for 3.19-rc1.

+428 -17
+23 -10
Documentation/cpu-freq/intel-pstate.txt
··· 1 1 Intel P-state driver 2 2 -------------------- 3 3 4 - This driver implements a scaling driver with an internal governor for 5 - Intel Core processors. The driver follows the same model as the 6 - Transmeta scaling driver (longrun.c) and implements the setpolicy() 7 - instead of target(). Scaling drivers that implement setpolicy() are 8 - assumed to implement internal governors by the cpufreq core. All the 9 - logic for selecting the current P state is contained within the 10 - driver; no external governor is used by the cpufreq core. 4 + This driver provides an interface to control the P state selection for 5 + SandyBridge+ Intel processors. The driver can operate two different 6 + modes based on the processor model legacy and Hardware P state (HWP) 7 + mode. 11 8 12 - Intel SandyBridge+ processors are supported. 9 + In legacy mode the driver implements a scaling driver with an internal 10 + governor for Intel Core processors. The driver follows the same model 11 + as the Transmeta scaling driver (longrun.c) and implements the 12 + setpolicy() instead of target(). Scaling drivers that implement 13 + setpolicy() are assumed to implement internal governors by the cpufreq 14 + core. All the logic for selecting the current P state is contained 15 + within the driver; no external governor is used by the cpufreq core. 13 16 14 - New sysfs files for controlling P state selection have been added to 17 + In HWP mode P state selection is implemented in the processor 18 + itself. The driver provides the interfaces between the cpufreq core and 19 + the processor to control P state selection based on user preferences 20 + and reporting frequency to the cpufreq core. In this mode the 21 + internal governor code is disabled. 22 + 23 + In addition to the interfaces provided by the cpufreq core for 24 + controlling frequency the driver provides sysfs files for 25 + controlling P state selection. 
These files have been added to 15 26 /sys/devices/system/cpu/intel_pstate/ 16 27 17 28 max_perf_pct: limits the maximum P state that will be requested by ··· 44 33 driver selects a single P state the actual frequency the processor 45 34 will run at is selected by the processor itself. 46 35 47 - New debugfs files have also been added to /sys/kernel/debug/pstate_snb/ 36 + For legacy mode debugfs files have also been added to allow tuning of 37 + the internal governor algorithm. These files are located at 38 + /sys/kernel/debug/pstate_snb/ These files are NOT present in HWP mode. 48 39 49 40 deadband 50 41 d_gain_pct
+3
Documentation/kernel-parameters.txt
··· 1446 1446 disable 1447 1447 Do not enable intel_pstate as the default 1448 1448 scaling driver for the supported processors 1449 + no_hwp 1450 + Do not enable hardware P state control (HWP) 1451 + if available. 1449 1452 1450 1453 intremap= [X86-64, Intel-IOMMU] 1451 1454 on enable Interrupt Remapping (default)
+5
arch/x86/include/asm/cpufeature.h
··· 189 189 #define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ 190 190 #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 191 191 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 192 + #define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ 193 + #define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ 194 + #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ 195 + #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ 196 + #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ 192 197 193 198 /* Virtualization flags: Linux defined, word 8 */ 194 199 #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+41
arch/x86/include/uapi/asm/msr-index.h
··· 152 152 #define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 153 153 #define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 154 154 155 + /* Hardware P state interface */ 156 + #define MSR_PPERF 0x0000064e 157 + #define MSR_PERF_LIMIT_REASONS 0x0000064f 158 + #define MSR_PM_ENABLE 0x00000770 159 + #define MSR_HWP_CAPABILITIES 0x00000771 160 + #define MSR_HWP_REQUEST_PKG 0x00000772 161 + #define MSR_HWP_INTERRUPT 0x00000773 162 + #define MSR_HWP_REQUEST 0x00000774 163 + #define MSR_HWP_STATUS 0x00000777 164 + 165 + /* CPUID.6.EAX */ 166 + #define HWP_BASE_BIT (1<<7) 167 + #define HWP_NOTIFICATIONS_BIT (1<<8) 168 + #define HWP_ACTIVITY_WINDOW_BIT (1<<9) 169 + #define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10) 170 + #define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11) 171 + 172 + /* IA32_HWP_CAPABILITIES */ 173 + #define HWP_HIGHEST_PERF(x) (x & 0xff) 174 + #define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8) 175 + #define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16) 176 + #define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24) 177 + 178 + /* IA32_HWP_REQUEST */ 179 + #define HWP_MIN_PERF(x) (x & 0xff) 180 + #define HWP_MAX_PERF(x) ((x & 0xff) << 8) 181 + #define HWP_DESIRED_PERF(x) ((x & 0xff) << 16) 182 + #define HWP_ENERGY_PERF_PREFERENCE(x) ((x & 0xff) << 24) 183 + #define HWP_ACTIVITY_WINDOW(x) ((x & 0xff3) << 32) 184 + #define HWP_PACKAGE_CONTROL(x) ((x & 0x1) << 42) 185 + 186 + /* IA32_HWP_STATUS */ 187 + #define HWP_GUARANTEED_CHANGE(x) (x & 0x1) 188 + #define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4) 189 + 190 + /* IA32_HWP_INTERRUPT */ 191 + #define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1) 192 + #define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2) 193 + 155 194 #define MSR_AMD64_MC0_MASK 0xc0010044 156 195 157 196 #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) ··· 383 344 #define MSR_IA32_MISC_ENABLE 0x000001a0 384 345 385 346 #define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 347 + 348 + #define MSR_MISC_PWR_MGMT 0x000001aa 386 349 387 350 #define 
MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 388 351 #define ENERGY_PERF_BIAS_PERFORMANCE 0
+5
arch/x86/kernel/cpu/scattered.c
··· 36 36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, 37 37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, 38 38 { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, 39 + { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, 40 + { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 }, 41 + { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, 42 + { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, 43 + { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, 39 44 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, 40 45 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, 41 46 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
+10 -1
drivers/cpufreq/Kconfig
··· 63 63 64 64 config CPU_FREQ_DEFAULT_GOV_POWERSAVE 65 65 bool "powersave" 66 - depends on EXPERT 67 66 select CPU_FREQ_GOV_POWERSAVE 68 67 help 69 68 Use the CPUFreq governor 'powersave' as default. This sets ··· 244 245 support software configurable cpu frequency. 245 246 246 247 Loongson2F and it's successors support this feature. 248 + 249 + For details, take a look at <file:Documentation/cpu-freq/>. 250 + 251 + If in doubt, say N. 252 + 253 + config LOONGSON1_CPUFREQ 254 + tristate "Loongson1 CPUFreq Driver" 255 + help 256 + This option adds a CPUFreq driver for loongson1 processors which 257 + support software configurable cpu frequency. 247 258 248 259 For details, take a look at <file:Documentation/cpu-freq/>. 249 260
+1
drivers/cpufreq/Makefile
··· 98 98 obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o 99 99 obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o 100 100 obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o 101 + obj-$(CONFIG_LOONGSON1_CPUFREQ) += ls1x-cpufreq.o 101 102 obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o 102 103 obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o 103 104 obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
+7 -2
drivers/cpufreq/cpufreq-dt.c
··· 58 58 old_freq = clk_get_rate(cpu_clk) / 1000; 59 59 60 60 if (!IS_ERR(cpu_reg)) { 61 + unsigned long opp_freq; 62 + 61 63 rcu_read_lock(); 62 64 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); 63 65 if (IS_ERR(opp)) { ··· 69 67 return PTR_ERR(opp); 70 68 } 71 69 volt = dev_pm_opp_get_voltage(opp); 70 + opp_freq = dev_pm_opp_get_freq(opp); 72 71 rcu_read_unlock(); 73 72 tol = volt * priv->voltage_tolerance / 100; 74 73 volt_old = regulator_get_voltage(cpu_reg); 74 + dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n", 75 + opp_freq / 1000, volt); 75 76 } 76 77 77 78 dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", 78 - old_freq / 1000, volt_old ? volt_old / 1000 : -1, 79 + old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1, 79 80 new_freq / 1000, volt ? volt / 1000 : -1); 80 81 81 82 /* scaling up? scale voltage before frequency */ ··· 94 89 ret = clk_set_rate(cpu_clk, freq_exact); 95 90 if (ret) { 96 91 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); 97 - if (!IS_ERR(cpu_reg)) 92 + if (!IS_ERR(cpu_reg) && volt_old > 0) 98 93 regulator_set_voltage_tol(cpu_reg, volt_old, tol); 99 94 return ret; 100 95 }
+4 -2
drivers/cpufreq/cpufreq.c
··· 535 535 static ssize_t store_##file_name \ 536 536 (struct cpufreq_policy *policy, const char *buf, size_t count) \ 537 537 { \ 538 - int ret; \ 538 + int ret, temp; \ 539 539 struct cpufreq_policy new_policy; \ 540 540 \ 541 541 ret = cpufreq_get_policy(&new_policy, policy->cpu); \ ··· 546 546 if (ret != 1) \ 547 547 return -EINVAL; \ 548 548 \ 549 + temp = new_policy.object; \ 549 550 ret = cpufreq_set_policy(policy, &new_policy); \ 550 - policy->user_policy.object = policy->object; \ 551 + if (!ret) \ 552 + policy->user_policy.object = temp; \ 551 553 \ 552 554 return ret ? ret : count; \ 553 555 }
+99 -2
drivers/cpufreq/intel_pstate.c
··· 137 137 138 138 static struct pstate_adjust_policy pid_params; 139 139 static struct pstate_funcs pstate_funcs; 140 + static int hwp_active; 140 141 141 142 struct perf_limits { 142 143 int no_turbo; ··· 245 244 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 246 245 } 247 246 247 + #define PCT_TO_HWP(x) (x * 255 / 100) 248 + static void intel_pstate_hwp_set(void) 249 + { 250 + int min, max, cpu; 251 + u64 value, freq; 252 + 253 + get_online_cpus(); 254 + 255 + for_each_online_cpu(cpu) { 256 + rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 257 + min = PCT_TO_HWP(limits.min_perf_pct); 258 + value &= ~HWP_MIN_PERF(~0L); 259 + value |= HWP_MIN_PERF(min); 260 + 261 + max = PCT_TO_HWP(limits.max_perf_pct); 262 + if (limits.no_turbo) { 263 + rdmsrl( MSR_HWP_CAPABILITIES, freq); 264 + max = HWP_GUARANTEED_PERF(freq); 265 + } 266 + 267 + value &= ~HWP_MAX_PERF(~0L); 268 + value |= HWP_MAX_PERF(max); 269 + wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 270 + } 271 + 272 + put_online_cpus(); 273 + } 274 + 248 275 /************************** debugfs begin ************************/ 249 276 static int pid_param_set(void *data, u64 val) 250 277 { ··· 308 279 struct dentry *debugfs_parent; 309 280 int i = 0; 310 281 282 + if (hwp_active) 283 + return; 311 284 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 312 285 if (IS_ERR_OR_NULL(debugfs_parent)) 313 286 return; ··· 360 329 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 361 330 return -EPERM; 362 331 } 332 + 363 333 limits.no_turbo = clamp_t(int, input, 0, 1); 334 + 335 + if (hwp_active) 336 + intel_pstate_hwp_set(); 364 337 365 338 return count; 366 339 } ··· 383 348 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 384 349 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 385 350 351 + if (hwp_active) 352 + intel_pstate_hwp_set(); 386 353 return count; 387 354 } 388 355 ··· 400 363 limits.min_perf_pct = clamp_t(int, input, 0 , 100); 401 364 
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 402 365 366 + if (hwp_active) 367 + intel_pstate_hwp_set(); 403 368 return count; 404 369 } 405 370 ··· 434 395 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 435 396 BUG_ON(rc); 436 397 } 437 - 438 398 /************************** sysfs end ************************/ 399 + 400 + static void intel_pstate_hwp_enable(void) 401 + { 402 + hwp_active++; 403 + pr_info("intel_pstate HWP enabled\n"); 404 + 405 + wrmsrl( MSR_PM_ENABLE, 0x1); 406 + } 407 + 439 408 static int byt_get_min_pstate(void) 440 409 { 441 410 u64 value; ··· 695 648 cpu->prev_mperf = mperf; 696 649 } 697 650 651 + static inline void intel_hwp_set_sample_time(struct cpudata *cpu) 652 + { 653 + int delay; 654 + 655 + delay = msecs_to_jiffies(50); 656 + mod_timer_pinned(&cpu->timer, jiffies + delay); 657 + } 658 + 698 659 static inline void intel_pstate_set_sample_time(struct cpudata *cpu) 699 660 { 700 661 int delay; ··· 749 694 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl); 750 695 } 751 696 697 + static void intel_hwp_timer_func(unsigned long __data) 698 + { 699 + struct cpudata *cpu = (struct cpudata *) __data; 700 + 701 + intel_pstate_sample(cpu); 702 + intel_hwp_set_sample_time(cpu); 703 + } 704 + 752 705 static void intel_pstate_timer_func(unsigned long __data) 753 706 { 754 707 struct cpudata *cpu = (struct cpudata *) __data; ··· 793 730 ICPU(0x3f, core_params), 794 731 ICPU(0x45, core_params), 795 732 ICPU(0x46, core_params), 733 + ICPU(0x47, core_params), 796 734 ICPU(0x4c, byt_params), 797 735 ICPU(0x4f, core_params), 798 736 ICPU(0x56, core_params), 799 737 {} 800 738 }; 801 739 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 740 + 741 + static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = { 742 + ICPU(0x56, core_params), 743 + {} 744 + }; 802 745 803 746 static int intel_pstate_init_cpu(unsigned int cpunum) 804 747 { ··· 822 753 intel_pstate_get_cpu_pstates(cpu); 823 754 
824 755 init_timer_deferrable(&cpu->timer); 825 - cpu->timer.function = intel_pstate_timer_func; 826 756 cpu->timer.data = (unsigned long)cpu; 827 757 cpu->timer.expires = jiffies + HZ/100; 758 + 759 + if (!hwp_active) 760 + cpu->timer.function = intel_pstate_timer_func; 761 + else 762 + cpu->timer.function = intel_hwp_timer_func; 763 + 828 764 intel_pstate_busy_pid_reset(cpu); 829 765 intel_pstate_sample(cpu); 830 766 ··· 866 792 limits.no_turbo = 0; 867 793 return 0; 868 794 } 795 + 869 796 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 870 797 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); 871 798 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); ··· 875 800 limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100); 876 801 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 877 802 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 803 + 804 + if (hwp_active) 805 + intel_pstate_hwp_set(); 878 806 879 807 return 0; 880 808 } ··· 901 823 pr_info("intel_pstate CPU %d exiting\n", cpu_num); 902 824 903 825 del_timer_sync(&all_cpu_data[cpu_num]->timer); 826 + if (hwp_active) 827 + return; 828 + 904 829 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 905 830 } 906 831 ··· 947 866 }; 948 867 949 868 static int __initdata no_load; 869 + static int __initdata no_hwp; 950 870 951 871 static int intel_pstate_msrs_not_valid(void) 952 872 { ··· 1041 959 { 1042 960 struct acpi_table_header hdr; 1043 961 struct hw_vendor_info *v_info; 962 + const struct x86_cpu_id *id; 963 + u64 misc_pwr; 964 + 965 + id = x86_match_cpu(intel_pstate_cpu_oob_ids); 966 + if (id) { 967 + rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 968 + if ( misc_pwr & (1 << 8)) 969 + return true; 970 + } 1044 971 1045 972 if (acpi_disabled || 1046 973 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) ··· 1073 982 int cpu, rc = 0; 1074 983 const struct x86_cpu_id *id; 1075 984 struct 
cpu_defaults *cpu_info; 985 + struct cpuinfo_x86 *c = &boot_cpu_data; 1076 986 1077 987 if (no_load) 1078 988 return -ENODEV; ··· 1102 1010 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); 1103 1011 if (!all_cpu_data) 1104 1012 return -ENOMEM; 1013 + 1014 + if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp) 1015 + intel_pstate_hwp_enable(); 1105 1016 1106 1017 rc = cpufreq_register_driver(&intel_pstate_driver); 1107 1018 if (rc) ··· 1136 1041 1137 1042 if (!strcmp(str, "disable")) 1138 1043 no_load = 1; 1044 + if (!strcmp(str, "no_hwp")) 1045 + no_hwp = 1; 1139 1046 return 0; 1140 1047 } 1141 1048 early_param("intel_pstate", intel_pstate_setup);
+223
drivers/cpufreq/ls1x-cpufreq.c
··· 1 + /* 2 + * CPU Frequency Scaling for Loongson 1 SoC 3 + * 4 + * Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com> 5 + * 6 + * This file is licensed under the terms of the GNU General Public 7 + * License version 2. This program is licensed "as is" without any 8 + * warranty of any kind, whether express or implied. 9 + */ 10 + 11 + #include <linux/clk.h> 12 + #include <linux/clk-provider.h> 13 + #include <linux/cpu.h> 14 + #include <linux/cpufreq.h> 15 + #include <linux/delay.h> 16 + #include <linux/module.h> 17 + #include <linux/platform_device.h> 18 + #include <linux/slab.h> 19 + 20 + #include <asm/mach-loongson1/cpufreq.h> 21 + #include <asm/mach-loongson1/loongson1.h> 22 + 23 + static struct { 24 + struct device *dev; 25 + struct clk *clk; /* CPU clk */ 26 + struct clk *mux_clk; /* MUX of CPU clk */ 27 + struct clk *pll_clk; /* PLL clk */ 28 + struct clk *osc_clk; /* OSC clk */ 29 + unsigned int max_freq; 30 + unsigned int min_freq; 31 + } ls1x_cpufreq; 32 + 33 + static int ls1x_cpufreq_notifier(struct notifier_block *nb, 34 + unsigned long val, void *data) 35 + { 36 + if (val == CPUFREQ_POSTCHANGE) 37 + current_cpu_data.udelay_val = loops_per_jiffy; 38 + 39 + return NOTIFY_OK; 40 + } 41 + 42 + static struct notifier_block ls1x_cpufreq_notifier_block = { 43 + .notifier_call = ls1x_cpufreq_notifier 44 + }; 45 + 46 + static int ls1x_cpufreq_target(struct cpufreq_policy *policy, 47 + unsigned int index) 48 + { 49 + unsigned int old_freq, new_freq; 50 + 51 + old_freq = policy->cur; 52 + new_freq = policy->freq_table[index].frequency; 53 + 54 + /* 55 + * The procedure of reconfiguring CPU clk is as below. 
56 + * 57 + * - Reparent CPU clk to OSC clk 58 + * - Reset CPU clock (very important) 59 + * - Reconfigure CPU DIV 60 + * - Reparent CPU clk back to CPU DIV clk 61 + */ 62 + 63 + dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq); 64 + clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk); 65 + __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU, 66 + LS1X_CLK_PLL_DIV); 67 + __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU), 68 + LS1X_CLK_PLL_DIV); 69 + clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000); 70 + clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk); 71 + 72 + return 0; 73 + } 74 + 75 + static int ls1x_cpufreq_init(struct cpufreq_policy *policy) 76 + { 77 + struct cpufreq_frequency_table *freq_tbl; 78 + unsigned int pll_freq, freq; 79 + int steps, i, ret; 80 + 81 + pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000; 82 + 83 + steps = 1 << DIV_CPU_WIDTH; 84 + freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL); 85 + if (!freq_tbl) { 86 + dev_err(ls1x_cpufreq.dev, 87 + "failed to alloc cpufreq_frequency_table\n"); 88 + ret = -ENOMEM; 89 + goto out; 90 + } 91 + 92 + for (i = 0; i < (steps - 1); i++) { 93 + freq = pll_freq / (i + 1); 94 + if ((freq < ls1x_cpufreq.min_freq) || 95 + (freq > ls1x_cpufreq.max_freq)) 96 + freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID; 97 + else 98 + freq_tbl[i].frequency = freq; 99 + dev_dbg(ls1x_cpufreq.dev, 100 + "cpufreq table: index %d: frequency %d\n", i, 101 + freq_tbl[i].frequency); 102 + } 103 + freq_tbl[i].frequency = CPUFREQ_TABLE_END; 104 + 105 + policy->clk = ls1x_cpufreq.clk; 106 + ret = cpufreq_generic_init(policy, freq_tbl, 0); 107 + if (ret) 108 + kfree(freq_tbl); 109 + out: 110 + return ret; 111 + } 112 + 113 + static int ls1x_cpufreq_exit(struct cpufreq_policy *policy) 114 + { 115 + kfree(policy->freq_table); 116 + return 0; 117 + } 118 + 119 + static struct cpufreq_driver ls1x_cpufreq_driver = { 120 + .name = "cpufreq-ls1x", 121 + .flags = CPUFREQ_STICKY | 
CPUFREQ_NEED_INITIAL_FREQ_CHECK, 122 + .verify = cpufreq_generic_frequency_table_verify, 123 + .target_index = ls1x_cpufreq_target, 124 + .get = cpufreq_generic_get, 125 + .init = ls1x_cpufreq_init, 126 + .exit = ls1x_cpufreq_exit, 127 + .attr = cpufreq_generic_attr, 128 + }; 129 + 130 + static int ls1x_cpufreq_remove(struct platform_device *pdev) 131 + { 132 + cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block, 133 + CPUFREQ_TRANSITION_NOTIFIER); 134 + cpufreq_unregister_driver(&ls1x_cpufreq_driver); 135 + 136 + return 0; 137 + } 138 + 139 + static int ls1x_cpufreq_probe(struct platform_device *pdev) 140 + { 141 + struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data; 142 + struct clk *clk; 143 + int ret; 144 + 145 + if (!pdata || !pdata->clk_name || !pdata->osc_clk_name) 146 + return -EINVAL; 147 + 148 + ls1x_cpufreq.dev = &pdev->dev; 149 + 150 + clk = devm_clk_get(&pdev->dev, pdata->clk_name); 151 + if (IS_ERR(clk)) { 152 + dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n", 153 + pdata->clk_name); 154 + ret = PTR_ERR(clk); 155 + goto out; 156 + } 157 + ls1x_cpufreq.clk = clk; 158 + 159 + clk = clk_get_parent(clk); 160 + if (IS_ERR(clk)) { 161 + dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n", 162 + __clk_get_name(ls1x_cpufreq.clk)); 163 + ret = PTR_ERR(clk); 164 + goto out; 165 + } 166 + ls1x_cpufreq.mux_clk = clk; 167 + 168 + clk = clk_get_parent(clk); 169 + if (IS_ERR(clk)) { 170 + dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n", 171 + __clk_get_name(ls1x_cpufreq.mux_clk)); 172 + ret = PTR_ERR(clk); 173 + goto out; 174 + } 175 + ls1x_cpufreq.pll_clk = clk; 176 + 177 + clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name); 178 + if (IS_ERR(clk)) { 179 + dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n", 180 + pdata->osc_clk_name); 181 + ret = PTR_ERR(clk); 182 + goto out; 183 + } 184 + ls1x_cpufreq.osc_clk = clk; 185 + 186 + ls1x_cpufreq.max_freq = pdata->max_freq; 187 + ls1x_cpufreq.min_freq = pdata->min_freq; 188 + 
189 + ret = cpufreq_register_driver(&ls1x_cpufreq_driver); 190 + if (ret) { 191 + dev_err(ls1x_cpufreq.dev, 192 + "failed to register cpufreq driver: %d\n", ret); 193 + goto out; 194 + } 195 + 196 + ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block, 197 + CPUFREQ_TRANSITION_NOTIFIER); 198 + 199 + if (!ret) 200 + goto out; 201 + 202 + dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n", 203 + ret); 204 + 205 + cpufreq_unregister_driver(&ls1x_cpufreq_driver); 206 + out: 207 + return ret; 208 + } 209 + 210 + static struct platform_driver ls1x_cpufreq_platdrv = { 211 + .driver = { 212 + .name = "ls1x-cpufreq", 213 + .owner = THIS_MODULE, 214 + }, 215 + .probe = ls1x_cpufreq_probe, 216 + .remove = ls1x_cpufreq_remove, 217 + }; 218 + 219 + module_platform_driver(ls1x_cpufreq_platdrv); 220 + 221 + MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>"); 222 + MODULE_DESCRIPTION("Loongson 1 CPUFreq driver"); 223 + MODULE_LICENSE("GPL");
+7
drivers/cpufreq/pcc-cpufreq.c
··· 603 603 free_percpu(pcc_cpu_info); 604 604 } 605 605 606 + static const struct acpi_device_id processor_device_ids[] = { 607 + {ACPI_PROCESSOR_OBJECT_HID, }, 608 + {ACPI_PROCESSOR_DEVICE_HID, }, 609 + {}, 610 + }; 611 + MODULE_DEVICE_TABLE(acpi, processor_device_ids); 612 + 606 613 MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar"); 607 614 MODULE_VERSION(PCC_VERSION); 608 615 MODULE_DESCRIPTION("Processor Clocking Control interface driver");