Merge branches 'pm-cpufreq' and 'pm-cpuidle'

* pm-cpufreq:
intel_pstate: Knights Landing support
intel_pstate: remove MSR test
cpufreq: fix qoriq uniprocessor build
cpufreq: hisilicon: add acpu driver
cpufreq: powernv: Report cpu frequency throttling
cpufreq: qoriq: rename the driver
cpufreq: qoriq: Make the driver usable on all QorIQ platforms

* pm-cpuidle:
intel_idle: mark cpu id array as __initconst
intel_idle: Add support for the Airmont Core in the Cherrytrail and Braswell SOCs
intel_idle: Update support for Silvermont Core in Baytrail SOC
ARM: cpuidle: Document the code
ARM: cpuidle: Register per cpuidle device
ARM: cpuidle: Enable the ARM64 driver for both ARM32/ARM64
ARM64: cpuidle: Remove arm64 reference
ARM64: cpuidle: Rename cpu_init_idle to a common function name
ARM64: cpuidle: Replace cpu_suspend by the common ARM/ARM64 function
ARM: cpuidle: Add a cpuidle ops structure to be used for DT
ARM: cpuidle: Remove duplicate header inclusion

+545 -172
+23
arch/arm/include/asm/cpuidle.h
··· 1 1 #ifndef __ASM_ARM_CPUIDLE_H 2 2 #define __ASM_ARM_CPUIDLE_H 3 3 4 + #include <asm/proc-fns.h> 5 + 4 6 #ifdef CONFIG_CPU_IDLE 5 7 extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev, 6 8 struct cpuidle_driver *drv, int index); ··· 26 24 * by some governors 27 25 */ 28 26 #define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX) 27 + 28 + struct device_node; 29 + 30 + struct cpuidle_ops { 31 + int (*suspend)(int cpu, unsigned long arg); 32 + int (*init)(struct device_node *, int cpu); 33 + }; 34 + 35 + struct of_cpuidle_method { 36 + const char *method; 37 + struct cpuidle_ops *ops; 38 + }; 39 + 40 + #define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \ 41 + static const struct of_cpuidle_method __cpuidle_method_of_table_##name \ 42 + __used __section(__cpuidle_method_of_table) \ 43 + = { .method = _method, .ops = _ops } 44 + 45 + extern int arm_cpuidle_suspend(int index); 46 + 47 + extern int arm_cpuidle_init(int cpu); 29 48 30 49 #endif
+132 -1
arch/arm/kernel/cpuidle.c
··· 10 10 */ 11 11 12 12 #include <linux/cpuidle.h> 13 - #include <asm/proc-fns.h> 13 + #include <linux/of.h> 14 + #include <linux/of_device.h> 15 + #include <asm/cpuidle.h> 14 16 17 + extern struct of_cpuidle_method __cpuidle_method_of_table[]; 18 + 19 + static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel 20 + __used __section(__cpuidle_method_of_table_end); 21 + 22 + static struct cpuidle_ops cpuidle_ops[NR_CPUS]; 23 + 24 + /** 25 + * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle() 26 + * @dev: not used 27 + * @drv: not used 28 + * @index: not used 29 + * 30 + * A trivial wrapper to allow the cpu_do_idle function to be assigned as a 31 + * cpuidle callback by matching the function signature. 32 + * 33 + * Returns the index passed as parameter 34 + */ 15 35 int arm_cpuidle_simple_enter(struct cpuidle_device *dev, 16 36 struct cpuidle_driver *drv, int index) 17 37 { 18 38 cpu_do_idle(); 19 39 20 40 return index; 41 + } 42 + 43 + /** 44 + * arm_cpuidle_suspend() - function to enter low power idle states 45 + * @index: an integer used as an identifier for the low level PM callbacks 46 + * 47 + * This function calls the underlying arch specific low level PM code as 48 + * registered at the init time. 49 + * 50 + * Returns -EOPNOTSUPP if no suspend callback is defined, the result of the 51 + * callback otherwise. 52 + */ 53 + int arm_cpuidle_suspend(int index) 54 + { 55 + int ret = -EOPNOTSUPP; 56 + int cpu = smp_processor_id(); 57 + 58 + if (cpuidle_ops[cpu].suspend) 59 + ret = cpuidle_ops[cpu].suspend(cpu, index); 60 + 61 + return ret; 62 + } 63 + 64 + /** 65 + * arm_cpuidle_get_ops() - find a registered cpuidle_ops by name 66 + * @method: the method name 67 + * 68 + * Search in the __cpuidle_method_of_table array the cpuidle ops matching the 69 + * method name. 70 + * 71 + * Returns a struct cpuidle_ops pointer, NULL if not found. 
72 + */ 73 + static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method) 74 + { 75 + struct of_cpuidle_method *m = __cpuidle_method_of_table; 76 + 77 + for (; m->method; m++) 78 + if (!strcmp(m->method, method)) 79 + return m->ops; 80 + 81 + return NULL; 82 + } 83 + 84 + /** 85 + * arm_cpuidle_read_ops() - Initialize the cpuidle ops with the device tree 86 + * @dn: a pointer to a struct device node corresponding to a cpu node 87 + * @cpu: the cpu identifier 88 + * 89 + * Get the method name defined in the 'enable-method' property, retrieve the 90 + * associated cpuidle_ops and do a struct copy. This copy is needed because all 91 + * cpuidle_ops are tagged __initdata and will be unloaded after the init 92 + * process. 93 + * 94 + * Return 0 on success, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if 95 + * no cpuidle_ops is registered for the 'enable-method'. 96 + */ 97 + static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu) 98 + { 99 + const char *enable_method; 100 + struct cpuidle_ops *ops; 101 + 102 + enable_method = of_get_property(dn, "enable-method", NULL); 103 + if (!enable_method) 104 + return -ENOENT; 105 + 106 + ops = arm_cpuidle_get_ops(enable_method); 107 + if (!ops) { 108 + pr_warn("%s: unsupported enable-method property: %s\n", 109 + dn->full_name, enable_method); 110 + return -EOPNOTSUPP; 111 + } 112 + 113 + cpuidle_ops[cpu] = *ops; /* structure copy */ 114 + 115 + pr_notice("cpuidle: enable-method property '%s'" 116 + " found operations\n", enable_method); 117 + 118 + return 0; 119 + } 120 + 121 + /** 122 + * arm_cpuidle_init() - Initialize cpuidle_ops for a specific cpu 123 + * @cpu: the cpu to be initialized 124 + * 125 + * Initialize the cpuidle ops with the device for the cpu and then call 126 + * the cpu's idle initialization callback. This may fail if the underlying HW 127 + * is not operational. 
128 + * 129 + * Returns: 130 + * 0 on success, 131 + * -ENODEV if it fails to find the cpu node in the device tree, 132 + * -EOPNOTSUPP if it does not find a registered cpuidle_ops for this cpu, 133 + * -ENOENT if it fails to find an 'enable-method' property, 134 + * -ENXIO if the HW reports a failure or a misconfiguration, 135 + * -ENOMEM if the HW reports a memory allocation failure 136 + */ 137 + int __init arm_cpuidle_init(int cpu) 138 + { 139 + struct device_node *cpu_node = of_cpu_device_node_get(cpu); 140 + int ret; 141 + 142 + if (!cpu_node) 143 + return -ENODEV; 144 + 145 + ret = arm_cpuidle_read_ops(cpu_node, cpu); 146 + if (!ret && cpuidle_ops[cpu].init) 147 + ret = cpuidle_ops[cpu].init(cpu_node, cpu); 148 + 149 + of_node_put(cpu_node); 150 + 151 + return ret; 21 152 }
-1
arch/arm/mach-davinci/cpuidle.c
··· 17 17 #include <linux/cpuidle.h> 18 18 #include <linux/io.h> 19 19 #include <linux/export.h> 20 - #include <asm/proc-fns.h> 21 20 #include <asm/cpuidle.h> 22 21 23 22 #include <mach/cpuidle.h>
-1
arch/arm/mach-imx/cpuidle-imx6q.c
··· 9 9 #include <linux/cpuidle.h> 10 10 #include <linux/module.h> 11 11 #include <asm/cpuidle.h> 12 - #include <asm/proc-fns.h> 13 12 14 13 #include "common.h" 15 14 #include "cpuidle.h"
-1
arch/arm/mach-imx/cpuidle-imx6sl.c
··· 9 9 #include <linux/cpuidle.h> 10 10 #include <linux/module.h> 11 11 #include <asm/cpuidle.h> 12 - #include <asm/proc-fns.h> 13 12 14 13 #include "common.h" 15 14 #include "cpuidle.h"
-1
arch/arm/mach-imx/cpuidle-imx6sx.c
··· 10 10 #include <linux/cpu_pm.h> 11 11 #include <linux/module.h> 12 12 #include <asm/cpuidle.h> 13 - #include <asm/proc-fns.h> 14 13 #include <asm/suspend.h> 15 14 16 15 #include "common.h"
-1
arch/arm/mach-omap2/cpuidle44xx.c
··· 17 17 #include <linux/clockchips.h> 18 18 19 19 #include <asm/cpuidle.h> 20 - #include <asm/proc-fns.h> 21 20 22 21 #include "common.h" 23 22 #include "pm.h"
+1 -1
arch/arm/mach-s3c64xx/cpuidle.c
··· 16 16 #include <linux/export.h> 17 17 #include <linux/time.h> 18 18 19 - #include <asm/proc-fns.h> 19 + #include <asm/cpuidle.h> 20 20 21 21 #include <mach/map.h> 22 22
-1
arch/arm/mach-tegra/cpuidle-tegra20.c
··· 27 27 #include <linux/module.h> 28 28 29 29 #include <asm/cpuidle.h> 30 - #include <asm/proc-fns.h> 31 30 #include <asm/smp_plat.h> 32 31 #include <asm/suspend.h> 33 32
-1
arch/arm/mach-tegra/cpuidle-tegra30.c
··· 27 27 #include <linux/module.h> 28 28 29 29 #include <asm/cpuidle.h> 30 - #include <asm/proc-fns.h> 31 30 #include <asm/smp_plat.h> 32 31 #include <asm/suspend.h> 33 32
+1 -1
arch/arm64/configs/defconfig
··· 48 48 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 49 49 CONFIG_COMPAT=y 50 50 CONFIG_CPU_IDLE=y 51 - CONFIG_ARM64_CPUIDLE=y 51 + CONFIG_ARM_CPUIDLE=y 52 52 CONFIG_NET=y 53 53 CONFIG_PACKET=y 54 54 CONFIG_UNIX=y
+6 -3
arch/arm64/include/asm/cpuidle.h
··· 4 4 #include <asm/proc-fns.h> 5 5 6 6 #ifdef CONFIG_CPU_IDLE 7 - extern int cpu_init_idle(unsigned int cpu); 7 + extern int arm_cpuidle_init(unsigned int cpu); 8 8 extern int cpu_suspend(unsigned long arg); 9 9 #else 10 - static inline int cpu_init_idle(unsigned int cpu) 10 + static inline int arm_cpuidle_init(unsigned int cpu) 11 11 { 12 12 return -EOPNOTSUPP; 13 13 } ··· 17 17 return -EOPNOTSUPP; 18 18 } 19 19 #endif 20 - 20 + static inline int arm_cpuidle_suspend(int index) 21 + { 22 + return cpu_suspend(index); 23 + } 21 24 #endif
+1 -1
arch/arm64/kernel/cpuidle.c
··· 15 15 #include <asm/cpuidle.h> 16 16 #include <asm/cpu_ops.h> 17 17 18 - int cpu_init_idle(unsigned int cpu) 18 + int arm_cpuidle_init(unsigned int cpu) 19 19 { 20 20 int ret = -EOPNOTSUPP; 21 21 struct device_node *cpu_node = of_cpu_device_node_get(cpu);
+8
drivers/cpufreq/Kconfig
··· 293 293 If unsure, say N. 294 294 endif 295 295 296 + config QORIQ_CPUFREQ 297 + tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" 298 + depends on OF && COMMON_CLK && (PPC_E500MC || ARM) 299 + select CLK_QORIQ 300 + help 301 + This adds the CPUFreq driver support for Freescale QorIQ SoCs 302 + which are capable of changing the CPU's frequency dynamically. 303 + 296 304 endif 297 305 endmenu
+9
drivers/cpufreq/Kconfig.arm
··· 108 108 109 109 If in doubt, say N. 110 110 111 + config ARM_HISI_ACPU_CPUFREQ 112 + tristate "Hisilicon ACPU CPUfreq driver" 113 + depends on ARCH_HISI && CPUFREQ_DT 114 + select PM_OPP 115 + help 116 + This enables the hisilicon ACPU CPUfreq driver. 117 + 118 + If in doubt, say N. 119 + 111 120 config ARM_IMX6Q_CPUFREQ 112 121 tristate "Freescale i.MX6 cpufreq support" 113 122 depends on ARCH_MXC
-9
drivers/cpufreq/Kconfig.powerpc
··· 23 23 This adds support for frequency switching on Maple 970FX 24 24 Evaluation Board and compatible boards (IBM JS2x blades). 25 25 26 - config PPC_CORENET_CPUFREQ 27 - tristate "CPU frequency scaling driver for Freescale E500MC SoCs" 28 - depends on PPC_E500MC && OF && COMMON_CLK 29 - select CLK_QORIQ 30 - help 31 - This adds the CPUFreq driver support for Freescale e500mc, 32 - e5500 and e6500 series SoCs which are capable of changing 33 - the CPU's frequency dynamically. 34 - 35 26 config CPU_FREQ_PMAC 36 27 bool "Support for Apple PowerBooks" 37 28 depends on ADB_PMU && PPC32
+2 -1
drivers/cpufreq/Makefile
··· 59 59 arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o 60 60 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o 61 61 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o 62 + obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ) += hisi-acpu-cpufreq.o 62 63 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o 63 64 obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o 64 65 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o ··· 86 85 ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o 87 86 obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o 88 87 obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o 89 - obj-$(CONFIG_PPC_CORENET_CPUFREQ) += ppc-corenet-cpufreq.o 88 + obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o 90 89 obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o 91 90 obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o 92 91 obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += pasemi-cpufreq.o
+42
drivers/cpufreq/hisi-acpu-cpufreq.c
··· 1 + /* 2 + * Hisilicon Platforms Using ACPU CPUFreq Support 3 + * 4 + * Copyright (c) 2015 Hisilicon Limited. 5 + * Copyright (c) 2015 Linaro Limited. 6 + * 7 + * Leo Yan <leo.yan@linaro.org> 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + * 13 + * This program is distributed "as is" WITHOUT ANY WARRANTY of any 14 + * kind, whether express or implied; without even the implied warranty 15 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + */ 18 + 19 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20 + 21 + #include <linux/err.h> 22 + #include <linux/init.h> 23 + #include <linux/kernel.h> 24 + #include <linux/module.h> 25 + #include <linux/of.h> 26 + #include <linux/platform_device.h> 27 + 28 + static int __init hisi_acpu_cpufreq_driver_init(void) 29 + { 30 + struct platform_device *pdev; 31 + 32 + if (!of_machine_is_compatible("hisilicon,hi6220")) 33 + return -ENODEV; 34 + 35 + pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0); 36 + return PTR_ERR_OR_ZERO(pdev); 37 + } 38 + module_init(hisi_acpu_cpufreq_driver_init); 39 + 40 + MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>"); 41 + MODULE_DESCRIPTION("Hisilicon acpu cpufreq driver"); 42 + MODULE_LICENSE("GPL v2");
+31 -14
drivers/cpufreq/intel_pstate.c
··· 614 614 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); 615 615 } 616 616 617 + static int knl_get_turbo_pstate(void) 618 + { 619 + u64 value; 620 + int nont, ret; 621 + 622 + rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); 623 + nont = core_get_max_pstate(); 624 + ret = (((value) >> 8) & 0xFF); 625 + if (ret <= nont) 626 + ret = nont; 627 + return ret; 628 + } 629 + 617 630 static struct cpu_defaults core_params = { 618 631 .pid_policy = { 619 632 .sample_rate_ms = 10, ··· 661 648 .set = byt_set_pstate, 662 649 .get_scaling = byt_get_scaling, 663 650 .get_vid = byt_get_vid, 651 + }, 652 + }; 653 + 654 + static struct cpu_defaults knl_params = { 655 + .pid_policy = { 656 + .sample_rate_ms = 10, 657 + .deadband = 0, 658 + .setpoint = 97, 659 + .p_gain_pct = 20, 660 + .d_gain_pct = 0, 661 + .i_gain_pct = 0, 662 + }, 663 + .funcs = { 664 + .get_max = core_get_max_pstate, 665 + .get_min = core_get_min_pstate, 666 + .get_turbo = knl_get_turbo_pstate, 667 + .set = core_set_pstate, 664 668 }, 665 669 }; 666 670 ··· 895 865 ICPU(0x4e, core_params), 896 866 ICPU(0x4f, core_params), 897 867 ICPU(0x56, core_params), 868 + ICPU(0x57, knl_params), 898 869 {} 899 870 }; 900 871 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); ··· 1055 1024 1056 1025 static int intel_pstate_msrs_not_valid(void) 1057 1026 { 1058 - /* Check that all the msr's we are using are valid. */ 1059 - u64 aperf, mperf, tmp; 1060 - 1061 - rdmsrl(MSR_IA32_APERF, aperf); 1062 - rdmsrl(MSR_IA32_MPERF, mperf); 1063 - 1064 1027 if (!pstate_funcs.get_max() || 1065 1028 !pstate_funcs.get_min() || 1066 1029 !pstate_funcs.get_turbo()) 1067 - return -ENODEV; 1068 - 1069 - rdmsrl(MSR_IA32_APERF, tmp); 1070 - if (!(tmp - aperf)) 1071 - return -ENODEV; 1072 - 1073 - rdmsrl(MSR_IA32_MPERF, tmp); 1074 - if (!(tmp - mperf)) 1075 1030 return -ENODEV; 1076 1031 1077 1032 return 0;
+46 -1
drivers/cpufreq/powernv-cpufreq.c
··· 34 34 #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */ 35 35 36 36 #define POWERNV_MAX_PSTATES 256 37 + #define PMSR_PSAFE_ENABLE (1UL << 30) 38 + #define PMSR_SPR_EM_DISABLE (1UL << 31) 39 + #define PMSR_MAX(x) ((x >> 32) & 0xFF) 40 + #define PMSR_LP(x) ((x >> 48) & 0xFF) 37 41 38 42 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; 39 - static bool rebooting; 43 + static bool rebooting, throttled; 40 44 41 45 /* 42 46 * Note: The set of pstates consists of contiguous integers, the ··· 298 294 return powernv_pstate_info.max - powernv_pstate_info.nominal; 299 295 } 300 296 297 + static void powernv_cpufreq_throttle_check(unsigned int cpu) 298 + { 299 + unsigned long pmsr; 300 + int pmsr_pmax, pmsr_lp; 301 + 302 + pmsr = get_pmspr(SPRN_PMSR); 303 + 304 + /* Check for Pmax Capping */ 305 + pmsr_pmax = (s8)PMSR_MAX(pmsr); 306 + if (pmsr_pmax != powernv_pstate_info.max) { 307 + throttled = true; 308 + pr_info("CPU %d Pmax is reduced to %d\n", cpu, pmsr_pmax); 309 + pr_info("Max allowed Pstate is capped\n"); 310 + } 311 + 312 + /* 313 + * Check for Psafe by reading LocalPstate 314 + * or check if Psafe_mode_active is set in PMSR. 
315 + */ 316 + pmsr_lp = (s8)PMSR_LP(pmsr); 317 + if ((pmsr_lp < powernv_pstate_info.min) || 318 + (pmsr & PMSR_PSAFE_ENABLE)) { 319 + throttled = true; 320 + pr_info("Pstate set to safe frequency\n"); 321 + } 322 + 323 + /* Check if SPR_EM_DISABLE is set in PMSR */ 324 + if (pmsr & PMSR_SPR_EM_DISABLE) { 325 + throttled = true; 326 + pr_info("Frequency Control disabled from OS\n"); 327 + } 328 + 329 + if (throttled) { 330 + pr_info("PMSR = %16lx\n", pmsr); 331 + pr_crit("CPU Frequency could be throttled\n"); 332 + } 333 + } 334 + 301 335 /* 302 336 * powernv_cpufreq_target_index: Sets the frequency corresponding to 303 337 * the cpufreq table entry indexed by new_index on the cpus in the ··· 348 306 349 307 if (unlikely(rebooting) && new_index != get_nominal_index()) 350 308 return 0; 309 + 310 + if (!throttled) 311 + powernv_cpufreq_throttle_check(smp_processor_id()); 351 312 352 313 freq_data.pstate_id = powernv_freqs[new_index].driver_data; 353 314
+102 -63
drivers/cpufreq/ppc-corenet-cpufreq.c drivers/cpufreq/qoriq-cpufreq.c
··· 1 1 /* 2 2 * Copyright 2013 Freescale Semiconductor, Inc. 3 3 * 4 - * CPU Frequency Scaling driver for Freescale PowerPC corenet SoCs. 4 + * CPU Frequency Scaling driver for Freescale QorIQ SoCs. 5 5 * 6 6 * This program is free software; you can redistribute it and/or modify 7 7 * it under the terms of the GNU General Public License version 2 as ··· 20 20 #include <linux/of.h> 21 21 #include <linux/slab.h> 22 22 #include <linux/smp.h> 23 - #include <sysdev/fsl_soc.h> 24 23 24 + #if !defined(CONFIG_ARM) 25 25 #include <asm/smp.h> /* for get_hard_smp_processor_id() in UP configs */ 26 + #endif 26 27 27 28 /** 28 - * struct cpu_data - per CPU data struct 29 + * struct cpu_data 29 30 * @parent: the parent node of cpu clock 30 31 * @table: frequency table 31 32 */ ··· 70 69 static u32 min_cpufreq; 71 70 static const u32 *fmask; 72 71 73 - static DEFINE_PER_CPU(struct cpu_data *, cpu_data); 74 - 75 - /* cpumask in a cluster */ 76 - static DEFINE_PER_CPU(cpumask_var_t, cpu_mask); 77 - 78 - #ifndef CONFIG_SMP 79 - static inline const struct cpumask *cpu_core_mask(int cpu) 72 + #if defined(CONFIG_ARM) 73 + static int get_cpu_physical_id(int cpu) 80 74 { 81 - return cpumask_of(0); 75 + return topology_core_id(cpu); 76 + } 77 + #else 78 + static int get_cpu_physical_id(int cpu) 79 + { 80 + return get_hard_smp_processor_id(cpu); 82 81 } 83 82 #endif 83 + 84 + static u32 get_bus_freq(void) 85 + { 86 + struct device_node *soc; 87 + u32 sysfreq; 88 + 89 + soc = of_find_node_by_type(NULL, "soc"); 90 + if (!soc) 91 + return 0; 92 + 93 + if (of_property_read_u32(soc, "bus-frequency", &sysfreq)) 94 + sysfreq = 0; 95 + 96 + of_node_put(soc); 97 + 98 + return sysfreq; 99 + } 100 + 101 + static struct device_node *cpu_to_clk_node(int cpu) 102 + { 103 + struct device_node *np, *clk_np; 104 + 105 + if (!cpu_present(cpu)) 106 + return NULL; 107 + 108 + np = of_get_cpu_node(cpu, NULL); 109 + if (!np) 110 + return NULL; 111 + 112 + clk_np = of_parse_phandle(np, "clocks", 0); 113 + if 
(!clk_np) 114 + return NULL; 115 + 116 + of_node_put(np); 117 + 118 + return clk_np; 119 + } 120 + 121 + /* traverse cpu nodes to get cpu mask of sharing clock wire */ 122 + static void set_affected_cpus(struct cpufreq_policy *policy) 123 + { 124 + struct device_node *np, *clk_np; 125 + struct cpumask *dstp = policy->cpus; 126 + int i; 127 + 128 + np = cpu_to_clk_node(policy->cpu); 129 + if (!np) 130 + return; 131 + 132 + for_each_present_cpu(i) { 133 + clk_np = cpu_to_clk_node(i); 134 + if (!clk_np) 135 + continue; 136 + 137 + if (clk_np == np) 138 + cpumask_set_cpu(i, dstp); 139 + 140 + of_node_put(clk_np); 141 + } 142 + of_node_put(np); 143 + } 84 144 85 145 /* reduce the duplicated frequencies in frequency table */ 86 146 static void freq_table_redup(struct cpufreq_frequency_table *freq_table, ··· 169 107 int i, j, ind; 170 108 unsigned int freq, max_freq; 171 109 struct cpufreq_frequency_table table; 110 + 172 111 for (i = 0; i < count - 1; i++) { 173 112 max_freq = freq_table[i].frequency; 174 113 ind = i; ··· 194 131 } 195 132 } 196 133 197 - static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) 134 + static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy) 198 135 { 199 136 struct device_node *np; 200 137 int i, count, ret; ··· 210 147 return -ENODEV; 211 148 212 149 data = kzalloc(sizeof(*data), GFP_KERNEL); 213 - if (!data) { 214 - pr_err("%s: no memory\n", __func__); 150 + if (!data) 215 151 goto err_np; 216 - } 217 152 218 153 policy->clk = of_clk_get(np, 0); 219 154 if (IS_ERR(policy->clk)) { ··· 233 172 } 234 173 235 174 if (fmask) 236 - mask = fmask[get_hard_smp_processor_id(cpu)]; 175 + mask = fmask[get_cpu_physical_id(cpu)]; 237 176 else 238 177 mask = 0x0; 239 178 ··· 264 203 data->table = table; 265 204 266 205 /* update ->cpus if we have cluster, no harm if not */ 267 - cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu)); 268 - for_each_cpu(i, per_cpu(cpu_mask, cpu)) 269 - per_cpu(cpu_data, i) = data; 206 + 
set_affected_cpus(policy); 207 + policy->driver_data = data; 270 208 271 209 /* Minimum transition latency is 12 platform clocks */ 272 210 u64temp = 12ULL * NSEC_PER_SEC; 273 - do_div(u64temp, fsl_get_sys_freq()); 211 + do_div(u64temp, get_bus_freq()); 274 212 policy->cpuinfo.transition_latency = u64temp + 1; 275 213 276 214 of_node_put(np); ··· 281 221 err_node: 282 222 of_node_put(data->parent); 283 223 err_nomem2: 284 - per_cpu(cpu_data, cpu) = NULL; 224 + policy->driver_data = NULL; 285 225 kfree(data); 286 226 err_np: 287 227 of_node_put(np); ··· 289 229 return -ENODEV; 290 230 } 291 231 292 - static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy) 232 + static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) 293 233 { 294 - struct cpu_data *data = per_cpu(cpu_data, policy->cpu); 295 - unsigned int cpu; 234 + struct cpu_data *data = policy->driver_data; 296 235 297 236 of_node_put(data->parent); 298 237 kfree(data->table); 299 238 kfree(data); 300 - 301 - for_each_cpu(cpu, per_cpu(cpu_mask, policy->cpu)) 302 - per_cpu(cpu_data, cpu) = NULL; 239 + policy->driver_data = NULL; 303 240 304 241 return 0; 305 242 } 306 243 307 - static int corenet_cpufreq_target(struct cpufreq_policy *policy, 244 + static int qoriq_cpufreq_target(struct cpufreq_policy *policy, 308 245 unsigned int index) 309 246 { 310 247 struct clk *parent; 311 - struct cpu_data *data = per_cpu(cpu_data, policy->cpu); 248 + struct cpu_data *data = policy->driver_data; 312 249 313 250 parent = of_clk_get(data->parent, data->table[index].driver_data); 314 251 return clk_set_parent(policy->clk, parent); 315 252 } 316 253 317 - static struct cpufreq_driver ppc_corenet_cpufreq_driver = { 318 - .name = "ppc_cpufreq", 254 + static struct cpufreq_driver qoriq_cpufreq_driver = { 255 + .name = "qoriq_cpufreq", 319 256 .flags = CPUFREQ_CONST_LOOPS, 320 - .init = corenet_cpufreq_cpu_init, 321 - .exit = __exit_p(corenet_cpufreq_cpu_exit), 257 + .init = qoriq_cpufreq_cpu_init, 
258 + .exit = __exit_p(qoriq_cpufreq_cpu_exit), 322 259 .verify = cpufreq_generic_frequency_table_verify, 323 - .target_index = corenet_cpufreq_target, 260 + .target_index = qoriq_cpufreq_target, 324 261 .get = cpufreq_generic_get, 325 262 .attr = cpufreq_generic_attr, 326 263 }; 327 264 328 - static const struct of_device_id node_matches[] __initdata = { 265 + static const struct of_device_id node_matches[] __initconst = { 329 266 { .compatible = "fsl,p2041-clockgen", .data = &sdata[0], }, 330 267 { .compatible = "fsl,p3041-clockgen", .data = &sdata[0], }, 331 268 { .compatible = "fsl,p5020-clockgen", .data = &sdata[1], }, ··· 332 275 {} 333 276 }; 334 277 335 - static int __init ppc_corenet_cpufreq_init(void) 278 + static int __init qoriq_cpufreq_init(void) 336 279 { 337 280 int ret; 338 281 struct device_node *np; 339 282 const struct of_device_id *match; 340 283 const struct soc_data *data; 341 - unsigned int cpu; 342 284 343 285 np = of_find_matching_node(NULL, node_matches); 344 286 if (!np) 345 287 return -ENODEV; 346 - 347 - for_each_possible_cpu(cpu) { 348 - if (!alloc_cpumask_var(&per_cpu(cpu_mask, cpu), GFP_KERNEL)) 349 - goto err_mask; 350 - cpumask_copy(per_cpu(cpu_mask, cpu), cpu_core_mask(cpu)); 351 - } 352 288 353 289 match = of_match_node(node_matches, np); 354 290 data = match->data; 355 291 if (data) { 356 292 if (data->flag) 357 293 fmask = data->freq_mask; 358 - min_cpufreq = fsl_get_sys_freq(); 294 + min_cpufreq = get_bus_freq(); 359 295 } else { 360 - min_cpufreq = fsl_get_sys_freq() / 2; 296 + min_cpufreq = get_bus_freq() / 2; 361 297 } 362 298 363 299 of_node_put(np); 364 300 365 - ret = cpufreq_register_driver(&ppc_corenet_cpufreq_driver); 301 + ret = cpufreq_register_driver(&qoriq_cpufreq_driver); 366 302 if (!ret) 367 - pr_info("Freescale PowerPC corenet CPU frequency scaling driver\n"); 303 + pr_info("Freescale QorIQ CPU frequency scaling driver\n"); 368 304 369 305 return ret; 370 - 371 - err_mask: 372 - for_each_possible_cpu(cpu) 373 
- free_cpumask_var(per_cpu(cpu_mask, cpu)); 374 - 375 - return -ENOMEM; 376 306 } 377 - module_init(ppc_corenet_cpufreq_init); 307 + module_init(qoriq_cpufreq_init); 378 308 379 - static void __exit ppc_corenet_cpufreq_exit(void) 309 + static void __exit qoriq_cpufreq_exit(void) 380 310 { 381 - unsigned int cpu; 382 - 383 - for_each_possible_cpu(cpu) 384 - free_cpumask_var(per_cpu(cpu_mask, cpu)); 385 - 386 - cpufreq_unregister_driver(&ppc_corenet_cpufreq_driver); 311 + cpufreq_unregister_driver(&qoriq_cpufreq_driver); 387 312 } 388 - module_exit(ppc_corenet_cpufreq_exit); 313 + module_exit(qoriq_cpufreq_exit); 389 314 390 315 MODULE_LICENSE("GPL"); 391 316 MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>"); 392 - MODULE_DESCRIPTION("cpufreq driver for Freescale e500mc series SoCs"); 317 + MODULE_DESCRIPTION("cpufreq driver for Freescale QorIQ series SoCs");
+1 -6
drivers/cpuidle/Kconfig
··· 29 29 bool 30 30 31 31 menu "ARM CPU Idle Drivers" 32 - depends on ARM 32 + depends on ARM || ARM64 33 33 source "drivers/cpuidle/Kconfig.arm" 34 - endmenu 35 - 36 - menu "ARM64 CPU Idle Drivers" 37 - depends on ARM64 38 - source "drivers/cpuidle/Kconfig.arm64" 39 34 endmenu 40 35 41 36 menu "MIPS CPU Idle Drivers"
+19 -9
drivers/cpuidle/Kconfig.arm
··· 1 1 # 2 2 # ARM CPU Idle drivers 3 3 # 4 + config ARM_CPUIDLE 5 + bool "Generic ARM/ARM64 CPU idle Driver" 6 + select DT_IDLE_STATES 7 + help 8 + Select this to enable generic cpuidle driver for ARM. 9 + It provides a generic idle driver whose idle states are configured 10 + at run-time through DT nodes. The CPUidle suspend backend is 11 + initialized by calling the CPU operations init idle hook 12 + provided by architecture code. 13 + 4 14 config ARM_BIG_LITTLE_CPUIDLE 5 15 bool "Support for ARM big.LITTLE processors" 6 16 depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS 7 - depends on MCPM 17 + depends on MCPM && !ARM64 8 18 select ARM_CPU_SUSPEND 9 19 select CPU_IDLE_MULTIPLE_DRIVERS 10 20 select DT_IDLE_STATES ··· 26 16 27 17 config ARM_CLPS711X_CPUIDLE 28 18 bool "CPU Idle Driver for CLPS711X processors" 29 - depends on ARCH_CLPS711X || COMPILE_TEST 19 + depends on ARCH_CLPS711X && !ARM64 || COMPILE_TEST 30 20 help 31 21 Select this to enable cpuidle on Cirrus Logic CLPS711X SOCs. 32 22 33 23 config ARM_HIGHBANK_CPUIDLE 34 24 bool "CPU Idle Driver for Calxeda processors" 35 - depends on ARM_PSCI 25 + depends on ARM_PSCI && !ARM64 36 26 select ARM_CPU_SUSPEND 37 27 help 38 28 Select this to enable cpuidle on Calxeda processors. 39 29 40 30 config ARM_KIRKWOOD_CPUIDLE 41 31 bool "CPU Idle Driver for Marvell Kirkwood SoCs" 42 - depends on MACH_KIRKWOOD 32 + depends on MACH_KIRKWOOD && !ARM64 43 33 help 44 34 This adds the CPU Idle driver for Marvell Kirkwood SoCs. 45 35 46 36 config ARM_ZYNQ_CPUIDLE 47 37 bool "CPU Idle Driver for Xilinx Zynq processors" 48 - depends on ARCH_ZYNQ 38 + depends on ARCH_ZYNQ && !ARM64 49 39 help 50 40 Select this to enable cpuidle on Xilinx Zynq processors. 
51 41 52 42 config ARM_U8500_CPUIDLE 53 43 bool "Cpu Idle Driver for the ST-E u8500 processors" 54 - depends on ARCH_U8500 44 + depends on ARCH_U8500 && !ARM64 55 45 help 56 46 Select this to enable cpuidle for ST-E u8500 processors 57 47 58 48 config ARM_AT91_CPUIDLE 59 49 bool "Cpu Idle Driver for the AT91 processors" 60 50 default y 61 - depends on ARCH_AT91 51 + depends on ARCH_AT91 && !ARM64 62 52 help 63 53 Select this to enable cpuidle for AT91 processors 64 54 65 55 config ARM_EXYNOS_CPUIDLE 66 56 bool "Cpu Idle Driver for the Exynos processors" 67 - depends on ARCH_EXYNOS 57 + depends on ARCH_EXYNOS && !ARM64 68 58 select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP 69 59 help 70 60 Select this to enable cpuidle for Exynos processors 71 61 72 62 config ARM_MVEBU_V7_CPUIDLE 73 63 bool "CPU Idle Driver for mvebu v7 family processors" 74 - depends on ARCH_MVEBU 64 + depends on ARCH_MVEBU && !ARM64 75 65 help 76 66 Select this to enable cpuidle on Armada 370, 38x and XP processors.
-13
drivers/cpuidle/Kconfig.arm64
··· 1 - # 2 - # ARM64 CPU Idle drivers 3 - # 4 - 5 - config ARM64_CPUIDLE 6 - bool "Generic ARM64 CPU idle Driver" 7 - select DT_IDLE_STATES 8 - help 9 - Select this to enable generic cpuidle driver for ARM64. 10 - It provides a generic idle driver whose idle states are configured 11 - at run-time through DT nodes. The CPUidle suspend backend is 12 - initialized by calling the CPU operations init idle hook 13 - provided by architecture code.
+1 -4
drivers/cpuidle/Makefile
··· 17 17 obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o 18 18 obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o 19 19 obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o 20 + obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o 20 21 21 22 ############################################################################### 22 23 # MIPS drivers 23 24 obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o 24 - 25 - ############################################################################### 26 - # ARM64 drivers 27 - obj-$(CONFIG_ARM64_CPUIDLE) += cpuidle-arm64.o 28 25 29 26 ############################################################################### 30 27 # POWERPC drivers
+62 -21
drivers/cpuidle/cpuidle-arm64.c drivers/cpuidle/cpuidle-arm.c
··· 1 1 /* 2 - * ARM64 generic CPU idle driver. 2 + * ARM/ARM64 generic CPU idle driver. 3 3 * 4 4 * Copyright (C) 2014 ARM Ltd. 5 5 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> ··· 9 9 * published by the Free Software Foundation. 10 10 */ 11 11 12 - #define pr_fmt(fmt) "CPUidle arm64: " fmt 12 + #define pr_fmt(fmt) "CPUidle arm: " fmt 13 13 14 14 #include <linux/cpuidle.h> 15 15 #include <linux/cpumask.h> ··· 17 17 #include <linux/kernel.h> 18 18 #include <linux/module.h> 19 19 #include <linux/of.h> 20 + #include <linux/slab.h> 20 21 21 22 #include <asm/cpuidle.h> 22 23 23 24 #include "dt_idle_states.h" 24 25 25 26 /* 26 - * arm64_enter_idle_state - Programs CPU to enter the specified state 27 + * arm_enter_idle_state - Programs CPU to enter the specified state 27 28 * 28 29 * dev: cpuidle device 29 30 * drv: cpuidle driver ··· 33 32 * Called from the CPUidle framework to program the device to the 34 33 * specified target state selected by the governor. 35 34 */ 36 - static int arm64_enter_idle_state(struct cpuidle_device *dev, 37 - struct cpuidle_driver *drv, int idx) 35 + static int arm_enter_idle_state(struct cpuidle_device *dev, 36 + struct cpuidle_driver *drv, int idx) 38 37 { 39 38 int ret; 40 39 ··· 50 49 * call the CPU ops suspend protocol with idle index as a 51 50 * parameter. 52 51 */ 53 - ret = cpu_suspend(idx); 52 + arm_cpuidle_suspend(idx); 54 53 55 54 cpu_pm_exit(); 56 55 } ··· 58 57 return ret ? -1 : idx; 59 58 } 60 59 61 - static struct cpuidle_driver arm64_idle_driver = { 62 - .name = "arm64_idle", 60 + static struct cpuidle_driver arm_idle_driver = { 61 + .name = "arm_idle", 63 62 .owner = THIS_MODULE, 64 63 /* 65 64 * State at index 0 is standby wfi and considered standard ··· 69 68 * handler for idle state index 0. 
70 69 */ 71 70 .states[0] = { 72 - .enter = arm64_enter_idle_state, 71 + .enter = arm_enter_idle_state, 73 72 .exit_latency = 1, 74 73 .target_residency = 1, 75 74 .power_usage = UINT_MAX, 76 75 .name = "WFI", 77 - .desc = "ARM64 WFI", 76 + .desc = "ARM WFI", 78 77 } 79 78 }; 80 79 81 - static const struct of_device_id arm64_idle_state_match[] __initconst = { 80 + static const struct of_device_id arm_idle_state_match[] __initconst = { 82 81 { .compatible = "arm,idle-state", 83 - .data = arm64_enter_idle_state }, 82 + .data = arm_enter_idle_state }, 84 83 { }, 85 84 }; 86 85 87 86 /* 88 - * arm64_idle_init 87 + * arm_idle_init 89 88 * 90 - * Registers the arm64 specific cpuidle driver with the cpuidle 89 + * Registers the arm specific cpuidle driver with the cpuidle 91 90 * framework. It relies on core code to parse the idle states 92 91 * and initialize them using driver data structures accordingly. 93 92 */ 94 - static int __init arm64_idle_init(void) 93 + static int __init arm_idle_init(void) 95 94 { 96 95 int cpu, ret; 97 - struct cpuidle_driver *drv = &arm64_idle_driver; 96 + struct cpuidle_driver *drv = &arm_idle_driver; 97 + struct cpuidle_device *dev; 98 98 99 99 /* 100 100 * Initialize idle states data, starting at index 1. ··· 103 101 * let the driver initialization fail accordingly since there is no 104 102 * reason to initialize the idle driver if only wfi is supported. 105 103 */ 106 - ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1); 104 + ret = dt_init_idle_driver(drv, arm_idle_state_match, 1); 107 105 if (ret <= 0) 108 106 return ret ? 
: -ENODEV; 107 + 108 + ret = cpuidle_register_driver(drv); 109 + if (ret) { 110 + pr_err("Failed to register cpuidle driver\n"); 111 + return ret; 112 + } 109 113 110 114 /* 111 115 * Call arch CPU operations in order to initialize 112 116 * idle states suspend back-end specific data 113 117 */ 114 118 for_each_possible_cpu(cpu) { 115 - ret = cpu_init_idle(cpu); 119 + ret = arm_cpuidle_init(cpu); 120 + 121 + /* 122 + * Skip the cpuidle device initialization if the reported 123 + * failure is a HW misconfiguration/breakage (-ENXIO). 124 + */ 125 + if (ret == -ENXIO) 126 + continue; 127 + 116 128 if (ret) { 117 129 pr_err("CPU %d failed to init idle CPU ops\n", cpu); 118 - return ret; 130 + goto out_fail; 131 + } 132 + 133 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 134 + if (!dev) { 135 + pr_err("Failed to allocate cpuidle device\n"); 136 + goto out_fail; 137 + } 138 + dev->cpu = cpu; 139 + 140 + ret = cpuidle_register_device(dev); 141 + if (ret) { 142 + pr_err("Failed to register cpuidle device for CPU %d\n", 143 + cpu); 144 + kfree(dev); 145 + goto out_fail; 119 146 } 120 147 } 121 148 122 - return cpuidle_register(drv, NULL); 149 + return 0; 150 + out_fail: 151 + while (--cpu >= 0) { 152 + dev = per_cpu(cpuidle_devices, cpu); 153 + cpuidle_unregister_device(dev); 154 + kfree(dev); 155 + } 156 + 157 + cpuidle_unregister_driver(drv); 158 + 159 + return ret; 123 160 } 124 - device_initcall(arm64_idle_init); 161 + device_initcall(arm_idle_init);
-1
drivers/cpuidle/cpuidle-at91.c
··· 19 19 #include <linux/cpuidle.h> 20 20 #include <linux/io.h> 21 21 #include <linux/export.h> 22 - #include <asm/proc-fns.h> 23 22 #include <asm/cpuidle.h> 24 23 25 24 #define AT91_MAX_STATES 2
-1
drivers/cpuidle/cpuidle-exynos.c
··· 19 19 #include <linux/of.h> 20 20 #include <linux/platform_data/cpuidle-exynos.h> 21 21 22 - #include <asm/proc-fns.h> 23 22 #include <asm/suspend.h> 24 23 #include <asm/cpuidle.h> 25 24
-1
drivers/cpuidle/cpuidle-kirkwood.c
··· 21 21 #include <linux/cpuidle.h> 22 22 #include <linux/io.h> 23 23 #include <linux/export.h> 24 - #include <asm/proc-fns.h> 25 24 #include <asm/cpuidle.h> 26 25 27 26 #define KIRKWOOD_MAX_STATES 2
-1
drivers/cpuidle/cpuidle-ux500.c
··· 19 19 #include <linux/platform_device.h> 20 20 21 21 #include <asm/cpuidle.h> 22 - #include <asm/proc-fns.h> 23 22 24 23 static atomic_t master = ATOMIC_INIT(0); 25 24 static DEFINE_SPINLOCK(master_lock);
-1
drivers/cpuidle/cpuidle-zynq.c
··· 28 28 #include <linux/init.h> 29 29 #include <linux/cpuidle.h> 30 30 #include <linux/platform_device.h> 31 - #include <asm/proc-fns.h> 32 31 #include <asm/cpuidle.h> 33 32 34 33 #define ZYNQ_MAX_STATES 2
+56 -12
drivers/idle/intel_idle.c
··· 218 218 .enter = &intel_idle, 219 219 .enter_freeze = intel_idle_freeze, }, 220 220 { 221 - .name = "C1E-BYT", 222 - .desc = "MWAIT 0x01", 223 - .flags = MWAIT2flg(0x01), 224 - .exit_latency = 15, 225 - .target_residency = 30, 226 - .enter = &intel_idle, 227 - .enter_freeze = intel_idle_freeze, }, 228 - { 229 221 .name = "C6N-BYT", 230 222 .desc = "MWAIT 0x58", 231 223 .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, 232 - .exit_latency = 40, 224 + .exit_latency = 300, 233 225 .target_residency = 275, 234 226 .enter = &intel_idle, 235 227 .enter_freeze = intel_idle_freeze, }, ··· 229 237 .name = "C6S-BYT", 230 238 .desc = "MWAIT 0x52", 231 239 .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, 232 - .exit_latency = 140, 240 + .exit_latency = 500, 233 241 .target_residency = 560, 234 242 .enter = &intel_idle, 235 243 .enter_freeze = intel_idle_freeze, }, ··· 238 246 .desc = "MWAIT 0x60", 239 247 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, 240 248 .exit_latency = 1200, 241 - .target_residency = 1500, 249 + .target_residency = 4000, 242 250 .enter = &intel_idle, 243 251 .enter_freeze = intel_idle_freeze, }, 244 252 { 245 253 .name = "C7S-BYT", 254 + .desc = "MWAIT 0x64", 255 + .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, 256 + .exit_latency = 10000, 257 + .target_residency = 20000, 258 + .enter = &intel_idle, 259 + .enter_freeze = intel_idle_freeze, }, 260 + { 261 + .enter = NULL } 262 + }; 263 + 264 + static struct cpuidle_state cht_cstates[] = { 265 + { 266 + .name = "C1-CHT", 267 + .desc = "MWAIT 0x00", 268 + .flags = MWAIT2flg(0x00), 269 + .exit_latency = 1, 270 + .target_residency = 1, 271 + .enter = &intel_idle, 272 + .enter_freeze = intel_idle_freeze, }, 273 + { 274 + .name = "C6N-CHT", 275 + .desc = "MWAIT 0x58", 276 + .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, 277 + .exit_latency = 80, 278 + .target_residency = 275, 279 + .enter = &intel_idle, 280 + .enter_freeze = intel_idle_freeze, }, 281 + { 282 + .name = "C6S-CHT", 283 
+ .desc = "MWAIT 0x52", 284 + .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, 285 + .exit_latency = 200, 286 + .target_residency = 560, 287 + .enter = &intel_idle, 288 + .enter_freeze = intel_idle_freeze, }, 289 + { 290 + .name = "C7-CHT", 291 + .desc = "MWAIT 0x60", 292 + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, 293 + .exit_latency = 1200, 294 + .target_residency = 4000, 295 + .enter = &intel_idle, 296 + .enter_freeze = intel_idle_freeze, }, 297 + { 298 + .name = "C7S-CHT", 246 299 .desc = "MWAIT 0x64", 247 300 .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, 248 301 .exit_latency = 10000, ··· 785 748 .byt_auto_demotion_disable_flag = true, 786 749 }; 787 750 751 + static const struct idle_cpu idle_cpu_cht = { 752 + .state_table = cht_cstates, 753 + .disable_promotion_to_c1e = true, 754 + .byt_auto_demotion_disable_flag = true, 755 + }; 756 + 788 757 static const struct idle_cpu idle_cpu_ivb = { 789 758 .state_table = ivb_cstates, 790 759 .disable_promotion_to_c1e = true, ··· 819 776 #define ICPU(model, cpu) \ 820 777 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } 821 778 822 - static const struct x86_cpu_id intel_idle_ids[] = { 779 + static const struct x86_cpu_id intel_idle_ids[] __initconst = { 823 780 ICPU(0x1a, idle_cpu_nehalem), 824 781 ICPU(0x1e, idle_cpu_nehalem), 825 782 ICPU(0x1f, idle_cpu_nehalem), ··· 833 790 ICPU(0x2d, idle_cpu_snb), 834 791 ICPU(0x36, idle_cpu_atom), 835 792 ICPU(0x37, idle_cpu_byt), 793 + ICPU(0x4c, idle_cpu_cht), 836 794 ICPU(0x3a, idle_cpu_ivb), 837 795 ICPU(0x3e, idle_cpu_ivt), 838 796 ICPU(0x3c, idle_cpu_hsw),
+2
include/asm-generic/vmlinux.lds.h
··· 167 167 #define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) 168 168 #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) 169 169 #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) 170 + #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method) 170 171 #define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon) 171 172 172 173 #define KERNEL_DTB() \ ··· 502 501 CLKSRC_OF_TABLES() \ 503 502 IOMMU_OF_TABLES() \ 504 503 CPU_METHOD_OF_TABLES() \ 504 + CPUIDLE_METHOD_OF_TABLES() \ 505 505 KERNEL_DTB() \ 506 506 IRQCHIP_OF_MATCH_TABLE() \ 507 507 EARLYCON_OF_TABLES()