Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge back earlier cpufreq material for v4.7.

+1940 -789
+5
Documentation/kernel-parameters.txt
··· 1661 1661 hwp_only 1662 1662 Only load intel_pstate on systems which support 1663 1663 hardware P state control (HWP) if available. 1664 + support_acpi_ppc 1665 + Enforce ACPI _PPC performance limits. If the Fixed ACPI 1666 + Description Table, specifies preferred power management 1667 + profile as "Enterprise Server" or "Performance Server", 1668 + then this feature is turned on by default. 1664 1669 1665 1670 intremap= [X86-64, Intel-IOMMU] 1666 1671 on enable Interrupt Remapping (default)
-6
arch/arm/mach-berlin/berlin.c
··· 18 18 #include <asm/hardware/cache-l2x0.h> 19 19 #include <asm/mach/arch.h> 20 20 21 - static void __init berlin_init_late(void) 22 - { 23 - platform_device_register_simple("cpufreq-dt", -1, NULL, 0); 24 - } 25 - 26 21 static const char * const berlin_dt_compat[] = { 27 22 "marvell,berlin", 28 23 NULL, ··· 25 30 26 31 DT_MACHINE_START(BERLIN_DT, "Marvell Berlin") 27 32 .dt_compat = berlin_dt_compat, 28 - .init_late = berlin_init_late, 29 33 /* 30 34 * with DT probing for L2CCs, berlin_init_machine can be removed. 31 35 * Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
-29
arch/arm/mach-exynos/exynos.c
··· 213 213 exynos_map_pmu(); 214 214 } 215 215 216 - static const struct of_device_id exynos_cpufreq_matches[] = { 217 - { .compatible = "samsung,exynos3250", .data = "cpufreq-dt" }, 218 - { .compatible = "samsung,exynos4210", .data = "cpufreq-dt" }, 219 - { .compatible = "samsung,exynos4212", .data = "cpufreq-dt" }, 220 - { .compatible = "samsung,exynos4412", .data = "cpufreq-dt" }, 221 - { .compatible = "samsung,exynos5250", .data = "cpufreq-dt" }, 222 - #ifndef CONFIG_BL_SWITCHER 223 - { .compatible = "samsung,exynos5420", .data = "cpufreq-dt" }, 224 - { .compatible = "samsung,exynos5800", .data = "cpufreq-dt" }, 225 - #endif 226 - { /* sentinel */ } 227 - }; 228 - 229 - static void __init exynos_cpufreq_init(void) 230 - { 231 - struct device_node *root = of_find_node_by_path("/"); 232 - const struct of_device_id *match; 233 - 234 - match = of_match_node(exynos_cpufreq_matches, root); 235 - if (!match) { 236 - platform_device_register_simple("exynos-cpufreq", -1, NULL, 0); 237 - return; 238 - } 239 - 240 - platform_device_register_simple(match->data, -1, NULL, 0); 241 - } 242 - 243 216 static void __init exynos_dt_machine_init(void) 244 217 { 245 218 /* ··· 234 261 of_machine_is_compatible("samsung,exynos3250") || 235 262 of_machine_is_compatible("samsung,exynos5250")) 236 263 platform_device_register(&exynos_cpuidle); 237 - 238 - exynos_cpufreq_init(); 239 264 240 265 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 241 266 }
-10
arch/arm/mach-imx/imx27-dt.c
··· 18 18 #include "common.h" 19 19 #include "mx27.h" 20 20 21 - static void __init imx27_dt_init(void) 22 - { 23 - struct platform_device_info devinfo = { .name = "cpufreq-dt", }; 24 - 25 - of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 26 - 27 - platform_device_register_full(&devinfo); 28 - } 29 - 30 21 static const char * const imx27_dt_board_compat[] __initconst = { 31 22 "fsl,imx27", 32 23 NULL ··· 27 36 .map_io = mx27_map_io, 28 37 .init_early = imx27_init_early, 29 38 .init_irq = mx27_init_irq, 30 - .init_machine = imx27_dt_init, 31 39 .dt_compat = imx27_dt_board_compat, 32 40 MACHINE_END
-3
arch/arm/mach-imx/mach-imx51.c
··· 50 50 51 51 static void __init imx51_dt_init(void) 52 52 { 53 - struct platform_device_info devinfo = { .name = "cpufreq-dt", }; 54 - 55 53 imx51_ipu_mipi_setup(); 56 54 imx_src_init(); 57 55 58 56 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 59 - platform_device_register_full(&devinfo); 60 57 } 61 58 62 59 static void __init imx51_init_late(void)
-2
arch/arm/mach-imx/mach-imx53.c
··· 40 40 static void __init imx53_init_late(void) 41 41 { 42 42 imx53_pm_init(); 43 - 44 - platform_device_register_simple("cpufreq-dt", -1, NULL, 0); 45 43 } 46 44 47 45 static const char * const imx53_dt_board_compat[] __initconst = {
-6
arch/arm/mach-imx/mach-imx7d.c
··· 105 105 irqchip_init(); 106 106 } 107 107 108 - static void __init imx7d_init_late(void) 109 - { 110 - platform_device_register_simple("cpufreq-dt", -1, NULL, 0); 111 - } 112 - 113 108 static const char *const imx7d_dt_compat[] __initconst = { 114 109 "fsl,imx7d", 115 110 NULL, ··· 112 117 113 118 DT_MACHINE_START(IMX7D, "Freescale i.MX7 Dual (Device Tree)") 114 119 .init_irq = imx7d_init_irq, 115 - .init_late = imx7d_init_late, 116 120 .init_machine = imx7d_init_machine, 117 121 .dt_compat = imx7d_dt_compat, 118 122 MACHINE_END
+2 -5
arch/arm/mach-omap2/pm.c
··· 277 277 278 278 static inline void omap_init_cpufreq(void) 279 279 { 280 - struct platform_device_info devinfo = { }; 280 + struct platform_device_info devinfo = { .name = "omap-cpufreq" }; 281 281 282 282 if (!of_have_populated_dt()) 283 - devinfo.name = "omap-cpufreq"; 284 - else 285 - devinfo.name = "cpufreq-dt"; 286 - platform_device_register_full(&devinfo); 283 + platform_device_register_full(&devinfo); 287 284 } 288 285 289 286 static int __init omap2_common_pm_init(void)
-1
arch/arm/mach-rockchip/rockchip.c
··· 74 74 { 75 75 rockchip_suspend_init(); 76 76 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 77 - platform_device_register_simple("cpufreq-dt", 0, NULL, 0); 78 77 } 79 78 80 79 static const char * const rockchip_board_dt_compat[] = {
-1
arch/arm/mach-shmobile/Makefile
··· 38 38 39 39 # PM objects 40 40 obj-$(CONFIG_SUSPEND) += suspend.o 41 - obj-$(CONFIG_CPU_FREQ) += cpufreq.o 42 41 obj-$(CONFIG_PM_RCAR) += pm-rcar.o 43 42 obj-$(CONFIG_PM_RMOBILE) += pm-rmobile.o 44 43 obj-$(CONFIG_ARCH_RCAR_GEN2) += pm-rcar-gen2.o
-7
arch/arm/mach-shmobile/common.h
··· 25 25 static inline void shmobile_smp_apmu_suspend_init(void) { } 26 26 #endif 27 27 28 - #ifdef CONFIG_CPU_FREQ 29 - int shmobile_cpufreq_init(void); 30 - #else 31 - static inline int shmobile_cpufreq_init(void) { return 0; } 32 - #endif 33 - 34 28 static inline void __init shmobile_init_late(void) 35 29 { 36 30 shmobile_suspend_init(); 37 - shmobile_cpufreq_init(); 38 31 } 39 32 40 33 #endif /* __ARCH_MACH_COMMON_H */
-19
arch/arm/mach-shmobile/cpufreq.c
··· 1 - /* 2 - * CPUFreq support code for SH-Mobile ARM 3 - * 4 - * Copyright (C) 2014 Gaku Inami 5 - * 6 - * This file is subject to the terms and conditions of the GNU General Public 7 - * License. See the file "COPYING" in the main directory of this archive 8 - * for more details. 9 - */ 10 - 11 - #include <linux/platform_device.h> 12 - 13 - #include "common.h" 14 - 15 - int __init shmobile_cpufreq_init(void) 16 - { 17 - platform_device_register_simple("cpufreq-dt", -1, NULL, 0); 18 - return 0; 19 - }
-9
arch/arm/mach-sunxi/sunxi.c
··· 17 17 18 18 #include <asm/mach/arch.h> 19 19 20 - static void __init sunxi_dt_cpufreq_init(void) 21 - { 22 - platform_device_register_simple("cpufreq-dt", -1, NULL, 0); 23 - } 24 - 25 20 static const char * const sunxi_board_dt_compat[] = { 26 21 "allwinner,sun4i-a10", 27 22 "allwinner,sun5i-a10s", ··· 27 32 28 33 DT_MACHINE_START(SUNXI_DT, "Allwinner sun4i/sun5i Families") 29 34 .dt_compat = sunxi_board_dt_compat, 30 - .init_late = sunxi_dt_cpufreq_init, 31 35 MACHINE_END 32 36 33 37 static const char * const sun6i_board_dt_compat[] = { ··· 47 53 DT_MACHINE_START(SUN6I_DT, "Allwinner sun6i (A31) Family") 48 54 .init_time = sun6i_timer_init, 49 55 .dt_compat = sun6i_board_dt_compat, 50 - .init_late = sunxi_dt_cpufreq_init, 51 56 MACHINE_END 52 57 53 58 static const char * const sun7i_board_dt_compat[] = { ··· 56 63 57 64 DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family") 58 65 .dt_compat = sun7i_board_dt_compat, 59 - .init_late = sunxi_dt_cpufreq_init, 60 66 MACHINE_END 61 67 62 68 static const char * const sun8i_board_dt_compat[] = { ··· 69 77 DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family") 70 78 .init_time = sun6i_timer_init, 71 79 .dt_compat = sun8i_board_dt_compat, 72 - .init_late = sunxi_dt_cpufreq_init, 73 80 MACHINE_END 74 81 75 82 static const char * const sun9i_board_dt_compat[] = {
-2
arch/arm/mach-zynq/common.c
··· 110 110 */ 111 111 static void __init zynq_init_machine(void) 112 112 { 113 - struct platform_device_info devinfo = { .name = "cpufreq-dt", }; 114 113 struct soc_device_attribute *soc_dev_attr; 115 114 struct soc_device *soc_dev; 116 115 struct device *parent = NULL; ··· 144 145 of_platform_populate(NULL, of_default_bus_match_table, NULL, parent); 145 146 146 147 platform_device_register(&zynq_cpuidle_device); 147 - platform_device_register_full(&devinfo); 148 148 } 149 149 150 150 static void __init zynq_timer_init(void)
+44
drivers/cpufreq/Kconfig
··· 18 18 19 19 if CPU_FREQ 20 20 21 + config CPU_FREQ_GOV_ATTR_SET 22 + bool 23 + 21 24 config CPU_FREQ_GOV_COMMON 25 + select CPU_FREQ_GOV_ATTR_SET 22 26 select IRQ_WORK 23 27 bool 24 28 ··· 107 103 Be aware that not all cpufreq drivers support the conservative 108 104 governor. If unsure have a look at the help section of the 109 105 driver. Fallback governor will be the performance governor. 106 + 107 + config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL 108 + bool "schedutil" 109 + select CPU_FREQ_GOV_SCHEDUTIL 110 + select CPU_FREQ_GOV_PERFORMANCE 111 + help 112 + Use the 'schedutil' CPUFreq governor by default. If unsure, 113 + have a look at the help section of that governor. The fallback 114 + governor will be 'performance'. 115 + 110 116 endchoice 111 117 112 118 config CPU_FREQ_GOV_PERFORMANCE ··· 198 184 199 185 If in doubt, say N. 200 186 187 + config CPU_FREQ_GOV_SCHEDUTIL 188 + tristate "'schedutil' cpufreq policy governor" 189 + depends on CPU_FREQ 190 + select CPU_FREQ_GOV_ATTR_SET 191 + select IRQ_WORK 192 + help 193 + This governor makes decisions based on the utilization data provided 194 + by the scheduler. It sets the CPU frequency to be proportional to 195 + the utilization/capacity ratio coming from the scheduler. If the 196 + utilization is frequency-invariant, the new frequency is also 197 + proportional to the maximum available frequency. If that is not the 198 + case, it is proportional to the current frequency of the CPU. The 199 + frequency tipping point is at utilization/capacity equal to 80% in 200 + both cases. 201 + 202 + To compile this driver as a module, choose M here: the module will 203 + be called cpufreq_schedutil. 204 + 205 + If in doubt, say N. 
206 + 201 207 comment "CPU frequency scaling drivers" 202 208 203 209 config CPUFREQ_DT ··· 225 191 depends on HAVE_CLK && OF 226 192 # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y: 227 193 depends on !CPU_THERMAL || THERMAL 194 + select CPUFREQ_DT_PLATDEV 228 195 select PM_OPP 229 196 help 230 197 This adds a generic DT based cpufreq driver for frequency management. 231 198 It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) 232 199 systems which share clock and voltage across all CPUs. 200 + 201 + If in doubt, say N. 202 + 203 + config CPUFREQ_DT_PLATDEV 204 + bool 205 + help 206 + This adds a generic DT based cpufreq platdev driver for frequency 207 + management. This creates a 'cpufreq-dt' platform device, on the 208 + supported platforms. 233 209 234 210 If in doubt, say N. 235 211
-9
drivers/cpufreq/Kconfig.arm
··· 50 50 51 51 If in doubt, say N. 52 52 53 - config ARM_HISI_ACPU_CPUFREQ 54 - tristate "Hisilicon ACPU CPUfreq driver" 55 - depends on ARCH_HISI && CPUFREQ_DT 56 - select PM_OPP 57 - help 58 - This enables the hisilicon ACPU CPUfreq driver. 59 - 60 - If in doubt, say N. 61 - 62 53 config ARM_IMX6Q_CPUFREQ 63 54 tristate "Freescale i.MX6 cpufreq support" 64 55 depends on ARCH_MXC
+1
drivers/cpufreq/Kconfig.x86
··· 5 5 config X86_INTEL_PSTATE 6 6 bool "Intel P state control" 7 7 depends on X86 8 + select ACPI_PROCESSOR if ACPI 8 9 help 9 10 This driver provides a P state for Intel core processors. 10 11 The driver implements an internal governor and will become
+2 -1
drivers/cpufreq/Makefile
··· 11 11 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o 12 12 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o 13 13 obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o 14 + obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o 14 15 15 16 obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o 17 + obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o 16 18 17 19 ################################################################################## 18 20 # x86 drivers. ··· 55 53 obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o 56 54 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o 57 55 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o 58 - obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ) += hisi-acpu-cpufreq.o 59 56 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o 60 57 obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o 61 58 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
+85 -44
drivers/cpufreq/acpi-cpufreq.c
··· 25 25 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 26 26 */ 27 27 28 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29 + 28 30 #include <linux/kernel.h> 29 31 #include <linux/module.h> 30 32 #include <linux/init.h> ··· 52 50 MODULE_DESCRIPTION("ACPI Processor P-States Driver"); 53 51 MODULE_LICENSE("GPL"); 54 52 55 - #define PFX "acpi-cpufreq: " 56 - 57 53 enum { 58 54 UNDEFINED_CAPABLE = 0, 59 55 SYSTEM_INTEL_MSR_CAPABLE, ··· 65 65 #define MSR_K7_HWCR_CPB_DIS (1ULL << 25) 66 66 67 67 struct acpi_cpufreq_data { 68 - struct cpufreq_frequency_table *freq_table; 69 68 unsigned int resume; 70 69 unsigned int cpu_feature; 71 70 unsigned int acpi_perf_cpu; ··· 199 200 return cpu_has(cpu, X86_FEATURE_HW_PSTATE); 200 201 } 201 202 202 - static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) 203 + static unsigned extract_io(struct cpufreq_policy *policy, u32 value) 203 204 { 205 + struct acpi_cpufreq_data *data = policy->driver_data; 204 206 struct acpi_processor_performance *perf; 205 207 int i; 206 208 ··· 209 209 210 210 for (i = 0; i < perf->state_count; i++) { 211 211 if (value == perf->states[i].status) 212 - return data->freq_table[i].frequency; 212 + return policy->freq_table[i].frequency; 213 213 } 214 214 return 0; 215 215 } 216 216 217 - static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) 217 + static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr) 218 218 { 219 + struct acpi_cpufreq_data *data = policy->driver_data; 219 220 struct cpufreq_frequency_table *pos; 220 221 struct acpi_processor_performance *perf; 221 222 ··· 227 226 228 227 perf = to_perf_data(data); 229 228 230 - cpufreq_for_each_entry(pos, data->freq_table) 229 + cpufreq_for_each_entry(pos, policy->freq_table) 231 230 if (msr == perf->states[pos->driver_data].status) 232 231 return pos->frequency; 233 - return data->freq_table[0].frequency; 232 + return policy->freq_table[0].frequency; 234 233 } 235 234 236 - static 
unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) 235 + static unsigned extract_freq(struct cpufreq_policy *policy, u32 val) 237 236 { 237 + struct acpi_cpufreq_data *data = policy->driver_data; 238 + 238 239 switch (data->cpu_feature) { 239 240 case SYSTEM_INTEL_MSR_CAPABLE: 240 241 case SYSTEM_AMD_MSR_CAPABLE: 241 - return extract_msr(val, data); 242 + return extract_msr(policy, val); 242 243 case SYSTEM_IO_CAPABLE: 243 - return extract_io(val, data); 244 + return extract_io(policy, val); 244 245 default: 245 246 return 0; 246 247 } ··· 377 374 return 0; 378 375 379 376 data = policy->driver_data; 380 - if (unlikely(!data || !data->freq_table)) 377 + if (unlikely(!data || !policy->freq_table)) 381 378 return 0; 382 379 383 - cached_freq = data->freq_table[to_perf_data(data)->state].frequency; 384 - freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data); 380 + cached_freq = policy->freq_table[to_perf_data(data)->state].frequency; 381 + freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data)); 385 382 if (freq != cached_freq) { 386 383 /* 387 384 * The dreaded BIOS frequency change behind our back. 
··· 395 392 return freq; 396 393 } 397 394 398 - static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, 399 - struct acpi_cpufreq_data *data) 395 + static unsigned int check_freqs(struct cpufreq_policy *policy, 396 + const struct cpumask *mask, unsigned int freq) 400 397 { 398 + struct acpi_cpufreq_data *data = policy->driver_data; 401 399 unsigned int cur_freq; 402 400 unsigned int i; 403 401 404 402 for (i = 0; i < 100; i++) { 405 - cur_freq = extract_freq(get_cur_val(mask, data), data); 403 + cur_freq = extract_freq(policy, get_cur_val(mask, data)); 406 404 if (cur_freq == freq) 407 405 return 1; 408 406 udelay(10); ··· 420 416 unsigned int next_perf_state = 0; /* Index into perf table */ 421 417 int result = 0; 422 418 423 - if (unlikely(data == NULL || data->freq_table == NULL)) { 419 + if (unlikely(!data)) { 424 420 return -ENODEV; 425 421 } 426 422 427 423 perf = to_perf_data(data); 428 - next_perf_state = data->freq_table[index].driver_data; 424 + next_perf_state = policy->freq_table[index].driver_data; 429 425 if (perf->state == next_perf_state) { 430 426 if (unlikely(data->resume)) { 431 427 pr_debug("Called after resume, resetting to P%d\n", ··· 448 444 drv_write(data, mask, perf->states[next_perf_state].control); 449 445 450 446 if (acpi_pstate_strict) { 451 - if (!check_freqs(mask, data->freq_table[index].frequency, 452 - data)) { 447 + if (!check_freqs(policy, mask, 448 + policy->freq_table[index].frequency)) { 453 449 pr_debug("acpi_cpufreq_target failed (%d)\n", 454 450 policy->cpu); 455 451 result = -EAGAIN; ··· 460 456 perf->state = next_perf_state; 461 457 462 458 return result; 459 + } 460 + 461 + unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy, 462 + unsigned int target_freq) 463 + { 464 + struct acpi_cpufreq_data *data = policy->driver_data; 465 + struct acpi_processor_performance *perf; 466 + struct cpufreq_frequency_table *entry; 467 + unsigned int next_perf_state, next_freq, freq; 468 + 469 + /* 
470 + * Find the closest frequency above target_freq. 471 + * 472 + * The table is sorted in the reverse order with respect to the 473 + * frequency and all of the entries are valid (see the initialization). 474 + */ 475 + entry = policy->freq_table; 476 + do { 477 + entry++; 478 + freq = entry->frequency; 479 + } while (freq >= target_freq && freq != CPUFREQ_TABLE_END); 480 + entry--; 481 + next_freq = entry->frequency; 482 + next_perf_state = entry->driver_data; 483 + 484 + perf = to_perf_data(data); 485 + if (perf->state == next_perf_state) { 486 + if (unlikely(data->resume)) 487 + data->resume = 0; 488 + else 489 + return next_freq; 490 + } 491 + 492 + data->cpu_freq_write(&perf->control_register, 493 + perf->states[next_perf_state].control); 494 + perf->state = next_perf_state; 495 + return next_freq; 463 496 } 464 497 465 498 static unsigned long ··· 652 611 if ((c->x86 == 15) && 653 612 (c->x86_model == 6) && 654 613 (c->x86_mask == 8)) { 655 - printk(KERN_INFO "acpi-cpufreq: Intel(R) " 656 - "Xeon(R) 7100 Errata AL30, processors may " 657 - "lock up on frequency changes: disabling " 658 - "acpi-cpufreq.\n"); 614 + pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n"); 659 615 return -ENODEV; 660 616 } 661 617 } ··· 669 631 unsigned int result = 0; 670 632 struct cpuinfo_x86 *c = &cpu_data(policy->cpu); 671 633 struct acpi_processor_performance *perf; 634 + struct cpufreq_frequency_table *freq_table; 672 635 #ifdef CONFIG_SMP 673 636 static int blacklisted; 674 637 #endif ··· 729 690 cpumask_copy(data->freqdomain_cpus, 730 691 topology_sibling_cpumask(cpu)); 731 692 policy->shared_type = CPUFREQ_SHARED_TYPE_HW; 732 - pr_info_once(PFX "overriding BIOS provided _PSD data\n"); 693 + pr_info_once("overriding BIOS provided _PSD data\n"); 733 694 } 734 695 #endif 735 696 ··· 781 742 goto err_unreg; 782 743 } 783 744 784 - data->freq_table = kzalloc(sizeof(*data->freq_table) * 745 + freq_table = 
kzalloc(sizeof(*freq_table) * 785 746 (perf->state_count+1), GFP_KERNEL); 786 - if (!data->freq_table) { 747 + if (!freq_table) { 787 748 result = -ENOMEM; 788 749 goto err_unreg; 789 750 } ··· 801 762 if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && 802 763 policy->cpuinfo.transition_latency > 20 * 1000) { 803 764 policy->cpuinfo.transition_latency = 20 * 1000; 804 - printk_once(KERN_INFO 805 - "P-state transition latency capped at 20 uS\n"); 765 + pr_info_once("P-state transition latency capped at 20 uS\n"); 806 766 } 807 767 808 768 /* table init */ 809 769 for (i = 0; i < perf->state_count; i++) { 810 770 if (i > 0 && perf->states[i].core_frequency >= 811 - data->freq_table[valid_states-1].frequency / 1000) 771 + freq_table[valid_states-1].frequency / 1000) 812 772 continue; 813 773 814 - data->freq_table[valid_states].driver_data = i; 815 - data->freq_table[valid_states].frequency = 774 + freq_table[valid_states].driver_data = i; 775 + freq_table[valid_states].frequency = 816 776 perf->states[i].core_frequency * 1000; 817 777 valid_states++; 818 778 } 819 - data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; 779 + freq_table[valid_states].frequency = CPUFREQ_TABLE_END; 820 780 perf->state = 0; 821 781 822 - result = cpufreq_table_validate_and_show(policy, data->freq_table); 782 + result = cpufreq_table_validate_and_show(policy, freq_table); 823 783 if (result) 824 784 goto err_freqfree; 825 785 826 786 if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) 827 - printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n"); 787 + pr_warn(FW_WARN "P-state 0 is not max freq\n"); 828 788 829 789 switch (perf->control_register.space_id) { 830 790 case ACPI_ADR_SPACE_SYSTEM_IO: ··· 859 821 */ 860 822 data->resume = 1; 861 823 824 + policy->fast_switch_possible = !acpi_pstate_strict && 825 + !(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY); 826 + 862 827 return result; 863 828 864 829 
err_freqfree: 865 - kfree(data->freq_table); 830 + kfree(freq_table); 866 831 err_unreg: 867 832 acpi_processor_unregister_performance(cpu); 868 833 err_free_mask: ··· 883 842 884 843 pr_debug("acpi_cpufreq_cpu_exit\n"); 885 844 886 - if (data) { 887 - policy->driver_data = NULL; 888 - acpi_processor_unregister_performance(data->acpi_perf_cpu); 889 - free_cpumask_var(data->freqdomain_cpus); 890 - kfree(data->freq_table); 891 - kfree(data); 892 - } 845 + policy->fast_switch_possible = false; 846 + policy->driver_data = NULL; 847 + acpi_processor_unregister_performance(data->acpi_perf_cpu); 848 + free_cpumask_var(data->freqdomain_cpus); 849 + kfree(policy->freq_table); 850 + kfree(data); 893 851 894 852 return 0; 895 853 } ··· 916 876 static struct cpufreq_driver acpi_cpufreq_driver = { 917 877 .verify = cpufreq_generic_frequency_table_verify, 918 878 .target_index = acpi_cpufreq_target, 879 + .fast_switch = acpi_cpufreq_fast_switch, 919 880 .bios_limit = acpi_processor_get_bios_limit, 920 881 .init = acpi_cpufreq_cpu_init, 921 882 .exit = acpi_cpufreq_cpu_exit,
+21
drivers/cpufreq/cppc_cpufreq.c
··· 173 173 return -ENODEV; 174 174 } 175 175 176 + static void __exit cppc_cpufreq_exit(void) 177 + { 178 + struct cpudata *cpu; 179 + int i; 180 + 181 + cpufreq_unregister_driver(&cppc_cpufreq_driver); 182 + 183 + for_each_possible_cpu(i) { 184 + cpu = all_cpu_data[i]; 185 + free_cpumask_var(cpu->shared_cpu_map); 186 + kfree(cpu); 187 + } 188 + 189 + kfree(all_cpu_data); 190 + } 191 + 192 + module_exit(cppc_cpufreq_exit); 193 + MODULE_AUTHOR("Ashwin Chaugule"); 194 + MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec"); 195 + MODULE_LICENSE("GPL"); 196 + 176 197 late_initcall(cppc_cpufreq_init);
+92
drivers/cpufreq/cpufreq-dt-platdev.c
··· 1 + /* 2 + * Copyright (C) 2016 Linaro. 3 + * Viresh Kumar <viresh.kumar@linaro.org> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + */ 9 + 10 + #include <linux/err.h> 11 + #include <linux/of.h> 12 + #include <linux/platform_device.h> 13 + 14 + static const struct of_device_id machines[] __initconst = { 15 + { .compatible = "allwinner,sun4i-a10", }, 16 + { .compatible = "allwinner,sun5i-a10s", }, 17 + { .compatible = "allwinner,sun5i-a13", }, 18 + { .compatible = "allwinner,sun5i-r8", }, 19 + { .compatible = "allwinner,sun6i-a31", }, 20 + { .compatible = "allwinner,sun6i-a31s", }, 21 + { .compatible = "allwinner,sun7i-a20", }, 22 + { .compatible = "allwinner,sun8i-a23", }, 23 + { .compatible = "allwinner,sun8i-a33", }, 24 + { .compatible = "allwinner,sun8i-a83t", }, 25 + { .compatible = "allwinner,sun8i-h3", }, 26 + 27 + { .compatible = "hisilicon,hi6220", }, 28 + 29 + { .compatible = "fsl,imx27", }, 30 + { .compatible = "fsl,imx51", }, 31 + { .compatible = "fsl,imx53", }, 32 + { .compatible = "fsl,imx7d", }, 33 + 34 + { .compatible = "marvell,berlin", }, 35 + 36 + { .compatible = "samsung,exynos3250", }, 37 + { .compatible = "samsung,exynos4210", }, 38 + { .compatible = "samsung,exynos4212", }, 39 + { .compatible = "samsung,exynos4412", }, 40 + { .compatible = "samsung,exynos5250", }, 41 + #ifndef CONFIG_BL_SWITCHER 42 + { .compatible = "samsung,exynos5420", }, 43 + { .compatible = "samsung,exynos5800", }, 44 + #endif 45 + 46 + { .compatible = "renesas,emev2", }, 47 + { .compatible = "renesas,r7s72100", }, 48 + { .compatible = "renesas,r8a73a4", }, 49 + { .compatible = "renesas,r8a7740", }, 50 + { .compatible = "renesas,r8a7778", }, 51 + { .compatible = "renesas,r8a7779", }, 52 + { .compatible = "renesas,r8a7790", }, 53 + { .compatible = "renesas,r8a7791", }, 54 + { .compatible = "renesas,r8a7793", }, 55 
+ { .compatible = "renesas,r8a7794", }, 56 + { .compatible = "renesas,sh73a0", }, 57 + 58 + { .compatible = "rockchip,rk2928", }, 59 + { .compatible = "rockchip,rk3036", }, 60 + { .compatible = "rockchip,rk3066a", }, 61 + { .compatible = "rockchip,rk3066b", }, 62 + { .compatible = "rockchip,rk3188", }, 63 + { .compatible = "rockchip,rk3228", }, 64 + { .compatible = "rockchip,rk3288", }, 65 + { .compatible = "rockchip,rk3366", }, 66 + { .compatible = "rockchip,rk3368", }, 67 + { .compatible = "rockchip,rk3399", }, 68 + 69 + { .compatible = "ti,omap2", }, 70 + { .compatible = "ti,omap3", }, 71 + { .compatible = "ti,omap4", }, 72 + { .compatible = "ti,omap5", }, 73 + 74 + { .compatible = "xlnx,zynq-7000", }, 75 + }; 76 + 77 + static int __init cpufreq_dt_platdev_init(void) 78 + { 79 + struct device_node *np = of_find_node_by_path("/"); 80 + 81 + if (!np) 82 + return -ENODEV; 83 + 84 + if (!of_match_node(machines, np)) 85 + return -ENODEV; 86 + 87 + of_node_put(of_root); 88 + 89 + return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1, 90 + NULL, 0)); 91 + } 92 + device_initcall(cpufreq_dt_platdev_init);
+12 -16
drivers/cpufreq/cpufreq-nforce2.c
··· 7 7 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* 8 8 */ 9 9 10 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 + 10 12 #include <linux/kernel.h> 11 13 #include <linux/module.h> 12 14 #include <linux/moduleparam.h> ··· 57 55 MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)"); 58 56 MODULE_PARM_DESC(min_fsb, 59 57 "Minimum FSB to use, if not defined: current FSB - 50"); 60 - 61 - #define PFX "cpufreq-nforce2: " 62 58 63 59 /** 64 60 * nforce2_calc_fsb - calculate FSB ··· 174 174 int pll = 0; 175 175 176 176 if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) { 177 - printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb); 177 + pr_err("FSB %d is out of range!\n", fsb); 178 178 return -EINVAL; 179 179 } 180 180 181 181 tfsb = nforce2_fsb_read(0); 182 182 if (!tfsb) { 183 - printk(KERN_ERR PFX "Error while reading the FSB\n"); 183 + pr_err("Error while reading the FSB\n"); 184 184 return -EINVAL; 185 185 } 186 186 ··· 276 276 /* local_irq_save(flags); */ 277 277 278 278 if (nforce2_set_fsb(target_fsb) < 0) 279 - printk(KERN_ERR PFX "Changing FSB to %d failed\n", 280 - target_fsb); 279 + pr_err("Changing FSB to %d failed\n", target_fsb); 281 280 else 282 281 pr_debug("Changed FSB successfully to %d\n", 283 282 target_fsb); ··· 324 325 /* FIX: Get FID from CPU */ 325 326 if (!fid) { 326 327 if (!cpu_khz) { 327 - printk(KERN_WARNING PFX 328 - "cpu_khz not set, can't calculate multiplier!\n"); 328 + pr_warn("cpu_khz not set, can't calculate multiplier!\n"); 329 329 return -ENODEV; 330 330 } 331 331 ··· 339 341 } 340 342 } 341 343 342 - printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb, 343 - fid / 10, fid % 10); 344 + pr_info("FSB currently at %i MHz, FID %d.%d\n", 345 + fsb, fid / 10, fid % 10); 344 346 345 347 /* Set maximum FSB to FSB at boot time */ 346 348 max_fsb = nforce2_fsb_read(1); ··· 399 401 if (nforce2_dev == NULL) 400 402 return -ENODEV; 401 403 402 - printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n", 
403 - nforce2_dev->revision); 404 - printk(KERN_INFO PFX 405 - "FSB changing is maybe unstable and can lead to " 406 - "crashes and data loss.\n"); 404 + pr_info("Detected nForce2 chipset revision %X\n", 405 + nforce2_dev->revision); 406 + pr_info("FSB changing is maybe unstable and can lead to crashes and data loss\n"); 407 407 408 408 return 0; 409 409 } ··· 419 423 420 424 /* detect chipset */ 421 425 if (nforce2_detect_chipset()) { 422 - printk(KERN_INFO PFX "No nForce2 chipset.\n"); 426 + pr_info("No nForce2 chipset\n"); 423 427 return -ENODEV; 424 428 } 425 429
+141 -23
drivers/cpufreq/cpufreq.c
··· 78 78 static unsigned int __cpufreq_get(struct cpufreq_policy *policy); 79 79 static int cpufreq_start_governor(struct cpufreq_policy *policy); 80 80 81 + static inline int cpufreq_exit_governor(struct cpufreq_policy *policy) 82 + { 83 + return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 84 + } 85 + 81 86 /** 82 87 * Two notifier lists: the "policy" list is involved in the 83 88 * validation process for a new CPU frequency policy; the ··· 434 429 } 435 430 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); 436 431 432 + /* 433 + * Fast frequency switching status count. Positive means "enabled", negative 434 + * means "disabled" and 0 means "not decided yet". 435 + */ 436 + static int cpufreq_fast_switch_count; 437 + static DEFINE_MUTEX(cpufreq_fast_switch_lock); 438 + 439 + static void cpufreq_list_transition_notifiers(void) 440 + { 441 + struct notifier_block *nb; 442 + 443 + pr_info("Registered transition notifiers:\n"); 444 + 445 + mutex_lock(&cpufreq_transition_notifier_list.mutex); 446 + 447 + for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next) 448 + pr_info("%pF\n", nb->notifier_call); 449 + 450 + mutex_unlock(&cpufreq_transition_notifier_list.mutex); 451 + } 452 + 453 + /** 454 + * cpufreq_enable_fast_switch - Enable fast frequency switching for policy. 455 + * @policy: cpufreq policy to enable fast frequency switching for. 456 + * 457 + * Try to enable fast frequency switching for @policy. 458 + * 459 + * The attempt will fail if there is at least one transition notifier registered 460 + * at this point, as fast frequency switching is quite fundamentally at odds 461 + * with transition notifiers. Thus if successful, it will make registration of 462 + * transition notifiers fail going forward. 
463 + */ 464 + void cpufreq_enable_fast_switch(struct cpufreq_policy *policy) 465 + { 466 + lockdep_assert_held(&policy->rwsem); 467 + 468 + if (!policy->fast_switch_possible) 469 + return; 470 + 471 + mutex_lock(&cpufreq_fast_switch_lock); 472 + if (cpufreq_fast_switch_count >= 0) { 473 + cpufreq_fast_switch_count++; 474 + policy->fast_switch_enabled = true; 475 + } else { 476 + pr_warn("CPU%u: Fast frequency switching not enabled\n", 477 + policy->cpu); 478 + cpufreq_list_transition_notifiers(); 479 + } 480 + mutex_unlock(&cpufreq_fast_switch_lock); 481 + } 482 + EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch); 483 + 484 + /** 485 + * cpufreq_disable_fast_switch - Disable fast frequency switching for policy. 486 + * @policy: cpufreq policy to disable fast frequency switching for. 487 + */ 488 + void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) 489 + { 490 + mutex_lock(&cpufreq_fast_switch_lock); 491 + if (policy->fast_switch_enabled) { 492 + policy->fast_switch_enabled = false; 493 + if (!WARN_ON(cpufreq_fast_switch_count <= 0)) 494 + cpufreq_fast_switch_count--; 495 + } 496 + mutex_unlock(&cpufreq_fast_switch_lock); 497 + } 498 + EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch); 437 499 438 500 /********************************************************************* 439 501 * SYSFS INTERFACE * ··· 1320 1248 */ 1321 1249 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 1322 1250 { 1251 + struct cpufreq_policy *policy; 1323 1252 unsigned cpu = dev->id; 1324 - int ret; 1325 1253 1326 1254 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu); 1327 1255 1328 - if (cpu_online(cpu)) { 1329 - ret = cpufreq_online(cpu); 1330 - } else { 1331 - /* 1332 - * A hotplug notifier will follow and we will handle it as CPU 1333 - * online then. For now, just create the sysfs link, unless 1334 - * there is no policy or the link is already present. 
1335 - */ 1336 - struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1256 + if (cpu_online(cpu)) 1257 + return cpufreq_online(cpu); 1337 1258 1338 - ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus) 1339 - ? add_cpu_dev_symlink(policy, cpu) : 0; 1340 - } 1259 + /* 1260 + * A hotplug notifier will follow and we will handle it as CPU online 1261 + * then. For now, just create the sysfs link, unless there is no policy 1262 + * or the link is already present. 1263 + */ 1264 + policy = per_cpu(cpufreq_cpu_data, cpu); 1265 + if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus)) 1266 + return 0; 1341 1267 1342 - return ret; 1268 + return add_cpu_dev_symlink(policy, cpu); 1343 1269 } 1344 1270 1345 1271 static void cpufreq_offline(unsigned int cpu) ··· 1389 1319 1390 1320 /* If cpu is last user of policy, free policy */ 1391 1321 if (has_target()) { 1392 - ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 1322 + ret = cpufreq_exit_governor(policy); 1393 1323 if (ret) 1394 1324 pr_err("%s: Failed to exit governor\n", __func__); 1395 1325 } ··· 1517 1447 1518 1448 ret_freq = cpufreq_driver->get(policy->cpu); 1519 1449 1520 - /* Updating inactive policies is invalid, so avoid doing that. */ 1521 - if (unlikely(policy_is_inactive(policy))) 1450 + /* 1451 + * Updating inactive policies is invalid, so avoid doing that. Also 1452 + * if fast frequency switching is used with the given policy, the check 1453 + * against policy->cur is pointless, so skip it in that case too. 
1454 + */ 1455 + if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled) 1522 1456 return ret_freq; 1523 1457 1524 1458 if (ret_freq && policy->cur && ··· 1749 1675 1750 1676 switch (list) { 1751 1677 case CPUFREQ_TRANSITION_NOTIFIER: 1678 + mutex_lock(&cpufreq_fast_switch_lock); 1679 + 1680 + if (cpufreq_fast_switch_count > 0) { 1681 + mutex_unlock(&cpufreq_fast_switch_lock); 1682 + return -EBUSY; 1683 + } 1752 1684 ret = srcu_notifier_chain_register( 1753 1685 &cpufreq_transition_notifier_list, nb); 1686 + if (!ret) 1687 + cpufreq_fast_switch_count--; 1688 + 1689 + mutex_unlock(&cpufreq_fast_switch_lock); 1754 1690 break; 1755 1691 case CPUFREQ_POLICY_NOTIFIER: 1756 1692 ret = blocking_notifier_chain_register( ··· 1793 1709 1794 1710 switch (list) { 1795 1711 case CPUFREQ_TRANSITION_NOTIFIER: 1712 + mutex_lock(&cpufreq_fast_switch_lock); 1713 + 1796 1714 ret = srcu_notifier_chain_unregister( 1797 1715 &cpufreq_transition_notifier_list, nb); 1716 + if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0)) 1717 + cpufreq_fast_switch_count++; 1718 + 1719 + mutex_unlock(&cpufreq_fast_switch_lock); 1798 1720 break; 1799 1721 case CPUFREQ_POLICY_NOTIFIER: 1800 1722 ret = blocking_notifier_chain_unregister( ··· 1818 1728 /********************************************************************* 1819 1729 * GOVERNORS * 1820 1730 *********************************************************************/ 1731 + 1732 + /** 1733 + * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch. 1734 + * @policy: cpufreq policy to switch the frequency for. 1735 + * @target_freq: New frequency to set (may be approximate). 1736 + * 1737 + * Carry out a fast frequency switch without sleeping. 
1738 + * 1739 + * The driver's ->fast_switch() callback invoked by this function must be 1740 + * suitable for being called from within RCU-sched read-side critical sections 1741 + * and it is expected to select the minimum available frequency greater than or 1742 + * equal to @target_freq (CPUFREQ_RELATION_L). 1743 + * 1744 + * This function must not be called if policy->fast_switch_enabled is unset. 1745 + * 1746 + * Governors calling this function must guarantee that it will never be invoked 1747 + * twice in parallel for the same policy and that it will never be called in 1748 + * parallel with either ->target() or ->target_index() for the same policy. 1749 + * 1750 + * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch() 1751 + * callback to indicate an error condition, the hardware configuration must be 1752 + * preserved. 1753 + */ 1754 + unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, 1755 + unsigned int target_freq) 1756 + { 1757 + clamp_val(target_freq, policy->min, policy->max); 1758 + 1759 + return cpufreq_driver->fast_switch(policy, target_freq); 1760 + } 1761 + EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch); 1821 1762 1822 1763 /* Must set freqs->new to intermediate frequency */ 1823 1764 static int __target_intermediate(struct cpufreq_policy *policy, ··· 2225 2104 return ret; 2226 2105 } 2227 2106 2228 - ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2107 + ret = cpufreq_exit_governor(policy); 2229 2108 if (ret) { 2230 2109 pr_err("%s: Failed to Exit Governor: %s (%d)\n", 2231 2110 __func__, old_gov->name, ret); ··· 2242 2121 pr_debug("cpufreq: governor change\n"); 2243 2122 return 0; 2244 2123 } 2245 - cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2124 + cpufreq_exit_governor(policy); 2246 2125 } 2247 2126 2248 2127 /* new governor failed, so re-start old one */ ··· 2310 2189 2311 2190 switch (action & ~CPU_TASKS_FROZEN) { 2312 2191 case CPU_ONLINE: 2192 + case CPU_DOWN_FAILED: 2313 2193 
cpufreq_online(cpu); 2314 2194 break; 2315 2195 2316 2196 case CPU_DOWN_PREPARE: 2317 2197 cpufreq_offline(cpu); 2318 - break; 2319 - 2320 - case CPU_DOWN_FAILED: 2321 - cpufreq_online(cpu); 2322 2198 break; 2323 2199 } 2324 2200 return NOTIFY_OK;
+15 -10
drivers/cpufreq/cpufreq_conservative.c
··· 129 129 /************************** sysfs interface ************************/ 130 130 static struct dbs_governor cs_dbs_gov; 131 131 132 - static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data, 133 - const char *buf, size_t count) 132 + static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set, 133 + const char *buf, size_t count) 134 134 { 135 + struct dbs_data *dbs_data = to_dbs_data(attr_set); 135 136 unsigned int input; 136 137 int ret; 137 138 ret = sscanf(buf, "%u", &input); ··· 144 143 return count; 145 144 } 146 145 147 - static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf, 148 - size_t count) 146 + static ssize_t store_up_threshold(struct gov_attr_set *attr_set, 147 + const char *buf, size_t count) 149 148 { 149 + struct dbs_data *dbs_data = to_dbs_data(attr_set); 150 150 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 151 151 unsigned int input; 152 152 int ret; ··· 160 158 return count; 161 159 } 162 160 163 - static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf, 164 - size_t count) 161 + static ssize_t store_down_threshold(struct gov_attr_set *attr_set, 162 + const char *buf, size_t count) 165 163 { 164 + struct dbs_data *dbs_data = to_dbs_data(attr_set); 166 165 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 167 166 unsigned int input; 168 167 int ret; ··· 178 175 return count; 179 176 } 180 177 181 - static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, 182 - const char *buf, size_t count) 178 + static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set, 179 + const char *buf, size_t count) 183 180 { 181 + struct dbs_data *dbs_data = to_dbs_data(attr_set); 184 182 unsigned int input; 185 183 int ret; 186 184 ··· 203 199 return count; 204 200 } 205 201 206 - static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf, 207 - size_t count) 202 + static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf, 203 + 
size_t count) 208 204 { 205 + struct dbs_data *dbs_data = to_dbs_data(attr_set); 209 206 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 210 207 unsigned int input; 211 208 int ret;
+67 -133
drivers/cpufreq/cpufreq_governor.c
··· 43 43 * This must be called with dbs_data->mutex held, otherwise traversing 44 44 * policy_dbs_list isn't safe. 45 45 */ 46 - ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf, 46 + ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, 47 47 size_t count) 48 48 { 49 + struct dbs_data *dbs_data = to_dbs_data(attr_set); 49 50 struct policy_dbs_info *policy_dbs; 50 51 unsigned int rate; 51 52 int ret; ··· 60 59 * We are operating under dbs_data->mutex and so the list and its 61 60 * entries can't be freed concurrently. 62 61 */ 63 - list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) { 62 + list_for_each_entry(policy_dbs, &attr_set->policy_list, list) { 64 63 mutex_lock(&policy_dbs->timer_mutex); 65 64 /* 66 65 * On 32-bit architectures this may race with the ··· 97 96 { 98 97 struct policy_dbs_info *policy_dbs; 99 98 100 - list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) { 99 + list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) { 101 100 unsigned int j; 102 101 103 102 for_each_cpu(j, policy_dbs->policy->cpus) { 104 103 struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j); 105 104 106 - j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, 105 + j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, 107 106 dbs_data->io_is_busy); 108 107 if (dbs_data->ignore_nice_load) 109 108 j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; ··· 111 110 } 112 111 } 113 112 EXPORT_SYMBOL_GPL(gov_update_cpu_data); 114 - 115 - static inline struct dbs_data *to_dbs_data(struct kobject *kobj) 116 - { 117 - return container_of(kobj, struct dbs_data, kobj); 118 - } 119 - 120 - static inline struct governor_attr *to_gov_attr(struct attribute *attr) 121 - { 122 - return container_of(attr, struct governor_attr, attr); 123 - } 124 - 125 - static ssize_t governor_show(struct kobject *kobj, struct attribute *attr, 126 - char *buf) 127 - { 128 - struct 
dbs_data *dbs_data = to_dbs_data(kobj); 129 - struct governor_attr *gattr = to_gov_attr(attr); 130 - 131 - return gattr->show(dbs_data, buf); 132 - } 133 - 134 - static ssize_t governor_store(struct kobject *kobj, struct attribute *attr, 135 - const char *buf, size_t count) 136 - { 137 - struct dbs_data *dbs_data = to_dbs_data(kobj); 138 - struct governor_attr *gattr = to_gov_attr(attr); 139 - int ret = -EBUSY; 140 - 141 - mutex_lock(&dbs_data->mutex); 142 - 143 - if (dbs_data->usage_count) 144 - ret = gattr->store(dbs_data, buf, count); 145 - 146 - mutex_unlock(&dbs_data->mutex); 147 - 148 - return ret; 149 - } 150 - 151 - /* 152 - * Sysfs Ops for accessing governor attributes. 153 - * 154 - * All show/store invocations for governor specific sysfs attributes, will first 155 - * call the below show/store callbacks and the attribute specific callback will 156 - * be called from within it. 157 - */ 158 - static const struct sysfs_ops governor_sysfs_ops = { 159 - .show = governor_show, 160 - .store = governor_store, 161 - }; 162 113 163 114 unsigned int dbs_update(struct cpufreq_policy *policy) 164 115 { ··· 137 184 /* Get Absolute Load */ 138 185 for_each_cpu(j, policy->cpus) { 139 186 struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j); 140 - u64 cur_wall_time, cur_idle_time; 141 - unsigned int idle_time, wall_time; 187 + u64 update_time, cur_idle_time; 188 + unsigned int idle_time, time_elapsed; 142 189 unsigned int load; 143 190 144 - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy); 191 + cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy); 145 192 146 - wall_time = cur_wall_time - j_cdbs->prev_cpu_wall; 147 - j_cdbs->prev_cpu_wall = cur_wall_time; 193 + time_elapsed = update_time - j_cdbs->prev_update_time; 194 + j_cdbs->prev_update_time = update_time; 148 195 149 - if (cur_idle_time <= j_cdbs->prev_cpu_idle) { 150 - idle_time = 0; 151 - } else { 152 - idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; 153 - j_cdbs->prev_cpu_idle = 
cur_idle_time; 154 - } 196 + idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; 197 + j_cdbs->prev_cpu_idle = cur_idle_time; 155 198 156 199 if (ignore_nice) { 157 200 u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; ··· 156 207 j_cdbs->prev_cpu_nice = cur_nice; 157 208 } 158 209 159 - if (unlikely(!wall_time || wall_time < idle_time)) 210 + if (unlikely(!time_elapsed || time_elapsed < idle_time)) 160 211 continue; 161 212 162 213 /* ··· 177 228 * 178 229 * Detecting this situation is easy: the governor's utilization 179 230 * update handler would not have run during CPU-idle periods. 180 - * Hence, an unusually large 'wall_time' (as compared to the 231 + * Hence, an unusually large 'time_elapsed' (as compared to the 181 232 * sampling rate) indicates this scenario. 182 233 * 183 234 * prev_load can be zero in two cases and we must recalculate it ··· 185 236 * - during long idle intervals 186 237 * - explicitly set to zero 187 238 */ 188 - if (unlikely(wall_time > (2 * sampling_rate) && 239 + if (unlikely(time_elapsed > 2 * sampling_rate && 189 240 j_cdbs->prev_load)) { 190 241 load = j_cdbs->prev_load; 191 242 ··· 196 247 */ 197 248 j_cdbs->prev_load = 0; 198 249 } else { 199 - load = 100 * (wall_time - idle_time) / wall_time; 250 + load = 100 * (time_elapsed - idle_time) / time_elapsed; 200 251 j_cdbs->prev_load = load; 201 252 } 202 253 ··· 206 257 return max_load; 207 258 } 208 259 EXPORT_SYMBOL_GPL(dbs_update); 209 - 210 - static void gov_set_update_util(struct policy_dbs_info *policy_dbs, 211 - unsigned int delay_us) 212 - { 213 - struct cpufreq_policy *policy = policy_dbs->policy; 214 - int cpu; 215 - 216 - gov_update_sample_delay(policy_dbs, delay_us); 217 - policy_dbs->last_sample_time = 0; 218 - 219 - for_each_cpu(cpu, policy->cpus) { 220 - struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu); 221 - 222 - cpufreq_set_update_util_data(cpu, &cdbs->update_util); 223 - } 224 - } 225 - 226 - static inline void gov_clear_update_util(struct cpufreq_policy 
*policy) 227 - { 228 - int i; 229 - 230 - for_each_cpu(i, policy->cpus) 231 - cpufreq_set_update_util_data(i, NULL); 232 - 233 - synchronize_sched(); 234 - } 235 - 236 - static void gov_cancel_work(struct cpufreq_policy *policy) 237 - { 238 - struct policy_dbs_info *policy_dbs = policy->governor_data; 239 - 240 - gov_clear_update_util(policy_dbs->policy); 241 - irq_work_sync(&policy_dbs->irq_work); 242 - cancel_work_sync(&policy_dbs->work); 243 - atomic_set(&policy_dbs->work_count, 0); 244 - policy_dbs->work_in_progress = false; 245 - } 246 260 247 261 static void dbs_work_handler(struct work_struct *work) 248 262 { ··· 294 382 irq_work_queue(&policy_dbs->irq_work); 295 383 } 296 384 385 + static void gov_set_update_util(struct policy_dbs_info *policy_dbs, 386 + unsigned int delay_us) 387 + { 388 + struct cpufreq_policy *policy = policy_dbs->policy; 389 + int cpu; 390 + 391 + gov_update_sample_delay(policy_dbs, delay_us); 392 + policy_dbs->last_sample_time = 0; 393 + 394 + for_each_cpu(cpu, policy->cpus) { 395 + struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu); 396 + 397 + cpufreq_add_update_util_hook(cpu, &cdbs->update_util, 398 + dbs_update_util_handler); 399 + } 400 + } 401 + 402 + static inline void gov_clear_update_util(struct cpufreq_policy *policy) 403 + { 404 + int i; 405 + 406 + for_each_cpu(i, policy->cpus) 407 + cpufreq_remove_update_util_hook(i); 408 + 409 + synchronize_sched(); 410 + } 411 + 412 + static void gov_cancel_work(struct cpufreq_policy *policy) 413 + { 414 + struct policy_dbs_info *policy_dbs = policy->governor_data; 415 + 416 + gov_clear_update_util(policy_dbs->policy); 417 + irq_work_sync(&policy_dbs->irq_work); 418 + cancel_work_sync(&policy_dbs->work); 419 + atomic_set(&policy_dbs->work_count, 0); 420 + policy_dbs->work_in_progress = false; 421 + } 422 + 297 423 static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy, 298 424 struct dbs_governor *gov) 299 425 { ··· 354 404 struct cpu_dbs_info *j_cdbs = 
&per_cpu(cpu_dbs, j); 355 405 356 406 j_cdbs->policy_dbs = policy_dbs; 357 - j_cdbs->update_util.func = dbs_update_util_handler; 358 407 } 359 408 return policy_dbs; 360 409 } ··· 402 453 policy_dbs->dbs_data = dbs_data; 403 454 policy->governor_data = policy_dbs; 404 455 405 - mutex_lock(&dbs_data->mutex); 406 - dbs_data->usage_count++; 407 - list_add(&policy_dbs->list, &dbs_data->policy_dbs_list); 408 - mutex_unlock(&dbs_data->mutex); 456 + gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list); 409 457 goto out; 410 458 } 411 459 ··· 412 466 goto free_policy_dbs_info; 413 467 } 414 468 415 - INIT_LIST_HEAD(&dbs_data->policy_dbs_list); 416 - mutex_init(&dbs_data->mutex); 469 + gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list); 417 470 418 471 ret = gov->init(dbs_data, !policy->governor->initialized); 419 472 if (ret) ··· 432 487 if (!have_governor_per_policy()) 433 488 gov->gdbs_data = dbs_data; 434 489 490 + policy_dbs->dbs_data = dbs_data; 435 491 policy->governor_data = policy_dbs; 436 492 437 - policy_dbs->dbs_data = dbs_data; 438 - dbs_data->usage_count = 1; 439 - list_add(&policy_dbs->list, &dbs_data->policy_dbs_list); 440 - 441 493 gov->kobj_type.sysfs_ops = &governor_sysfs_ops; 442 - ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type, 494 + ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type, 443 495 get_governor_parent_kobj(policy), 444 496 "%s", gov->gov.name); 445 497 if (!ret) ··· 465 523 struct dbs_governor *gov = dbs_governor_of(policy); 466 524 struct policy_dbs_info *policy_dbs = policy->governor_data; 467 525 struct dbs_data *dbs_data = policy_dbs->dbs_data; 468 - int count; 526 + unsigned int count; 469 527 470 528 /* Protect gov->gdbs_data against concurrent updates. 
*/ 471 529 mutex_lock(&gov_dbs_data_mutex); 472 530 473 - mutex_lock(&dbs_data->mutex); 474 - list_del(&policy_dbs->list); 475 - count = --dbs_data->usage_count; 476 - mutex_unlock(&dbs_data->mutex); 531 + count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list); 532 + 533 + policy->governor_data = NULL; 477 534 478 535 if (!count) { 479 - kobject_put(&dbs_data->kobj); 480 - 481 - policy->governor_data = NULL; 482 - 483 536 if (!have_governor_per_policy()) 484 537 gov->gdbs_data = NULL; 485 538 486 539 gov->exit(dbs_data, policy->governor->initialized == 1); 487 - mutex_destroy(&dbs_data->mutex); 488 540 kfree(dbs_data); 489 - } else { 490 - policy->governor_data = NULL; 491 541 } 492 542 493 543 free_policy_dbs_info(policy_dbs, gov); ··· 508 574 509 575 for_each_cpu(j, policy->cpus) { 510 576 struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j); 511 - unsigned int prev_load; 512 577 513 - j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy); 514 - 515 - prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle; 516 - j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall; 578 + j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy); 579 + /* 580 + * Make the first invocation of dbs_update() compute the load. 581 + */ 582 + j_cdbs->prev_load = 0; 517 583 518 584 if (ignore_nice) 519 585 j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+11 -35
drivers/cpufreq/cpufreq_governor.h
··· 24 24 #include <linux/module.h> 25 25 #include <linux/mutex.h> 26 26 27 - /* 28 - * The polling frequency depends on the capability of the processor. Default 29 - * polling frequency is 1000 times the transition latency of the processor. The 30 - * governor will work on any processor with transition latency <= 10ms, using 31 - * appropriate sampling rate. 32 - * 33 - * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL) 34 - * this governor will not work. All times here are in us (micro seconds). 35 - */ 36 - #define MIN_SAMPLING_RATE_RATIO (2) 37 - #define LATENCY_MULTIPLIER (1000) 38 - #define MIN_LATENCY_MULTIPLIER (20) 39 - #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) 40 - 41 27 /* Ondemand Sampling types */ 42 28 enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; 43 29 ··· 38 52 39 53 /* Governor demand based switching data (per-policy or global). */ 40 54 struct dbs_data { 41 - int usage_count; 55 + struct gov_attr_set attr_set; 42 56 void *tuners; 43 57 unsigned int min_sampling_rate; 44 58 unsigned int ignore_nice_load; ··· 46 60 unsigned int sampling_down_factor; 47 61 unsigned int up_threshold; 48 62 unsigned int io_is_busy; 49 - 50 - struct kobject kobj; 51 - struct list_head policy_dbs_list; 52 - /* 53 - * Protect concurrent updates to governor tunables from sysfs, 54 - * policy_dbs_list and usage_count. 
55 - */ 56 - struct mutex mutex; 57 63 }; 58 64 59 - /* Governor's specific attributes */ 60 - struct dbs_data; 61 - struct governor_attr { 62 - struct attribute attr; 63 - ssize_t (*show)(struct dbs_data *dbs_data, char *buf); 64 - ssize_t (*store)(struct dbs_data *dbs_data, const char *buf, 65 - size_t count); 66 - }; 65 + static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set) 66 + { 67 + return container_of(attr_set, struct dbs_data, attr_set); 68 + } 67 69 68 70 #define gov_show_one(_gov, file_name) \ 69 71 static ssize_t show_##file_name \ 70 - (struct dbs_data *dbs_data, char *buf) \ 72 + (struct gov_attr_set *attr_set, char *buf) \ 71 73 { \ 74 + struct dbs_data *dbs_data = to_dbs_data(attr_set); \ 72 75 struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \ 73 76 return sprintf(buf, "%u\n", tuners->file_name); \ 74 77 } 75 78 76 79 #define gov_show_one_common(file_name) \ 77 80 static ssize_t show_##file_name \ 78 - (struct dbs_data *dbs_data, char *buf) \ 81 + (struct gov_attr_set *attr_set, char *buf) \ 79 82 { \ 83 + struct dbs_data *dbs_data = to_dbs_data(attr_set); \ 80 84 return sprintf(buf, "%u\n", dbs_data->file_name); \ 81 85 } 82 86 ··· 111 135 /* Per cpu structures */ 112 136 struct cpu_dbs_info { 113 137 u64 prev_cpu_idle; 114 - u64 prev_cpu_wall; 138 + u64 prev_update_time; 115 139 u64 prev_cpu_nice; 116 140 /* 117 141 * Used to keep track of load in the previous interval. However, when ··· 160 184 (struct cpufreq_policy *, unsigned int, unsigned int), 161 185 unsigned int powersave_bias); 162 186 void od_unregister_powersave_bias_handler(void); 163 - ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf, 187 + ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, 164 188 size_t count); 165 189 void gov_update_cpu_data(struct dbs_data *dbs_data); 166 190 #endif /* _CPUFREQ_GOVERNOR_H */
+84
drivers/cpufreq/cpufreq_governor_attr_set.c
··· 1 + /* 2 + * Abstract code for CPUFreq governor tunable sysfs attributes. 3 + * 4 + * Copyright (C) 2016, Intel Corporation 5 + * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + 12 + #include "cpufreq_governor.h" 13 + 14 + static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj) 15 + { 16 + return container_of(kobj, struct gov_attr_set, kobj); 17 + } 18 + 19 + static inline struct governor_attr *to_gov_attr(struct attribute *attr) 20 + { 21 + return container_of(attr, struct governor_attr, attr); 22 + } 23 + 24 + static ssize_t governor_show(struct kobject *kobj, struct attribute *attr, 25 + char *buf) 26 + { 27 + struct governor_attr *gattr = to_gov_attr(attr); 28 + 29 + return gattr->show(to_gov_attr_set(kobj), buf); 30 + } 31 + 32 + static ssize_t governor_store(struct kobject *kobj, struct attribute *attr, 33 + const char *buf, size_t count) 34 + { 35 + struct gov_attr_set *attr_set = to_gov_attr_set(kobj); 36 + struct governor_attr *gattr = to_gov_attr(attr); 37 + int ret; 38 + 39 + mutex_lock(&attr_set->update_lock); 40 + ret = attr_set->usage_count ? 
gattr->store(attr_set, buf, count) : -EBUSY; 41 + mutex_unlock(&attr_set->update_lock); 42 + return ret; 43 + } 44 + 45 + const struct sysfs_ops governor_sysfs_ops = { 46 + .show = governor_show, 47 + .store = governor_store, 48 + }; 49 + EXPORT_SYMBOL_GPL(governor_sysfs_ops); 50 + 51 + void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node) 52 + { 53 + INIT_LIST_HEAD(&attr_set->policy_list); 54 + mutex_init(&attr_set->update_lock); 55 + attr_set->usage_count = 1; 56 + list_add(list_node, &attr_set->policy_list); 57 + } 58 + EXPORT_SYMBOL_GPL(gov_attr_set_init); 59 + 60 + void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node) 61 + { 62 + mutex_lock(&attr_set->update_lock); 63 + attr_set->usage_count++; 64 + list_add(list_node, &attr_set->policy_list); 65 + mutex_unlock(&attr_set->update_lock); 66 + } 67 + EXPORT_SYMBOL_GPL(gov_attr_set_get); 68 + 69 + unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node) 70 + { 71 + unsigned int count; 72 + 73 + mutex_lock(&attr_set->update_lock); 74 + list_del(list_node); 75 + count = --attr_set->usage_count; 76 + mutex_unlock(&attr_set->update_lock); 77 + if (count) 78 + return count; 79 + 80 + kobject_put(&attr_set->kobj); 81 + mutex_destroy(&attr_set->update_lock); 82 + return 0; 83 + } 84 + EXPORT_SYMBOL_GPL(gov_attr_set_put);
+17 -12
drivers/cpufreq/cpufreq_ondemand.c
··· 207 207 /************************** sysfs interface ************************/ 208 208 static struct dbs_governor od_dbs_gov; 209 209 210 - static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf, 211 - size_t count) 210 + static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf, 211 + size_t count) 212 212 { 213 + struct dbs_data *dbs_data = to_dbs_data(attr_set); 213 214 unsigned int input; 214 215 int ret; 215 216 ··· 225 224 return count; 226 225 } 227 226 228 - static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf, 229 - size_t count) 227 + static ssize_t store_up_threshold(struct gov_attr_set *attr_set, 228 + const char *buf, size_t count) 230 229 { 230 + struct dbs_data *dbs_data = to_dbs_data(attr_set); 231 231 unsigned int input; 232 232 int ret; 233 233 ret = sscanf(buf, "%u", &input); ··· 242 240 return count; 243 241 } 244 242 245 - static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data, 246 - const char *buf, size_t count) 243 + static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set, 244 + const char *buf, size_t count) 247 245 { 246 + struct dbs_data *dbs_data = to_dbs_data(attr_set); 248 247 struct policy_dbs_info *policy_dbs; 249 248 unsigned int input; 250 249 int ret; ··· 257 254 dbs_data->sampling_down_factor = input; 258 255 259 256 /* Reset down sampling multiplier in case it was active */ 260 - list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) { 257 + list_for_each_entry(policy_dbs, &attr_set->policy_list, list) { 261 258 /* 262 259 * Doing this without locking might lead to using different 263 260 * rate_mult values in od_update() and od_dbs_timer(). 
··· 270 267 return count; 271 268 } 272 269 273 - static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, 274 - const char *buf, size_t count) 270 + static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set, 271 + const char *buf, size_t count) 275 272 { 273 + struct dbs_data *dbs_data = to_dbs_data(attr_set); 276 274 unsigned int input; 277 275 int ret; 278 276 ··· 295 291 return count; 296 292 } 297 293 298 - static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf, 299 - size_t count) 294 + static ssize_t store_powersave_bias(struct gov_attr_set *attr_set, 295 + const char *buf, size_t count) 300 296 { 297 + struct dbs_data *dbs_data = to_dbs_data(attr_set); 301 298 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 302 299 struct policy_dbs_info *policy_dbs; 303 300 unsigned int input; ··· 313 308 314 309 od_tuners->powersave_bias = input; 315 310 316 - list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) 311 + list_for_each_entry(policy_dbs, &attr_set->policy_list, list) 317 312 ondemand_powersave_bias_init(policy_dbs->policy); 318 313 319 314 return count;
+34 -42
drivers/cpufreq/e_powersaver.c
··· 6 6 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* 7 7 */ 8 8 9 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 + 9 11 #include <linux/kernel.h> 10 12 #include <linux/module.h> 11 13 #include <linux/init.h> ··· 22 20 #include <asm/msr.h> 23 21 #include <asm/tsc.h> 24 22 25 - #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 23 + #if IS_ENABLED(CONFIG_ACPI_PROCESSOR) 26 24 #include <linux/acpi.h> 27 25 #include <acpi/processor.h> 28 26 #endif ··· 35 33 36 34 struct eps_cpu_data { 37 35 u32 fsb; 38 - #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 36 + #if IS_ENABLED(CONFIG_ACPI_PROCESSOR) 39 37 u32 bios_limit; 40 38 #endif 41 39 struct cpufreq_frequency_table freq_table[]; ··· 48 46 static int voltage_failsafe_off; 49 47 static int set_max_voltage; 50 48 51 - #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 49 + #if IS_ENABLED(CONFIG_ACPI_PROCESSOR) 52 50 static int ignore_acpi_limit; 53 51 54 52 static struct acpi_processor_performance *eps_acpi_cpu_perf; ··· 143 141 /* Print voltage and multiplier */ 144 142 rdmsr(MSR_IA32_PERF_STATUS, lo, hi); 145 143 current_voltage = lo & 0xff; 146 - printk(KERN_INFO "eps: Current voltage = %dmV\n", 147 - current_voltage * 16 + 700); 144 + pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700); 148 145 current_multiplier = (lo >> 8) & 0xff; 149 - printk(KERN_INFO "eps: Current multiplier = %d\n", 150 - current_multiplier); 146 + pr_info("Current multiplier = %d\n", current_multiplier); 151 147 } 152 148 #endif 153 149 return 0; ··· 166 166 dest_state = centaur->freq_table[index].driver_data & 0xffff; 167 167 ret = eps_set_state(centaur, policy, dest_state); 168 168 if (ret) 169 - printk(KERN_ERR "eps: Timeout!\n"); 169 + pr_err("Timeout!\n"); 170 170 return ret; 171 171 } 172 172 ··· 186 186 int k, step, voltage; 187 187 int ret; 188 188 int states; 189 - #if defined CONFIG_ACPI_PROCESSOR || defined 
CONFIG_ACPI_PROCESSOR_MODULE 189 + #if IS_ENABLED(CONFIG_ACPI_PROCESSOR) 190 190 unsigned int limit; 191 191 #endif 192 192 ··· 194 194 return -ENODEV; 195 195 196 196 /* Check brand */ 197 - printk(KERN_INFO "eps: Detected VIA "); 197 + pr_info("Detected VIA "); 198 198 199 199 switch (c->x86_model) { 200 200 case 10: 201 201 rdmsr(0x1153, lo, hi); 202 202 brand = (((lo >> 2) ^ lo) >> 18) & 3; 203 - printk(KERN_CONT "Model A "); 203 + pr_cont("Model A "); 204 204 break; 205 205 case 13: 206 206 rdmsr(0x1154, lo, hi); 207 207 brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff; 208 - printk(KERN_CONT "Model D "); 208 + pr_cont("Model D "); 209 209 break; 210 210 } 211 211 212 212 switch (brand) { 213 213 case EPS_BRAND_C7M: 214 - printk(KERN_CONT "C7-M\n"); 214 + pr_cont("C7-M\n"); 215 215 break; 216 216 case EPS_BRAND_C7: 217 - printk(KERN_CONT "C7\n"); 217 + pr_cont("C7\n"); 218 218 break; 219 219 case EPS_BRAND_EDEN: 220 - printk(KERN_CONT "Eden\n"); 220 + pr_cont("Eden\n"); 221 221 break; 222 222 case EPS_BRAND_C7D: 223 - printk(KERN_CONT "C7-D\n"); 223 + pr_cont("C7-D\n"); 224 224 break; 225 225 case EPS_BRAND_C3: 226 - printk(KERN_CONT "C3\n"); 226 + pr_cont("C3\n"); 227 227 return -ENODEV; 228 228 break; 229 229 } ··· 235 235 /* Can be locked at 0 */ 236 236 rdmsrl(MSR_IA32_MISC_ENABLE, val); 237 237 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { 238 - printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n"); 238 + pr_info("Can't enable Enhanced PowerSaver\n"); 239 239 return -ENODEV; 240 240 } 241 241 } ··· 243 243 /* Print voltage and multiplier */ 244 244 rdmsr(MSR_IA32_PERF_STATUS, lo, hi); 245 245 current_voltage = lo & 0xff; 246 - printk(KERN_INFO "eps: Current voltage = %dmV\n", 247 - current_voltage * 16 + 700); 246 + pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700); 248 247 current_multiplier = (lo >> 8) & 0xff; 249 - printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier); 248 + pr_info("Current multiplier = 
%d\n", current_multiplier); 250 249 251 250 /* Print limits */ 252 251 max_voltage = hi & 0xff; 253 - printk(KERN_INFO "eps: Highest voltage = %dmV\n", 254 - max_voltage * 16 + 700); 252 + pr_info("Highest voltage = %dmV\n", max_voltage * 16 + 700); 255 253 max_multiplier = (hi >> 8) & 0xff; 256 - printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier); 254 + pr_info("Highest multiplier = %d\n", max_multiplier); 257 255 min_voltage = (hi >> 16) & 0xff; 258 - printk(KERN_INFO "eps: Lowest voltage = %dmV\n", 259 - min_voltage * 16 + 700); 256 + pr_info("Lowest voltage = %dmV\n", min_voltage * 16 + 700); 260 257 min_multiplier = (hi >> 24) & 0xff; 261 - printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier); 258 + pr_info("Lowest multiplier = %d\n", min_multiplier); 262 259 263 260 /* Sanity checks */ 264 261 if (current_multiplier == 0 || max_multiplier == 0 ··· 273 276 274 277 /* Check for systems using underclocked CPU */ 275 278 if (!freq_failsafe_off && max_multiplier != current_multiplier) { 276 - printk(KERN_INFO "eps: Your processor is running at different " 277 - "frequency then its maximum. Aborting.\n"); 278 - printk(KERN_INFO "eps: You can use freq_failsafe_off option " 279 - "to disable this check.\n"); 279 + pr_info("Your processor is running at different frequency then its maximum. Aborting.\n"); 280 + pr_info("You can use freq_failsafe_off option to disable this check.\n"); 280 281 return -EINVAL; 281 282 } 282 283 if (!voltage_failsafe_off && max_voltage != current_voltage) { 283 - printk(KERN_INFO "eps: Your processor is running at different " 284 - "voltage then its maximum. Aborting.\n"); 285 - printk(KERN_INFO "eps: You can use voltage_failsafe_off " 286 - "option to disable this check.\n"); 284 + pr_info("Your processor is running at different voltage then its maximum. 
Aborting.\n"); 285 + pr_info("You can use voltage_failsafe_off option to disable this check.\n"); 287 286 return -EINVAL; 288 287 } 289 288 290 289 /* Calc FSB speed */ 291 290 fsb = cpu_khz / current_multiplier; 292 291 293 - #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 292 + #if IS_ENABLED(CONFIG_ACPI_PROCESSOR) 294 293 /* Check for ACPI processor speed limit */ 295 294 if (!ignore_acpi_limit && !eps_acpi_init()) { 296 295 if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) { 297 - printk(KERN_INFO "eps: ACPI limit %u.%uGHz\n", 296 + pr_info("ACPI limit %u.%uGHz\n", 298 297 limit/1000000, 299 298 (limit%1000000)/10000); 300 299 eps_acpi_exit(policy); 301 300 /* Check if max_multiplier is in BIOS limits */ 302 301 if (limit && max_multiplier * fsb > limit) { 303 - printk(KERN_INFO "eps: Aborting.\n"); 302 + pr_info("Aborting\n"); 304 303 return -EINVAL; 305 304 } 306 305 } ··· 312 319 v = (set_max_voltage - 700) / 16; 313 320 /* Check if voltage is within limits */ 314 321 if (v >= min_voltage && v <= max_voltage) { 315 - printk(KERN_INFO "eps: Setting %dmV as maximum.\n", 316 - v * 16 + 700); 322 + pr_info("Setting %dmV as maximum\n", v * 16 + 700); 317 323 max_voltage = v; 318 324 } 319 325 } ··· 333 341 334 342 /* Copy basic values */ 335 343 centaur->fsb = fsb; 336 - #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 344 + #if IS_ENABLED(CONFIG_ACPI_PROCESSOR) 337 345 centaur->bios_limit = limit; 338 346 #endif 339 347 ··· 418 426 MODULE_PARM_DESC(freq_failsafe_off, "Disable current vs max frequency check"); 419 427 module_param(voltage_failsafe_off, int, 0644); 420 428 MODULE_PARM_DESC(voltage_failsafe_off, "Disable current vs max voltage check"); 421 - #if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE 429 + #if IS_ENABLED(CONFIG_ACPI_PROCESSOR) 422 430 module_param(ignore_acpi_limit, int, 0644); 423 431 MODULE_PARM_DESC(ignore_acpi_limit, "Don't check ACPI's processor speed 
limit"); 424 432 #endif
+3 -1
drivers/cpufreq/elanfreq.c
··· 16 16 * 17 17 */ 18 18 19 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20 + 19 21 #include <linux/kernel.h> 20 22 #include <linux/module.h> 21 23 #include <linux/init.h> ··· 187 185 static int __init elanfreq_setup(char *str) 188 186 { 189 187 max_freq = simple_strtoul(str, &str, 0); 190 - printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n"); 188 + pr_warn("You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n"); 191 189 return 1; 192 190 } 193 191 __setup("elanfreq=", elanfreq_setup);
-42
drivers/cpufreq/hisi-acpu-cpufreq.c
··· 1 - /* 2 - * Hisilicon Platforms Using ACPU CPUFreq Support 3 - * 4 - * Copyright (c) 2015 Hisilicon Limited. 5 - * Copyright (c) 2015 Linaro Limited. 6 - * 7 - * Leo Yan <leo.yan@linaro.org> 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of the GNU General Public License version 2 as 11 - * published by the Free Software Foundation. 12 - * 13 - * This program is distributed "as is" WITHOUT ANY WARRANTY of any 14 - * kind, whether express or implied; without even the implied warranty 15 - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 - * GNU General Public License for more details. 17 - */ 18 - 19 - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20 - 21 - #include <linux/err.h> 22 - #include <linux/init.h> 23 - #include <linux/kernel.h> 24 - #include <linux/module.h> 25 - #include <linux/of.h> 26 - #include <linux/platform_device.h> 27 - 28 - static int __init hisi_acpu_cpufreq_driver_init(void) 29 - { 30 - struct platform_device *pdev; 31 - 32 - if (!of_machine_is_compatible("hisilicon,hi6220")) 33 - return -ENODEV; 34 - 35 - pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0); 36 - return PTR_ERR_OR_ZERO(pdev); 37 - } 38 - module_init(hisi_acpu_cpufreq_driver_init); 39 - 40 - MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>"); 41 - MODULE_DESCRIPTION("Hisilicon acpu cpufreq driver"); 42 - MODULE_LICENSE("GPL v2");
+5 -5
drivers/cpufreq/ia64-acpi-cpufreq.c
··· 8 8 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 9 9 */ 10 10 11 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 + 11 13 #include <linux/kernel.h> 12 14 #include <linux/slab.h> 13 15 #include <linux/module.h> ··· 120 118 121 119 if (ret) { 122 120 set_cpus_allowed_ptr(current, &saved_mask); 123 - printk(KERN_WARNING "get performance failed with error %d\n", 124 - ret); 121 + pr_warn("get performance failed with error %d\n", ret); 125 122 ret = 0; 126 123 goto migrate_end; 127 124 } ··· 178 177 179 178 ret = processor_set_pstate(value); 180 179 if (ret) { 181 - printk(KERN_WARNING "Transition failed with error %d\n", ret); 180 + pr_warn("Transition failed with error %d\n", ret); 182 181 retval = -ENODEV; 183 182 goto migrate_end; 184 183 } ··· 292 291 /* notify BIOS that we exist */ 293 292 acpi_processor_notify_smm(THIS_MODULE); 294 293 295 - printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management " 296 - "activated.\n", cpu); 294 + pr_info("CPU%u - ACPI performance management activated\n", cpu); 297 295 298 296 for (i = 0; i < data->acpi_data.state_count; i++) 299 297 pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
+188 -33
drivers/cpufreq/intel_pstate.c
··· 10 10 * of the License. 11 11 */ 12 12 13 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 + 13 15 #include <linux/kernel.h> 14 16 #include <linux/kernel_stat.h> 15 17 #include <linux/module.h> ··· 40 38 #define ATOM_VIDS 0x66b 41 39 #define ATOM_TURBO_RATIOS 0x66c 42 40 #define ATOM_TURBO_VIDS 0x66d 41 + 42 + #ifdef CONFIG_ACPI 43 + #include <acpi/processor.h> 44 + #endif 43 45 44 46 #define FRAC_BITS 8 45 47 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) ··· 178 172 * @prev_cummulative_iowait: IO Wait time difference from last and 179 173 * current sample 180 174 * @sample: Storage for storing last Sample data 175 + * @acpi_perf_data: Stores ACPI perf information read from _PSS 176 + * @valid_pss_table: Set to true for valid ACPI _PSS entries found 181 177 * 182 178 * This structure stores per CPU instance data for all CPUs. 183 179 */ ··· 198 190 u64 prev_tsc; 199 191 u64 prev_cummulative_iowait; 200 192 struct sample sample; 193 + #ifdef CONFIG_ACPI 194 + struct acpi_processor_performance acpi_perf_data; 195 + bool valid_pss_table; 196 + #endif 201 197 }; 202 198 203 199 static struct cpudata **all_cpu_data; ··· 270 258 static struct pstate_funcs pstate_funcs; 271 259 static int hwp_active; 272 260 261 + #ifdef CONFIG_ACPI 262 + static bool acpi_ppc; 263 + #endif 273 264 274 265 /** 275 266 * struct perf_limits - Store user and policy limits ··· 346 331 static struct perf_limits *limits = &powersave_limits; 347 332 #endif 348 333 334 + #ifdef CONFIG_ACPI 335 + 336 + static bool intel_pstate_get_ppc_enable_status(void) 337 + { 338 + if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 339 + acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 340 + return true; 341 + 342 + return acpi_ppc; 343 + } 344 + 345 + /* 346 + * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and 347 + * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and 348 + * max_turbo_pstate fields. 
The PERF_CTL MSR contains 16 bit value for P state 349 + * ratio, out of it only high 8 bits are used. For example 0x1700 is setting 350 + * target ratio 0x17. The _PSS control value stores in a format which can be 351 + * directly written to PERF_CTL MSR. But in intel_pstate driver this shift 352 + * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()). 353 + * This function converts the _PSS control value to intel pstate driver format 354 + * for comparison and assignment. 355 + */ 356 + static int convert_to_native_pstate_format(struct cpudata *cpu, int index) 357 + { 358 + return cpu->acpi_perf_data.states[index].control >> 8; 359 + } 360 + 361 + static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 362 + { 363 + struct cpudata *cpu; 364 + int turbo_pss_ctl; 365 + int ret; 366 + int i; 367 + 368 + if (!intel_pstate_get_ppc_enable_status()) 369 + return; 370 + 371 + cpu = all_cpu_data[policy->cpu]; 372 + 373 + ret = acpi_processor_register_performance(&cpu->acpi_perf_data, 374 + policy->cpu); 375 + if (ret) 376 + return; 377 + 378 + /* 379 + * Check if the control value in _PSS is for PERF_CTL MSR, which should 380 + * guarantee that the states returned by it map to the states in our 381 + * list directly. 382 + */ 383 + if (cpu->acpi_perf_data.control_register.space_id != 384 + ACPI_ADR_SPACE_FIXED_HARDWARE) 385 + goto err; 386 + 387 + /* 388 + * If there is only one entry _PSS, simply ignore _PSS and continue as 389 + * usual without taking _PSS into account 390 + */ 391 + if (cpu->acpi_perf_data.state_count < 2) 392 + goto err; 393 + 394 + pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu); 395 + for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { 396 + pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", 397 + (i == cpu->acpi_perf_data.state ? 
'*' : ' '), i, 398 + (u32) cpu->acpi_perf_data.states[i].core_frequency, 399 + (u32) cpu->acpi_perf_data.states[i].power, 400 + (u32) cpu->acpi_perf_data.states[i].control); 401 + } 402 + 403 + /* 404 + * The _PSS table doesn't contain whole turbo frequency range. 405 + * This just contains +1 MHZ above the max non turbo frequency, 406 + * with control value corresponding to max turbo ratio. But 407 + * when cpufreq set policy is called, it will call with this 408 + * max frequency, which will cause a reduced performance as 409 + * this driver uses real max turbo frequency as the max 410 + * frequency. So correct this frequency in _PSS table to 411 + * correct max turbo frequency based on the turbo ratio. 412 + * Also need to convert to MHz as _PSS freq is in MHz. 413 + */ 414 + turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0); 415 + if (turbo_pss_ctl > cpu->pstate.max_pstate) 416 + cpu->acpi_perf_data.states[0].core_frequency = 417 + policy->cpuinfo.max_freq / 1000; 418 + cpu->valid_pss_table = true; 419 + pr_info("_PPC limits will be enforced\n"); 420 + 421 + return; 422 + 423 + err: 424 + cpu->valid_pss_table = false; 425 + acpi_processor_unregister_performance(policy->cpu); 426 + } 427 + 428 + static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 429 + { 430 + struct cpudata *cpu; 431 + 432 + cpu = all_cpu_data[policy->cpu]; 433 + if (!cpu->valid_pss_table) 434 + return; 435 + 436 + acpi_processor_unregister_performance(policy->cpu); 437 + } 438 + 439 + #else 440 + static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 441 + { 442 + } 443 + 444 + static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 445 + { 446 + } 447 + #endif 448 + 349 449 static inline void pid_reset(struct _pid *pid, int setpoint, int busy, 350 450 int deadband, int integral) { 351 451 pid->setpoint = int_tofp(setpoint); ··· 471 341 472 342 static inline void pid_p_gain_set(struct _pid *pid, int percent) 473 343 { 474 - 
pid->p_gain = div_fp(int_tofp(percent), int_tofp(100)); 344 + pid->p_gain = div_fp(percent, 100); 475 345 } 476 346 477 347 static inline void pid_i_gain_set(struct _pid *pid, int percent) 478 348 { 479 - pid->i_gain = div_fp(int_tofp(percent), int_tofp(100)); 349 + pid->i_gain = div_fp(percent, 100); 480 350 } 481 351 482 352 static inline void pid_d_gain_set(struct _pid *pid, int percent) 483 353 { 484 - pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); 354 + pid->d_gain = div_fp(percent, 100); 485 355 } 486 356 487 357 static signed int pid_calc(struct _pid *pid, int32_t busy) ··· 659 529 660 530 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 661 531 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; 662 - turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total)); 532 + turbo_fp = div_fp(no_turbo, total); 663 533 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); 664 534 return sprintf(buf, "%u\n", turbo_pct); 665 535 } ··· 701 571 702 572 update_turbo_state(); 703 573 if (limits->turbo_disabled) { 704 - pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n"); 574 + pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 705 575 return -EPERM; 706 576 } 707 577 ··· 730 600 limits->max_perf_pct); 731 601 limits->max_perf_pct = max(limits->min_perf_pct, 732 602 limits->max_perf_pct); 733 - limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), 734 - int_tofp(100)); 603 + limits->max_perf = div_fp(limits->max_perf_pct, 100); 735 604 736 605 if (hwp_active) 737 606 intel_pstate_hwp_set_online_cpus(); ··· 754 625 limits->min_perf_pct); 755 626 limits->min_perf_pct = min(limits->max_perf_pct, 756 627 limits->min_perf_pct); 757 - limits->min_perf = div_fp(int_tofp(limits->min_perf_pct), 758 - int_tofp(100)); 628 + limits->min_perf = div_fp(limits->min_perf_pct, 100); 759 629 760 630 if (hwp_active) 761 631 intel_pstate_hwp_set_online_cpus(); ··· 1139 1011 struct sample *sample = &cpu->sample; 
1140 1012 int64_t core_pct; 1141 1013 1142 - core_pct = int_tofp(sample->aperf) * int_tofp(100); 1143 - core_pct = div64_u64(core_pct, int_tofp(sample->mperf)); 1014 + core_pct = sample->aperf * int_tofp(100); 1015 + core_pct = div64_u64(core_pct, sample->mperf); 1144 1016 1145 1017 sample->core_pct_busy = (int32_t)core_pct; 1146 1018 } ··· 1189 1061 cpu->pstate.scaling, cpu->sample.mperf); 1190 1062 } 1191 1063 1064 + static inline int32_t get_avg_pstate(struct cpudata *cpu) 1065 + { 1066 + return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf, 1067 + cpu->sample.mperf); 1068 + } 1069 + 1192 1070 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) 1193 1071 { 1194 1072 struct sample *sample = &cpu->sample; ··· 1227 1093 cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc); 1228 1094 cpu->sample.busy_scaled = cpu_load; 1229 1095 1230 - return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load); 1096 + return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load); 1231 1097 } 1232 1098 1233 1099 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) ··· 1249 1115 * specified pstate. 
1250 1116 */ 1251 1117 core_busy = cpu->sample.core_pct_busy; 1252 - max_pstate = int_tofp(cpu->pstate.max_pstate_physical); 1253 - current_pstate = int_tofp(cpu->pstate.current_pstate); 1118 + max_pstate = cpu->pstate.max_pstate_physical; 1119 + current_pstate = cpu->pstate.current_pstate; 1254 1120 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 1255 1121 1256 1122 /* ··· 1261 1127 */ 1262 1128 duration_ns = cpu->sample.time - cpu->last_sample_time; 1263 1129 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { 1264 - sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns), 1265 - int_tofp(duration_ns)); 1130 + sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); 1266 1131 core_busy = mul_fp(core_busy, sample_ratio); 1267 1132 } else { 1268 1133 sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); ··· 1379 1246 1380 1247 intel_pstate_busy_pid_reset(cpu); 1381 1248 1382 - cpu->update_util.func = intel_pstate_update_util; 1383 - 1384 - pr_debug("intel_pstate: controlling: cpu %d\n", cpunum); 1249 + pr_debug("controlling: cpu %d\n", cpunum); 1385 1250 1386 1251 return 0; 1387 1252 } ··· 1402 1271 1403 1272 /* Prevent intel_pstate_update_util() from using stale data. 
*/ 1404 1273 cpu->sample.time = 0; 1405 - cpufreq_set_update_util_data(cpu_num, &cpu->update_util); 1274 + cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 1275 + intel_pstate_update_util); 1406 1276 } 1407 1277 1408 1278 static void intel_pstate_clear_update_util_hook(unsigned int cpu) 1409 1279 { 1410 - cpufreq_set_update_util_data(cpu, NULL); 1280 + cpufreq_remove_update_util_hook(cpu); 1411 1281 synchronize_sched(); 1412 1282 } 1413 1283 ··· 1428 1296 1429 1297 static int intel_pstate_set_policy(struct cpufreq_policy *policy) 1430 1298 { 1299 + struct cpudata *cpu; 1300 + 1431 1301 if (!policy->cpuinfo.max_freq) 1432 1302 return -ENODEV; 1433 1303 1434 1304 intel_pstate_clear_update_util_hook(policy->cpu); 1435 1305 1306 + cpu = all_cpu_data[0]; 1307 + if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate) { 1308 + if (policy->max < policy->cpuinfo.max_freq && 1309 + policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) { 1310 + pr_debug("policy->max > max non turbo frequency\n"); 1311 + policy->max = policy->cpuinfo.max_freq; 1312 + } 1313 + } 1314 + 1436 1315 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { 1437 1316 limits = &performance_limits; 1438 1317 if (policy->max >= policy->cpuinfo.max_freq) { 1439 - pr_debug("intel_pstate: set performance\n"); 1318 + pr_debug("set performance\n"); 1440 1319 intel_pstate_set_performance_limits(limits); 1441 1320 goto out; 1442 1321 } 1443 1322 } else { 1444 - pr_debug("intel_pstate: set powersave\n"); 1323 + pr_debug("set powersave\n"); 1445 1324 limits = &powersave_limits; 1446 1325 } 1447 1326 ··· 1476 1333 /* Make sure min_perf_pct <= max_perf_pct */ 1477 1334 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); 1478 1335 1479 - limits->min_perf = div_fp(int_tofp(limits->min_perf_pct), 1480 - int_tofp(100)); 1481 - limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), 1482 - int_tofp(100)); 1336 + limits->min_perf = div_fp(limits->min_perf_pct, 100); 1337 + 
limits->max_perf = div_fp(limits->max_perf_pct, 100); 1483 1338 1484 1339 out: 1485 1340 intel_pstate_set_update_util_hook(policy->cpu); ··· 1504 1363 int cpu_num = policy->cpu; 1505 1364 struct cpudata *cpu = all_cpu_data[cpu_num]; 1506 1365 1507 - pr_debug("intel_pstate: CPU %d exiting\n", cpu_num); 1366 + pr_debug("CPU %d exiting\n", cpu_num); 1508 1367 1509 1368 intel_pstate_clear_update_util_hook(cpu_num); 1510 1369 ··· 1537 1396 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 1538 1397 policy->cpuinfo.max_freq = 1539 1398 cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1399 + intel_pstate_init_acpi_perf_limits(policy); 1540 1400 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 1541 1401 cpumask_set_cpu(policy->cpu, policy->cpus); 1402 + 1403 + return 0; 1404 + } 1405 + 1406 + static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 1407 + { 1408 + intel_pstate_exit_perf_limits(policy); 1542 1409 1543 1410 return 0; 1544 1411 } ··· 1557 1408 .setpolicy = intel_pstate_set_policy, 1558 1409 .get = intel_pstate_get, 1559 1410 .init = intel_pstate_cpu_init, 1411 + .exit = intel_pstate_cpu_exit, 1560 1412 .stop_cpu = intel_pstate_stop_cpu, 1561 1413 .name = "intel_pstate", 1562 1414 }; ··· 1601 1451 1602 1452 } 1603 1453 1604 - #if IS_ENABLED(CONFIG_ACPI) 1605 - #include <acpi/processor.h> 1454 + #ifdef CONFIG_ACPI 1606 1455 1607 1456 static bool intel_pstate_no_acpi_pss(void) 1608 1457 { ··· 1757 1608 if (intel_pstate_platform_pwr_mgmt_exists()) 1758 1609 return -ENODEV; 1759 1610 1760 - pr_info("Intel P-state driver initializing.\n"); 1611 + pr_info("Intel P-state driver initializing\n"); 1761 1612 1762 1613 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); 1763 1614 if (!all_cpu_data) ··· 1774 1625 intel_pstate_sysfs_expose_params(); 1775 1626 1776 1627 if (hwp_active) 1777 - pr_info("intel_pstate: HWP enabled\n"); 1628 + pr_info("HWP enabled\n"); 1778 1629 1779 1630 return rc; 1780 1631 out: ··· 1800 1651 if 
(!strcmp(str, "disable")) 1801 1652 no_load = 1; 1802 1653 if (!strcmp(str, "no_hwp")) { 1803 - pr_info("intel_pstate: HWP disabled\n"); 1654 + pr_info("HWP disabled\n"); 1804 1655 no_hwp = 1; 1805 1656 } 1806 1657 if (!strcmp(str, "force")) 1807 1658 force_load = 1; 1808 1659 if (!strcmp(str, "hwp_only")) 1809 1660 hwp_only = 1; 1661 + 1662 + #ifdef CONFIG_ACPI 1663 + if (!strcmp(str, "support_acpi_ppc")) 1664 + acpi_ppc = true; 1665 + #endif 1666 + 1810 1667 return 0; 1811 1668 } 1812 1669 early_param("intel_pstate", intel_pstate_setup);
+35 -49
drivers/cpufreq/longhaul.c
··· 21 21 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* 22 22 */ 23 23 24 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 25 + 24 26 #include <linux/kernel.h> 25 27 #include <linux/module.h> 26 28 #include <linux/moduleparam.h> ··· 41 39 #include <acpi/processor.h> 42 40 43 41 #include "longhaul.h" 44 - 45 - #define PFX "longhaul: " 46 42 47 43 #define TYPE_LONGHAUL_V1 1 48 44 #define TYPE_LONGHAUL_V2 2 ··· 347 347 freqs.new = calc_speed(longhaul_get_cpu_mult()); 348 348 /* Check if requested frequency is set. */ 349 349 if (unlikely(freqs.new != speed)) { 350 - printk(KERN_INFO PFX "Failed to set requested frequency!\n"); 350 + pr_info("Failed to set requested frequency!\n"); 351 351 /* Revision ID = 1 but processor is expecting revision key 352 352 * equal to 0. Jumpers at the bottom of processor will change 353 353 * multiplier and FSB, but will not change bits in Longhaul 354 354 * MSR nor enable voltage scaling. */ 355 355 if (!revid_errata) { 356 - printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" " 357 - "option.\n"); 356 + pr_info("Enabling \"Ignore Revision ID\" option\n"); 358 357 revid_errata = 1; 359 358 msleep(200); 360 359 goto retry_loop; ··· 363 364 * but it doesn't change frequency. I tried poking various 364 365 * bits in northbridge registers, but without success. */ 365 366 if (longhaul_flags & USE_ACPI_C3) { 366 - printk(KERN_INFO PFX "Disabling ACPI C3 support.\n"); 367 + pr_info("Disabling ACPI C3 support\n"); 367 368 longhaul_flags &= ~USE_ACPI_C3; 368 369 if (revid_errata) { 369 - printk(KERN_INFO PFX "Disabling \"Ignore " 370 - "Revision ID\" option.\n"); 370 + pr_info("Disabling \"Ignore Revision ID\" option\n"); 371 371 revid_errata = 0; 372 372 } 373 373 msleep(200); ··· 377 379 * RevID = 1. RevID errata will make things right. Just 378 380 * to be 100% sure. */ 379 381 if (longhaul_version == TYPE_LONGHAUL_V2) { 380 - printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n"); 382 + pr_info("Switching to Longhaul ver. 
1\n"); 381 383 longhaul_version = TYPE_LONGHAUL_V1; 382 384 msleep(200); 383 385 goto retry_loop; ··· 385 387 } 386 388 387 389 if (!bm_timeout) { 388 - printk(KERN_INFO PFX "Warning: Timeout while waiting for " 389 - "idle PCI bus.\n"); 390 + pr_info("Warning: Timeout while waiting for idle PCI bus\n"); 390 391 return -EBUSY; 391 392 } 392 393 ··· 430 433 /* Get current frequency */ 431 434 mult = longhaul_get_cpu_mult(); 432 435 if (mult == -1) { 433 - printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n"); 436 + pr_info("Invalid (reserved) multiplier!\n"); 434 437 return -EINVAL; 435 438 } 436 439 fsb = guess_fsb(mult); 437 440 if (fsb == 0) { 438 - printk(KERN_INFO PFX "Invalid (reserved) FSB!\n"); 441 + pr_info("Invalid (reserved) FSB!\n"); 439 442 return -EINVAL; 440 443 } 441 444 /* Get max multiplier - as we always did. ··· 465 468 print_speed(highest_speed/1000)); 466 469 467 470 if (lowest_speed == highest_speed) { 468 - printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n"); 471 + pr_info("highestspeed == lowest, aborting\n"); 469 472 return -EINVAL; 470 473 } 471 474 if (lowest_speed > highest_speed) { 472 - printk(KERN_INFO PFX "nonsense! lowest (%d > %d) !\n", 475 + pr_info("nonsense! 
lowest (%d > %d) !\n", 473 476 lowest_speed, highest_speed); 474 477 return -EINVAL; 475 478 } ··· 535 538 536 539 rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); 537 540 if (!(longhaul.bits.RevisionID & 1)) { 538 - printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n"); 541 + pr_info("Voltage scaling not supported by CPU\n"); 539 542 return; 540 543 } 541 544 542 545 if (!longhaul.bits.VRMRev) { 543 - printk(KERN_INFO PFX "VRM 8.5\n"); 546 + pr_info("VRM 8.5\n"); 544 547 vrm_mV_table = &vrm85_mV[0]; 545 548 mV_vrm_table = &mV_vrm85[0]; 546 549 } else { 547 - printk(KERN_INFO PFX "Mobile VRM\n"); 550 + pr_info("Mobile VRM\n"); 548 551 if (cpu_model < CPU_NEHEMIAH) 549 552 return; 550 553 vrm_mV_table = &mobilevrm_mV[0]; ··· 555 558 maxvid = vrm_mV_table[longhaul.bits.MaximumVID]; 556 559 557 560 if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) { 558 - printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " 559 - "Voltage scaling disabled.\n", 560 - minvid.mV/1000, minvid.mV%1000, 561 - maxvid.mV/1000, maxvid.mV%1000); 561 + pr_info("Bogus values Min:%d.%03d Max:%d.%03d - Voltage scaling disabled\n", 562 + minvid.mV/1000, minvid.mV%1000, 563 + maxvid.mV/1000, maxvid.mV%1000); 562 564 return; 563 565 } 564 566 565 567 if (minvid.mV == maxvid.mV) { 566 - printk(KERN_INFO PFX "Claims to support voltage scaling but " 567 - "min & max are both %d.%03d. 
" 568 - "Voltage scaling disabled\n", 569 - maxvid.mV/1000, maxvid.mV%1000); 568 + pr_info("Claims to support voltage scaling but min & max are both %d.%03d - Voltage scaling disabled\n", 569 + maxvid.mV/1000, maxvid.mV%1000); 570 570 return; 571 571 } 572 572 573 573 /* How many voltage steps*/ 574 574 numvscales = maxvid.pos - minvid.pos + 1; 575 - printk(KERN_INFO PFX 576 - "Max VID=%d.%03d " 577 - "Min VID=%d.%03d, " 578 - "%d possible voltage scales\n", 575 + pr_info("Max VID=%d.%03d Min VID=%d.%03d, %d possible voltage scales\n", 579 576 maxvid.mV/1000, maxvid.mV%1000, 580 577 minvid.mV/1000, minvid.mV%1000, 581 578 numvscales); ··· 608 617 pos = minvid.pos; 609 618 freq_pos->driver_data |= mV_vrm_table[pos] << 8; 610 619 vid = vrm_mV_table[mV_vrm_table[pos]]; 611 - printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", 620 + pr_info("f: %d kHz, index: %d, vid: %d mV\n", 612 621 speed, (int)(freq_pos - longhaul_table), vid.mV); 613 622 } 614 623 615 624 can_scale_voltage = 1; 616 - printk(KERN_INFO PFX "Voltage scaling enabled.\n"); 625 + pr_info("Voltage scaling enabled\n"); 617 626 } 618 627 619 628 ··· 711 720 pci_write_config_byte(dev, reg, pci_cmd); 712 721 pci_read_config_byte(dev, reg, &pci_cmd); 713 722 if (!(pci_cmd & 1<<7)) { 714 - printk(KERN_ERR PFX 715 - "Can't enable access to port 0x22.\n"); 723 + pr_err("Can't enable access to port 0x22\n"); 716 724 status = 0; 717 725 } 718 726 } ··· 748 758 if (pci_cmd & 1 << 7) { 749 759 pci_read_config_dword(dev, 0x88, &acpi_regs_addr); 750 760 acpi_regs_addr &= 0xff00; 751 - printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", 752 - acpi_regs_addr); 761 + pr_info("ACPI I/O at 0x%x\n", acpi_regs_addr); 753 762 } 754 763 755 764 pci_dev_put(dev); ··· 842 853 longhaul_version = TYPE_LONGHAUL_V1; 843 854 } 844 855 845 - printk(KERN_INFO PFX "VIA %s CPU detected. ", cpuname); 856 + pr_info("VIA %s CPU detected. 
", cpuname); 846 857 switch (longhaul_version) { 847 858 case TYPE_LONGHAUL_V1: 848 859 case TYPE_LONGHAUL_V2: 849 - printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version); 860 + pr_cont("Longhaul v%d supported\n", longhaul_version); 850 861 break; 851 862 case TYPE_POWERSAVER: 852 - printk(KERN_CONT "Powersaver supported.\n"); 863 + pr_cont("Powersaver supported\n"); 853 864 break; 854 865 }; 855 866 ··· 878 889 if (!(longhaul_flags & USE_ACPI_C3 879 890 || longhaul_flags & USE_NORTHBRIDGE) 880 891 && ((pr == NULL) || !(pr->flags.bm_control))) { 881 - printk(KERN_ERR PFX 882 - "No ACPI support. Unsupported northbridge.\n"); 892 + pr_err("No ACPI support: Unsupported northbridge\n"); 883 893 return -ENODEV; 884 894 } 885 895 886 896 if (longhaul_flags & USE_NORTHBRIDGE) 887 - printk(KERN_INFO PFX "Using northbridge support.\n"); 897 + pr_info("Using northbridge support\n"); 888 898 if (longhaul_flags & USE_ACPI_C3) 889 - printk(KERN_INFO PFX "Using ACPI support.\n"); 899 + pr_info("Using ACPI support\n"); 890 900 891 901 ret = longhaul_get_ranges(); 892 902 if (ret != 0) ··· 922 934 return -ENODEV; 923 935 924 936 if (!enable) { 925 - printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n"); 937 + pr_err("Option \"enable\" not set - Aborting\n"); 926 938 return -ENODEV; 927 939 } 928 940 #ifdef CONFIG_SMP 929 941 if (num_online_cpus() > 1) { 930 - printk(KERN_ERR PFX "More than 1 CPU detected, " 931 - "longhaul disabled.\n"); 942 + pr_err("More than 1 CPU detected, longhaul disabled\n"); 932 943 return -ENODEV; 933 944 } 934 945 #endif 935 946 #ifdef CONFIG_X86_IO_APIC 936 947 if (cpu_has_apic) { 937 - printk(KERN_ERR PFX "APIC detected. Longhaul is currently " 938 - "broken in this configuration.\n"); 948 + pr_err("APIC detected. Longhaul is currently broken in this configuration.\n"); 939 949 return -ENODEV; 940 950 } 941 951 #endif ··· 941 955 case 6 ... 
9: 942 956 return cpufreq_register_driver(&longhaul_driver); 943 957 case 10: 944 - printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n"); 958 + pr_err("Use acpi-cpufreq driver for VIA C7\n"); 945 959 default: 946 960 ; 947 961 }
+5 -2
drivers/cpufreq/loongson2_cpufreq.c
··· 10 10 * License. See the file "COPYING" in the main directory of this archive 11 11 * for more details. 12 12 */ 13 + 14 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 + 13 16 #include <linux/cpufreq.h> 14 17 #include <linux/module.h> 15 18 #include <linux/err.h> ··· 79 76 80 77 cpuclk = clk_get(NULL, "cpu_clk"); 81 78 if (IS_ERR(cpuclk)) { 82 - printk(KERN_ERR "cpufreq: couldn't get CPU clk\n"); 79 + pr_err("couldn't get CPU clk\n"); 83 80 return PTR_ERR(cpuclk); 84 81 } 85 82 ··· 166 163 if (ret) 167 164 return ret; 168 165 169 - pr_info("cpufreq: Loongson-2F CPU frequency driver.\n"); 166 + pr_info("Loongson-2F CPU frequency driver\n"); 170 167 171 168 cpufreq_register_notifier(&loongson2_cpufreq_notifier_block, 172 169 CPUFREQ_TRANSITION_NOTIFIER);
+6 -5
drivers/cpufreq/maple-cpufreq.c
··· 13 13 14 14 #undef DEBUG 15 15 16 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17 + 16 18 #include <linux/module.h> 17 19 #include <linux/types.h> 18 20 #include <linux/errno.h> ··· 176 174 /* Get first CPU node */ 177 175 cpunode = of_cpu_device_node_get(0); 178 176 if (cpunode == NULL) { 179 - printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n"); 177 + pr_err("Can't find any CPU 0 node\n"); 180 178 goto bail_noprops; 181 179 } 182 180 ··· 184 182 /* we actually don't care on which CPU to access PVR */ 185 183 pvr_hi = PVR_VER(mfspr(SPRN_PVR)); 186 184 if (pvr_hi != 0x3c && pvr_hi != 0x44) { 187 - printk(KERN_ERR "cpufreq: Unsupported CPU version (%x)\n", 188 - pvr_hi); 185 + pr_err("Unsupported CPU version (%x)\n", pvr_hi); 189 186 goto bail_noprops; 190 187 } 191 188 ··· 223 222 maple_pmode_cur = -1; 224 223 maple_scom_switch_freq(maple_scom_query_freq()); 225 224 226 - printk(KERN_INFO "Registering Maple CPU frequency driver\n"); 227 - printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", 225 + pr_info("Registering Maple CPU frequency driver\n"); 226 + pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", 228 227 maple_cpu_freqs[1].frequency/1000, 229 228 maple_cpu_freqs[0].frequency/1000, 230 229 maple_cpu_freqs[maple_pmode_cur].frequency/1000);
+4 -10
drivers/cpufreq/mt8173-cpufreq.c
··· 59 59 static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu) 60 60 { 61 61 struct mtk_cpu_dvfs_info *info; 62 - struct list_head *list; 63 62 64 - list_for_each(list, &dvfs_info_list) { 65 - info = list_entry(list, struct mtk_cpu_dvfs_info, list_head); 66 - 63 + list_for_each_entry(info, &dvfs_info_list, list_head) { 67 64 if (cpumask_test_cpu(cpu, &info->cpus)) 68 65 return info; 69 66 } ··· 521 524 522 525 static int mt8173_cpufreq_probe(struct platform_device *pdev) 523 526 { 524 - struct mtk_cpu_dvfs_info *info; 525 - struct list_head *list, *tmp; 527 + struct mtk_cpu_dvfs_info *info, *tmp; 526 528 int cpu, ret; 527 529 528 530 for_each_possible_cpu(cpu) { ··· 555 559 return 0; 556 560 557 561 release_dvfs_info_list: 558 - list_for_each_safe(list, tmp, &dvfs_info_list) { 559 - info = list_entry(list, struct mtk_cpu_dvfs_info, list_head); 560 - 562 + list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) { 561 563 mtk_cpu_dvfs_info_release(info); 562 - list_del(list); 564 + list_del(&info->list_head); 563 565 } 564 566 565 567 return ret;
+5 -2
drivers/cpufreq/omap-cpufreq.c
··· 13 13 * it under the terms of the GNU General Public License version 2 as 14 14 * published by the Free Software Foundation. 15 15 */ 16 + 17 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18 + 16 19 #include <linux/types.h> 17 20 #include <linux/kernel.h> 18 21 #include <linux/sched.h> ··· 166 163 { 167 164 mpu_dev = get_cpu_device(0); 168 165 if (!mpu_dev) { 169 - pr_warning("%s: unable to get the mpu device\n", __func__); 166 + pr_warn("%s: unable to get the MPU device\n", __func__); 170 167 return -EINVAL; 171 168 } 172 169 173 170 mpu_reg = regulator_get(mpu_dev, "vcc"); 174 171 if (IS_ERR(mpu_reg)) { 175 - pr_warning("%s: unable to get MPU regulator\n", __func__); 172 + pr_warn("%s: unable to get MPU regulator\n", __func__); 176 173 mpu_reg = NULL; 177 174 } else { 178 175 /*
+5 -14
drivers/cpufreq/p4-clockmod.c
··· 20 20 * 21 21 */ 22 22 23 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 24 + 23 25 #include <linux/kernel.h> 24 26 #include <linux/module.h> 25 27 #include <linux/init.h> ··· 36 34 #include <asm/cpu_device_id.h> 37 35 38 36 #include "speedstep-lib.h" 39 - 40 - #define PFX "p4-clockmod: " 41 37 42 38 /* 43 39 * Duty Cycle (3bits), note DC_DISABLE is not specified in ··· 124 124 { 125 125 if (c->x86 == 0x06) { 126 126 if (cpu_has(c, X86_FEATURE_EST)) 127 - printk_once(KERN_WARNING PFX "Warning: EST-capable " 128 - "CPU detected. The acpi-cpufreq module offers " 129 - "voltage scaling in addition to frequency " 130 - "scaling. You should use that instead of " 131 - "p4-clockmod, if possible.\n"); 127 + pr_warn_once("Warning: EST-capable CPU detected. The acpi-cpufreq module offers voltage scaling in addition to frequency scaling. You should use that instead of p4-clockmod, if possible.\n"); 132 128 switch (c->x86_model) { 133 129 case 0x0E: /* Core */ 134 130 case 0x0F: /* Core Duo */ ··· 148 152 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; 149 153 150 154 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) { 151 - printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. " 152 - "The speedstep-ich or acpi cpufreq modules offer " 153 - "voltage scaling in addition of frequency scaling. " 154 - "You should use either one instead of p4-clockmod, " 155 - "if possible.\n"); 155 + pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq modules offer voltage scaling in addition of frequency scaling. You should use either one instead of p4-clockmod, if possible.\n"); 156 156 return speedstep_get_frequency(SPEEDSTEP_CPU_P4M); 157 157 } 158 158 ··· 257 265 258 266 ret = cpufreq_register_driver(&p4clockmod_driver); 259 267 if (!ret) 260 - printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock " 261 - "Modulation available\n"); 268 + pr_info("P4/Xeon(TM) CPU On-Demand Clock Modulation available\n"); 262 269 263 270 return ret; 264 271 }
+8 -6
drivers/cpufreq/pmac32-cpufreq.c
··· 13 13 * 14 14 */ 15 15 16 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17 + 16 18 #include <linux/module.h> 17 19 #include <linux/types.h> 18 20 #include <linux/errno.h> ··· 483 481 freqs = of_get_property(cpunode, "bus-frequencies", &lenp); 484 482 lenp /= sizeof(u32); 485 483 if (freqs == NULL || lenp != 2) { 486 - printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n"); 484 + pr_err("bus-frequencies incorrect or missing\n"); 487 485 return 1; 488 486 } 489 487 ratio = of_get_property(cpunode, "processor-to-bus-ratio*2", 490 488 NULL); 491 489 if (ratio == NULL) { 492 - printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n"); 490 + pr_err("processor-to-bus-ratio*2 missing\n"); 493 491 return 1; 494 492 } 495 493 ··· 552 550 if (volt_gpio_np) 553 551 voltage_gpio = read_gpio(volt_gpio_np); 554 552 if (!voltage_gpio){ 555 - printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n"); 553 + pr_err("missing cpu-vcore-select gpio\n"); 556 554 return 1; 557 555 } 558 556 ··· 677 675 pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq; 678 676 ppc_proc_freq = cur_freq * 1000ul; 679 677 680 - printk(KERN_INFO "Registering PowerMac CPU frequency driver\n"); 681 - printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n", 682 - low_freq/1000, hi_freq/1000, cur_freq/1000); 678 + pr_info("Registering PowerMac CPU frequency driver\n"); 679 + pr_info("Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n", 680 + low_freq/1000, hi_freq/1000, cur_freq/1000); 683 681 684 682 return cpufreq_register_driver(&pmac_cpufreq_driver); 685 683 }
+23 -24
drivers/cpufreq/pmac64-cpufreq.c
··· 12 12 13 13 #undef DEBUG 14 14 15 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16 + 15 17 #include <linux/module.h> 16 18 #include <linux/types.h> 17 19 #include <linux/errno.h> ··· 140 138 usleep_range(1000, 1000); 141 139 } 142 140 if (done == 0) 143 - printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); 141 + pr_warn("Timeout in clock slewing !\n"); 144 142 } 145 143 146 144 ··· 268 266 rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL); 269 267 270 268 if (rc) 271 - printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc); 269 + pr_warn("pfunc switch error %d\n", rc); 272 270 273 271 /* It's an irq GPIO so we should be able to just block here, 274 272 * I'll do that later after I've properly tested the IRQ code for ··· 284 282 usleep_range(500, 500); 285 283 } 286 284 if (done == 0) 287 - printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); 285 + pr_warn("Timeout in clock slewing !\n"); 288 286 289 287 /* If frequency is going down, last ramp the voltage */ 290 288 if (speed_mode > g5_pmode_cur) ··· 370 368 } 371 369 pvr_hi = (*valp) >> 16; 372 370 if (pvr_hi != 0x3c && pvr_hi != 0x44) { 373 - printk(KERN_ERR "cpufreq: Unsupported CPU version\n"); 371 + pr_err("Unsupported CPU version\n"); 374 372 goto bail_noprops; 375 373 } 376 374 ··· 405 403 406 404 root = of_find_node_by_path("/"); 407 405 if (root == NULL) { 408 - printk(KERN_ERR "cpufreq: Can't find root of " 409 - "device tree\n"); 406 + pr_err("Can't find root of device tree\n"); 410 407 goto bail_noprops; 411 408 } 412 409 pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0"); ··· 413 412 pmf_find_function(root, "slewing-done"); 414 413 if (pfunc_set_vdnap0 == NULL || 415 414 pfunc_vdnap0_complete == NULL) { 416 - printk(KERN_ERR "cpufreq: Can't find required " 417 - "platform function\n"); 415 + pr_err("Can't find required platform function\n"); 418 416 goto bail_noprops; 419 417 } 420 418 ··· 453 453 g5_pmode_cur = -1; 454 454 g5_switch_freq(g5_query_freq()); 455 455 456 - 
printk(KERN_INFO "Registering G5 CPU frequency driver\n"); 457 - printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n", 458 - freq_method, volt_method); 459 - printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", 456 + pr_info("Registering G5 CPU frequency driver\n"); 457 + pr_info("Frequency method: %s, Voltage method: %s\n", 458 + freq_method, volt_method); 459 + pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", 460 460 g5_cpu_freqs[1].frequency/1000, 461 461 g5_cpu_freqs[0].frequency/1000, 462 462 g5_cpu_freqs[g5_pmode_cur].frequency/1000); ··· 493 493 if (cpuid != NULL) 494 494 eeprom = of_get_property(cpuid, "cpuid", NULL); 495 495 if (eeprom == NULL) { 496 - printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n"); 496 + pr_err("Can't find cpuid EEPROM !\n"); 497 497 rc = -ENODEV; 498 498 goto bail; 499 499 } ··· 511 511 break; 512 512 } 513 513 if (hwclock == NULL) { 514 - printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n"); 514 + pr_err("Can't find i2c clock chip !\n"); 515 515 rc = -ENODEV; 516 516 goto bail; 517 517 } ··· 539 539 /* Check we have minimum requirements */ 540 540 if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL || 541 541 pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) { 542 - printk(KERN_ERR "cpufreq: Can't find platform functions !\n"); 542 + pr_err("Can't find platform functions !\n"); 543 543 rc = -ENODEV; 544 544 goto bail; 545 545 } ··· 567 567 /* Get max frequency from device-tree */ 568 568 valp = of_get_property(cpunode, "clock-frequency", NULL); 569 569 if (!valp) { 570 - printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n"); 570 + pr_err("Can't find CPU frequency !\n"); 571 571 rc = -ENODEV; 572 572 goto bail; 573 573 } ··· 583 583 584 584 /* Check for machines with no useful settings */ 585 585 if (il == ih) { 586 - printk(KERN_WARNING "cpufreq: No low frequency mode available" 587 - " on this model !\n"); 586 + pr_warn("No low frequency mode available on this model !\n"); 588 
587 rc = -ENODEV; 589 588 goto bail; 590 589 } ··· 594 595 595 596 /* Sanity check */ 596 597 if (min_freq >= max_freq || min_freq < 1000) { 597 - printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n"); 598 + pr_err("Can't calculate low frequency !\n"); 598 599 rc = -ENXIO; 599 600 goto bail; 600 601 } ··· 618 619 g5_pmode_cur = -1; 619 620 g5_switch_freq(g5_query_freq()); 620 621 621 - printk(KERN_INFO "Registering G5 CPU frequency driver\n"); 622 - printk(KERN_INFO "Frequency method: i2c/pfunc, " 623 - "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none"); 624 - printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", 622 + pr_info("Registering G5 CPU frequency driver\n"); 623 + pr_info("Frequency method: i2c/pfunc, Voltage method: %s\n", 624 + has_volt ? "i2c/pfunc" : "none"); 625 + pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", 625 626 g5_cpu_freqs[1].frequency/1000, 626 627 g5_cpu_freqs[0].frequency/1000, 627 628 g5_cpu_freqs[g5_pmode_cur].frequency/1000); ··· 653 654 /* Get first CPU node */ 654 655 cpunode = of_cpu_device_node_get(0); 655 656 if (cpunode == NULL) { 656 - pr_err("cpufreq: Can't find any CPU node\n"); 657 + pr_err("Can't find any CPU node\n"); 657 658 return -ENODEV; 658 659 } 659 660
+9 -7
drivers/cpufreq/powernow-k6.c
··· 8 8 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* 9 9 */ 10 10 11 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 + 11 13 #include <linux/kernel.h> 12 14 #include <linux/module.h> 13 15 #include <linux/init.h> ··· 24 22 #define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long 25 23 as it is unused */ 26 24 27 - #define PFX "powernow-k6: " 28 25 static unsigned int busfreq; /* FSB, in 10 kHz */ 29 26 static unsigned int max_multiplier; 30 27 ··· 142 141 { 143 142 144 143 if (clock_ratio[best_i].driver_data > max_multiplier) { 145 - printk(KERN_ERR PFX "invalid target frequency\n"); 144 + pr_err("invalid target frequency\n"); 146 145 return -EINVAL; 147 146 } 148 147 ··· 176 175 max_multiplier = param_max_multiplier; 177 176 goto have_max_multiplier; 178 177 } 179 - printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n"); 178 + pr_err("invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n"); 180 179 return -EINVAL; 181 180 } 182 181 183 182 if (!max_multiplier) { 184 - printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz); 185 - printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n"); 183 + pr_warn("unknown frequency %u, cannot determine current multiplier\n", 184 + khz); 185 + pr_warn("use module parameters max_multiplier and bus_frequency\n"); 186 186 return -EOPNOTSUPP; 187 187 } 188 188 ··· 195 193 busfreq = param_busfreq / 10; 196 194 goto have_busfreq; 197 195 } 198 - printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n"); 196 + pr_err("invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n"); 199 197 return -EINVAL; 200 198 } 201 199 ··· 277 275 return -ENODEV; 278 276 279 277 if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) { 280 - printk(KERN_INFO PFX "PowerNow IOPORT region already 
used.\n"); 278 + pr_info("PowerNow IOPORT region already used\n"); 281 279 return -EIO; 282 280 } 283 281
+28 -42
drivers/cpufreq/powernow-k7.c
··· 13 13 * - We disable half multipliers if ACPI is used on A0 stepping CPUs. 14 14 */ 15 15 16 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17 + 16 18 #include <linux/kernel.h> 17 19 #include <linux/module.h> 18 20 #include <linux/moduleparam.h> ··· 36 34 #endif 37 35 38 36 #include "powernow-k7.h" 39 - 40 - #define PFX "powernow: " 41 - 42 37 43 38 struct psb_s { 44 39 u8 signature[10]; ··· 126 127 maxei = cpuid_eax(0x80000000); 127 128 if (maxei < 0x80000007) { /* Any powernow info ? */ 128 129 #ifdef MODULE 129 - printk(KERN_INFO PFX "No powernow capabilities detected\n"); 130 + pr_info("No powernow capabilities detected\n"); 130 131 #endif 131 132 return 0; 132 133 } 133 134 134 135 if ((c->x86_model == 6) && (c->x86_mask == 0)) { 135 - printk(KERN_INFO PFX "K7 660[A0] core detected, " 136 - "enabling errata workarounds\n"); 136 + pr_info("K7 660[A0] core detected, enabling errata workarounds\n"); 137 137 have_a0 = 1; 138 138 } 139 139 ··· 142 144 if (!(edx & (1 << 1 | 1 << 2))) 143 145 return 0; 144 146 145 - printk(KERN_INFO PFX "PowerNOW! Technology present. Can scale: "); 147 + pr_info("PowerNOW! Technology present. 
Can scale: "); 146 148 147 149 if (edx & 1 << 1) { 148 - printk("frequency"); 150 + pr_cont("frequency"); 149 151 can_scale_bus = 1; 150 152 } 151 153 152 154 if ((edx & (1 << 1 | 1 << 2)) == 0x6) 153 - printk(" and "); 155 + pr_cont(" and "); 154 156 155 157 if (edx & 1 << 2) { 156 - printk("voltage"); 158 + pr_cont("voltage"); 157 159 can_scale_vid = 1; 158 160 } 159 161 160 - printk(".\n"); 162 + pr_cont("\n"); 161 163 return 1; 162 164 } 163 165 ··· 425 427 err05: 426 428 kfree(acpi_processor_perf); 427 429 err0: 428 - printk(KERN_WARNING PFX "ACPI perflib can not be used on " 429 - "this platform\n"); 430 + pr_warn("ACPI perflib can not be used on this platform\n"); 430 431 acpi_processor_perf = NULL; 431 432 return retval; 432 433 } 433 434 #else 434 435 static int powernow_acpi_init(void) 435 436 { 436 - printk(KERN_INFO PFX "no support for ACPI processor found." 437 - " Please recompile your kernel with ACPI processor\n"); 437 + pr_info("no support for ACPI processor found - please recompile your kernel with ACPI processor\n"); 438 438 return -EINVAL; 439 439 } 440 440 #endif ··· 464 468 psb = (struct psb_s *) p; 465 469 pr_debug("Table version: 0x%x\n", psb->tableversion); 466 470 if (psb->tableversion != 0x12) { 467 - printk(KERN_INFO PFX "Sorry, only v1.2 tables" 468 - " supported right now\n"); 471 + pr_info("Sorry, only v1.2 tables supported right now\n"); 469 472 return -ENODEV; 470 473 } 471 474 ··· 476 481 477 482 latency = psb->settlingtime; 478 483 if (latency < 100) { 479 - printk(KERN_INFO PFX "BIOS set settling time " 480 - "to %d microseconds. " 481 - "Should be at least 100. " 482 - "Correcting.\n", latency); 484 + pr_info("BIOS set settling time to %d microseconds. Should be at least 100. 
Correcting.\n", 485 + latency); 483 486 latency = 100; 484 487 } 485 488 pr_debug("Settling Time: %d microseconds.\n", ··· 509 516 p += 2; 510 517 } 511 518 } 512 - printk(KERN_INFO PFX "No PST tables match this cpuid " 513 - "(0x%x)\n", etuple); 514 - printk(KERN_INFO PFX "This is indicative of a broken " 515 - "BIOS.\n"); 519 + pr_info("No PST tables match this cpuid (0x%x)\n", 520 + etuple); 521 + pr_info("This is indicative of a broken BIOS\n"); 516 522 517 523 return -EINVAL; 518 524 } ··· 544 552 sgtc = 100 * m * latency; 545 553 sgtc = sgtc / 3; 546 554 if (sgtc > 0xfffff) { 547 - printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc); 555 + pr_warn("SGTC too large %d\n", sgtc); 548 556 sgtc = 0xfffff; 549 557 } 550 558 return sgtc; ··· 566 574 567 575 static int acer_cpufreq_pst(const struct dmi_system_id *d) 568 576 { 569 - printk(KERN_WARNING PFX 570 - "%s laptop with broken PST tables in BIOS detected.\n", 577 + pr_warn("%s laptop with broken PST tables in BIOS detected\n", 571 578 d->ident); 572 - printk(KERN_WARNING PFX 573 - "You need to downgrade to 3A21 (09/09/2002), or try a newer " 574 - "BIOS than 3A71 (01/20/2003)\n"); 575 - printk(KERN_WARNING PFX 576 - "cpufreq scaling has been disabled as a result of this.\n"); 579 + pr_warn("You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n"); 580 + pr_warn("cpufreq scaling has been disabled as a result of this\n"); 577 581 return 0; 578 582 } 579 583 ··· 604 616 605 617 fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID]; 606 618 if (!fsb) { 607 - printk(KERN_WARNING PFX "can not determine bus frequency\n"); 619 + pr_warn("can not determine bus frequency\n"); 608 620 return -EINVAL; 609 621 } 610 622 pr_debug("FSB: %3dMHz\n", fsb/1000); 611 623 612 624 if (dmi_check_system(powernow_dmi_table) || acpi_force) { 613 - printk(KERN_INFO PFX "PSB/PST known to be broken. 
" 614 - "Trying ACPI instead\n"); 625 + pr_info("PSB/PST known to be broken - trying ACPI instead\n"); 615 626 result = powernow_acpi_init(); 616 627 } else { 617 628 result = powernow_decode_bios(fidvidstatus.bits.MFID, 618 629 fidvidstatus.bits.SVID); 619 630 if (result) { 620 - printk(KERN_INFO PFX "Trying ACPI perflib\n"); 631 + pr_info("Trying ACPI perflib\n"); 621 632 maximum_speed = 0; 622 633 minimum_speed = -1; 623 634 latency = 0; 624 635 result = powernow_acpi_init(); 625 636 if (result) { 626 - printk(KERN_INFO PFX 627 - "ACPI and legacy methods failed\n"); 637 + pr_info("ACPI and legacy methods failed\n"); 628 638 } 629 639 } else { 630 640 /* SGTC use the bus clock as timer */ 631 641 latency = fixup_sgtc(); 632 - printk(KERN_INFO PFX "SGTC: %d\n", latency); 642 + pr_info("SGTC: %d\n", latency); 633 643 } 634 644 } 635 645 636 646 if (result) 637 647 return result; 638 648 639 - printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n", 640 - minimum_speed/1000, maximum_speed/1000); 649 + pr_info("Minimum speed %d MHz - Maximum speed %d MHz\n", 650 + minimum_speed/1000, maximum_speed/1000); 641 651 642 652 policy->cpuinfo.transition_latency = 643 653 cpufreq_scale(2000000UL, fsb, latency);
+256 -13
drivers/cpufreq/powernv-cpufreq.c
··· 36 36 #include <asm/reg.h> 37 37 #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */ 38 38 #include <asm/opal.h> 39 + #include <linux/timer.h> 39 40 40 41 #define POWERNV_MAX_PSTATES 256 41 42 #define PMSR_PSAFE_ENABLE (1UL << 30) 42 43 #define PMSR_SPR_EM_DISABLE (1UL << 31) 43 44 #define PMSR_MAX(x) ((x >> 32) & 0xFF) 45 + 46 + #define MAX_RAMP_DOWN_TIME 5120 47 + /* 48 + * On an idle system we want the global pstate to ramp-down from max value to 49 + * min over a span of ~5 secs. Also we want it to initially ramp-down slowly and 50 + * then ramp-down rapidly later on. 51 + * 52 + * This gives a percentage rampdown for time elapsed in milliseconds. 53 + * ramp_down_percentage = ((ms * ms) >> 18) 54 + * ~= 3.8 * (sec * sec) 55 + * 56 + * At 0 ms ramp_down_percent = 0 57 + * At 5120 ms ramp_down_percent = 100 58 + */ 59 + #define ramp_down_percent(time) ((time * time) >> 18) 60 + 61 + /* Interval after which the timer is queued to bring down global pstate */ 62 + #define GPSTATE_TIMER_INTERVAL 2000 63 + 64 + /** 65 + * struct global_pstate_info - Per policy data structure to maintain history of 66 + * global pstates 67 + * @highest_lpstate: The local pstate from which we are ramping down 68 + * @elapsed_time: Time in ms spent in ramping down from 69 + * highest_lpstate 70 + * @last_sampled_time: Time from boot in ms when global pstates were 71 + * last set 72 + * @last_lpstate,last_gpstate: Last set values for local and global pstates 73 + * @timer: Is used for ramping down if cpu goes idle for 74 + * a long time with global pstate held high 75 + * @gpstate_lock: A spinlock to maintain synchronization between 76 + * routines called by the timer handler and 77 + * governer's target_index calls 78 + */ 79 + struct global_pstate_info { 80 + int highest_lpstate; 81 + unsigned int elapsed_time; 82 + unsigned int last_sampled_time; 83 + int last_lpstate; 84 + int last_gpstate; 85 + spinlock_t gpstate_lock; 86 + struct timer_list timer; 87 + }; 
44 88 45 89 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; 46 90 static bool rebooting, throttled, occ_reset; ··· 137 93 int nominal; 138 94 int nr_pstates; 139 95 } powernv_pstate_info; 96 + 97 + static inline void reset_gpstates(struct cpufreq_policy *policy) 98 + { 99 + struct global_pstate_info *gpstates = policy->driver_data; 100 + 101 + gpstates->highest_lpstate = 0; 102 + gpstates->elapsed_time = 0; 103 + gpstates->last_sampled_time = 0; 104 + gpstates->last_lpstate = 0; 105 + gpstates->last_gpstate = 0; 106 + } 140 107 141 108 /* 142 109 * Initialize the freq table based on data obtained ··· 340 285 struct powernv_smp_call_data { 341 286 unsigned int freq; 342 287 int pstate_id; 288 + int gpstate_id; 343 289 }; 344 290 345 291 /* ··· 399 343 * (struct powernv_smp_call_data *) and the pstate_id which needs to be set 400 344 * on this CPU should be present in freq_data->pstate_id. 401 345 */ 402 - static void set_pstate(void *freq_data) 346 + static void set_pstate(void *data) 403 347 { 404 348 unsigned long val; 405 - unsigned long pstate_ul = 406 - ((struct powernv_smp_call_data *) freq_data)->pstate_id; 349 + struct powernv_smp_call_data *freq_data = data; 350 + unsigned long pstate_ul = freq_data->pstate_id; 351 + unsigned long gpstate_ul = freq_data->gpstate_id; 407 352 408 353 val = get_pmspr(SPRN_PMCR); 409 354 val = val & 0x0000FFFFFFFFFFFFULL; 410 355 411 356 pstate_ul = pstate_ul & 0xFF; 357 + gpstate_ul = gpstate_ul & 0xFF; 412 358 413 359 /* Set both global(bits 56..63) and local(bits 48..55) PStates */ 414 - val = val | (pstate_ul << 56) | (pstate_ul << 48); 360 + val = val | (gpstate_ul << 56) | (pstate_ul << 48); 415 361 416 362 pr_debug("Setting cpu %d pmcr to %016lX\n", 417 363 raw_smp_processor_id(), val); ··· 482 424 } 483 425 } 484 426 427 + /** 428 + * calc_global_pstate - Calculate global pstate 429 + * @elapsed_time: Elapsed time in milliseconds 430 + * @local_pstate: New local pstate 431 + * 
@highest_lpstate: pstate from which its ramping down 432 + * 433 + * Finds the appropriate global pstate based on the pstate from which its 434 + * ramping down and the time elapsed in ramping down. It follows a quadratic 435 + * equation which ensures that it reaches ramping down to pmin in 5sec. 436 + */ 437 + static inline int calc_global_pstate(unsigned int elapsed_time, 438 + int highest_lpstate, int local_pstate) 439 + { 440 + int pstate_diff; 441 + 442 + /* 443 + * Using ramp_down_percent we get the percentage of rampdown 444 + * that we are expecting to be dropping. Difference between 445 + * highest_lpstate and powernv_pstate_info.min will give a absolute 446 + * number of how many pstates we will drop eventually by the end of 447 + * 5 seconds, then just scale it get the number pstates to be dropped. 448 + */ 449 + pstate_diff = ((int)ramp_down_percent(elapsed_time) * 450 + (highest_lpstate - powernv_pstate_info.min)) / 100; 451 + 452 + /* Ensure that global pstate is >= to local pstate */ 453 + if (highest_lpstate - pstate_diff < local_pstate) 454 + return local_pstate; 455 + else 456 + return highest_lpstate - pstate_diff; 457 + } 458 + 459 + static inline void queue_gpstate_timer(struct global_pstate_info *gpstates) 460 + { 461 + unsigned int timer_interval; 462 + 463 + /* 464 + * Setting up timer to fire after GPSTATE_TIMER_INTERVAL ms, But 465 + * if it exceeds MAX_RAMP_DOWN_TIME ms for ramp down time. 466 + * Set timer such that it fires exactly at MAX_RAMP_DOWN_TIME 467 + * seconds of ramp down time. 
468 + */ 469 + if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL) 470 + > MAX_RAMP_DOWN_TIME) 471 + timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time; 472 + else 473 + timer_interval = GPSTATE_TIMER_INTERVAL; 474 + 475 + mod_timer_pinned(&gpstates->timer, jiffies + 476 + msecs_to_jiffies(timer_interval)); 477 + } 478 + 479 + /** 480 + * gpstate_timer_handler 481 + * 482 + * @data: pointer to cpufreq_policy on which timer was queued 483 + * 484 + * This handler brings down the global pstate closer to the local pstate 485 + * according quadratic equation. Queues a new timer if it is still not equal 486 + * to local pstate 487 + */ 488 + void gpstate_timer_handler(unsigned long data) 489 + { 490 + struct cpufreq_policy *policy = (struct cpufreq_policy *)data; 491 + struct global_pstate_info *gpstates = policy->driver_data; 492 + int gpstate_id; 493 + unsigned int time_diff = jiffies_to_msecs(jiffies) 494 + - gpstates->last_sampled_time; 495 + struct powernv_smp_call_data freq_data; 496 + 497 + if (!spin_trylock(&gpstates->gpstate_lock)) 498 + return; 499 + 500 + gpstates->last_sampled_time += time_diff; 501 + gpstates->elapsed_time += time_diff; 502 + freq_data.pstate_id = gpstates->last_lpstate; 503 + 504 + if ((gpstates->last_gpstate == freq_data.pstate_id) || 505 + (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) { 506 + gpstate_id = freq_data.pstate_id; 507 + reset_gpstates(policy); 508 + gpstates->highest_lpstate = freq_data.pstate_id; 509 + } else { 510 + gpstate_id = calc_global_pstate(gpstates->elapsed_time, 511 + gpstates->highest_lpstate, 512 + freq_data.pstate_id); 513 + } 514 + 515 + /* 516 + * If local pstate is equal to global pstate, rampdown is over 517 + * So timer is not required to be queued. 
518 + */ 519 + if (gpstate_id != freq_data.pstate_id) 520 + queue_gpstate_timer(gpstates); 521 + 522 + freq_data.gpstate_id = gpstate_id; 523 + gpstates->last_gpstate = freq_data.gpstate_id; 524 + gpstates->last_lpstate = freq_data.pstate_id; 525 + 526 + /* Timer may get migrated to a different cpu on cpu hot unplug */ 527 + smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); 528 + spin_unlock(&gpstates->gpstate_lock); 529 + } 530 + 485 531 /* 486 532 * powernv_cpufreq_target_index: Sets the frequency corresponding to 487 533 * the cpufreq table entry indexed by new_index on the cpus in the ··· 595 433 unsigned int new_index) 596 434 { 597 435 struct powernv_smp_call_data freq_data; 436 + unsigned int cur_msec, gpstate_id; 437 + unsigned long flags; 438 + struct global_pstate_info *gpstates = policy->driver_data; 598 439 599 440 if (unlikely(rebooting) && new_index != get_nominal_index()) 600 441 return 0; ··· 605 440 if (!throttled) 606 441 powernv_cpufreq_throttle_check(NULL); 607 442 443 + cur_msec = jiffies_to_msecs(get_jiffies_64()); 444 + 445 + spin_lock_irqsave(&gpstates->gpstate_lock, flags); 608 446 freq_data.pstate_id = powernv_freqs[new_index].driver_data; 447 + 448 + if (!gpstates->last_sampled_time) { 449 + gpstate_id = freq_data.pstate_id; 450 + gpstates->highest_lpstate = freq_data.pstate_id; 451 + goto gpstates_done; 452 + } 453 + 454 + if (gpstates->last_gpstate > freq_data.pstate_id) { 455 + gpstates->elapsed_time += cur_msec - 456 + gpstates->last_sampled_time; 457 + 458 + /* 459 + * If its has been ramping down for more than MAX_RAMP_DOWN_TIME 460 + * we should be resetting all global pstate related data. Set it 461 + * equal to local pstate to start fresh. 
462 + */ 463 + if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) { 464 + reset_gpstates(policy); 465 + gpstates->highest_lpstate = freq_data.pstate_id; 466 + gpstate_id = freq_data.pstate_id; 467 + } else { 468 + /* Elaspsed_time is less than 5 seconds, continue to rampdown */ 469 + gpstate_id = calc_global_pstate(gpstates->elapsed_time, 470 + gpstates->highest_lpstate, 471 + freq_data.pstate_id); 472 + } 473 + } else { 474 + reset_gpstates(policy); 475 + gpstates->highest_lpstate = freq_data.pstate_id; 476 + gpstate_id = freq_data.pstate_id; 477 + } 478 + 479 + /* 480 + * If local pstate is equal to global pstate, rampdown is over 481 + * So timer is not required to be queued. 482 + */ 483 + if (gpstate_id != freq_data.pstate_id) 484 + queue_gpstate_timer(gpstates); 485 + 486 + gpstates_done: 487 + freq_data.gpstate_id = gpstate_id; 488 + gpstates->last_sampled_time = cur_msec; 489 + gpstates->last_gpstate = freq_data.gpstate_id; 490 + gpstates->last_lpstate = freq_data.pstate_id; 609 491 610 492 /* 611 493 * Use smp_call_function to send IPI and execute the ··· 660 448 * if current CPU is within policy->cpus (core) 661 449 */ 662 450 smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); 663 - 451 + spin_unlock_irqrestore(&gpstates->gpstate_lock, flags); 664 452 return 0; 665 453 } 666 454 667 455 static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) 668 456 { 669 - int base, i; 457 + int base, i, ret; 458 + struct kernfs_node *kn; 459 + struct global_pstate_info *gpstates; 670 460 671 461 base = cpu_first_thread_sibling(policy->cpu); 672 462 673 463 for (i = 0; i < threads_per_core; i++) 674 464 cpumask_set_cpu(base + i, policy->cpus); 675 465 676 - if (!policy->driver_data) { 466 + kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name); 467 + if (!kn) { 677 468 int ret; 678 469 679 470 ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp); ··· 685 470 policy->cpu); 686 471 return ret; 687 472 } 688 - /* 689 - * 
policy->driver_data is used as a flag for one-time 690 - * creation of throttle sysfs files. 691 - */ 692 - policy->driver_data = policy; 473 + } else { 474 + kernfs_put(kn); 693 475 } 694 - return cpufreq_table_validate_and_show(policy, powernv_freqs); 476 + 477 + gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL); 478 + if (!gpstates) 479 + return -ENOMEM; 480 + 481 + policy->driver_data = gpstates; 482 + 483 + /* initialize timer */ 484 + init_timer_deferrable(&gpstates->timer); 485 + gpstates->timer.data = (unsigned long)policy; 486 + gpstates->timer.function = gpstate_timer_handler; 487 + gpstates->timer.expires = jiffies + 488 + msecs_to_jiffies(GPSTATE_TIMER_INTERVAL); 489 + spin_lock_init(&gpstates->gpstate_lock); 490 + ret = cpufreq_table_validate_and_show(policy, powernv_freqs); 491 + 492 + if (ret < 0) 493 + kfree(policy->driver_data); 494 + 495 + return ret; 496 + } 497 + 498 + static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy) 499 + { 500 + /* timer is deleted in cpufreq_cpu_stop() */ 501 + kfree(policy->driver_data); 502 + 503 + return 0; 695 504 } 696 505 697 506 static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb, ··· 843 604 static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy) 844 605 { 845 606 struct powernv_smp_call_data freq_data; 607 + struct global_pstate_info *gpstates = policy->driver_data; 846 608 847 609 freq_data.pstate_id = powernv_pstate_info.min; 610 + freq_data.gpstate_id = powernv_pstate_info.min; 848 611 smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1); 612 + del_timer_sync(&gpstates->timer); 849 613 } 850 614 851 615 static struct cpufreq_driver powernv_cpufreq_driver = { 852 616 .name = "powernv-cpufreq", 853 617 .flags = CPUFREQ_CONST_LOOPS, 854 618 .init = powernv_cpufreq_cpu_init, 619 + .exit = powernv_cpufreq_cpu_exit, 855 620 .verify = cpufreq_generic_frequency_table_verify, 856 621 .target_index = powernv_cpufreq_target_index, 857 622 .get = powernv_cpufreq_get,
+1 -1
drivers/cpufreq/ppc_cbe_cpufreq.h
··· 17 17 18 18 int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode); 19 19 20 - #if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE) 20 + #if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI) 21 21 extern bool cbe_cpufreq_has_pmi; 22 22 #else 23 23 #define cbe_cpufreq_has_pmi (0)
+2 -13
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
··· 23 23 #include <linux/kernel.h> 24 24 #include <linux/types.h> 25 25 #include <linux/timer.h> 26 - #include <linux/module.h> 26 + #include <linux/init.h> 27 27 #include <linux/of_platform.h> 28 28 29 29 #include <asm/processor.h> ··· 142 142 143 143 return 0; 144 144 } 145 - 146 - static void __exit cbe_cpufreq_pmi_exit(void) 147 - { 148 - cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); 149 - pmi_unregister_handler(&cbe_pmi_handler); 150 - } 151 - 152 - module_init(cbe_cpufreq_pmi_init); 153 - module_exit(cbe_cpufreq_pmi_exit); 154 - 155 - MODULE_LICENSE("GPL"); 156 - MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); 145 + device_initcall(cbe_cpufreq_pmi_init);
+9 -9
drivers/cpufreq/pxa2xx-cpufreq.c
··· 29 29 * 30 30 */ 31 31 32 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 33 + 32 34 #include <linux/kernel.h> 33 35 #include <linux/module.h> 34 36 #include <linux/sched.h> ··· 188 186 189 187 ret = regulator_set_voltage(vcc_core, vmin, vmax); 190 188 if (ret) 191 - pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n", 192 - vmin, vmax); 189 + pr_err("Failed to set vcc_core in [%dmV..%dmV]\n", vmin, vmax); 193 190 return ret; 194 191 } 195 192 ··· 196 195 { 197 196 vcc_core = regulator_get(NULL, "vcc_core"); 198 197 if (IS_ERR(vcc_core)) { 199 - pr_info("cpufreq: Didn't find vcc_core regulator\n"); 198 + pr_info("Didn't find vcc_core regulator\n"); 200 199 vcc_core = NULL; 201 200 } else { 202 - pr_info("cpufreq: Found vcc_core regulator\n"); 201 + pr_info("Found vcc_core regulator\n"); 203 202 } 204 203 } 205 204 #else ··· 234 233 { 235 234 if (!pxa27x_maxfreq) { 236 235 pxa27x_maxfreq = 416000; 237 - printk(KERN_INFO "PXA CPU 27x max frequency not defined " 238 - "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n", 239 - pxa27x_maxfreq); 236 + pr_info("PXA CPU 27x max frequency not defined (pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n", 237 + pxa27x_maxfreq); 240 238 } else { 241 239 pxa27x_maxfreq *= 1000; 242 240 } ··· 408 408 */ 409 409 if (cpu_is_pxa25x()) { 410 410 find_freq_tables(&pxa255_freq_table, &pxa255_freqs); 411 - pr_info("PXA255 cpufreq using %s frequency table\n", 411 + pr_info("using %s frequency table\n", 412 412 pxa255_turbo_table ? "turbo" : "run"); 413 413 414 414 cpufreq_table_validate_and_show(policy, pxa255_freq_table); ··· 417 417 cpufreq_table_validate_and_show(policy, pxa27x_freq_table); 418 418 } 419 419 420 - printk(KERN_INFO "PXA CPU frequency change support initialized\n"); 420 + pr_info("frequency change support initialized\n"); 421 421 422 422 return 0; 423 423 }
+5 -4
drivers/cpufreq/qoriq-cpufreq.c
··· 301 301 return -ENODEV; 302 302 } 303 303 304 - static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) 304 + static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) 305 305 { 306 306 struct cpu_data *data = policy->driver_data; 307 307 308 + cpufreq_cooling_unregister(data->cdev); 308 309 kfree(data->pclk); 309 310 kfree(data->table); 310 311 kfree(data); ··· 334 333 cpud->cdev = of_cpufreq_cooling_register(np, 335 334 policy->related_cpus); 336 335 337 - if (IS_ERR(cpud->cdev)) { 338 - pr_err("Failed to register cooling device cpu%d: %ld\n", 336 + if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) { 337 + pr_err("cpu%d is not running as cooling device: %ld\n", 339 338 policy->cpu, PTR_ERR(cpud->cdev)); 340 339 341 340 cpud->cdev = NULL; ··· 349 348 .name = "qoriq_cpufreq", 350 349 .flags = CPUFREQ_CONST_LOOPS, 351 350 .init = qoriq_cpufreq_cpu_init, 352 - .exit = __exit_p(qoriq_cpufreq_cpu_exit), 351 + .exit = qoriq_cpufreq_cpu_exit, 353 352 .verify = cpufreq_generic_frequency_table_verify, 354 353 .target_index = qoriq_cpufreq_target, 355 354 .get = cpufreq_generic_get,
+8 -7
drivers/cpufreq/s3c2412-cpufreq.c
··· 10 10 * published by the Free Software Foundation. 11 11 */ 12 12 13 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 + 13 15 #include <linux/init.h> 14 16 #include <linux/module.h> 15 17 #include <linux/interrupt.h> ··· 199 197 200 198 hclk = clk_get(NULL, "hclk"); 201 199 if (IS_ERR(hclk)) { 202 - printk(KERN_ERR "%s: cannot find hclk clock\n", __func__); 200 + pr_err("cannot find hclk clock\n"); 203 201 return -ENOENT; 204 202 } 205 203 206 204 fclk = clk_get(NULL, "fclk"); 207 205 if (IS_ERR(fclk)) { 208 - printk(KERN_ERR "%s: cannot find fclk clock\n", __func__); 206 + pr_err("cannot find fclk clock\n"); 209 207 goto err_fclk; 210 208 } 211 209 212 210 fclk_rate = clk_get_rate(fclk); 213 211 if (fclk_rate > 200000000) { 214 - printk(KERN_INFO 215 - "%s: fclk %ld MHz, assuming 266MHz capable part\n", 216 - __func__, fclk_rate / 1000000); 212 + pr_info("fclk %ld MHz, assuming 266MHz capable part\n", 213 + fclk_rate / 1000000); 217 214 s3c2412_cpufreq_info.max.fclk = 266000000; 218 215 s3c2412_cpufreq_info.max.hclk = 133000000; 219 216 s3c2412_cpufreq_info.max.pclk = 66000000; ··· 220 219 221 220 armclk = clk_get(NULL, "armclk"); 222 221 if (IS_ERR(armclk)) { 223 - printk(KERN_ERR "%s: cannot find arm clock\n", __func__); 222 + pr_err("cannot find arm clock\n"); 224 223 goto err_armclk; 225 224 } 226 225 227 226 xtal = clk_get(NULL, "xtal"); 228 227 if (IS_ERR(xtal)) { 229 - printk(KERN_ERR "%s: cannot find xtal clock\n", __func__); 228 + pr_err("cannot find xtal clock\n"); 230 229 goto err_xtal; 231 230 } 232 231
+4 -2
drivers/cpufreq/s3c2440-cpufreq.c
··· 11 11 * published by the Free Software Foundation. 12 12 */ 13 13 14 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 + 14 16 #include <linux/init.h> 15 17 #include <linux/module.h> 16 18 #include <linux/interrupt.h> ··· 68 66 __func__, fclk, armclk, hclk_max); 69 67 70 68 if (armclk > fclk) { 71 - printk(KERN_WARNING "%s: armclk > fclk\n", __func__); 69 + pr_warn("%s: armclk > fclk\n", __func__); 72 70 armclk = fclk; 73 71 } 74 72 ··· 275 273 armclk = s3c_cpufreq_clk_get(NULL, "armclk"); 276 274 277 275 if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) { 278 - printk(KERN_ERR "%s: failed to get clocks\n", __func__); 276 + pr_err("%s: failed to get clocks\n", __func__); 279 277 return -ENOENT; 280 278 } 281 279
+3 -1
drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
··· 10 10 * published by the Free Software Foundation. 11 11 */ 12 12 13 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 + 13 15 #include <linux/init.h> 14 16 #include <linux/export.h> 15 17 #include <linux/interrupt.h> ··· 180 178 { 181 179 dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL); 182 180 if (IS_ERR(dbgfs_root)) { 183 - printk(KERN_ERR "%s: error creating debugfs root\n", __func__); 181 + pr_err("%s: error creating debugfs root\n", __func__); 184 182 return PTR_ERR(dbgfs_root); 185 183 } 186 184
+30 -29
drivers/cpufreq/s3c24xx-cpufreq.c
··· 10 10 * published by the Free Software Foundation. 11 11 */ 12 12 13 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 + 13 15 #include <linux/init.h> 14 16 #include <linux/module.h> 15 17 #include <linux/interrupt.h> ··· 177 175 cpu_new.freq.fclk = cpu_new.pll.frequency; 178 176 179 177 if (s3c_cpufreq_calcdivs(&cpu_new) < 0) { 180 - printk(KERN_ERR "no divisors for %d\n", target_freq); 178 + pr_err("no divisors for %d\n", target_freq); 181 179 goto err_notpossible; 182 180 } 183 181 ··· 189 187 190 188 if (cpu_new.freq.hclk != cpu_cur.freq.hclk) { 191 189 if (s3c_cpufreq_calcio(&cpu_new) < 0) { 192 - printk(KERN_ERR "%s: no IO timings\n", __func__); 190 + pr_err("%s: no IO timings\n", __func__); 193 191 goto err_notpossible; 194 192 } 195 193 } ··· 264 262 return 0; 265 263 266 264 err_notpossible: 267 - printk(KERN_ERR "no compatible settings for %d\n", target_freq); 265 + pr_err("no compatible settings for %d\n", target_freq); 268 266 return -EINVAL; 269 267 } 270 268 ··· 333 331 &index); 334 332 335 333 if (ret < 0) { 336 - printk(KERN_ERR "%s: no PLL available\n", __func__); 334 + pr_err("%s: no PLL available\n", __func__); 337 335 goto err_notpossible; 338 336 } 339 337 ··· 348 346 return s3c_cpufreq_settarget(policy, target_freq, pll); 349 347 350 348 err_notpossible: 351 - printk(KERN_ERR "no compatible settings for %d\n", target_freq); 349 + pr_err("no compatible settings for %d\n", target_freq); 352 350 return -EINVAL; 353 351 } 354 352 ··· 358 356 359 357 clk = clk_get(dev, name); 360 358 if (IS_ERR(clk)) 361 - printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name); 359 + pr_err("failed to get clock '%s'\n", name); 362 360 363 361 return clk; 364 362 } ··· 380 378 381 379 if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) || 382 380 IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) { 383 - printk(KERN_ERR "%s: could not get clock(s)\n", __func__); 381 + pr_err("%s: could not get clock(s)\n", __func__); 384 382 return -ENOENT; 
385 383 } 386 384 387 - printk(KERN_INFO "%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", __func__, 388 - clk_get_rate(clk_fclk) / 1000, 389 - clk_get_rate(clk_hclk) / 1000, 390 - clk_get_rate(clk_pclk) / 1000, 391 - clk_get_rate(clk_arm) / 1000); 385 + pr_info("%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", 386 + __func__, 387 + clk_get_rate(clk_fclk) / 1000, 388 + clk_get_rate(clk_hclk) / 1000, 389 + clk_get_rate(clk_pclk) / 1000, 390 + clk_get_rate(clk_arm) / 1000); 392 391 393 392 return 0; 394 393 } ··· 427 424 428 425 ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll); 429 426 if (ret) { 430 - printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__); 427 + pr_err("%s: failed to reset pll/freq\n", __func__); 431 428 return ret; 432 429 } 433 430 ··· 452 449 int s3c_cpufreq_register(struct s3c_cpufreq_info *info) 453 450 { 454 451 if (!info || !info->name) { 455 - printk(KERN_ERR "%s: failed to pass valid information\n", 456 - __func__); 452 + pr_err("%s: failed to pass valid information\n", __func__); 457 453 return -EINVAL; 458 454 } 459 455 460 - printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n", 461 - info->name); 456 + pr_info("S3C24XX CPU Frequency driver, %s cpu support\n", 457 + info->name); 462 458 463 459 /* check our driver info has valid data */ 464 460 ··· 480 478 struct s3c_cpufreq_board *ours; 481 479 482 480 if (!board) { 483 - printk(KERN_INFO "%s: no board data\n", __func__); 481 + pr_info("%s: no board data\n", __func__); 484 482 return -EINVAL; 485 483 } 486 484 ··· 489 487 490 488 ours = kzalloc(sizeof(*ours), GFP_KERNEL); 491 489 if (ours == NULL) { 492 - printk(KERN_ERR "%s: no memory\n", __func__); 490 + pr_err("%s: no memory\n", __func__); 493 491 return -ENOMEM; 494 492 } 495 493 ··· 504 502 int ret; 505 503 506 504 if (!cpu_cur.info->get_iotiming) { 507 - printk(KERN_ERR "%s: get_iotiming undefined\n", __func__); 505 + pr_err("%s: get_iotiming undefined\n", __func__); 508 506 return -ENOENT; 509 507 } 510 508 511 - 
printk(KERN_INFO "%s: working out IO settings\n", __func__); 509 + pr_info("%s: working out IO settings\n", __func__); 512 510 513 511 ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming); 514 512 if (ret) 515 - printk(KERN_ERR "%s: failed to get timings\n", __func__); 513 + pr_err("%s: failed to get timings\n", __func__); 516 514 517 515 return ret; 518 516 } ··· 563 561 val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits; 564 562 val |= calc_locktime(rate, cpu_cur.info->locktime_m); 565 563 566 - printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val); 564 + pr_info("%s: new locktime is 0x%08x\n", __func__, val); 567 565 __raw_writel(val, S3C2410_LOCKTIME); 568 566 } 569 567 ··· 582 580 583 581 ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL); 584 582 if (!ftab) { 585 - printk(KERN_ERR "%s: no memory for tables\n", __func__); 583 + pr_err("%s: no memory for tables\n", __func__); 586 584 return -ENOMEM; 587 585 } 588 586 ··· 610 608 if (cpu_cur.board->auto_io) { 611 609 ret = s3c_cpufreq_auto_io(); 612 610 if (ret) { 613 - printk(KERN_ERR "%s: failed to get io timing\n", 611 + pr_err("%s: failed to get io timing\n", 614 612 __func__); 615 613 goto out; 616 614 } 617 615 } 618 616 619 617 if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) { 620 - printk(KERN_ERR "%s: no IO support registered\n", 621 - __func__); 618 + pr_err("%s: no IO support registered\n", __func__); 622 619 ret = -EINVAL; 623 620 goto out; 624 621 } ··· 667 666 vals += plls_no; 668 667 vals->frequency = CPUFREQ_TABLE_END; 669 668 670 - printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no); 669 + pr_info("%d PLL entries\n", plls_no); 671 670 } else 672 - printk(KERN_ERR "cpufreq: no memory for PLL tables\n"); 671 + pr_err("no memory for PLL tables\n"); 673 672 674 673 return vals ? 0 : -ENOMEM; 675 674 }
+6 -4
drivers/cpufreq/s5pv210-cpufreq.c
··· 9 9 * published by the Free Software Foundation. 10 10 */ 11 11 12 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13 + 12 14 #include <linux/types.h> 13 15 #include <linux/kernel.h> 14 16 #include <linux/init.h> ··· 207 205 } else if (ch == DMC1) { 208 206 reg = (dmc_base[1] + 0x30); 209 207 } else { 210 - printk(KERN_ERR "Cannot find DMC port\n"); 208 + pr_err("Cannot find DMC port\n"); 211 209 return; 212 210 } 213 211 ··· 536 534 mem_type = check_mem_type(dmc_base[0]); 537 535 538 536 if ((mem_type != LPDDR) && (mem_type != LPDDR2)) { 539 - printk(KERN_ERR "CPUFreq doesn't support this memory type\n"); 537 + pr_err("CPUFreq doesn't support this memory type\n"); 540 538 ret = -EINVAL; 541 539 goto out_dmc1; 542 540 } ··· 637 635 638 636 arm_regulator = regulator_get(NULL, "vddarm"); 639 637 if (IS_ERR(arm_regulator)) { 640 - pr_err("failed to get regulator vddarm"); 638 + pr_err("failed to get regulator vddarm\n"); 641 639 return PTR_ERR(arm_regulator); 642 640 } 643 641 644 642 int_regulator = regulator_get(NULL, "vddint"); 645 643 if (IS_ERR(int_regulator)) { 646 - pr_err("failed to get regulator vddint"); 644 + pr_err("failed to get regulator vddint\n"); 647 645 regulator_put(arm_regulator); 648 646 return PTR_ERR(int_regulator); 649 647 }
+5 -5
drivers/cpufreq/sc520_freq.c
··· 13 13 * 2005-03-30: - initial revision 14 14 */ 15 15 16 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17 + 16 18 #include <linux/kernel.h> 17 19 #include <linux/module.h> 18 20 #include <linux/init.h> ··· 32 30 33 31 static __u8 __iomem *cpuctl; 34 32 35 - #define PFX "sc520_freq: " 36 - 37 33 static struct cpufreq_frequency_table sc520_freq_table[] = { 38 34 {0, 0x01, 100000}, 39 35 {0, 0x02, 133000}, ··· 44 44 45 45 switch (clockspeed_reg & 0x03) { 46 46 default: 47 - printk(KERN_ERR PFX "error: cpuctl register has unexpected " 48 - "value %02x\n", clockspeed_reg); 47 + pr_err("error: cpuctl register has unexpected value %02x\n", 48 + clockspeed_reg); 49 49 case 0x01: 50 50 return 100000; 51 51 case 0x02: ··· 112 112 113 113 cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); 114 114 if (!cpuctl) { 115 - printk(KERN_ERR "sc520_freq: error: failed to remap memory\n"); 115 + pr_err("sc520_freq: error: failed to remap memory\n"); 116 116 return -ENOMEM; 117 117 } 118 118
+3 -3
drivers/cpufreq/speedstep-centrino.c
··· 13 13 * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org> 14 14 */ 15 15 16 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17 + 16 18 #include <linux/kernel.h> 17 19 #include <linux/module.h> 18 20 #include <linux/init.h> ··· 29 27 #include <asm/cpufeature.h> 30 28 #include <asm/cpu_device_id.h> 31 29 32 - #define PFX "speedstep-centrino: " 33 30 #define MAINTAINER "linux-pm@vger.kernel.org" 34 31 35 32 #define INTEL_MSR_RANGE (0xffff) ··· 387 386 /* check to see if it stuck */ 388 387 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 389 388 if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { 390 - printk(KERN_INFO PFX 391 - "couldn't enable Enhanced SpeedStep\n"); 389 + pr_info("couldn't enable Enhanced SpeedStep\n"); 392 390 return -ENODEV; 393 391 } 394 392 }
+5 -3
drivers/cpufreq/speedstep-ich.c
··· 18 18 * SPEEDSTEP - DEFINITIONS * 19 19 *********************************************************************/ 20 20 21 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22 + 21 23 #include <linux/kernel.h> 22 24 #include <linux/module.h> 23 25 #include <linux/init.h> ··· 70 68 /* get PMBASE */ 71 69 pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase); 72 70 if (!(pmbase & 0x01)) { 73 - printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); 71 + pr_err("could not find speedstep register\n"); 74 72 return -ENODEV; 75 73 } 76 74 77 75 pmbase &= 0xFFFFFFFE; 78 76 if (!pmbase) { 79 - printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); 77 + pr_err("could not find speedstep register\n"); 80 78 return -ENODEV; 81 79 } 82 80 ··· 138 136 pr_debug("change to %u MHz succeeded\n", 139 137 speedstep_get_frequency(speedstep_processor) / 1000); 140 138 else 141 - printk(KERN_ERR "cpufreq: change failed - I/O error\n"); 139 + pr_err("change failed - I/O error\n"); 142 140 143 141 return; 144 142 }
+5 -6
drivers/cpufreq/speedstep-lib.c
··· 8 8 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* 9 9 */ 10 10 11 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 + 11 13 #include <linux/kernel.h> 12 14 #include <linux/module.h> 13 15 #include <linux/moduleparam.h> ··· 155 153 fsb = 333333; 156 154 break; 157 155 default: 158 - printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value"); 156 + pr_err("PCORE - MSR_FSB_FREQ undefined value\n"); 159 157 } 160 158 161 159 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); ··· 455 453 */ 456 454 if (*transition_latency > 10000000 || 457 455 *transition_latency < 50000) { 458 - printk(KERN_WARNING PFX "frequency transition " 459 - "measured seems out of range (%u " 460 - "nSec), falling back to a safe one of" 461 - "%u nSec.\n", 462 - *transition_latency, 500000); 456 + pr_warn("frequency transition measured seems out of range (%u nSec), falling back to a safe one of %u nSec\n", 457 + *transition_latency, 500000); 463 458 *transition_latency = 500000; 464 459 } 465 460 }
+4 -3
drivers/cpufreq/speedstep-smi.c
··· 12 12 * SPEEDSTEP - DEFINITIONS * 13 13 *********************************************************************/ 14 14 15 + #define pr_fmt(fmt) "cpufreq: " fmt 16 + 15 17 #include <linux/kernel.h> 16 18 #include <linux/module.h> 17 19 #include <linux/moduleparam.h> ··· 206 204 (speedstep_freqs[new_state].frequency / 1000), 207 205 retry, result); 208 206 else 209 - printk(KERN_ERR "cpufreq: change to state %u " 210 - "failed with new_state %u and result %u\n", 211 - state, new_state, result); 207 + pr_err("change to state %u failed with new_state %u and result %u\n", 208 + state, new_state, result); 212 209 213 210 return; 214 211 }
-7
drivers/cpufreq/tegra124-cpufreq.c
··· 14 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 15 16 16 #include <linux/clk.h> 17 - #include <linux/cpufreq-dt.h> 18 17 #include <linux/err.h> 19 18 #include <linux/init.h> 20 19 #include <linux/kernel.h> ··· 67 68 regulator_sync_voltage(priv->vdd_cpu_reg); 68 69 clk_set_parent(priv->cpu_clk, priv->pllx_clk); 69 70 } 70 - 71 - static struct cpufreq_dt_platform_data cpufreq_dt_pd = { 72 - .independent_clocks = false, 73 - }; 74 71 75 72 static int tegra124_cpufreq_probe(struct platform_device *pdev) 76 73 { ··· 124 129 125 130 cpufreq_dt_devinfo.name = "cpufreq-dt"; 126 131 cpufreq_dt_devinfo.parent = &pdev->dev; 127 - cpufreq_dt_devinfo.data = &cpufreq_dt_pd; 128 - cpufreq_dt_devinfo.size_data = sizeof(cpufreq_dt_pd); 129 132 130 133 priv->cpufreq_dt_pdev = 131 134 platform_device_register_full(&cpufreq_dt_devinfo);
+2
include/linux/cpufreq-dt.h
··· 10 10 #ifndef __CPUFREQ_DT_H__ 11 11 #define __CPUFREQ_DT_H__ 12 12 13 + #include <linux/types.h> 14 + 13 15 struct cpufreq_dt_platform_data { 14 16 /* 15 17 * True when each CPU has its own clock to control its
+54
include/linux/cpufreq.h
··· 102 102 	 */
103 103 	struct rw_semaphore	rwsem;
104 104 
105 	+	/*
106 	+	 * Fast switch flags:
107 	+	 * - fast_switch_possible should be set by the driver if it can
108 	+	 *   guarantee that frequency can be changed on any CPU sharing the
109 	+	 *   policy and that the change will affect all of the policy CPUs then.
110 	+	 * - fast_switch_enabled is to be set by governors that support fast
111 	+	 *   frequency switching with the help of cpufreq_enable_fast_switch().
112 	+	 */
113 	+	bool	fast_switch_possible;
114 	+	bool	fast_switch_enabled;
115 	+ 
105 116 	/* Synchronization for frequency transitions */
106 117 	bool	transition_ongoing; /* Tracks transition status */
107 118 	spinlock_t	transition_lock;
··· 167 156 int cpufreq_update_policy(unsigned int cpu);
168 157 bool have_governor_per_policy(void);
169 158 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
159 	+ void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
160 	+ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
170 161 #else
171 162 static inline unsigned int cpufreq_get(unsigned int cpu)
172 163 {
··· 249 236 	unsigned int relation);	/* Deprecated */
250 237 	int		(*target_index)(struct cpufreq_policy *policy,
251 238 	unsigned int index);
239 	+	unsigned int	(*fast_switch)(struct cpufreq_policy *policy,
240 	+	unsigned int target_freq);
252 241 	/*
253 242 	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
254 243 	 * unset.
··· 441 426 #define CPUFREQ_POLICY_POWERSAVE	(1)
442 427 #define CPUFREQ_POLICY_PERFORMANCE	(2)
443 428 
429 	+ /*
430 	+  * The polling frequency depends on the capability of the processor. Default
431 	+  * polling frequency is 1000 times the transition latency of the processor. The
432 	+  * ondemand governor will work on any processor with transition latency <= 10ms,
433 	+  * using appropriate sampling rate.
434 	+  *
435 	+  * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
436 	+  * the ondemand governor will not work. 
All times here are in us (microseconds). 437 + */ 438 + #define MIN_SAMPLING_RATE_RATIO (2) 439 + #define LATENCY_MULTIPLIER (1000) 440 + #define MIN_LATENCY_MULTIPLIER (20) 441 + #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) 442 + 444 443 /* Governor Events */ 445 444 #define CPUFREQ_GOV_START 1 446 445 #define CPUFREQ_GOV_STOP 2 ··· 479 450 }; 480 451 481 452 /* Pass a target to the cpufreq driver */ 453 + unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, 454 + unsigned int target_freq); 482 455 int cpufreq_driver_target(struct cpufreq_policy *policy, 483 456 unsigned int target_freq, 484 457 unsigned int relation); ··· 492 461 493 462 struct cpufreq_governor *cpufreq_default_governor(void); 494 463 struct cpufreq_governor *cpufreq_fallback_governor(void); 464 + 465 + /* Governor attribute set */ 466 + struct gov_attr_set { 467 + struct kobject kobj; 468 + struct list_head policy_list; 469 + struct mutex update_lock; 470 + int usage_count; 471 + }; 472 + 473 + /* sysfs ops for cpufreq governors */ 474 + extern const struct sysfs_ops governor_sysfs_ops; 475 + 476 + void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node); 477 + void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node); 478 + unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node); 479 + 480 + /* Governor sysfs attribute */ 481 + struct governor_attr { 482 + struct attribute attr; 483 + ssize_t (*show)(struct gov_attr_set *attr_set, char *buf); 484 + ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf, 485 + size_t count); 486 + }; 495 487 496 488 /********************************************************************* 497 489 * FREQUENCY TABLE HELPERS *
+4 -1
include/linux/sched.h
··· 3240 3240 u64 time, unsigned long util, unsigned long max); 3241 3241 }; 3242 3242 3243 - void cpufreq_set_update_util_data(int cpu, struct update_util_data *data); 3243 + void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, 3244 + void (*func)(struct update_util_data *data, u64 time, 3245 + unsigned long util, unsigned long max)); 3246 + void cpufreq_remove_update_util_hook(int cpu); 3244 3247 #endif /* CONFIG_CPU_FREQ */ 3245 3248 3246 3249 #endif
+1
kernel/sched/Makefile
··· 24 24 obj-$(CONFIG_SCHED_DEBUG) += debug.o 25 25 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o 26 26 obj-$(CONFIG_CPU_FREQ) += cpufreq.o 27 + obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
+37 -11
kernel/sched/cpufreq.c
··· 14 14 DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); 15 15 16 16 /** 17 - * cpufreq_set_update_util_data - Populate the CPU's update_util_data pointer. 17 + * cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer. 18 18 * @cpu: The CPU to set the pointer for. 19 19 * @data: New pointer value. 20 + * @func: Callback function to set for the CPU. 20 21 * 21 - * Set and publish the update_util_data pointer for the given CPU. That pointer 22 - * points to a struct update_util_data object containing a callback function 23 - * to call from cpufreq_update_util(). That function will be called from an RCU 24 - * read-side critical section, so it must not sleep. 22 + * Set and publish the update_util_data pointer for the given CPU. 23 + * 24 + * The update_util_data pointer of @cpu is set to @data and the callback 25 + * function pointer in the target struct update_util_data is set to @func. 26 + * That function will be called by cpufreq_update_util() from RCU-sched 27 + * read-side critical sections, so it must not sleep. @data will always be 28 + * passed to it as the first argument which allows the function to get to the 29 + * target update_util_data structure and its container. 30 + * 31 + * The update_util_data pointer of @cpu must be NULL when this function is 32 + * called or it will WARN() and return with no effect. 33 + */ 34 + void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, 35 + void (*func)(struct update_util_data *data, u64 time, 36 + unsigned long util, unsigned long max)) 37 + { 38 + if (WARN_ON(!data || !func)) 39 + return; 40 + 41 + if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu))) 42 + return; 43 + 44 + data->func = func; 45 + rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data); 46 + } 47 + EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook); 48 + 49 + /** 50 + * cpufreq_remove_update_util_hook - Clear the CPU's update_util_data pointer. 
51 + * @cpu: The CPU to clear the pointer for. 52 + * 53 + * Clear the update_util_data pointer for the given CPU. 25 54 * 26 55 * Callers must use RCU-sched callbacks to free any memory that might be 27 56 * accessed via the old update_util_data pointer or invoke synchronize_sched() 28 57 * right after this function to avoid use-after-free. 29 58 */ 30 - void cpufreq_set_update_util_data(int cpu, struct update_util_data *data) 59 + void cpufreq_remove_update_util_hook(int cpu) 31 60 { 32 - if (WARN_ON(data && !data->func)) 33 - return; 34 - 35 - rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data); 61 + rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL); 36 62 } 37 - EXPORT_SYMBOL_GPL(cpufreq_set_update_util_data); 63 + EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
+530
kernel/sched/cpufreq_schedutil.c
··· 1 + /* 2 + * CPUFreq governor based on scheduler-provided CPU utilization data. 3 + * 4 + * Copyright (C) 2016, Intel Corporation 5 + * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + 12 + #include <linux/cpufreq.h> 13 + #include <linux/module.h> 14 + #include <linux/slab.h> 15 + #include <trace/events/power.h> 16 + 17 + #include "sched.h" 18 + 19 + struct sugov_tunables { 20 + struct gov_attr_set attr_set; 21 + unsigned int rate_limit_us; 22 + }; 23 + 24 + struct sugov_policy { 25 + struct cpufreq_policy *policy; 26 + 27 + struct sugov_tunables *tunables; 28 + struct list_head tunables_hook; 29 + 30 + raw_spinlock_t update_lock; /* For shared policies */ 31 + u64 last_freq_update_time; 32 + s64 freq_update_delay_ns; 33 + unsigned int next_freq; 34 + 35 + /* The next fields are only needed if fast switch cannot be used. */ 36 + struct irq_work irq_work; 37 + struct work_struct work; 38 + struct mutex work_lock; 39 + bool work_in_progress; 40 + 41 + bool need_freq_update; 42 + }; 43 + 44 + struct sugov_cpu { 45 + struct update_util_data update_util; 46 + struct sugov_policy *sg_policy; 47 + 48 + /* The fields below are only needed when sharing a policy. 
*/ 49 + unsigned long util; 50 + unsigned long max; 51 + u64 last_update; 52 + }; 53 + 54 + static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu); 55 + 56 + /************************ Governor internals ***********************/ 57 + 58 + static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) 59 + { 60 + s64 delta_ns; 61 + 62 + if (sg_policy->work_in_progress) 63 + return false; 64 + 65 + if (unlikely(sg_policy->need_freq_update)) { 66 + sg_policy->need_freq_update = false; 67 + /* 68 + * This happens when limits change, so forget the previous 69 + * next_freq value and force an update. 70 + */ 71 + sg_policy->next_freq = UINT_MAX; 72 + return true; 73 + } 74 + 75 + delta_ns = time - sg_policy->last_freq_update_time; 76 + return delta_ns >= sg_policy->freq_update_delay_ns; 77 + } 78 + 79 + static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time, 80 + unsigned int next_freq) 81 + { 82 + struct cpufreq_policy *policy = sg_policy->policy; 83 + 84 + sg_policy->last_freq_update_time = time; 85 + 86 + if (policy->fast_switch_enabled) { 87 + if (sg_policy->next_freq == next_freq) { 88 + trace_cpu_frequency(policy->cur, smp_processor_id()); 89 + return; 90 + } 91 + sg_policy->next_freq = next_freq; 92 + next_freq = cpufreq_driver_fast_switch(policy, next_freq); 93 + if (next_freq == CPUFREQ_ENTRY_INVALID) 94 + return; 95 + 96 + policy->cur = next_freq; 97 + trace_cpu_frequency(next_freq, smp_processor_id()); 98 + } else if (sg_policy->next_freq != next_freq) { 99 + sg_policy->next_freq = next_freq; 100 + sg_policy->work_in_progress = true; 101 + irq_work_queue(&sg_policy->irq_work); 102 + } 103 + } 104 + 105 + /** 106 + * get_next_freq - Compute a new frequency for a given cpufreq policy. 107 + * @policy: cpufreq policy object to compute the new frequency for. 108 + * @util: Current CPU utilization. 109 + * @max: CPU capacity. 
110 + * 111 + * If the utilization is frequency-invariant, choose the new frequency to be 112 + * proportional to it, that is 113 + * 114 + * next_freq = C * max_freq * util / max 115 + * 116 + * Otherwise, approximate the would-be frequency-invariant utilization by 117 + * util_raw * (curr_freq / max_freq) which leads to 118 + * 119 + * next_freq = C * curr_freq * util_raw / max 120 + * 121 + * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8. 122 + */ 123 + static unsigned int get_next_freq(struct cpufreq_policy *policy, 124 + unsigned long util, unsigned long max) 125 + { 126 + unsigned int freq = arch_scale_freq_invariant() ? 127 + policy->cpuinfo.max_freq : policy->cur; 128 + 129 + return (freq + (freq >> 2)) * util / max; 130 + } 131 + 132 + static void sugov_update_single(struct update_util_data *hook, u64 time, 133 + unsigned long util, unsigned long max) 134 + { 135 + struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); 136 + struct sugov_policy *sg_policy = sg_cpu->sg_policy; 137 + struct cpufreq_policy *policy = sg_policy->policy; 138 + unsigned int next_f; 139 + 140 + if (!sugov_should_update_freq(sg_policy, time)) 141 + return; 142 + 143 + next_f = util == ULONG_MAX ? 
policy->cpuinfo.max_freq : 144 + get_next_freq(policy, util, max); 145 + sugov_update_commit(sg_policy, time, next_f); 146 + } 147 + 148 + static unsigned int sugov_next_freq_shared(struct sugov_policy *sg_policy, 149 + unsigned long util, unsigned long max) 150 + { 151 + struct cpufreq_policy *policy = sg_policy->policy; 152 + unsigned int max_f = policy->cpuinfo.max_freq; 153 + u64 last_freq_update_time = sg_policy->last_freq_update_time; 154 + unsigned int j; 155 + 156 + if (util == ULONG_MAX) 157 + return max_f; 158 + 159 + for_each_cpu(j, policy->cpus) { 160 + struct sugov_cpu *j_sg_cpu; 161 + unsigned long j_util, j_max; 162 + s64 delta_ns; 163 + 164 + if (j == smp_processor_id()) 165 + continue; 166 + 167 + j_sg_cpu = &per_cpu(sugov_cpu, j); 168 + /* 169 + * If the CPU utilization was last updated before the previous 170 + * frequency update and the time elapsed between the last update 171 + * of the CPU utilization and the last frequency update is long 172 + * enough, don't take the CPU into account as it probably is 173 + * idle now. 
174 + */ 175 + delta_ns = last_freq_update_time - j_sg_cpu->last_update; 176 + if (delta_ns > TICK_NSEC) 177 + continue; 178 + 179 + j_util = j_sg_cpu->util; 180 + if (j_util == ULONG_MAX) 181 + return max_f; 182 + 183 + j_max = j_sg_cpu->max; 184 + if (j_util * max > j_max * util) { 185 + util = j_util; 186 + max = j_max; 187 + } 188 + } 189 + 190 + return get_next_freq(policy, util, max); 191 + } 192 + 193 + static void sugov_update_shared(struct update_util_data *hook, u64 time, 194 + unsigned long util, unsigned long max) 195 + { 196 + struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); 197 + struct sugov_policy *sg_policy = sg_cpu->sg_policy; 198 + unsigned int next_f; 199 + 200 + raw_spin_lock(&sg_policy->update_lock); 201 + 202 + sg_cpu->util = util; 203 + sg_cpu->max = max; 204 + sg_cpu->last_update = time; 205 + 206 + if (sugov_should_update_freq(sg_policy, time)) { 207 + next_f = sugov_next_freq_shared(sg_policy, util, max); 208 + sugov_update_commit(sg_policy, time, next_f); 209 + } 210 + 211 + raw_spin_unlock(&sg_policy->update_lock); 212 + } 213 + 214 + static void sugov_work(struct work_struct *work) 215 + { 216 + struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work); 217 + 218 + mutex_lock(&sg_policy->work_lock); 219 + __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq, 220 + CPUFREQ_RELATION_L); 221 + mutex_unlock(&sg_policy->work_lock); 222 + 223 + sg_policy->work_in_progress = false; 224 + } 225 + 226 + static void sugov_irq_work(struct irq_work *irq_work) 227 + { 228 + struct sugov_policy *sg_policy; 229 + 230 + sg_policy = container_of(irq_work, struct sugov_policy, irq_work); 231 + schedule_work_on(smp_processor_id(), &sg_policy->work); 232 + } 233 + 234 + /************************** sysfs interface ************************/ 235 + 236 + static struct sugov_tunables *global_tunables; 237 + static DEFINE_MUTEX(global_tunables_lock); 238 + 239 + static inline struct sugov_tunables 
*to_sugov_tunables(struct gov_attr_set *attr_set) 240 + { 241 + return container_of(attr_set, struct sugov_tunables, attr_set); 242 + } 243 + 244 + static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf) 245 + { 246 + struct sugov_tunables *tunables = to_sugov_tunables(attr_set); 247 + 248 + return sprintf(buf, "%u\n", tunables->rate_limit_us); 249 + } 250 + 251 + static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, 252 + size_t count) 253 + { 254 + struct sugov_tunables *tunables = to_sugov_tunables(attr_set); 255 + struct sugov_policy *sg_policy; 256 + unsigned int rate_limit_us; 257 + 258 + if (kstrtouint(buf, 10, &rate_limit_us)) 259 + return -EINVAL; 260 + 261 + tunables->rate_limit_us = rate_limit_us; 262 + 263 + list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) 264 + sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC; 265 + 266 + return count; 267 + } 268 + 269 + static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us); 270 + 271 + static struct attribute *sugov_attributes[] = { 272 + &rate_limit_us.attr, 273 + NULL 274 + }; 275 + 276 + static struct kobj_type sugov_tunables_ktype = { 277 + .default_attrs = sugov_attributes, 278 + .sysfs_ops = &governor_sysfs_ops, 279 + }; 280 + 281 + /********************** cpufreq governor interface *********************/ 282 + 283 + static struct cpufreq_governor schedutil_gov; 284 + 285 + static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy) 286 + { 287 + struct sugov_policy *sg_policy; 288 + 289 + sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL); 290 + if (!sg_policy) 291 + return NULL; 292 + 293 + sg_policy->policy = policy; 294 + init_irq_work(&sg_policy->irq_work, sugov_irq_work); 295 + INIT_WORK(&sg_policy->work, sugov_work); 296 + mutex_init(&sg_policy->work_lock); 297 + raw_spin_lock_init(&sg_policy->update_lock); 298 + return sg_policy; 299 + } 300 + 301 + static void sugov_policy_free(struct 
sugov_policy *sg_policy) 302 + { 303 + mutex_destroy(&sg_policy->work_lock); 304 + kfree(sg_policy); 305 + } 306 + 307 + static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy) 308 + { 309 + struct sugov_tunables *tunables; 310 + 311 + tunables = kzalloc(sizeof(*tunables), GFP_KERNEL); 312 + if (tunables) { 313 + gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook); 314 + if (!have_governor_per_policy()) 315 + global_tunables = tunables; 316 + } 317 + return tunables; 318 + } 319 + 320 + static void sugov_tunables_free(struct sugov_tunables *tunables) 321 + { 322 + if (!have_governor_per_policy()) 323 + global_tunables = NULL; 324 + 325 + kfree(tunables); 326 + } 327 + 328 + static int sugov_init(struct cpufreq_policy *policy) 329 + { 330 + struct sugov_policy *sg_policy; 331 + struct sugov_tunables *tunables; 332 + unsigned int lat; 333 + int ret = 0; 334 + 335 + /* State should be equivalent to EXIT */ 336 + if (policy->governor_data) 337 + return -EBUSY; 338 + 339 + sg_policy = sugov_policy_alloc(policy); 340 + if (!sg_policy) 341 + return -ENOMEM; 342 + 343 + mutex_lock(&global_tunables_lock); 344 + 345 + if (global_tunables) { 346 + if (WARN_ON(have_governor_per_policy())) { 347 + ret = -EINVAL; 348 + goto free_sg_policy; 349 + } 350 + policy->governor_data = sg_policy; 351 + sg_policy->tunables = global_tunables; 352 + 353 + gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook); 354 + goto out; 355 + } 356 + 357 + tunables = sugov_tunables_alloc(sg_policy); 358 + if (!tunables) { 359 + ret = -ENOMEM; 360 + goto free_sg_policy; 361 + } 362 + 363 + tunables->rate_limit_us = LATENCY_MULTIPLIER; 364 + lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC; 365 + if (lat) 366 + tunables->rate_limit_us *= lat; 367 + 368 + policy->governor_data = sg_policy; 369 + sg_policy->tunables = tunables; 370 + 371 + ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype, 372 + 
get_governor_parent_kobj(policy), "%s", 373 + schedutil_gov.name); 374 + if (ret) 375 + goto fail; 376 + 377 + out: 378 + mutex_unlock(&global_tunables_lock); 379 + 380 + cpufreq_enable_fast_switch(policy); 381 + return 0; 382 + 383 + fail: 384 + policy->governor_data = NULL; 385 + sugov_tunables_free(tunables); 386 + 387 + free_sg_policy: 388 + mutex_unlock(&global_tunables_lock); 389 + 390 + sugov_policy_free(sg_policy); 391 + pr_err("cpufreq: schedutil governor initialization failed (error %d)\n", ret); 392 + return ret; 393 + } 394 + 395 + static int sugov_exit(struct cpufreq_policy *policy) 396 + { 397 + struct sugov_policy *sg_policy = policy->governor_data; 398 + struct sugov_tunables *tunables = sg_policy->tunables; 399 + unsigned int count; 400 + 401 + cpufreq_disable_fast_switch(policy); 402 + 403 + mutex_lock(&global_tunables_lock); 404 + 405 + count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook); 406 + policy->governor_data = NULL; 407 + if (!count) 408 + sugov_tunables_free(tunables); 409 + 410 + mutex_unlock(&global_tunables_lock); 411 + 412 + sugov_policy_free(sg_policy); 413 + return 0; 414 + } 415 + 416 + static int sugov_start(struct cpufreq_policy *policy) 417 + { 418 + struct sugov_policy *sg_policy = policy->governor_data; 419 + unsigned int cpu; 420 + 421 + sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC; 422 + sg_policy->last_freq_update_time = 0; 423 + sg_policy->next_freq = UINT_MAX; 424 + sg_policy->work_in_progress = false; 425 + sg_policy->need_freq_update = false; 426 + 427 + for_each_cpu(cpu, policy->cpus) { 428 + struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); 429 + 430 + sg_cpu->sg_policy = sg_policy; 431 + if (policy_is_shared(policy)) { 432 + sg_cpu->util = ULONG_MAX; 433 + sg_cpu->max = 0; 434 + sg_cpu->last_update = 0; 435 + cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, 436 + sugov_update_shared); 437 + } else { 438 + cpufreq_add_update_util_hook(cpu, 
&sg_cpu->update_util, 439 + sugov_update_single); 440 + } 441 + } 442 + return 0; 443 + } 444 + 445 + static int sugov_stop(struct cpufreq_policy *policy) 446 + { 447 + struct sugov_policy *sg_policy = policy->governor_data; 448 + unsigned int cpu; 449 + 450 + for_each_cpu(cpu, policy->cpus) 451 + cpufreq_remove_update_util_hook(cpu); 452 + 453 + synchronize_sched(); 454 + 455 + irq_work_sync(&sg_policy->irq_work); 456 + cancel_work_sync(&sg_policy->work); 457 + return 0; 458 + } 459 + 460 + static int sugov_limits(struct cpufreq_policy *policy) 461 + { 462 + struct sugov_policy *sg_policy = policy->governor_data; 463 + 464 + if (!policy->fast_switch_enabled) { 465 + mutex_lock(&sg_policy->work_lock); 466 + 467 + if (policy->max < policy->cur) 468 + __cpufreq_driver_target(policy, policy->max, 469 + CPUFREQ_RELATION_H); 470 + else if (policy->min > policy->cur) 471 + __cpufreq_driver_target(policy, policy->min, 472 + CPUFREQ_RELATION_L); 473 + 474 + mutex_unlock(&sg_policy->work_lock); 475 + } 476 + 477 + sg_policy->need_freq_update = true; 478 + return 0; 479 + } 480 + 481 + int sugov_governor(struct cpufreq_policy *policy, unsigned int event) 482 + { 483 + if (event == CPUFREQ_GOV_POLICY_INIT) { 484 + return sugov_init(policy); 485 + } else if (policy->governor_data) { 486 + switch (event) { 487 + case CPUFREQ_GOV_POLICY_EXIT: 488 + return sugov_exit(policy); 489 + case CPUFREQ_GOV_START: 490 + return sugov_start(policy); 491 + case CPUFREQ_GOV_STOP: 492 + return sugov_stop(policy); 493 + case CPUFREQ_GOV_LIMITS: 494 + return sugov_limits(policy); 495 + } 496 + } 497 + return -EINVAL; 498 + } 499 + 500 + static struct cpufreq_governor schedutil_gov = { 501 + .name = "schedutil", 502 + .governor = sugov_governor, 503 + .owner = THIS_MODULE, 504 + }; 505 + 506 + static int __init sugov_module_init(void) 507 + { 508 + return cpufreq_register_governor(&schedutil_gov); 509 + } 510 + 511 + static void __exit sugov_module_exit(void) 512 + { 513 + 
cpufreq_unregister_governor(&schedutil_gov); 514 + } 515 + 516 + MODULE_AUTHOR("Rafael J. Wysocki <rafael.j.wysocki@intel.com>"); 517 + MODULE_DESCRIPTION("Utilization-based CPU frequency selection"); 518 + MODULE_LICENSE("GPL"); 519 + 520 + #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL 521 + struct cpufreq_governor *cpufreq_default_governor(void) 522 + { 523 + return &schedutil_gov; 524 + } 525 + 526 + fs_initcall(sugov_module_init); 527 + #else 528 + module_init(sugov_module_init); 529 + #endif 530 + module_exit(sugov_module_exit);
+8
kernel/sched/sched.h
··· 1842 1842 static inline void cpufreq_trigger_update(u64 time) {} 1843 1843 #endif /* CONFIG_CPU_FREQ */ 1844 1844 1845 + #ifdef arch_scale_freq_capacity 1846 + #ifndef arch_scale_freq_invariant 1847 + #define arch_scale_freq_invariant() (true) 1848 + #endif 1849 + #else /* arch_scale_freq_capacity */ 1850 + #define arch_scale_freq_invariant() (false) 1851 + #endif 1852 + 1845 1853 static inline void account_reset_rq(struct rq *rq) 1846 1854 { 1847 1855 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+1
kernel/trace/power-traces.c
··· 15 15 16 16 EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume); 17 17 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle); 18 + EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency); 18 19 EXPORT_TRACEPOINT_SYMBOL_GPL(powernv_throttle); 19 20