Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'pm-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management updates from Rafael Wysocki:
"These add support for some new hardware, extend the existing hardware
support, fix some issues and clean up code

Specifics:

- Add support for Tiger Lake in no-HWP mode to intel_pstate (Doug
Smythies)

- Update the AMD P-state driver (Perry Yuan):
- Fix wrong lowest perf fetch
- Map desired perf into pstate scope for powersave governor
- Update pstate frequency transition delay time
- Fix initial highest_perf value
- Clean up

- Move max CPU capacity to sugov_policy in the schedutil cpufreq
governor (Lukasz Luba)

- Add SM6115 to cpufreq-dt blocklist (Adam Skladowski)

- Add support for Tegra239 and minor cleanups (Sumit Gupta, ye
xingchen, and Yang Yingliang)

- Add freq qos for qcom cpufreq driver and minor cleanups (Xuewen
Yan, and Viresh Kumar)

- Minor cleanups around functions called at module_init() (Xiu
Jianfeng)

- Use module_init and add module_exit for bmips driver (Zhang
Jianhua)

- Add AlderLake-N support to intel_idle (Zhang Rui)

- Replace strlcpy() with unused retval with strscpy() in intel_idle
(Wolfram Sang)

- Remove redundant check from cpuidle_switch_governor() (Yu Liao)

- Replace strlcpy() with unused retval with strscpy() in the powernv
cpuidle driver (Wolfram Sang)

- Drop duplicate word from a comment in the coupled cpuidle driver
(Jason Wang)

- Make rpm_resume() return -EINPROGRESS if RPM_NOWAIT is passed to it
in the flags and the device is about to resume (Rafael Wysocki)

- Add extra debugging statement for multiple active IRQs to system
wakeup handling code (Mario Limonciello)

- Replace strlcpy() with unused retval with strscpy() in the core
system suspend support code (Wolfram Sang)

- Update the intel_rapl power capping driver:
- Use standard Energy Unit for SPR Dram RAPL domain (Zhang Rui).
- Add support for RAPTORLAKE_S (Zhang Rui).
- Fix UBSAN shift-out-of-bounds issue (Chao Qin)

- Handle -EPROBE_DEFER when regulator is not probed on
mtk-ci-devfreq.c (AngeloGioacchino Del Regno)

- Fix message typo and use dev_err_probe() in rockchip-dfi.c
(Christophe JAILLET)"

* tag 'pm-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (29 commits)
cpufreq: qcom-cpufreq-hw: Add cpufreq qos for LMh
cpufreq: Add __init annotation to module init funcs
cpufreq: tegra194: change tegra239_cpufreq_soc to static
PM / devfreq: rockchip-dfi: Fix an error message
PM / devfreq: mtk-cci: Handle sram regulator probe deferral
powercap: intel_rapl: Use standard Energy Unit for SPR Dram RAPL domain
PM: runtime: Return -EINPROGRESS from rpm_resume() in the RPM_NOWAIT case
intel_idle: Add AlderLake-N support
powercap: intel_rapl: fix UBSAN shift-out-of-bounds issue
cpufreq: tegra194: Add support for Tegra239
cpufreq: qcom-cpufreq-hw: Fix uninitialized throttled_freq warning
cpufreq: intel_pstate: Add Tigerlake support in no-HWP mode
powercap: intel_rapl: Add support for RAPTORLAKE_S
cpufreq: amd-pstate: Fix initial highest_perf value
cpuidle: Remove redundant check in cpuidle_switch_governor()
PM: wakeup: Add extra debugging statement for multiple active IRQs
cpufreq: tegra194: Remove the unneeded result variable
PM: suspend: move from strlcpy() with unused retval to strscpy()
intel_idle: move from strlcpy() with unused retval to strscpy()
cpuidle: powernv: move from strlcpy() with unused retval to strscpy()
...

+165 -82
+5 -2
drivers/base/power/runtime.c
··· 792 792 DEFINE_WAIT(wait); 793 793 794 794 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { 795 - if (dev->power.runtime_status == RPM_SUSPENDING) 795 + if (dev->power.runtime_status == RPM_SUSPENDING) { 796 796 dev->power.deferred_resume = true; 797 - else 797 + if (rpmflags & RPM_NOWAIT) 798 + retval = -EINPROGRESS; 799 + } else { 798 800 retval = -EINPROGRESS; 801 + } 799 802 goto out; 800 803 } 801 804
+2
drivers/base/power/wakeup.c
··· 944 944 else 945 945 irq_number = 0; 946 946 947 + pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number); 948 + 947 949 raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); 948 950 949 951 if (irq_number)
+21 -18
drivers/cpufreq/amd-pstate.c
··· 46 46 #include <asm/cpu_device_id.h> 47 47 #include "amd-pstate-trace.h" 48 48 49 - #define AMD_PSTATE_TRANSITION_LATENCY 0x20000 50 - #define AMD_PSTATE_TRANSITION_DELAY 500 49 + #define AMD_PSTATE_TRANSITION_LATENCY 20000 50 + #define AMD_PSTATE_TRANSITION_DELAY 1000 51 51 52 52 /* 53 53 * TODO: We need more time to fine tune processors with shared memory solution ··· 120 120 struct amd_aperf_mperf cur; 121 121 struct amd_aperf_mperf prev; 122 122 123 - u64 freq; 123 + u64 freq; 124 124 bool boost_supported; 125 125 }; 126 126 ··· 152 152 static int pstate_init_perf(struct amd_cpudata *cpudata) 153 153 { 154 154 u64 cap1; 155 + u32 highest_perf; 155 156 156 157 int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, 157 158 &cap1); ··· 164 163 * 165 164 * CPPC entry doesn't indicate the highest performance in some ASICs. 166 165 */ 167 - WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); 166 + highest_perf = amd_get_highest_perf(); 167 + if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1)) 168 + highest_perf = AMD_CPPC_HIGHEST_PERF(cap1); 169 + 170 + WRITE_ONCE(cpudata->highest_perf, highest_perf); 168 171 169 172 WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1)); 170 173 WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1)); ··· 180 175 static int cppc_init_perf(struct amd_cpudata *cpudata) 181 176 { 182 177 struct cppc_perf_caps cppc_perf; 178 + u32 highest_perf; 183 179 184 180 int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 185 181 if (ret) 186 182 return ret; 187 183 188 - WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); 184 + highest_perf = amd_get_highest_perf(); 185 + if (highest_perf > cppc_perf.highest_perf) 186 + highest_perf = cppc_perf.highest_perf; 187 + 188 + WRITE_ONCE(cpudata->highest_perf, highest_perf); 189 189 190 190 WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); 191 191 WRITE_ONCE(cpudata->lowest_nonlinear_perf, ··· 279 269 u64 prev = 
READ_ONCE(cpudata->cppc_req_cached); 280 270 u64 value = prev; 281 271 272 + des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); 282 273 value &= ~AMD_CPPC_MIN_PERF(~0L); 283 274 value |= AMD_CPPC_MIN_PERF(min_perf); 284 275 ··· 323 312 return -ENODEV; 324 313 325 314 cap_perf = READ_ONCE(cpudata->highest_perf); 326 - min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); 315 + min_perf = READ_ONCE(cpudata->lowest_perf); 327 316 max_perf = cap_perf; 328 317 329 318 freqs.old = policy->cur; ··· 367 356 max_perf = cap_perf; 368 357 if (max_perf < min_perf) 369 358 max_perf = min_perf; 370 - 371 - des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); 372 359 373 360 amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true); 374 361 } ··· 564 555 565 556 static int amd_pstate_cpu_exit(struct cpufreq_policy *policy) 566 557 { 567 - struct amd_cpudata *cpudata; 568 - 569 - cpudata = policy->driver_data; 558 + struct amd_cpudata *cpudata = policy->driver_data; 570 559 571 560 freq_qos_remove_request(&cpudata->req[1]); 572 561 freq_qos_remove_request(&cpudata->req[0]); ··· 606 599 char *buf) 607 600 { 608 601 int max_freq; 609 - struct amd_cpudata *cpudata; 610 - 611 - cpudata = policy->driver_data; 602 + struct amd_cpudata *cpudata = policy->driver_data; 612 603 613 604 max_freq = amd_get_max_freq(cpudata); 614 605 if (max_freq < 0) ··· 619 614 char *buf) 620 615 { 621 616 int freq; 622 - struct amd_cpudata *cpudata; 623 - 624 - cpudata = policy->driver_data; 617 + struct amd_cpudata *cpudata = policy->driver_data; 625 618 626 619 freq = amd_get_lowest_nonlinear_freq(cpudata); 627 620 if (freq < 0) ··· 665 662 .resume = amd_pstate_cpu_resume, 666 663 .set_boost = amd_pstate_set_boost, 667 664 .name = "amd-pstate", 668 - .attr = amd_pstate_attr, 665 + .attr = amd_pstate_attr, 669 666 }; 670 667 671 668 static int __init amd_pstate_init(void)
+8 -2
drivers/cpufreq/bmips-cpufreq.c
··· 156 156 .name = BMIPS_CPUFREQ_PREFIX, 157 157 }; 158 158 159 - static int __init bmips_cpufreq_probe(void) 159 + static int __init bmips_cpufreq_driver_init(void) 160 160 { 161 161 struct cpufreq_compat *cc; 162 162 struct device_node *np; ··· 176 176 177 177 return cpufreq_register_driver(&bmips_cpufreq_driver); 178 178 } 179 - device_initcall(bmips_cpufreq_probe); 179 + module_init(bmips_cpufreq_driver_init); 180 + 181 + static void __exit bmips_cpufreq_driver_exit(void) 182 + { 183 + cpufreq_unregister_driver(&bmips_cpufreq_driver); 184 + } 185 + module_exit(bmips_cpufreq_driver_exit); 180 186 181 187 MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>"); 182 188 MODULE_DESCRIPTION("CPUfreq driver for Broadcom BMIPS SoCs");
+1
drivers/cpufreq/cpufreq-dt-platdev.c
··· 146 146 { .compatible = "qcom,sc8180x", }, 147 147 { .compatible = "qcom,sc8280xp", }, 148 148 { .compatible = "qcom,sdm845", }, 149 + { .compatible = "qcom,sm6115", }, 149 150 { .compatible = "qcom,sm6350", }, 150 151 { .compatible = "qcom,sm8150", }, 151 152 { .compatible = "qcom,sm8250", },
+1 -1
drivers/cpufreq/highbank-cpufreq.c
··· 55 55 .notifier_call = hb_cpufreq_clk_notify, 56 56 }; 57 57 58 - static int hb_cpufreq_driver_init(void) 58 + static int __init hb_cpufreq_driver_init(void) 59 59 { 60 60 struct platform_device_info devinfo = { .name = "cpufreq-dt", }; 61 61 struct device *cpu_dev;
+1
drivers/cpufreq/intel_pstate.c
··· 2416 2416 X86_MATCH(SKYLAKE_X, core_funcs), 2417 2417 X86_MATCH(COMETLAKE, core_funcs), 2418 2418 X86_MATCH(ICELAKE_X, core_funcs), 2419 + X86_MATCH(TIGERLAKE, core_funcs), 2419 2420 {} 2420 2421 }; 2421 2422 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+19 -5
drivers/cpufreq/qcom-cpufreq-hw.c
··· 13 13 #include <linux/of_address.h> 14 14 #include <linux/of_platform.h> 15 15 #include <linux/pm_opp.h> 16 + #include <linux/pm_qos.h> 16 17 #include <linux/slab.h> 17 18 #include <linux/spinlock.h> 18 19 #include <linux/units.h> ··· 57 56 struct cpufreq_policy *policy; 58 57 59 58 bool per_core_dcvs; 59 + 60 + struct freq_qos_request throttle_freq_req; 60 61 }; 61 62 62 63 static unsigned long cpu_hw_rate, xo_rate; ··· 319 316 if (IS_ERR(opp)) { 320 317 dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp); 321 318 } else { 322 - throttled_freq = freq_hz / HZ_PER_KHZ; 323 - 324 - /* Update thermal pressure (the boost frequencies are accepted) */ 325 - arch_update_thermal_pressure(policy->related_cpus, throttled_freq); 326 - 327 319 dev_pm_opp_put(opp); 328 320 } 321 + 322 + throttled_freq = freq_hz / HZ_PER_KHZ; 323 + 324 + freq_qos_update_request(&data->throttle_freq_req, throttled_freq); 325 + 326 + /* Update thermal pressure (the boost frequencies are accepted) */ 327 + arch_update_thermal_pressure(policy->related_cpus, throttled_freq); 329 328 330 329 /* 331 330 * In the unlikely case policy is unregistered do not enable ··· 418 413 if (data->throttle_irq < 0) 419 414 return data->throttle_irq; 420 415 416 + ret = freq_qos_add_request(&policy->constraints, 417 + &data->throttle_freq_req, FREQ_QOS_MAX, 418 + FREQ_QOS_MAX_DEFAULT_VALUE); 419 + if (ret < 0) { 420 + dev_err(&pdev->dev, "Failed to add freq constraint (%d)\n", ret); 421 + return ret; 422 + } 423 + 421 424 data->cancel_throttle = false; 422 425 data->policy = policy; 423 426 ··· 492 479 if (data->throttle_irq <= 0) 493 480 return; 494 481 482 + freq_qos_remove_request(&data->throttle_freq_req); 495 483 free_irq(data->throttle_irq, data); 496 484 } 497 485
+1 -1
drivers/cpufreq/sti-cpufreq.c
··· 252 252 return 0; 253 253 } 254 254 255 - static int sti_cpufreq_init(void) 255 + static int __init sti_cpufreq_init(void) 256 256 { 257 257 int ret; 258 258
+16 -19
drivers/cpufreq/tegra194-cpufreq.c
··· 38 38 /* cpufreq transisition latency */ 39 39 #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */ 40 40 41 - enum cluster { 42 - CLUSTER0, 43 - CLUSTER1, 44 - CLUSTER2, 45 - CLUSTER3, 46 - MAX_CLUSTERS, 47 - }; 48 - 49 41 struct tegra_cpu_ctr { 50 42 u32 cpu; 51 43 u32 coreclk_cnt, last_coreclk_cnt; ··· 59 67 struct tegra_cpufreq_soc { 60 68 struct tegra_cpufreq_ops *ops; 61 69 int maxcpus_per_cluster; 70 + unsigned int num_clusters; 62 71 phys_addr_t actmon_cntr_base; 63 72 }; 64 73 65 74 struct tegra194_cpufreq_data { 66 75 void __iomem *regs; 67 - size_t num_clusters; 68 76 struct cpufreq_frequency_table **tables; 69 77 const struct tegra_cpufreq_soc *soc; 70 78 }; ··· 158 166 .ops = &tegra234_cpufreq_ops, 159 167 .actmon_cntr_base = 0x9000, 160 168 .maxcpus_per_cluster = 4, 169 + .num_clusters = 3, 170 + }; 171 + 172 + static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = { 173 + .ops = &tegra234_cpufreq_ops, 174 + .actmon_cntr_base = 0x4000, 175 + .maxcpus_per_cluster = 8, 176 + .num_clusters = 1, 161 177 }; 162 178 163 179 static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid) ··· 314 314 315 315 static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv) 316 316 { 317 - int ret; 318 - 319 - ret = smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true); 320 - 321 - return ret; 317 + return smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true); 322 318 } 323 319 324 320 static void tegra194_set_cpu_ndiv_sysreg(void *data) ··· 378 382 379 383 data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid); 380 384 381 - if (clusterid >= data->num_clusters || !data->tables[clusterid]) 385 + if (clusterid >= data->soc->num_clusters || !data->tables[clusterid]) 382 386 return -EINVAL; 383 387 384 388 start_cpu = rounddown(policy->cpu, maxcpus_per_cluster); ··· 429 433 static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = { 430 434 .ops = 
&tegra194_cpufreq_ops, 431 435 .maxcpus_per_cluster = 2, 436 + .num_clusters = 4, 432 437 }; 433 438 434 439 static void tegra194_cpufreq_free_resources(void) ··· 522 525 523 526 soc = of_device_get_match_data(&pdev->dev); 524 527 525 - if (soc->ops && soc->maxcpus_per_cluster) { 528 + if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) { 526 529 data->soc = soc; 527 530 } else { 528 531 dev_err(&pdev->dev, "soc data missing\n"); 529 532 return -EINVAL; 530 533 } 531 534 532 - data->num_clusters = MAX_CLUSTERS; 533 - data->tables = devm_kcalloc(&pdev->dev, data->num_clusters, 535 + data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters, 534 536 sizeof(*data->tables), GFP_KERNEL); 535 537 if (!data->tables) 536 538 return -ENOMEM; ··· 554 558 goto put_bpmp; 555 559 } 556 560 557 - for (i = 0; i < data->num_clusters; i++) { 561 + for (i = 0; i < data->soc->num_clusters; i++) { 558 562 data->tables[i] = init_freq_table(pdev, bpmp, i); 559 563 if (IS_ERR(data->tables[i])) { 560 564 err = PTR_ERR(data->tables[i]); ··· 586 590 static const struct of_device_id tegra194_cpufreq_of_match[] = { 587 591 { .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc }, 588 592 { .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc }, 593 + { .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc }, 589 594 { /* sentinel */ } 590 595 }; 591 596
+1 -1
drivers/cpufreq/ti-cpufreq.c
··· 398 398 return ret; 399 399 } 400 400 401 - static int ti_cpufreq_init(void) 401 + static int __init ti_cpufreq_init(void) 402 402 { 403 403 const struct of_device_id *match; 404 404
+1 -1
drivers/cpuidle/coupled.c
··· 54 54 * variable is not locked. It is only written from the cpu that 55 55 * it stores (or by the on/offlining cpu if that cpu is offline), 56 56 * and only read after all the cpus are ready for the coupled idle 57 - * state are are no longer updating it. 57 + * state are no longer updating it. 58 58 * 59 59 * Three atomic counters are used. alive_count tracks the number 60 60 * of cpus in the coupled set that are currently or soon will be
+2 -2
drivers/cpuidle/cpuidle-powernv.c
··· 233 233 unsigned int exit_latency, 234 234 u64 psscr_val, u64 psscr_mask) 235 235 { 236 - strlcpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN); 237 - strlcpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN); 236 + strscpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN); 237 + strscpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN); 238 238 powernv_states[index].flags = flags; 239 239 powernv_states[index].target_residency = target_residency; 240 240 powernv_states[index].exit_latency = exit_latency;
+5 -6
drivers/cpuidle/governor.c
··· 63 63 64 64 cpuidle_curr_governor = gov; 65 65 66 - if (gov) { 67 - list_for_each_entry(dev, &cpuidle_detected_devices, device_list) 68 - cpuidle_enable_device(dev); 69 - cpuidle_install_idle_handler(); 70 - printk(KERN_INFO "cpuidle: using governor %s\n", gov->name); 71 - } 66 + list_for_each_entry(dev, &cpuidle_detected_devices, device_list) 67 + cpuidle_enable_device(dev); 68 + 69 + cpuidle_install_idle_handler(); 70 + pr_info("cpuidle: using governor %s\n", gov->name); 72 71 73 72 return 0; 74 73 }
+3 -4
drivers/devfreq/event/rockchip-dfi.c
··· 189 189 return PTR_ERR(data->regs); 190 190 191 191 data->clk = devm_clk_get(dev, "pclk_ddr_mon"); 192 - if (IS_ERR(data->clk)) { 193 - dev_err(dev, "Cannot get the clk dmc_clk\n"); 194 - return PTR_ERR(data->clk); 195 - } 192 + if (IS_ERR(data->clk)) 193 + return dev_err_probe(dev, PTR_ERR(data->clk), 194 + "Cannot get the clk pclk_ddr_mon\n"); 196 195 197 196 /* try to find the optional reference to the pmu syscon */ 198 197 node = of_parse_phandle(np, "rockchip,pmu", 0);
+6 -2
drivers/devfreq/mtk-cci-devfreq.c
··· 291 291 } 292 292 293 293 drv->sram_reg = devm_regulator_get_optional(dev, "sram"); 294 - if (IS_ERR(drv->sram_reg)) 294 + if (IS_ERR(drv->sram_reg)) { 295 + ret = PTR_ERR(drv->sram_reg); 296 + if (ret == -EPROBE_DEFER) 297 + goto out_free_resources; 298 + 295 299 drv->sram_reg = NULL; 296 - else { 300 + } else { 297 301 ret = regulator_enable(drv->sram_reg); 298 302 if (ret) { 299 303 dev_err(dev, "failed to enable sram regulator\n");
+52 -1
drivers/idle/intel_idle.c
··· 928 928 .enter = NULL } 929 929 }; 930 930 931 + static struct cpuidle_state adl_n_cstates[] __initdata = { 932 + { 933 + .name = "C1", 934 + .desc = "MWAIT 0x00", 935 + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE, 936 + .exit_latency = 1, 937 + .target_residency = 1, 938 + .enter = &intel_idle, 939 + .enter_s2idle = intel_idle_s2idle, }, 940 + { 941 + .name = "C1E", 942 + .desc = "MWAIT 0x01", 943 + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, 944 + .exit_latency = 2, 945 + .target_residency = 4, 946 + .enter = &intel_idle, 947 + .enter_s2idle = intel_idle_s2idle, }, 948 + { 949 + .name = "C6", 950 + .desc = "MWAIT 0x20", 951 + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, 952 + .exit_latency = 195, 953 + .target_residency = 585, 954 + .enter = &intel_idle, 955 + .enter_s2idle = intel_idle_s2idle, }, 956 + { 957 + .name = "C8", 958 + .desc = "MWAIT 0x40", 959 + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, 960 + .exit_latency = 260, 961 + .target_residency = 1040, 962 + .enter = &intel_idle, 963 + .enter_s2idle = intel_idle_s2idle, }, 964 + { 965 + .name = "C10", 966 + .desc = "MWAIT 0x60", 967 + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, 968 + .exit_latency = 660, 969 + .target_residency = 1980, 970 + .enter = &intel_idle, 971 + .enter_s2idle = intel_idle_s2idle, }, 972 + { 973 + .enter = NULL } 974 + }; 975 + 931 976 static struct cpuidle_state spr_cstates[] __initdata = { 932 977 { 933 978 .name = "C1", ··· 1354 1309 .state_table = adl_l_cstates, 1355 1310 }; 1356 1311 1312 + static const struct idle_cpu idle_cpu_adl_n __initconst = { 1313 + .state_table = adl_n_cstates, 1314 + }; 1315 + 1357 1316 static const struct idle_cpu idle_cpu_spr __initconst = { 1358 1317 .state_table = spr_cstates, 1359 1318 .disable_promotion_to_c1e = true, ··· 1428 1379 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx), 1429 1380 X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl), 1430 1381 X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 
&idle_cpu_adl_l), 1382 + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &idle_cpu_adl_n), 1431 1383 X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), 1432 1384 X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), 1433 1385 X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), ··· 1557 1507 state = &drv->states[drv->state_count++]; 1558 1508 1559 1509 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate); 1560 - strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 1510 + strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 1561 1511 state->exit_latency = cx->latency; 1562 1512 /* 1563 1513 * For C1-type C-states use the same number for both the exit ··· 1866 1816 break; 1867 1817 case INTEL_FAM6_ALDERLAKE: 1868 1818 case INTEL_FAM6_ALDERLAKE_L: 1819 + case INTEL_FAM6_ALDERLAKE_N: 1869 1820 adl_idle_state_table_update(); 1870 1821 break; 1871 1822 }
+4 -1
drivers/powercap/intel_rapl_common.c
··· 994 994 y = value & 0x1f; 995 995 value = (1 << y) * (4 + f) * rp->time_unit / 4; 996 996 } else { 997 + if (value < rp->time_unit) 998 + return 0; 999 + 997 1000 do_div(value, rp->time_unit); 998 1001 y = ilog2(value); 999 1002 f = div64_u64(4 * (value - (1 << y)), 1 << y); ··· 1038 1035 .check_unit = rapl_check_unit_core, 1039 1036 .set_floor_freq = set_floor_freq_default, 1040 1037 .compute_time_window = rapl_compute_time_window_core, 1041 - .dram_domain_energy_unit = 15300, 1042 1038 .psys_domain_energy_unit = 1000000000, 1043 1039 .spr_psys_bits = true, 1044 1040 }; ··· 1112 1110 X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &rapl_defaults_core), 1113 1111 X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core), 1114 1112 X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core), 1113 + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core), 1115 1114 X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server), 1116 1115 X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core), 1117 1116
+1 -1
include/linux/suspend.h
··· 75 75 76 76 static inline void dpm_save_failed_dev(const char *name) 77 77 { 78 - strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev], 78 + strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev], 79 79 name, 80 80 sizeof(suspend_stats.failed_devs[0])); 81 81 suspend_stats.last_failed_dev++;
+15 -15
kernel/sched/cpufreq_schedutil.c
··· 25 25 unsigned int next_freq; 26 26 unsigned int cached_raw_freq; 27 27 28 + /* max CPU capacity, which is equal for all CPUs in freq. domain */ 29 + unsigned long max; 30 + 28 31 /* The next fields are only needed if fast switch cannot be used: */ 29 32 struct irq_work irq_work; 30 33 struct kthread_work work; ··· 51 48 52 49 unsigned long util; 53 50 unsigned long bw_dl; 54 - unsigned long max; 55 51 56 52 /* The field below is for single-CPU policies only: */ 57 53 #ifdef CONFIG_NO_HZ_COMMON ··· 160 158 { 161 159 struct rq *rq = cpu_rq(sg_cpu->cpu); 162 160 163 - sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu); 164 161 sg_cpu->bw_dl = cpu_bw_dl(rq); 165 162 sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu), 166 163 FREQUENCY_UTIL, NULL); ··· 254 253 */ 255 254 static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time) 256 255 { 256 + struct sugov_policy *sg_policy = sg_cpu->sg_policy; 257 257 unsigned long boost; 258 258 259 259 /* No boost currently required */ ··· 282 280 * sg_cpu->util is already in capacity scale; convert iowait_boost 283 281 * into the same scale so we can compare. 284 282 */ 285 - boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT; 283 + boost = sg_cpu->iowait_boost * sg_policy->max; 284 + boost >>= SCHED_CAPACITY_SHIFT; 286 285 boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL); 287 286 if (sg_cpu->util < boost) 288 287 sg_cpu->util = boost; ··· 340 337 if (!sugov_update_single_common(sg_cpu, time, flags)) 341 338 return; 342 339 343 - next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max); 340 + next_f = get_next_freq(sg_policy, sg_cpu->util, sg_policy->max); 344 341 /* 345 342 * Do not reduce the frequency if the CPU has not been idle 346 343 * recently, as the reduction is likely to be premature then. 
··· 376 373 unsigned int flags) 377 374 { 378 375 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); 376 + struct sugov_policy *sg_policy = sg_cpu->sg_policy; 379 377 unsigned long prev_util = sg_cpu->util; 380 378 381 379 /* ··· 403 399 sg_cpu->util = prev_util; 404 400 405 401 cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl), 406 - map_util_perf(sg_cpu->util), sg_cpu->max); 402 + map_util_perf(sg_cpu->util), 403 + sg_policy->max); 407 404 408 405 sg_cpu->sg_policy->last_freq_update_time = time; 409 406 } ··· 413 408 { 414 409 struct sugov_policy *sg_policy = sg_cpu->sg_policy; 415 410 struct cpufreq_policy *policy = sg_policy->policy; 416 - unsigned long util = 0, max = 1; 411 + unsigned long util = 0; 417 412 unsigned int j; 418 413 419 414 for_each_cpu(j, policy->cpus) { 420 415 struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j); 421 - unsigned long j_util, j_max; 422 416 423 417 sugov_get_util(j_sg_cpu); 424 418 sugov_iowait_apply(j_sg_cpu, time); 425 - j_util = j_sg_cpu->util; 426 - j_max = j_sg_cpu->max; 427 419 428 - if (j_util * max > j_max * util) { 429 - util = j_util; 430 - max = j_max; 431 - } 420 + util = max(j_sg_cpu->util, util); 432 421 } 433 422 434 - return get_next_freq(sg_policy, util, max); 423 + return get_next_freq(sg_policy, util, sg_policy->max); 435 424 } 436 425 437 426 static void ··· 751 752 { 752 753 struct sugov_policy *sg_policy = policy->governor_data; 753 754 void (*uu)(struct update_util_data *data, u64 time, unsigned int flags); 754 - unsigned int cpu; 755 + unsigned int cpu = cpumask_first(policy->cpus); 755 756 756 757 sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC; 757 758 sg_policy->last_freq_update_time = 0; ··· 759 760 sg_policy->work_in_progress = false; 760 761 sg_policy->limits_changed = false; 761 762 sg_policy->cached_raw_freq = 0; 763 + sg_policy->max = arch_scale_cpu_capacity(cpu); 762 764 763 765 sg_policy->need_freq_update = 
cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS); 764 766