Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'pm-sleep', 'pm-cpuidle' and 'pm-em'

Merge updates related to system sleep, a cpuidle update and an Energy
Model handling code update for 6.14-rc1:

- Allow configuring the system suspend-resume (DPM) watchdog to warn
earlier than panic (Douglas Anderson).

- Implement devm_device_init_wakeup() helper and introduce a device-
managed variant of dev_pm_set_wake_irq() (Joe Hattori, Peng Fan).

- Remove direct inclusions of 'pm_wakeup.h' which should be only
included via 'device.h' (Wolfram Sang).

- Clean up two comments in the core system-wide PM code (Rafael
Wysocki, Randy Dunlap).

- Add Clearwater Forest processor support to the intel_idle cpuidle
driver (Artem Bityutskiy).

- Move sched domains rebuild function from the schedutil cpufreq
governor to the Energy Model handling code (Rafael Wysocki).

* pm-sleep:
PM: sleep: wakeirq: Introduce device-managed variant of dev_pm_set_wake_irq()
PM: sleep: Allow configuring the DPM watchdog to warn earlier than panic
PM: sleep: convert comment from kernel-doc to plain comment
PM: wakeup: implement devm_device_init_wakeup() helper
PM: sleep: sysfs: don't include 'pm_wakeup.h' directly
PM: sleep: autosleep: don't include 'pm_wakeup.h' directly
PM: sleep: Update stale comment in device_resume()

* pm-cpuidle:
intel_idle: add Clearwater Forest SoC support

* pm-em:
PM: EM: Move sched domains rebuild function from schedutil to EM

+117 -38
+20 -6
drivers/base/power/main.c
··· 496 496 struct device *dev; 497 497 struct task_struct *tsk; 498 498 struct timer_list timer; 499 + bool fatal; 499 500 }; 500 501 501 502 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ ··· 513 512 static void dpm_watchdog_handler(struct timer_list *t) 514 513 { 515 514 struct dpm_watchdog *wd = from_timer(wd, t, timer); 515 + struct timer_list *timer = &wd->timer; 516 + unsigned int time_left; 516 517 517 - dev_emerg(wd->dev, "**** DPM device timeout ****\n"); 518 - show_stack(wd->tsk, NULL, KERN_EMERG); 519 - panic("%s %s: unrecoverable failure\n", 520 - dev_driver_string(wd->dev), dev_name(wd->dev)); 518 + if (wd->fatal) { 519 + dev_emerg(wd->dev, "**** DPM device timeout ****\n"); 520 + show_stack(wd->tsk, NULL, KERN_EMERG); 521 + panic("%s %s: unrecoverable failure\n", 522 + dev_driver_string(wd->dev), dev_name(wd->dev)); 523 + } 524 + 525 + time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT; 526 + dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n", 527 + CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left); 528 + show_stack(wd->tsk, NULL, KERN_WARNING); 529 + 530 + wd->fatal = true; 531 + mod_timer(timer, jiffies + HZ * time_left); 521 532 } 522 533 523 534 /** ··· 543 530 544 531 wd->dev = dev; 545 532 wd->tsk = current; 533 + wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT; 546 534 547 535 timer_setup_on_stack(timer, dpm_watchdog_handler, 0); 548 536 /* use same timeout value for both suspend and resume */ 549 - timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; 537 + timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT; 550 538 add_timer(timer); 551 539 } 552 540 ··· 928 914 goto Complete; 929 915 930 916 if (dev->power.direct_complete) { 931 - /* Match the pm_runtime_disable() in __device_suspend(). */ 917 + /* Match the pm_runtime_disable() in device_suspend(). */ 932 918 pm_runtime_enable(dev); 933 919 goto Complete; 934 920 }
-1
drivers/base/power/sysfs.c
··· 6 6 #include <linux/export.h> 7 7 #include <linux/pm_qos.h> 8 8 #include <linux/pm_runtime.h> 9 - #include <linux/pm_wakeup.h> 10 9 #include <linux/atomic.h> 11 10 #include <linux/jiffies.h> 12 11 #include "power.h"
+26
drivers/base/power/wakeirq.c
··· 103 103 } 104 104 EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq); 105 105 106 + static void devm_pm_clear_wake_irq(void *dev) 107 + { 108 + dev_pm_clear_wake_irq(dev); 109 + } 110 + 111 + /** 112 + * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq 113 + * @dev: Device entry 114 + * @irq: Device IO interrupt 115 + * 116 + * 117 + * Attach a device IO interrupt as a wake IRQ, same as dev_pm_set_wake_irq(), 118 + * but the wake IRQ is automatically cleared on driver detach. 119 + */ 120 + int devm_pm_set_wake_irq(struct device *dev, int irq) 121 + { 122 + int ret; 123 + 124 + ret = dev_pm_set_wake_irq(dev, irq); 125 + if (ret) 126 + return ret; 127 + 128 + return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev); 129 + } 130 + EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq); 131 + 106 132 /** 107 133 * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts 108 134 * @irq: Device specific dedicated wake-up interrupt
+1 -1
drivers/cpufreq/cpufreq.c
··· 1538 1538 1539 1539 /* 1540 1540 * Register with the energy model before 1541 - * sugov_eas_rebuild_sd() is called, which will result 1541 + * em_rebuild_sched_domains() is called, which will result 1542 1542 * in rebuilding of the sched domains, which should only be done 1543 1543 * once the energy model is properly initialized for the policy 1544 1544 * first.
+1
drivers/idle/intel_idle.c
··· 1651 1651 X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr), 1652 1652 X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr), 1653 1653 X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &idle_cpu_srf), 1654 + X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &idle_cpu_srf), 1654 1655 {} 1655 1656 }; 1656 1657
+2
include/linux/energy_model.h
··· 179 179 int em_dev_update_chip_binning(struct device *dev); 180 180 int em_update_performance_limits(struct em_perf_domain *pd, 181 181 unsigned long freq_min_khz, unsigned long freq_max_khz); 182 + void em_rebuild_sched_domains(void); 182 183 183 184 /** 184 185 * em_pd_get_efficient_state() - Get an efficient performance state from the EM ··· 405 404 { 406 405 return -EINVAL; 407 406 } 407 + static inline void em_rebuild_sched_domains(void) {} 408 408 #endif 409 409 410 410 #endif
+6
include/linux/pm_wakeirq.h
··· 10 10 extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq); 11 11 extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq); 12 12 extern void dev_pm_clear_wake_irq(struct device *dev); 13 + extern int devm_pm_set_wake_irq(struct device *dev, int irq); 13 14 14 15 #else /* !CONFIG_PM */ 15 16 ··· 31 30 32 31 static inline void dev_pm_clear_wake_irq(struct device *dev) 33 32 { 33 + } 34 + 35 + static inline int devm_pm_set_wake_irq(struct device *dev, int irq) 36 + { 37 + return 0; 34 38 } 35 39 36 40 #endif /* CONFIG_PM */
+17
include/linux/pm_wakeup.h
··· 240 240 return 0; 241 241 } 242 242 243 + static void device_disable_wakeup(void *dev) 244 + { 245 + device_init_wakeup(dev, false); 246 + } 247 + 248 + /** 249 + * devm_device_init_wakeup - Resource managed device wakeup initialization. 250 + * @dev: Device to handle. 251 + * 252 + * This function is the devm managed version of device_init_wakeup(dev, true). 253 + */ 254 + static inline int devm_device_init_wakeup(struct device *dev) 255 + { 256 + device_init_wakeup(dev, true); 257 + return devm_add_action_or_reset(dev, device_disable_wakeup, dev); 258 + } 259 + 243 260 #endif /* _LINUX_PM_WAKEUP_H */
+20 -1
kernel/power/Kconfig
··· 257 257 boot session. 258 258 259 259 config DPM_WATCHDOG_TIMEOUT 260 - int "Watchdog timeout in seconds" 260 + int "Watchdog timeout to panic in seconds" 261 261 range 1 120 262 262 default 120 263 263 depends on DPM_WATCHDOG 264 + 265 + config DPM_WATCHDOG_WARNING_TIMEOUT 266 + int "Watchdog timeout to warn in seconds" 267 + range 1 DPM_WATCHDOG_TIMEOUT 268 + default DPM_WATCHDOG_TIMEOUT 269 + depends on DPM_WATCHDOG 270 + help 271 + If the DPM watchdog warning timeout and main timeout are 272 + different then a non-fatal warning (with a stack trace of 273 + the stuck suspend routine) will be printed when the warning 274 + timeout expires. If the suspend routine gets un-stuck 275 + before the main timeout expires then no other action is 276 + taken. If the routine continues to be stuck and the main 277 + timeout expires then an emergency-level message and stack 278 + trace will be printed and the system will panic. 279 + 280 + If the warning timeout is equal to the main timeout (the 281 + default) then the warning will never happen and the system 282 + will jump straight to panic when the main timeout expires. 264 283 265 284 config PM_TRACE 266 285 bool
-1
kernel/power/autosleep.c
··· 9 9 10 10 #include <linux/device.h> 11 11 #include <linux/mutex.h> 12 - #include <linux/pm_wakeup.h> 13 12 14 13 #include "power.h" 15 14
+17
kernel/power/energy_model.c
··· 908 908 return 0; 909 909 } 910 910 EXPORT_SYMBOL_GPL(em_update_performance_limits); 911 + 912 + static void rebuild_sd_workfn(struct work_struct *work) 913 + { 914 + rebuild_sched_domains_energy(); 915 + } 916 + 917 + void em_rebuild_sched_domains(void) 918 + { 919 + static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn); 920 + 921 + /* 922 + * When called from the cpufreq_register_driver() path, the 923 + * cpu_hotplug_lock is already held, so use a work item to 924 + * avoid nested locking in rebuild_sched_domains(). 925 + */ 926 + schedule_work(&rebuild_sd_work); 927 + }
+1 -1
kernel/power/power.h
··· 110 110 111 111 extern void clear_or_poison_free_pages(void); 112 112 113 - /** 113 + /* 114 114 * Auxiliary structure used for reading the snapshot image data and 115 115 * metadata from and writing them to the list of page backup entries 116 116 * (PBEs) which is the main data structure of swsusp.
+6 -27
kernel/sched/cpufreq_schedutil.c
··· 604 604 605 605 /********************** cpufreq governor interface *********************/ 606 606 607 - #ifdef CONFIG_ENERGY_MODEL 608 - static void rebuild_sd_workfn(struct work_struct *work) 609 - { 610 - rebuild_sched_domains_energy(); 611 - } 612 - 613 - static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn); 614 - 615 - /* 616 - * EAS shouldn't be attempted without sugov, so rebuild the sched_domains 617 - * on governor changes to make sure the scheduler knows about it. 618 - */ 619 - static void sugov_eas_rebuild_sd(void) 620 - { 621 - /* 622 - * When called from the cpufreq_register_driver() path, the 623 - * cpu_hotplug_lock is already held, so use a work item to 624 - * avoid nested locking in rebuild_sched_domains(). 625 - */ 626 - schedule_work(&rebuild_sd_work); 627 - } 628 - #else 629 - static inline void sugov_eas_rebuild_sd(void) { }; 630 - #endif 631 - 632 607 struct cpufreq_governor schedutil_gov; 633 608 634 609 static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy) ··· 759 784 goto fail; 760 785 761 786 out: 762 - sugov_eas_rebuild_sd(); 787 + /* 788 + * Schedutil is the preferred governor for EAS, so rebuild sched domains 789 + * on governor changes to make sure the scheduler knows about them. 790 + */ 791 + em_rebuild_sched_domains(); 763 792 mutex_unlock(&global_tunables_lock); 764 793 return 0; 765 794 ··· 805 826 sugov_policy_free(sg_policy); 806 827 cpufreq_disable_fast_switch(policy); 807 828 808 - sugov_eas_rebuild_sd(); 829 + em_rebuild_sched_domains(); 809 830 } 810 831 811 832 static int sugov_start(struct cpufreq_policy *policy)