Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge back earlier material related to system sleep for 6.19

+273 -113
+16
Documentation/ABI/testing/sysfs-power
··· 454 454 disables it. Reads from the file return the current value. 455 455 The default is "1" if the build-time "SUSPEND_SKIP_SYNC" config 456 456 flag is unset, or "0" otherwise. 457 + 458 + What: /sys/power/hibernate_compression_threads 459 + Date: October 2025 460 + Contact: <luoxueqin@kylinos.cn> 461 + Description: 462 + Controls the number of threads used for compression 463 + and decompression of hibernation images. 464 + 465 + The value can be adjusted at runtime to balance 466 + performance and CPU utilization. 467 + 468 + The change takes effect on the next hibernation or 469 + resume operation. 470 + 471 + Minimum value: 1 472 + Default value: 3
+10
Documentation/admin-guide/kernel-parameters.txt
··· 1907 1907 /sys/power/pm_test). Only available when CONFIG_PM_DEBUG 1908 1908 is set. Default value is 5. 1909 1909 1910 + hibernate_compression_threads= 1911 + [HIBERNATION] 1912 + Set the number of threads used for compressing or decompressing 1913 + hibernation images. 1914 + 1915 + Format: <integer> 1916 + Default: 3 1917 + Minimum: 1 1918 + Example: hibernate_compression_threads=4 1919 + 1910 1920 highmem=nn[KMG] [KNL,BOOT,EARLY] forces the highmem zone to have an exact 1911 1921 size of <nn>. This works even on boxes that have no 1912 1922 highmem otherwise. This also works to reduce highmem
+1
Documentation/power/index.rst
··· 19 19 power_supply_class 20 20 runtime_pm 21 21 s2ram 22 + shutdown-debugging 22 23 suspend-and-cpuhotplug 23 24 suspend-and-interrupts 24 25 swsusp-and-swap-files
+53
Documentation/power/shutdown-debugging.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 2 + 3 + Debugging Kernel Shutdown Hangs with pstore 4 + +++++++++++++++++++++++++++++++++++++++++++ 5 + 6 + Overview 7 + ======== 8 + If the system hangs while shutting down, the kernel logs may need to be 9 + retrieved to debug the issue. 10 + 11 + On systems that have a UART available, it is best to configure the kernel to use 12 + this UART for kernel console output. 13 + 14 + If a UART isn't available, the ``pstore`` subsystem provides a mechanism to 15 + persist this data across a system reset, allowing it to be retrieved on the next 16 + boot. 17 + 18 + Kernel Configuration 19 + ==================== 20 + To enable ``pstore`` and allow saving kernel ring buffer logs, set the 21 + following kernel configuration options: 22 + 23 + * ``CONFIG_PSTORE=y`` 24 + * ``CONFIG_PSTORE_CONSOLE=y`` 25 + 26 + Additionally, enable a backend to store the data. Depending upon your platform, 27 + some potential options include: 28 + 29 + * ``CONFIG_EFI_VARS_PSTORE=y`` 30 + * ``CONFIG_PSTORE_RAM=y`` 31 + * ``CONFIG_CHROMEOS_PSTORE=y`` 32 + * ``CONFIG_PSTORE_BLK=y`` 33 + 34 + Kernel Command-line Parameters 35 + ============================== 36 + Add these parameters to your kernel command line: 37 + 38 + * ``printk.always_kmsg_dump=Y`` 39 + * Forces the kernel to dump the entire message buffer to pstore during 40 + shutdown 41 + * ``efi_pstore.pstore_disable=N`` 42 + * For EFI-based systems, ensures the EFI backend is active 43 + 44 + Userspace Interaction and Log Retrieval 45 + ======================================= 46 + On the next boot after a hang, pstore logs will be available in the pstore 47 + filesystem (``/sys/fs/pstore``) and can be retrieved by userspace. 48 + 49 + On systemd systems, the ``systemd-pstore`` service will help do the following: 50 + 51 + #. Locate pstore data in ``/sys/fs/pstore`` 52 + #. Read and save it to ``/var/lib/systemd/pstore`` 53 + #. Clear pstore data for the next event
+25 -60
drivers/base/power/generic_ops.c
··· 8 8 #include <linux/pm_runtime.h> 9 9 #include <linux/export.h> 10 10 11 + #define CALL_PM_OP(dev, op) \ 12 + ({ \ 13 + struct device *_dev = (dev); \ 14 + const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL; \ 15 + pm && pm->op ? pm->op(_dev) : 0; \ 16 + }) 17 + 11 18 #ifdef CONFIG_PM 12 19 /** 13 20 * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems. ··· 26 19 */ 27 20 int pm_generic_runtime_suspend(struct device *dev) 28 21 { 29 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 30 - int ret; 31 - 32 - ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0; 33 - 34 - return ret; 22 + return CALL_PM_OP(dev, runtime_suspend); 35 23 } 36 24 EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend); 37 25 ··· 40 38 */ 41 39 int pm_generic_runtime_resume(struct device *dev) 42 40 { 43 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 44 - int ret; 45 - 46 - ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0; 47 - 48 - return ret; 41 + return CALL_PM_OP(dev, runtime_resume); 49 42 } 50 43 EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); 51 44 #endif /* CONFIG_PM */ ··· 69 72 */ 70 73 int pm_generic_suspend_noirq(struct device *dev) 71 74 { 72 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 73 - 74 - return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0; 75 + return CALL_PM_OP(dev, suspend_noirq); 75 76 } 76 77 EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq); 77 78 ··· 79 84 */ 80 85 int pm_generic_suspend_late(struct device *dev) 81 86 { 82 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 83 - 84 - return pm && pm->suspend_late ? pm->suspend_late(dev) : 0; 87 + return CALL_PM_OP(dev, suspend_late); 85 88 } 86 89 EXPORT_SYMBOL_GPL(pm_generic_suspend_late); 87 90 ··· 89 96 */ 90 97 int pm_generic_suspend(struct device *dev) 91 98 { 92 - const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; 93 - 94 - return pm && pm->suspend ? pm->suspend(dev) : 0; 99 + return CALL_PM_OP(dev, suspend); 95 100 } 96 101 EXPORT_SYMBOL_GPL(pm_generic_suspend); 97 102 ··· 99 108 */ 100 109 int pm_generic_freeze_noirq(struct device *dev) 101 110 { 102 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 103 - 104 - return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0; 111 + return CALL_PM_OP(dev, freeze_noirq); 105 112 } 106 113 EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); 107 114 ··· 109 120 */ 110 121 int pm_generic_freeze(struct device *dev) 111 122 { 112 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 113 - 114 - return pm && pm->freeze ? pm->freeze(dev) : 0; 123 + return CALL_PM_OP(dev, freeze); 115 124 } 116 125 EXPORT_SYMBOL_GPL(pm_generic_freeze); 117 126 ··· 119 132 */ 120 133 int pm_generic_poweroff_noirq(struct device *dev) 121 134 { 122 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 123 - 124 - return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0; 135 + return CALL_PM_OP(dev, poweroff_noirq); 125 136 } 126 137 EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq); 127 138 ··· 129 144 */ 130 145 int pm_generic_poweroff_late(struct device *dev) 131 146 { 132 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 133 - 134 - return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0; 147 + return CALL_PM_OP(dev, poweroff_late); 135 148 } 136 149 EXPORT_SYMBOL_GPL(pm_generic_poweroff_late); 137 150 ··· 139 156 */ 140 157 int pm_generic_poweroff(struct device *dev) 141 158 { 142 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 143 - 144 - return pm && pm->poweroff ? pm->poweroff(dev) : 0; 159 + return CALL_PM_OP(dev, poweroff); 145 160 } 146 161 EXPORT_SYMBOL_GPL(pm_generic_poweroff); 147 162 ··· 149 168 */ 150 169 int pm_generic_thaw_noirq(struct device *dev) 151 170 { 152 - const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; 153 - 154 - return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0; 171 + return CALL_PM_OP(dev, thaw_noirq); 155 172 } 156 173 EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq); 157 174 ··· 159 180 */ 160 181 int pm_generic_thaw(struct device *dev) 161 182 { 162 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 163 - 164 - return pm && pm->thaw ? pm->thaw(dev) : 0; 183 + return CALL_PM_OP(dev, thaw); 165 184 } 166 185 EXPORT_SYMBOL_GPL(pm_generic_thaw); 167 186 ··· 169 192 */ 170 193 int pm_generic_resume_noirq(struct device *dev) 171 194 { 172 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 173 - 174 - return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0; 195 + return CALL_PM_OP(dev, resume_noirq); 175 196 } 176 197 EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); 177 198 ··· 179 204 */ 180 205 int pm_generic_resume_early(struct device *dev) 181 206 { 182 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 183 - 184 - return pm && pm->resume_early ? pm->resume_early(dev) : 0; 207 + return CALL_PM_OP(dev, resume_early); 185 208 } 186 209 EXPORT_SYMBOL_GPL(pm_generic_resume_early); 187 210 ··· 189 216 */ 190 217 int pm_generic_resume(struct device *dev) 191 218 { 192 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 193 - 194 - return pm && pm->resume ? pm->resume(dev) : 0; 219 + return CALL_PM_OP(dev, resume); 195 220 } 196 221 EXPORT_SYMBOL_GPL(pm_generic_resume); 197 222 ··· 199 228 */ 200 229 int pm_generic_restore_noirq(struct device *dev) 201 230 { 202 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 203 - 204 - return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0; 231 + return CALL_PM_OP(dev, restore_noirq); 205 232 } 206 233 EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); 207 234 ··· 209 240 */ 210 241 int pm_generic_restore_early(struct device *dev) 211 242 { 212 - const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; 213 - 214 - return pm && pm->restore_early ? pm->restore_early(dev) : 0; 243 + return CALL_PM_OP(dev, restore_early); 215 244 } 216 245 EXPORT_SYMBOL_GPL(pm_generic_restore_early); 217 246 ··· 219 252 */ 220 253 int pm_generic_restore(struct device *dev) 221 254 { 222 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 223 - 224 - return pm && pm->restore ? pm->restore(dev) : 0; 255 + return CALL_PM_OP(dev, restore); 225 256 } 226 257 EXPORT_SYMBOL_GPL(pm_generic_restore); 227 258
+15
drivers/base/power/main.c
··· 34 34 #include <linux/cpufreq.h> 35 35 #include <linux/devfreq.h> 36 36 #include <linux/timer.h> 37 + #include <linux/nmi.h> 37 38 38 39 #include "../base.h" 39 40 #include "power.h" ··· 96 95 return "restore"; 97 96 case PM_EVENT_RECOVER: 98 97 return "recover"; 98 + case PM_EVENT_POWEROFF: 99 + return "poweroff"; 99 100 default: 100 101 return "(unknown PM event)"; 101 102 } ··· 370 367 case PM_EVENT_FREEZE: 371 368 case PM_EVENT_QUIESCE: 372 369 return ops->freeze; 370 + case PM_EVENT_POWEROFF: 373 371 case PM_EVENT_HIBERNATE: 374 372 return ops->poweroff; 375 373 case PM_EVENT_THAW: ··· 405 401 case PM_EVENT_FREEZE: 406 402 case PM_EVENT_QUIESCE: 407 403 return ops->freeze_late; 404 + case PM_EVENT_POWEROFF: 408 405 case PM_EVENT_HIBERNATE: 409 406 return ops->poweroff_late; 410 407 case PM_EVENT_THAW: ··· 440 435 case PM_EVENT_FREEZE: 441 436 case PM_EVENT_QUIESCE: 442 437 return ops->freeze_noirq; 438 + case PM_EVENT_POWEROFF: 443 439 case PM_EVENT_HIBERNATE: 444 440 return ops->poweroff_noirq; 445 441 case PM_EVENT_THAW: ··· 521 515 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ 522 516 struct dpm_watchdog wd 523 517 518 + static bool __read_mostly dpm_watchdog_all_cpu_backtrace; 519 + module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644); 520 + MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace, 521 + "Backtrace all CPUs on DPM watchdog timeout"); 522 + 524 523 /** 525 524 * dpm_watchdog_handler - Driver suspend / resume watchdog handler. 526 525 * @t: The timer that PM watchdog depends on. ··· 541 530 unsigned int time_left; 542 531 543 532 if (wd->fatal) { 533 + unsigned int this_cpu = smp_processor_id(); 534 + 544 535 dev_emerg(wd->dev, "**** DPM device timeout ****\n"); 545 536 show_stack(wd->tsk, NULL, KERN_EMERG); 537 + if (dpm_watchdog_all_cpu_backtrace) 538 + trigger_allbutcpu_cpu_backtrace(this_cpu); 546 539 panic("%s %s: unrecoverable failure\n", 547 540 dev_driver_string(wd->dev), dev_name(wd->dev)); 548 541 }
+1 -3
drivers/base/power/trace.c
··· 238 238 unsigned int hash = hash_string(DEVSEED, dev_name(dev), 239 239 DEVHASH); 240 240 if (hash == value) { 241 - int len = snprintf(buf, size, "%s\n", 241 + int len = scnprintf(buf, size, "%s\n", 242 242 dev_driver_string(dev)); 243 - if (len > size) 244 - len = size; 245 243 buf += len; 246 244 ret += len; 247 245 size -= len;
+11 -13
drivers/base/power/wakeup.c
··· 189 189 if (WARN_ON(!ws)) 190 190 return; 191 191 192 + /* 193 + * After shutting down the timer, wakeup_source_activate() will warn if 194 + * the given wakeup source is passed to it. 195 + */ 196 + timer_shutdown_sync(&ws->timer); 197 + 192 198 raw_spin_lock_irqsave(&events_lock, flags); 193 199 list_del_rcu(&ws->entry); 194 200 raw_spin_unlock_irqrestore(&events_lock, flags); 195 201 synchronize_srcu(&wakeup_srcu); 196 - 197 - timer_delete_sync(&ws->timer); 198 - /* 199 - * Clear timer.function to make wakeup_source_not_registered() treat 200 - * this wakeup source as not registered. 201 - */ 202 - ws->timer.function = NULL; 203 202 } 204 203 205 204 /** ··· 505 506 EXPORT_SYMBOL_GPL(device_set_wakeup_enable); 506 507 507 508 /** 508 - * wakeup_source_not_registered - validate the given wakeup source. 509 + * wakeup_source_not_usable - validate the given wakeup source. 509 510 * @ws: Wakeup source to be validated. 510 511 */ 511 - static bool wakeup_source_not_registered(struct wakeup_source *ws) 512 + static bool wakeup_source_not_usable(struct wakeup_source *ws) 512 513 { 513 514 /* 514 - * Use timer struct to check if the given source is initialized 515 - * by wakeup_source_add. 515 + * Use the timer struct to check if the given wakeup source has been 516 + * initialized by wakeup_source_add() and it is not going away. 516 517 */ 517 518 return ws->timer.function != pm_wakeup_timer_fn; 518 519 } ··· 557 558 { 558 559 unsigned int cec; 559 560 560 - if (WARN_ONCE(wakeup_source_not_registered(ws), 561 - "unregistered wakeup source\n")) 561 + if (WARN_ONCE(wakeup_source_not_usable(ws), "unusable wakeup source\n")) 562 562 return; 563 563 564 564 ws->active = true;
+1
drivers/scsi/mesh.c
··· 1762 1762 case PM_EVENT_SUSPEND: 1763 1763 case PM_EVENT_HIBERNATE: 1764 1764 case PM_EVENT_FREEZE: 1765 + case PM_EVENT_POWEROFF: 1765 1766 break; 1766 1767 default: 1767 1768 return 0;
+1
drivers/scsi/stex.c
··· 1965 1965 case PM_EVENT_SUSPEND: 1966 1966 return ST_S3; 1967 1967 case PM_EVENT_HIBERNATE: 1968 + case PM_EVENT_POWEROFF: 1968 1969 hba->msi_lock = 0; 1969 1970 return ST_S4; 1970 1971 default:
+1
drivers/usb/host/sl811-hcd.c
··· 1748 1748 break; 1749 1749 case PM_EVENT_SUSPEND: 1750 1750 case PM_EVENT_HIBERNATE: 1751 + case PM_EVENT_POWEROFF: 1751 1752 case PM_EVENT_PRETHAW: /* explicitly discard hw state */ 1752 1753 port_power(sl811, 0); 1753 1754 break;
+8 -4
include/linux/freezer.h
··· 22 22 extern unsigned int freeze_timeout_msecs; 23 23 24 24 /* 25 - * Check if a process has been frozen 25 + * Check if a process has been frozen for PM or cgroup1 freezer. Note that 26 + * cgroup2 freezer uses the job control mechanism and does not interact with 27 + * the PM freezer. 26 28 */ 27 29 extern bool frozen(struct task_struct *p); 28 30 29 31 extern bool freezing_slow_path(struct task_struct *p); 30 32 31 33 /* 32 - * Check if there is a request to freeze a process 34 + * Check if there is a request to freeze a task from PM or cgroup1 freezer. 35 + * Note that cgroup2 freezer uses the job control mechanism and does not 36 + * interact with the PM freezer. 33 37 */ 34 38 static inline bool freezing(struct task_struct *p) 35 39 { ··· 67 63 extern bool set_freezable(void); 68 64 69 65 #ifdef CONFIG_CGROUP_FREEZER 70 - extern bool cgroup_freezing(struct task_struct *task); 66 + extern bool cgroup1_freezing(struct task_struct *task); 71 67 #else /* !CONFIG_CGROUP_FREEZER */ 72 - static inline bool cgroup_freezing(struct task_struct *task) 68 + static inline bool cgroup1_freezing(struct task_struct *task) 73 69 { 74 70 return false; 75 71 }
+6 -2
include/linux/pm.h
··· 25 25 26 26 struct device; /* we have a circular dep with device.h */ 27 27 #ifdef CONFIG_VT_CONSOLE_SLEEP 28 - extern void pm_vt_switch_required(struct device *dev, bool required); 28 + extern int pm_vt_switch_required(struct device *dev, bool required); 29 29 extern void pm_vt_switch_unregister(struct device *dev); 30 30 #else 31 - static inline void pm_vt_switch_required(struct device *dev, bool required) 31 + static inline int pm_vt_switch_required(struct device *dev, bool required) 32 32 { 33 + return 0; 33 34 } 34 35 static inline void pm_vt_switch_unregister(struct device *dev) 35 36 { ··· 508 507 * RECOVER Creation of a hibernation image or restoration of the main 509 508 * memory contents from a hibernation image has failed, call 510 509 * ->thaw() and ->complete() for all devices. 510 + * POWEROFF System will poweroff, call ->poweroff() for all devices. 511 511 * 512 512 * The following PM_EVENT_ messages are defined for internal use by 513 513 * kernel subsystems. They are never issued by the PM core. ··· 539 537 #define PM_EVENT_USER 0x0100 540 538 #define PM_EVENT_REMOTE 0x0200 541 539 #define PM_EVENT_AUTO 0x0400 540 + #define PM_EVENT_POWEROFF 0x0800 542 541 543 542 #define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) 544 543 #define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND) ··· 554 551 #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) 555 552 #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) 556 553 #define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) 554 + #define PMSG_POWEROFF ((struct pm_message){ .event = PM_EVENT_POWEROFF, }) 557 555 #define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) 558 556 #define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) 559 557 #define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
+2 -1
include/trace/events/power.h
··· 179 179 { PM_EVENT_HIBERNATE, "hibernate" }, \ 180 180 { PM_EVENT_THAW, "thaw" }, \ 181 181 { PM_EVENT_RESTORE, "restore" }, \ 182 - { PM_EVENT_RECOVER, "recover" }) 182 + { PM_EVENT_RECOVER, "recover" }, \ 183 + { PM_EVENT_POWEROFF, "poweroff" }) 183 184 184 185 DEFINE_EVENT(cpu, cpu_frequency, 185 186
+1 -1
kernel/cgroup/legacy_freezer.c
··· 63 63 return css_freezer(freezer->css.parent); 64 64 } 65 65 66 - bool cgroup_freezing(struct task_struct *task) 66 + bool cgroup1_freezing(struct task_struct *task) 67 67 { 68 68 bool ret; 69 69
+1 -1
kernel/freezer.c
··· 44 44 if (tsk_is_oom_victim(p)) 45 45 return false; 46 46 47 - if (pm_nosig_freezing || cgroup_freezing(p)) 47 + if (pm_nosig_freezing || cgroup1_freezing(p)) 48 48 return true; 49 49 50 50 if (pm_freezing && !(p->flags & PF_KTHREAD))
+6 -2
kernel/power/console.c
··· 44 44 * no_console_suspend argument has been passed on the command line, VT 45 45 * switches will occur. 46 46 */ 47 - void pm_vt_switch_required(struct device *dev, bool required) 47 + int pm_vt_switch_required(struct device *dev, bool required) 48 48 { 49 49 struct pm_vt_switch *entry, *tmp; 50 + int ret = 0; 50 51 51 52 mutex_lock(&vt_switch_mutex); 52 53 list_for_each_entry(tmp, &pm_vt_switch_list, head) { ··· 59 58 } 60 59 61 60 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 62 - if (!entry) 61 + if (!entry) { 62 + ret = -ENOMEM; 63 63 goto out; 64 + } 64 65 65 66 entry->required = required; 66 67 entry->dev = dev; ··· 70 67 list_add(&entry->head, &pm_vt_switch_list); 71 68 out: 72 69 mutex_unlock(&vt_switch_mutex); 70 + return ret; 73 71 } 74 72 EXPORT_SYMBOL(pm_vt_switch_required); 75 73
+6 -7
kernel/power/snapshot.c
··· 2110 2110 { 2111 2111 unsigned int nr_pages, nr_highmem; 2112 2112 2113 - pr_info("Creating image:\n"); 2113 + pm_deferred_pr_dbg("Creating image\n"); 2114 2114 2115 2115 drain_local_pages(NULL); 2116 2116 nr_pages = count_data_pages(); 2117 2117 nr_highmem = count_highmem_pages(); 2118 - pr_info("Need to copy %u pages\n", nr_pages + nr_highmem); 2118 + pm_deferred_pr_dbg("Need to copy %u pages\n", nr_pages + nr_highmem); 2119 2119 2120 2120 if (!enough_free_mem(nr_pages, nr_highmem)) { 2121 - pr_err("Not enough free memory\n"); 2121 + pm_deferred_pr_dbg("Not enough free memory for image creation\n"); 2122 2122 return -ENOMEM; 2123 2123 } 2124 2124 2125 - if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) { 2126 - pr_err("Memory allocation failed\n"); 2125 + if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) 2127 2126 return -ENOMEM; 2128 - } 2129 2127 2130 2128 /* 2131 2129 * During allocating of suspend pagedir, new cold pages may appear. ··· 2142 2144 nr_zero_pages = nr_pages - nr_copy_pages; 2143 2145 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE); 2144 2146 2145 - pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages); 2147 + pm_deferred_pr_dbg("Image created (%d pages copied, %d zero pages)\n", 2148 + nr_copy_pages, nr_zero_pages); 2146 2149 2147 2150 return 0; 2148 2151 }
+5 -1
kernel/power/suspend.c
··· 344 344 static int suspend_test(int level) 345 345 { 346 346 #ifdef CONFIG_PM_DEBUG 347 + int i; 348 + 347 349 if (pm_test_level == level) { 348 350 pr_info("suspend debug: Waiting for %d second(s).\n", 349 351 pm_test_delay); 350 - mdelay(pm_test_delay * 1000); 352 + for (i = 0; i < pm_test_delay && !pm_wakeup_pending(); i++) 353 + msleep(1000); 354 + 351 355 return 1; 352 356 } 353 357 #endif /* !CONFIG_PM_DEBUG */
+103 -18
kernel/power/swap.c
··· 519 519 CMP_HEADER, PAGE_SIZE) 520 520 #define CMP_SIZE (CMP_PAGES * PAGE_SIZE) 521 521 522 - /* Maximum number of threads for compression/decompression. */ 523 - #define CMP_THREADS 3 522 + /* Default number of threads for compression/decompression. */ 523 + #define CMP_THREADS 3 524 + static unsigned int hibernate_compression_threads = CMP_THREADS; 524 525 525 526 /* Minimum/maximum number of pages for read buffering. */ 526 527 #define CMP_MIN_RD_PAGES 1024 ··· 586 585 wait_queue_head_t go; /* start crc update */ 587 586 wait_queue_head_t done; /* crc update done */ 588 587 u32 *crc32; /* points to handle's crc32 */ 589 - size_t *unc_len[CMP_THREADS]; /* uncompressed lengths */ 590 - unsigned char *unc[CMP_THREADS]; /* uncompressed data */ 588 + size_t **unc_len; /* uncompressed lengths */ 589 + unsigned char **unc; /* uncompressed data */ 591 590 }; 591 + 592 + static struct crc_data *alloc_crc_data(int nr_threads) 593 + { 594 + struct crc_data *crc; 595 + 596 + crc = kzalloc(sizeof(*crc), GFP_KERNEL); 597 + if (!crc) 598 + return NULL; 599 + 600 + crc->unc = kcalloc(nr_threads, sizeof(*crc->unc), GFP_KERNEL); 601 + if (!crc->unc) 602 + goto err_free_crc; 603 + 604 + crc->unc_len = kcalloc(nr_threads, sizeof(*crc->unc_len), GFP_KERNEL); 605 + if (!crc->unc_len) 606 + goto err_free_unc; 607 + 608 + return crc; 609 + 610 + err_free_unc: 611 + kfree(crc->unc); 612 + err_free_crc: 613 + kfree(crc); 614 + return NULL; 615 + } 616 + 617 + static void free_crc_data(struct crc_data *crc) 618 + { 619 + if (!crc) 620 + return; 621 + 622 + if (crc->thr) 623 + kthread_stop(crc->thr); 624 + 625 + kfree(crc->unc_len); 626 + kfree(crc->unc); 627 + kfree(crc); 628 + } 592 629 593 630 /* 594 631 * CRC32 update function that runs in its own thread. ··· 742 703 * footprint. 
743 704 */ 744 705 nr_threads = num_online_cpus() - 1; 745 - nr_threads = clamp_val(nr_threads, 1, CMP_THREADS); 706 + nr_threads = clamp_val(nr_threads, 1, hibernate_compression_threads); 746 707 747 708 page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH); 748 709 if (!page) { ··· 758 719 goto out_clean; 759 720 } 760 721 761 - crc = kzalloc(sizeof(*crc), GFP_KERNEL); 722 + crc = alloc_crc_data(nr_threads); 762 723 if (!crc) { 763 724 pr_err("Failed to allocate crc\n"); 764 725 ret = -ENOMEM; ··· 927 888 928 889 out_clean: 929 890 hib_finish_batch(&hb); 930 - if (crc) { 931 - if (crc->thr) 932 - kthread_stop(crc->thr); 933 - kfree(crc); 934 - } 891 + free_crc_data(crc); 935 892 if (data) { 936 893 for (thr = 0; thr < nr_threads; thr++) { 937 894 if (data[thr].thr) ··· 1262 1227 * footprint. 1263 1228 */ 1264 1229 nr_threads = num_online_cpus() - 1; 1265 - nr_threads = clamp_val(nr_threads, 1, CMP_THREADS); 1230 + nr_threads = clamp_val(nr_threads, 1, hibernate_compression_threads); 1266 1231 1267 1232 page = vmalloc_array(CMP_MAX_RD_PAGES, sizeof(*page)); 1268 1233 if (!page) { ··· 1278 1243 goto out_clean; 1279 1244 } 1280 1245 1281 - crc = kzalloc(sizeof(*crc), GFP_KERNEL); 1246 + crc = alloc_crc_data(nr_threads); 1282 1247 if (!crc) { 1283 1248 pr_err("Failed to allocate crc\n"); 1284 1249 ret = -ENOMEM; ··· 1545 1510 hib_finish_batch(&hb); 1546 1511 for (i = 0; i < ring_size; i++) 1547 1512 free_page((unsigned long)page[i]); 1548 - if (crc) { 1549 - if (crc->thr) 1550 - kthread_stop(crc->thr); 1551 - kfree(crc); 1552 - } 1513 + free_crc_data(crc); 1553 1514 if (data) { 1554 1515 for (thr = 0; thr < nr_threads; thr++) { 1555 1516 if (data[thr].thr) ··· 1693 1662 } 1694 1663 #endif 1695 1664 1665 + static ssize_t hibernate_compression_threads_show(struct kobject *kobj, 1666 + struct kobj_attribute *attr, char *buf) 1667 + { 1668 + return sysfs_emit(buf, "%d\n", hibernate_compression_threads); 1669 + } 1670 + 1671 + static ssize_t 
hibernate_compression_threads_store(struct kobject *kobj, 1672 + struct kobj_attribute *attr, 1673 + const char *buf, size_t n) 1674 + { 1675 + unsigned long val; 1676 + 1677 + if (kstrtoul(buf, 0, &val)) 1678 + return -EINVAL; 1679 + 1680 + if (val < 1) 1681 + return -EINVAL; 1682 + 1683 + hibernate_compression_threads = val; 1684 + return n; 1685 + } 1686 + power_attr(hibernate_compression_threads); 1687 + 1688 + static struct attribute *g[] = { 1689 + &hibernate_compression_threads_attr.attr, 1690 + NULL, 1691 + }; 1692 + 1693 + static const struct attribute_group attr_group = { 1694 + .attrs = g, 1695 + }; 1696 + 1696 1697 static int __init swsusp_header_init(void) 1697 1698 { 1699 + int error; 1700 + 1701 + error = sysfs_create_group(power_kobj, &attr_group); 1702 + if (error) 1703 + return -ENOMEM; 1704 + 1698 1705 swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL); 1699 1706 if (!swsusp_header) 1700 1707 panic("Could not allocate memory for swsusp_header\n"); ··· 1740 1671 } 1741 1672 1742 1673 core_initcall(swsusp_header_init); 1674 + 1675 + static int __init hibernate_compression_threads_setup(char *str) 1676 + { 1677 + int rc = kstrtouint(str, 0, &hibernate_compression_threads); 1678 + 1679 + if (rc) 1680 + return rc; 1681 + 1682 + if (hibernate_compression_threads < 1) 1683 + hibernate_compression_threads = CMP_THREADS; 1684 + 1685 + return 1; 1686 + 1687 + } 1688 + 1689 + __setup("hibernate_compression_threads=", hibernate_compression_threads_setup);