/*
 * pm.h - Power management interface
 *
 * Copyright (C) 2000 Andrew Henroid
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _LINUX_PM_H
#define _LINUX_PM_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/completion.h>

/*
 * Callbacks for platform drivers to implement.
 */
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);

struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
extern void pm_vt_switch_required(struct device *dev, bool required);
extern void pm_vt_switch_unregister(struct device *dev);
#else
static inline void pm_vt_switch_required(struct device *dev, bool required)
{
}
static inline void pm_vt_switch_unregister(struct device *dev)
{
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */

/*
 * Device power management
 */

struct device;

#ifdef CONFIG_PM
extern const char power_group_name[];	/* = "power" */
#else
#define power_group_name	NULL
#endif

typedef struct pm_message {
	int event;
} pm_message_t;

/**
 * struct dev_pm_ops - device PM callbacks
 *
 * Several device power state transitions are externally visible, affecting
 * the state of pending I/O queues and (for drivers that touch hardware)
 * interrupts, wakeups, DMA, and other hardware state. There may also be
 * internal transitions to various low-power modes which are transparent
 * to the rest of the driver stack (such as a driver that's ON gating off
 * clocks which are not in active use).
 *
 * The externally visible transitions are handled with the help of callbacks
 * included in this structure in such a way that two levels of callbacks are
 * involved. First, the PM core executes callbacks provided by PM domains,
 * device types, classes and bus types. They are the subsystem-level callbacks
 * supposed to execute callbacks provided by device drivers, although they may
 * choose not to do that. If the driver callbacks are executed, they have to
 * collaborate with the subsystem-level callbacks to achieve the goals
 * appropriate for the given system transition, given transition phase and the
 * subsystem the device belongs to.
 *
 * @prepare: The principal role of this callback is to prevent new children of
 *	the device from being registered after it has returned (the driver's
 *	subsystem and generally the rest of the kernel is supposed to prevent
 *	new calls to the probe method from being made too once @prepare() has
 *	succeeded). If @prepare() detects a situation it cannot handle (e.g.
 *	registration of a child already in progress), it may return -EAGAIN, so
 *	that the PM core can execute it once again (e.g.
 *	after a new child has been registered) to recover from the race
 *	condition.
 *	This method is executed for all kinds of suspend transitions and is
 *	followed by one of the suspend callbacks: @suspend(), @freeze(), or
 *	@poweroff(). If the transition is a suspend to memory or standby (that
 *	is, not related to hibernation), the return value of @prepare() may be
 *	used to indicate to the PM core to leave the device in runtime suspend
 *	if applicable. Namely, if @prepare() returns a positive number, the PM
 *	core will understand that as a declaration that the device appears to be
 *	runtime-suspended and it may be left in that state during the entire
 *	transition and during the subsequent resume if all of its descendants
 *	are left in runtime suspend too. If that happens, @complete() will be
 *	executed directly after @prepare() and it must ensure the proper
 *	functioning of the device after the system resume.
 *	The PM core executes subsystem-level @prepare() for all devices before
 *	starting to invoke suspend callbacks for any of them, so generally
 *	devices may be assumed to be functional or to respond to runtime resume
 *	requests while @prepare() is being executed. However, device drivers
 *	may NOT assume anything about the availability of user space at that
 *	time and it is NOT valid to request firmware from within @prepare()
 *	(it's too late to do that). It also is NOT valid to allocate
 *	substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
 *	[To work around these limitations, drivers may register suspend and
 *	hibernation notifiers to be executed before the freezing of tasks.]
 *
 * @complete: Undo the changes made by @prepare(). This method is executed for
 *	all kinds of resume transitions, following one of the resume callbacks:
 *	@resume(), @thaw(), @restore(). Also called if the state transition
 *	fails before the driver's suspend callback: @suspend(), @freeze() or
 *	@poweroff(), can be executed (e.g. if the suspend callback fails for one
 *	of the other devices that the PM core has unsuccessfully attempted to
 *	suspend earlier).
 *	The PM core executes subsystem-level @complete() after it has executed
 *	the appropriate resume callbacks for all devices. If the corresponding
 *	@prepare() at the beginning of the suspend transition returned a
 *	positive number and the device was left in runtime suspend (without
 *	executing any suspend and resume callbacks for it), @complete() will be
 *	the only callback executed for the device during resume. In that case,
 *	@complete() must be prepared to do whatever is necessary to ensure the
 *	proper functioning of the device after the system resume. To this end,
 *	@complete() can check the power.direct_complete flag of the device to
 *	learn whether (unset) or not (set) the previous suspend and resume
 *	callbacks have been executed for it.
 *
 * @suspend: Executed before putting the system into a sleep state in which the
 *	contents of main memory are preserved. The exact action to perform
 *	depends on the device's subsystem (PM domain, device type, class or bus
 *	type), but generally the device must be quiescent after subsystem-level
 *	@suspend() has returned, so that it doesn't do any I/O or DMA.
 *	Subsystem-level @suspend() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @suspend_late: Continue operations started by @suspend(). For a number of
 *	devices @suspend_late() may point to the same callback routine as the
 *	runtime suspend callback.
 *
 * @resume: Executed after waking the system up from a sleep state in which the
 *	contents of main memory were preserved. The exact action to perform
 *	depends on the device's subsystem, but generally the driver is expected
 *	to start working again, responding to hardware events and software
 *	requests (the device itself may be left in a low-power state, waiting
 *	for a runtime resume to occur). The state of the device at the time its
 *	driver's @resume() callback is run depends on the platform and subsystem
 *	the device belongs to. On most platforms, there are no restrictions on
 *	availability of resources like clocks during @resume().
 *	Subsystem-level @resume() is executed for all devices after invoking
 *	subsystem-level @resume_noirq() for all of them.
 *
 * @resume_early: Prepare to execute @resume(). For a number of devices
 *	@resume_early() may point to the same callback routine as the runtime
 *	resume callback.
 *
 * @freeze: Hibernation-specific, executed before creating a hibernation image.
 *	Analogous to @suspend(), but it should not enable the device to signal
 *	wakeup events or change its power state. The majority of subsystems
 *	(with the notable exception of the PCI bus type) expect the driver-level
 *	@freeze() to save the device settings in memory to be used by @restore()
 *	during the subsequent resume from hibernation.
 *	Subsystem-level @freeze() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @freeze_late: Continue operations started by @freeze(). Analogous to
 *	@suspend_late(), but it should not enable the device to signal wakeup
 *	events or change its power state.
 *
 * @thaw: Hibernation-specific, executed after creating a hibernation image OR
 *	if the creation of an image has failed. Also executed after a failing
 *	attempt to restore the contents of main memory from such an image.
 *	Undo the changes made by the preceding @freeze(), so the device can be
 *	operated in the same way as immediately before the call to @freeze().
 *	Subsystem-level @thaw() is executed for all devices after invoking
 *	subsystem-level @thaw_noirq() for all of them. It also may be executed
 *	directly after @freeze() in case of a transition error.
 *
 * @thaw_early: Prepare to execute @thaw(). Undo the changes made by the
 *	preceding @freeze_late().
 *
 * @poweroff: Hibernation-specific, executed after saving a hibernation image.
 *	Analogous to @suspend(), but it need not save the device's settings in
 *	memory.
 *	Subsystem-level @poweroff() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @poweroff_late: Continue operations started by @poweroff(). Analogous to
 *	@suspend_late(), but it need not save the device's settings in memory.
 *
 * @restore: Hibernation-specific, executed after restoring the contents of main
 *	memory from a hibernation image, analogous to @resume().
 *
 * @restore_early: Prepare to execute @restore(), analogous to @resume_early().
 *
 * @suspend_noirq: Complete the actions started by @suspend().
 *	Carry out any additional operations required for suspending the device
 *	that might be racing with its driver's interrupt handler, which is
 *	guaranteed not to run while @suspend_noirq() is being executed.
 *	It generally is expected that the device will be in a low-power state
 *	(appropriate for the target system sleep state) after subsystem-level
 *	@suspend_noirq() has returned successfully. If the device can generate
 *	system wakeup signals and is enabled to wake up the system, it should be
 *	configured to do so at that time. However, depending on the platform
 *	and device's subsystem, @suspend() or @suspend_late() may be allowed to
 *	put the device into the low-power state and configure it to generate
 *	wakeup signals, in which case it generally is not necessary to define
 *	@suspend_noirq().
 *
 * @resume_noirq: Prepare for the execution of @resume() by carrying out any
 *	operations required for resuming the device that might be racing with
 *	its driver's interrupt handler, which is guaranteed not to run while
 *	@resume_noirq() is being executed.
 *
 * @freeze_noirq: Complete the actions started by @freeze(). Carry out any
 *	additional operations required for freezing the device that might be
 *	racing with its driver's interrupt handler, which is guaranteed not to
 *	run while @freeze_noirq() is being executed.
 *	The power state of the device should not be changed by either @freeze(),
 *	or @freeze_late(), or @freeze_noirq() and it should not be configured to
 *	signal system wakeup by any of these callbacks.
 *
 * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
 *	operations required for thawing the device that might be racing with its
 *	driver's interrupt handler, which is guaranteed not to run while
 *	@thaw_noirq() is being executed.
 *
 * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
 *	@suspend_noirq(), but it need not save the device's settings in memory.
 *
 * @restore_noirq: Prepare for the execution of @restore() by carrying out any
 *	operations required for restoring the device that might be racing with
 *	its driver's interrupt handler, which is guaranteed not to run while
 *	@restore_noirq() is being executed. Analogous to @resume_noirq().
 *
 * All of the above callbacks, except for @complete(), return error codes.
 * However, the error codes returned by the resume operations, @resume(),
 * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
 * not cause the PM core to abort the resume transition during which they are
 * returned. The error codes returned in those cases are only printed by the PM
 * core to the system logs for debugging purposes. Still, it is recommended
 * that drivers only return error codes from their resume methods in case of an
 * unrecoverable failure (i.e. when the device being handled refuses to resume
 * and becomes unusable) to allow us to modify the PM core in the future, so
 * that it can avoid attempting to handle devices that failed to resume and
 * their children.
 *
 * It is allowed to unregister devices while the above callbacks are being
 * executed. However, a callback routine must NOT try to unregister the device
 * it was called for, although it may unregister children of that device (for
 * example, if it detects that a child was unplugged while the system was
 * asleep).
 *
 * Refer to Documentation/power/devices.txt for more information about the role
 * of the above callbacks in the system suspend process.
 *
 * There also are callbacks related to runtime power management of devices.
 * Again, these callbacks are executed by the PM core only for subsystems
 * (PM domains, device types, classes and bus types) and the subsystem-level
 * callbacks are supposed to invoke the driver callbacks. Moreover, the exact
 * actions to be performed by a device driver's callbacks generally depend on
 * the platform and subsystem the device belongs to.
 *
 * @runtime_suspend: Prepare the device for a condition in which it won't be
 *	able to communicate with the CPU(s) and RAM due to power management.
 *	This need not mean that the device should be put into a low-power state.
 *	For example, if the device is behind a link which is about to be turned
 *	off, the device may remain at full power. If the device does go to low
 *	power and is capable of generating runtime wakeup events, remote wakeup
 *	(i.e., a hardware mechanism allowing the device to request a change of
 *	its power state via an interrupt) should be enabled for it.
 *
 * @runtime_resume: Put the device into the fully active state in response to a
 *	wakeup event generated by hardware or at the request of software. If
 *	necessary, put the device into the full-power state and restore its
 *	registers, so that it is fully operational.
 *
 * @runtime_idle: Device appears to be inactive and it might be put into a
 *	low-power state if all of the necessary conditions are satisfied.
 *	Check these conditions, and return 0 if it's appropriate to let the PM
 *	core queue a suspend request for the device.
 *
 * Refer to Documentation/power/runtime_pm.txt for more information about the
 * role of the above callbacks in device runtime power management.
 *
 */

struct dev_pm_ops {
	int (*prepare)(struct device *dev);
	void (*complete)(struct device *dev);
	int (*suspend)(struct device *dev);
	int (*resume)(struct device *dev);
	int (*freeze)(struct device *dev);
	int (*thaw)(struct device *dev);
	int (*poweroff)(struct device *dev);
	int (*restore)(struct device *dev);
	int (*suspend_late)(struct device *dev);
	int (*resume_early)(struct device *dev);
	int (*freeze_late)(struct device *dev);
	int (*thaw_early)(struct device *dev);
	int (*poweroff_late)(struct device *dev);
	int (*restore_early)(struct device *dev);
	int (*suspend_noirq)(struct device *dev);
	int (*resume_noirq)(struct device *dev);
	int (*freeze_noirq)(struct device *dev);
	int (*thaw_noirq)(struct device *dev);
	int (*poweroff_noirq)(struct device *dev);
	int (*restore_noirq)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_idle)(struct device *dev);
};

#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend = suspend_fn, \
	.resume = resume_fn, \
	.freeze = suspend_fn, \
	.thaw = resume_fn, \
	.poweroff = suspend_fn, \
	.restore = resume_fn,
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif

#ifdef CONFIG_PM_SLEEP
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend_late = suspend_fn, \
	.resume_early = resume_fn, \
	.freeze_late = suspend_fn, \
	.thaw_early = resume_fn, \
	.poweroff_late = suspend_fn, \
	.restore_early = resume_fn,
#else
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif

#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	.runtime_suspend = suspend_fn, \
	.runtime_resume = resume_fn, \
	.runtime_idle = idle_fn,
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif

/*
 * Use this if you want to use the same suspend and resume callbacks for suspend
 * to RAM and hibernation.
 */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
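
/*
 * Illustrative sketch (not part of this interface): a driver that only needs
 * system sleep support and can use the same routine for suspend to RAM and
 * hibernation may wrap its callbacks with SIMPLE_DEV_PM_OPS() and point its
 * driver structure at the result. The "foo" names below are hypothetical and
 * the trivial bodies stand in for the real quiesce/restore work:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *			.pm = &foo_pm_ops,
 *		},
 *	};
 */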

/*
 * Use this for defining a set of PM operations to be used in all situations
 * (system suspend, hibernation or runtime PM).
 * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
 * be different from the corresponding runtime PM callbacks, .runtime_suspend(),
 * and .runtime_resume(), because .runtime_suspend() always works on an already
 * quiescent device, while .suspend() should assume that the device may be doing
 * something when it is called (it should ensure that the device will be
 * quiescent after it has returned). Therefore it's better to point the "late"
 * suspend and "early" resume callback pointers, .suspend_late() and
 * .resume_early(), to the same routines as .runtime_suspend() and
 * .runtime_resume(), respectively (and analogously for hibernation).
 */
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
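
/*
 * Illustrative sketch (hypothetical "foo" names): a driver whose device is
 * already quiescent whenever its PM callbacks run can reuse its runtime PM
 * routines for system sleep and hibernation with UNIVERSAL_DEV_PM_OPS():
 *
 *	static UNIVERSAL_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
 *				    foo_runtime_resume, NULL);
 *
 * A driver following the advice above instead keeps distinct system sleep
 * callbacks and only shares the "late"/"early" phases with runtime PM:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_runtime_suspend,
 *					     foo_runtime_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   NULL)
 *	};
 */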

/**
 * PM_EVENT_ messages
 *
 * The following PM_EVENT_ messages are defined for the internal use of the PM
 * core, in order to provide a mechanism allowing the high level suspend and
 * hibernation code to convey the necessary information to the device PM core
 * code:
 *
 * ON		No transition.
 *
 * FREEZE	System is going to hibernate, call ->prepare() and ->freeze()
 *		for all devices.
 *
 * SUSPEND	System is going to suspend, call ->prepare() and ->suspend()
 *		for all devices.
 *
 * HIBERNATE	Hibernation image has been saved, call ->prepare() and
 *		->poweroff() for all devices.
 *
 * QUIESCE	Contents of main memory are going to be restored from a (loaded)
 *		hibernation image, call ->prepare() and ->freeze() for all
 *		devices.
 *
 * RESUME	System is resuming, call ->resume() and ->complete() for all
 *		devices.
 *
 * THAW		Hibernation image has been created, call ->thaw() and
 *		->complete() for all devices.
 *
 * RESTORE	Contents of main memory have been restored from a hibernation
 *		image, call ->restore() and ->complete() for all devices.
 *
 * RECOVER	Creation of a hibernation image or restoration of the main
 *		memory contents from a hibernation image has failed, call
 *		->thaw() and ->complete() for all devices.
 *
 * The following PM_EVENT_ messages are defined for internal use by
 * kernel subsystems. They are never issued by the PM core.
 *
 * USER_SUSPEND		Manual selective suspend was issued by userspace.
 *
 * USER_RESUME		Manual selective resume was issued by userspace.
 *
 * REMOTE_WAKEUP	Remote-wakeup request was received from the device.
 *
 * AUTO_SUSPEND		Automatic (device idle) runtime suspend was
 *			initiated by the subsystem.
 *
 * AUTO_RESUME		Automatic (device needed) runtime resume was
 *			requested by a driver.
 */

#define PM_EVENT_INVALID	(-1)
#define PM_EVENT_ON		0x0000
#define PM_EVENT_FREEZE		0x0001
#define PM_EVENT_SUSPEND	0x0002
#define PM_EVENT_HIBERNATE	0x0004
#define PM_EVENT_QUIESCE	0x0008
#define PM_EVENT_RESUME		0x0010
#define PM_EVENT_THAW		0x0020
#define PM_EVENT_RESTORE	0x0040
#define PM_EVENT_RECOVER	0x0080
#define PM_EVENT_USER		0x0100
#define PM_EVENT_REMOTE		0x0200
#define PM_EVENT_AUTO		0x0400

#define PM_EVENT_SLEEP		(PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND	(PM_EVENT_USER | PM_EVENT_SUSPEND)
#define PM_EVENT_USER_RESUME	(PM_EVENT_USER | PM_EVENT_RESUME)
#define PM_EVENT_REMOTE_RESUME	(PM_EVENT_REMOTE | PM_EVENT_RESUME)
#define PM_EVENT_AUTO_SUSPEND	(PM_EVENT_AUTO | PM_EVENT_SUSPEND)
#define PM_EVENT_AUTO_RESUME	(PM_EVENT_AUTO | PM_EVENT_RESUME)

#define PMSG_INVALID	((struct pm_message){ .event = PM_EVENT_INVALID, })
#define PMSG_ON		((struct pm_message){ .event = PM_EVENT_ON, })
#define PMSG_FREEZE	((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE	((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND	((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE	((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_RESUME	((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW	((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE	((struct pm_message){ .event = PM_EVENT_RESTORE, })
#define PMSG_RECOVER	((struct pm_message){ .event = PM_EVENT_RECOVER, })
#define PMSG_USER_SUSPEND	((struct pm_message) \
					{ .event = PM_EVENT_USER_SUSPEND, })
#define PMSG_USER_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_USER_RESUME, })
#define PMSG_REMOTE_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_REMOTE_RESUME, })
#define PMSG_AUTO_SUSPEND	((struct pm_message) \
					{ .event = PM_EVENT_AUTO_SUSPEND, })
#define PMSG_AUTO_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_AUTO_RESUME, })

#define PMSG_IS_AUTO(msg)	(((msg).event & PM_EVENT_AUTO) != 0)
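
/*
 * Illustrative sketch (hypothetical code): bus or class code that still passes
 * a pm_message_t to its drivers can use PMSG_IS_AUTO() to tell an automatic
 * (runtime) transition from a system-wide or user-requested one:
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t msg)
 *	{
 *		if (PMSG_IS_AUTO(msg))
 *			return foo_runtime_suspend(dev);
 *
 *		return foo_system_suspend(dev);
 *	}
 */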

/**
 * Device run-time power management status.
 *
 * These status labels are used internally by the PM core to indicate the
 * current status of a device with respect to the PM core operations. They do
 * not reflect the actual power state of the device or its status as seen by the
 * driver.
 *
 * RPM_ACTIVE		Device is fully operational. Indicates that the device
 *			bus type's ->runtime_resume() callback has completed
 *			successfully.
 *
 * RPM_SUSPENDED	Device bus type's ->runtime_suspend() callback has
 *			completed successfully. The device is regarded as
 *			suspended.
 *
 * RPM_RESUMING		Device bus type's ->runtime_resume() callback is being
 *			executed.
 *
 * RPM_SUSPENDING	Device bus type's ->runtime_suspend() callback is being
 *			executed.
 */

enum rpm_status {
	RPM_ACTIVE = 0,
	RPM_RESUMING,
	RPM_SUSPENDED,
	RPM_SUSPENDING,
};

/**
 * Device run-time power management request types.
 *
 * RPM_REQ_NONE		Do nothing.
 *
 * RPM_REQ_IDLE		Run the device bus type's ->runtime_idle() callback
 *
 * RPM_REQ_SUSPEND	Run the device bus type's ->runtime_suspend() callback
 *
 * RPM_REQ_AUTOSUSPEND	Same as RPM_REQ_SUSPEND, but not until the device has
 *			been inactive for as long as power.autosuspend_delay
 *
 * RPM_REQ_RESUME	Run the device bus type's ->runtime_resume() callback
 */

enum rpm_request {
	RPM_REQ_NONE = 0,
	RPM_REQ_IDLE,
	RPM_REQ_SUSPEND,
	RPM_REQ_AUTOSUSPEND,
	RPM_REQ_RESUME,
};

struct wakeup_source;
struct pm_domain_data;

struct pm_subsys_data {
	spinlock_t lock;
	unsigned int refcount;
#ifdef CONFIG_PM_CLK
	struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
	struct pm_domain_data *domain_data;
#endif
};

struct dev_pm_info {
	pm_message_t power_state;
	unsigned int can_wakeup:1;
	unsigned int async_suspend:1;
	bool is_prepared:1;	/* Owned by the PM core */
	bool is_suspended:1;	/* Ditto */
	bool is_noirq_suspended:1;
	bool is_late_suspended:1;
	bool ignore_children:1;
	bool early_init:1;	/* Owned by the PM core */
	bool direct_complete:1;	/* Owned by the PM core */
	spinlock_t lock;
#ifdef CONFIG_PM_SLEEP
	struct list_head entry;
	struct completion completion;
	struct wakeup_source *wakeup;
	bool wakeup_path:1;
	bool syscore:1;
#else
	unsigned int should_wakeup:1;
#endif
#ifdef CONFIG_PM
	struct timer_list suspend_timer;
	unsigned long timer_expires;
	struct work_struct work;
	wait_queue_head_t wait_queue;
	atomic_t usage_count;
	atomic_t child_count;
	unsigned int disable_depth:3;
	unsigned int idle_notification:1;
	unsigned int request_pending:1;
	unsigned int deferred_resume:1;
	unsigned int run_wake:1;
	unsigned int runtime_auto:1;
	unsigned int no_callbacks:1;
	unsigned int irq_safe:1;
	unsigned int use_autosuspend:1;
	unsigned int timer_autosuspends:1;
	unsigned int memalloc_noio:1;
	enum rpm_request request;
	enum rpm_status runtime_status;
	int runtime_error;
	int autosuspend_delay;
	unsigned long last_busy;
	unsigned long active_jiffies;
	unsigned long suspended_jiffies;
	unsigned long accounting_timestamp;
#endif
	struct pm_subsys_data *subsys_data;	/* Owned by the subsystem. */
	void (*set_latency_tolerance)(struct device *, s32);
	struct dev_pm_qos *qos;
};

extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
extern void dev_pm_put_subsys_data(struct device *dev);

/*
 * Power domains provide callbacks that are executed during system suspend,
 * hibernation, system resume and during runtime PM transitions along with
 * subsystem-level and driver-level callbacks.
 */
struct dev_pm_domain {
	struct dev_pm_ops ops;
	void (*detach)(struct device *dev, bool power_off);
};

/*
 * The PM_EVENT_ messages are also used by drivers implementing the legacy
 * suspend framework, based on the ->suspend() and ->resume() callbacks common
 * for suspend and hibernation transitions, according to the rules below.
 */

/* Necessary, because several drivers use PM_EVENT_PRETHAW */
#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE

/*
 * One transition is triggered by resume(), after a suspend() call; the
 * message is implicit:
 *
 * ON		Driver starts working again, responding to hardware events
 *		and software requests. The hardware may have gone through
 *		a power-off reset, or it may have maintained state from the
 *		previous suspend() which the driver will rely on while
 *		resuming. On most platforms, there are no restrictions on
 *		availability of resources like clocks during resume().
 *
 * Other transitions are triggered by messages sent using suspend(). All
 * these transitions quiesce the driver, so that I/O queues are inactive.
 * That commonly entails turning off IRQs and DMA; there may be rules
 * about how to quiesce that are specific to the bus or the device's type.
 * (For example, network drivers mark the link state.) Other details may
 * differ according to the message:
 *
 * SUSPEND	Quiesce, enter a low power device state appropriate for
 *		the upcoming system state (such as PCI_D3hot), and enable
 *		wakeup events as appropriate.
 *
 * HIBERNATE	Enter a low power device state appropriate for the hibernation
 *		state (e.g. ACPI S4) and enable wakeup events as appropriate.
 *
 * FREEZE	Quiesce operations so that a consistent image can be saved;
 *		but do NOT otherwise enter a low power device state, and do
 *		NOT emit system wakeup events.
 *
 * PRETHAW	Quiesce as if for FREEZE; additionally, prepare for restoring
 *		the system from a snapshot taken after an earlier FREEZE.
 *		Some drivers will need to reset their hardware state instead
 *		of preserving it, to ensure that it's never mistaken for the
 *		state which that earlier snapshot had set up.
 *
 * A minimally power-aware driver treats all messages as SUSPEND, fully
 * reinitializes its device during resume() -- whether or not it was reset
 * during the suspend/resume cycle -- and can't issue wakeup events.
 *
 * More power-aware drivers may also use low power states at runtime as
 * well as during system sleep states like PM_SUSPEND_STANDBY. They may
 * be able to use wakeup events to exit from runtime low-power states,
 * or from system low-power states such as standby or suspend-to-RAM.
 */
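
/*
 * Illustrative sketch (hypothetical legacy driver): the rules above translate
 * into a legacy suspend callback that branches on the message it is passed.
 * The foo_*() helpers are placeholders for device-specific code:
 *
 *	static int foo_suspend(struct platform_device *pdev, pm_message_t state)
 *	{
 *		foo_quiesce_io(pdev);
 *		switch (state.event) {
 *		case PM_EVENT_SUSPEND:
 *		case PM_EVENT_HIBERNATE:
 *			foo_enable_wakeup(pdev);
 *			foo_enter_low_power(pdev);
 *			break;
 *		case PM_EVENT_PRETHAW:
 *			foo_reset_hardware(pdev);
 *			break;
 *		case PM_EVENT_FREEZE:
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 */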

#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void dpm_resume_noirq(pm_message_t state);
extern void dpm_resume_early(pm_message_t state);
extern void dpm_resume(pm_message_t state);
extern void dpm_complete(pm_message_t state);

extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
extern int dpm_suspend_noirq(pm_message_t state);
extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);

extern void __suspend_report_result(const char *function, void *fn, int ret);

#define suspend_report_result(fn, ret)				\
	do {							\
		__suspend_report_result(__func__, fn, ret);	\
	} while (0)

extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *));

extern int pm_generic_prepare(struct device *dev);
extern int pm_generic_suspend_late(struct device *dev);
extern int pm_generic_suspend_noirq(struct device *dev);
extern int pm_generic_suspend(struct device *dev);
extern int pm_generic_resume_early(struct device *dev);
extern int pm_generic_resume_noirq(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze_noirq(struct device *dev);
extern int pm_generic_freeze_late(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw_noirq(struct device *dev);
extern int pm_generic_thaw_early(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore_noirq(struct device *dev);
extern int pm_generic_restore_early(struct device *dev);
extern int pm_generic_restore(struct device *dev);
extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);

#else /* !CONFIG_PM_SLEEP */

#define device_pm_lock() do {} while (0)
#define device_pm_unlock() do {} while (0)

static inline int dpm_suspend_start(pm_message_t state)
{
	return 0;
}

#define suspend_report_result(fn, ret)	do {} while (0)

static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
	return 0;
}

static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
}

#define pm_generic_prepare	NULL
#define pm_generic_suspend_late	NULL
#define pm_generic_suspend_noirq	NULL
#define pm_generic_suspend	NULL
#define pm_generic_resume_early	NULL
#define pm_generic_resume_noirq	NULL
#define pm_generic_resume	NULL
#define pm_generic_freeze_noirq	NULL
#define pm_generic_freeze_late	NULL
#define pm_generic_freeze	NULL
#define pm_generic_thaw_noirq	NULL
#define pm_generic_thaw_early	NULL
#define pm_generic_thaw		NULL
#define pm_generic_restore_noirq	NULL
#define pm_generic_restore_early	NULL
#define pm_generic_restore	NULL
#define pm_generic_poweroff_noirq	NULL
#define pm_generic_poweroff_late	NULL
#define pm_generic_poweroff	NULL
#define pm_generic_complete	NULL
#endif /* !CONFIG_PM_SLEEP */
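
/*
 * Illustrative sketch (hypothetical subsystem code): suspend_report_result()
 * above is meant to be called right after a suspend callback has been invoked,
 * so that a failing callback is identified in the kernel log together with the
 * name of the caller:
 *
 *	static int foo_bus_suspend_one(struct device *dev)
 *	{
 *		const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 *		int error = 0;
 *
 *		if (pm && pm->suspend) {
 *			error = pm->suspend(dev);
 *			suspend_report_result(pm->suspend, error);
 *		}
 *		return error;
 *	}
 */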

/* How to reorder dpm_list after device_move() */
enum dpm_order {
	DPM_ORDER_NONE,
	DPM_ORDER_DEV_AFTER_PARENT,
	DPM_ORDER_PARENT_BEFORE_DEV,
	DPM_ORDER_DEV_LAST,
};

#endif /* _LINUX_PM_H */