/*
 *  pm.h - Power management interface
 *
 *  Copyright (C) 2000 Andrew Henroid
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifndef _LINUX_PM_H
#define _LINUX_PM_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/completion.h>

/*
 * Callbacks for platform drivers to implement.
 */
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);

/*
 * Device power management
 */

struct device;

#ifdef CONFIG_PM
extern const char power_group_name[];		/* = "power" */
#else
#define power_group_name	NULL
#endif

typedef struct pm_message {
	int event;
} pm_message_t;
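
/*
 * Illustrative sketch (not part of the original interface documentation):
 * platform or board code can install a machine-specific power-off handler by
 * assigning to pm_power_off; the kernel calls it to turn the machine off.
 * The names below are hypothetical.
 *
 *	static void my_board_power_off(void)
 *	{
 *		(toggle the board's power latch; does not return)
 *	}
 *
 *	static int __init my_board_init(void)
 *	{
 *		pm_power_off = my_board_power_off;
 *		return 0;
 *	}
 */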

/**
 * struct dev_pm_ops - device PM callbacks
 *
 * Several device power state transitions are externally visible, affecting
 * the state of pending I/O queues and (for drivers that touch hardware)
 * interrupts, wakeups, DMA, and other hardware state.  There may also be
 * internal transitions to various low-power modes which are transparent
 * to the rest of the driver stack (such as a driver that's ON gating off
 * clocks which are not in active use).
 *
 * The externally visible transitions are handled with the help of callbacks
 * included in this structure in such a way that two levels of callbacks are
 * involved.  First, the PM core executes callbacks provided by PM domains,
 * device types, classes and bus types.  They are the subsystem-level callbacks
 * supposed to execute callbacks provided by device drivers, although they may
 * choose not to do that.  If the driver callbacks are executed, they have to
 * collaborate with the subsystem-level callbacks to achieve the goals
 * appropriate for the given system transition, given transition phase and the
 * subsystem the device belongs to.
 *
 * @prepare: The principal role of this callback is to prevent new children of
 *	the device from being registered after it has returned (the driver's
 *	subsystem and generally the rest of the kernel is supposed to prevent
 *	new calls to the probe method from being made too once @prepare() has
 *	succeeded).  If @prepare() detects a situation it cannot handle (e.g.
 *	registration of a child already in progress), it may return -EAGAIN, so
 *	that the PM core can execute it once again (e.g. after a new child has
 *	been registered) to recover from the race condition.
 *	This method is executed for all kinds of suspend transitions and is
 *	followed by one of the suspend callbacks: @suspend(), @freeze(), or
 *	@poweroff().  The PM core executes subsystem-level @prepare() for all
 *	devices before starting to invoke suspend callbacks for any of them, so
 *	generally devices may be assumed to be functional or to respond to
 *	runtime resume requests while @prepare() is being executed.  However,
 *	device drivers may NOT assume anything about the availability of user
 *	space at that time and it is NOT valid to request firmware from within
 *	@prepare() (it's too late to do that).  It also is NOT valid to allocate
 *	substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
 *	[To work around these limitations, drivers may register suspend and
 *	hibernation notifiers to be executed before the freezing of tasks.]
 *
 * @complete: Undo the changes made by @prepare().  This method is executed for
 *	all kinds of resume transitions, following one of the resume callbacks:
 *	@resume(), @thaw(), @restore().  Also called if the state transition
 *	fails before the driver's suspend callback: @suspend(), @freeze() or
 *	@poweroff(), can be executed (e.g. if the suspend callback fails for one
 *	of the other devices that the PM core has unsuccessfully attempted to
 *	suspend earlier).
 *	The PM core executes subsystem-level @complete() after it has executed
 *	the appropriate resume callbacks for all devices.
 *
 * @suspend: Executed before putting the system into a sleep state in which the
 *	contents of main memory are preserved.  The exact action to perform
 *	depends on the device's subsystem (PM domain, device type, class or bus
 *	type), but generally the device must be quiescent after subsystem-level
 *	@suspend() has returned, so that it doesn't do any I/O or DMA.
 *	Subsystem-level @suspend() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @suspend_late: Continue operations started by @suspend().  For a number of
 *	devices @suspend_late() may point to the same callback routine as the
 *	runtime suspend callback.
 *
 * @resume: Executed after waking the system up from a sleep state in which the
 *	contents of main memory were preserved.  The exact action to perform
 *	depends on the device's subsystem, but generally the driver is expected
 *	to start working again, responding to hardware events and software
 *	requests (the device itself may be left in a low-power state, waiting
 *	for a runtime resume to occur).  The state of the device at the time its
 *	driver's @resume() callback is run depends on the platform and subsystem
 *	the device belongs to.  On most platforms, there are no restrictions on
 *	availability of resources like clocks during @resume().
 *	Subsystem-level @resume() is executed for all devices after invoking
 *	subsystem-level @resume_noirq() for all of them.
 *
 * @resume_early: Prepare to execute @resume().  For a number of devices
 *	@resume_early() may point to the same callback routine as the runtime
 *	resume callback.
 *
 * @freeze: Hibernation-specific, executed before creating a hibernation image.
 *	Analogous to @suspend(), but it should not enable the device to signal
 *	wakeup events or change its power state.  The majority of subsystems
 *	(with the notable exception of the PCI bus type) expect the driver-level
 *	@freeze() to save the device settings in memory to be used by @restore()
 *	during the subsequent resume from hibernation.
 *	Subsystem-level @freeze() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @freeze_late: Continue operations started by @freeze().  Analogous to
 *	@suspend_late(), but it should not enable the device to signal wakeup
 *	events or change its power state.
 *
 * @thaw: Hibernation-specific, executed after creating a hibernation image OR
 *	if the creation of an image has failed.  Also executed after a failing
 *	attempt to restore the contents of main memory from such an image.
 *	Undo the changes made by the preceding @freeze(), so the device can be
 *	operated in the same way as immediately before the call to @freeze().
 *	Subsystem-level @thaw() is executed for all devices after invoking
 *	subsystem-level @thaw_noirq() for all of them.  It also may be executed
 *	directly after @freeze() in case of a transition error.
 *
 * @thaw_early: Prepare to execute @thaw().  Undo the changes made by the
 *	preceding @freeze_late().
 *
 * @poweroff: Hibernation-specific, executed after saving a hibernation image.
 *	Analogous to @suspend(), but it need not save the device's settings in
 *	memory.
 *	Subsystem-level @poweroff() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @poweroff_late: Continue operations started by @poweroff().  Analogous to
 *	@suspend_late(), but it need not save the device's settings in memory.
 *
 * @restore: Hibernation-specific, executed after restoring the contents of main
 *	memory from a hibernation image, analogous to @resume().
 *
 * @restore_early: Prepare to execute @restore(), analogous to @resume_early().
 *
 * @suspend_noirq: Complete the actions started by @suspend().  Carry out any
 *	additional operations required for suspending the device that might be
 *	racing with its driver's interrupt handler, which is guaranteed not to
 *	run while @suspend_noirq() is being executed.
 *	It generally is expected that the device will be in a low-power state
 *	(appropriate for the target system sleep state) after subsystem-level
 *	@suspend_noirq() has returned successfully.  If the device can generate
 *	system wakeup signals and is enabled to wake up the system, it should be
 *	configured to do so at that time.  However, depending on the platform
 *	and device's subsystem, @suspend() or @suspend_late() may be allowed to
 *	put the device into the low-power state and configure it to generate
 *	wakeup signals, in which case it generally is not necessary to define
 *	@suspend_noirq().
 *
 * @resume_noirq: Prepare for the execution of @resume() by carrying out any
 *	operations required for resuming the device that might be racing with
 *	its driver's interrupt handler, which is guaranteed not to run while
 *	@resume_noirq() is being executed.
 *
 * @freeze_noirq: Complete the actions started by @freeze().  Carry out any
 *	additional operations required for freezing the device that might be
 *	racing with its driver's interrupt handler, which is guaranteed not to
 *	run while @freeze_noirq() is being executed.
 *	The power state of the device should not be changed by either @freeze(),
 *	or @freeze_late(), or @freeze_noirq() and it should not be configured to
 *	signal system wakeup by any of these callbacks.
 *
 * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
 *	operations required for thawing the device that might be racing with its
 *	driver's interrupt handler, which is guaranteed not to run while
 *	@thaw_noirq() is being executed.
 *
 * @poweroff_noirq: Complete the actions started by @poweroff().  Analogous to
 *	@suspend_noirq(), but it need not save the device's settings in memory.
 *
 * @restore_noirq: Prepare for the execution of @restore() by carrying out any
 *	operations required for thawing the device that might be racing with its
 *	driver's interrupt handler, which is guaranteed not to run while
 *	@restore_noirq() is being executed.  Analogous to @resume_noirq().
 *
 * All of the above callbacks, except for @complete(), return error codes.
 * However, the error codes returned by the resume operations, @resume(),
 * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
 * not cause the PM core to abort the resume transition during which they are
 * returned.  The error codes returned in those cases are only printed by the
 * PM core to the system logs for debugging purposes.  Still, it is recommended
 * that drivers only return error codes from their resume methods in case of an
 * unrecoverable failure (i.e. when the device being handled refuses to resume
 * and becomes unusable) to allow us to modify the PM core in the future, so
 * that it can avoid attempting to handle devices that failed to resume and
 * their children.
 *
 * It is allowed to unregister devices while the above callbacks are being
 * executed.  However, a callback routine must NOT try to unregister the device
 * it was called for, although it may unregister children of that device (for
 * example, if it detects that a child was unplugged while the system was
 * asleep).
 *
 * Refer to Documentation/power/devices.txt for more information about the role
 * of the above callbacks in the system suspend process.
 *
 * There also are callbacks related to runtime power management of devices.
 * Again, these callbacks are executed by the PM core only for subsystems
 * (PM domains, device types, classes and bus types) and the subsystem-level
 * callbacks are supposed to invoke the driver callbacks.  Moreover, the exact
 * actions to be performed by a device driver's callbacks generally depend on
 * the platform and subsystem the device belongs to.
 *
 * @runtime_suspend: Prepare the device for a condition in which it won't be
 *	able to communicate with the CPU(s) and RAM due to power management.
 *	This need not mean that the device should be put into a low-power state.
 *	For example, if the device is behind a link which is about to be turned
 *	off, the device may remain at full power.  If the device does go to low
 *	power and is capable of generating runtime wakeup events, remote wakeup
 *	(i.e., a hardware mechanism allowing the device to request a change of
 *	its power state via an interrupt) should be enabled for it.
 *
 * @runtime_resume: Put the device into the fully active state in response to a
 *	wakeup event generated by hardware or at the request of software.  If
 *	necessary, put the device into the full-power state and restore its
 *	registers, so that it is fully operational.
 *
 * @runtime_idle: Device appears to be inactive and it might be put into a
 *	low-power state if all of the necessary conditions are satisfied.  Check
 *	these conditions and handle the device as appropriate, possibly queueing
 *	a suspend request for it.  The return value is ignored by the PM core.
 *
 * Refer to Documentation/power/runtime_pm.txt for more information about the
 * role of the above callbacks in device runtime power management.
 *
 */

struct dev_pm_ops {
	int (*prepare)(struct device *dev);
	void (*complete)(struct device *dev);
	int (*suspend)(struct device *dev);
	int (*resume)(struct device *dev);
	int (*freeze)(struct device *dev);
	int (*thaw)(struct device *dev);
	int (*poweroff)(struct device *dev);
	int (*restore)(struct device *dev);
	int (*suspend_late)(struct device *dev);
	int (*resume_early)(struct device *dev);
	int (*freeze_late)(struct device *dev);
	int (*thaw_early)(struct device *dev);
	int (*poweroff_late)(struct device *dev);
	int (*restore_early)(struct device *dev);
	int (*suspend_noirq)(struct device *dev);
	int (*resume_noirq)(struct device *dev);
	int (*freeze_noirq)(struct device *dev);
	int (*thaw_noirq)(struct device *dev);
	int (*poweroff_noirq)(struct device *dev);
	int (*restore_noirq)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_idle)(struct device *dev);
};

#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend = suspend_fn, \
	.resume = resume_fn, \
	.freeze = suspend_fn, \
	.thaw = resume_fn, \
	.poweroff = suspend_fn, \
	.restore = resume_fn,
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif

#ifdef CONFIG_PM_RUNTIME
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	.runtime_suspend = suspend_fn, \
	.runtime_resume = resume_fn, \
	.runtime_idle = idle_fn,
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif

/*
 * Use this if you want to use the same suspend and resume callbacks for
 * suspend to RAM and hibernation.
 */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}

/*
 * Use this for defining a set of PM operations to be used in all situations
 * (system suspend, hibernation or runtime PM).
 * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
 * be different from the corresponding runtime PM callbacks, .runtime_suspend(),
 * and .runtime_resume(), because .runtime_suspend() always works on an already
 * quiescent device, while .suspend() should assume that the device may be doing
 * something when it is called (it should ensure that the device will be
 * quiescent after it has returned).  Therefore it's better to point the "late"
 * suspend and "early" resume callback pointers, .suspend_late() and
 * .resume_early(), to the same routines as .runtime_suspend() and
 * .runtime_resume(), respectively (and analogously for hibernation).
 */
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
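
/*
 * Illustrative sketch (not part of the original header): a driver whose
 * suspend and resume handlers are suitable for both system sleep and runtime
 * PM can define its dev_pm_ops with the helper macros above.  The foo_*
 * names below are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *
 *		foo_quiesce(priv);		(stop I/O, mask interrupts)
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *
 *		foo_reinit(priv);		(restore registers, restart I/O)
 *		return 0;
 *	}
 *
 *	static UNIVERSAL_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume, NULL);
 *
 * and then point the driver structure's .pm field (e.g.
 * platform_driver.driver.pm) at &foo_pm_ops.  If only system sleep support is
 * needed, SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume) can be used
 * instead.
 */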

/**
 * PM_EVENT_ messages
 *
 * The following PM_EVENT_ messages are defined for the internal use of the PM
 * core, in order to provide a mechanism allowing the high level suspend and
 * hibernation code to convey the necessary information to the device PM core
 * code:
 *
 * ON		No transition.
 *
 * FREEZE	System is going to hibernate, call ->prepare() and ->freeze()
 *		for all devices.
 *
 * SUSPEND	System is going to suspend, call ->prepare() and ->suspend()
 *		for all devices.
 *
 * HIBERNATE	Hibernation image has been saved, call ->prepare() and
 *		->poweroff() for all devices.
 *
 * QUIESCE	Contents of main memory are going to be restored from a (loaded)
 *		hibernation image, call ->prepare() and ->freeze() for all
 *		devices.
 *
 * RESUME	System is resuming, call ->resume() and ->complete() for all
 *		devices.
 *
 * THAW		Hibernation image has been created, call ->thaw() and
 *		->complete() for all devices.
 *
 * RESTORE	Contents of main memory have been restored from a hibernation
 *		image, call ->restore() and ->complete() for all devices.
 *
 * RECOVER	Creation of a hibernation image or restoration of the main
 *		memory contents from a hibernation image has failed, call
 *		->thaw() and ->complete() for all devices.
 *
 * The following PM_EVENT_ messages are defined for internal use by
 * kernel subsystems.  They are never issued by the PM core.
 *
 * USER_SUSPEND		Manual selective suspend was issued by userspace.
 *
 * USER_RESUME		Manual selective resume was issued by userspace.
 *
 * REMOTE_WAKEUP	Remote-wakeup request was received from the device.
 *
 * AUTO_SUSPEND		Automatic (device idle) runtime suspend was
 *			initiated by the subsystem.
 *
 * AUTO_RESUME		Automatic (device needed) runtime resume was
 *			requested by a driver.
 */

#define PM_EVENT_INVALID	(-1)
#define PM_EVENT_ON		0x0000
#define PM_EVENT_FREEZE		0x0001
#define PM_EVENT_SUSPEND	0x0002
#define PM_EVENT_HIBERNATE	0x0004
#define PM_EVENT_QUIESCE	0x0008
#define PM_EVENT_RESUME		0x0010
#define PM_EVENT_THAW		0x0020
#define PM_EVENT_RESTORE	0x0040
#define PM_EVENT_RECOVER	0x0080
#define PM_EVENT_USER		0x0100
#define PM_EVENT_REMOTE		0x0200
#define PM_EVENT_AUTO		0x0400

#define PM_EVENT_SLEEP		(PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND	(PM_EVENT_USER | PM_EVENT_SUSPEND)
#define PM_EVENT_USER_RESUME	(PM_EVENT_USER | PM_EVENT_RESUME)
#define PM_EVENT_REMOTE_RESUME	(PM_EVENT_REMOTE | PM_EVENT_RESUME)
#define PM_EVENT_AUTO_SUSPEND	(PM_EVENT_AUTO | PM_EVENT_SUSPEND)
#define PM_EVENT_AUTO_RESUME	(PM_EVENT_AUTO | PM_EVENT_RESUME)

#define PMSG_INVALID	((struct pm_message){ .event = PM_EVENT_INVALID, })
#define PMSG_ON		((struct pm_message){ .event = PM_EVENT_ON, })
#define PMSG_FREEZE	((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE	((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND	((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE	((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_RESUME	((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW	((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE	((struct pm_message){ .event = PM_EVENT_RESTORE, })
#define PMSG_RECOVER	((struct pm_message){ .event = PM_EVENT_RECOVER, })
#define PMSG_USER_SUSPEND	((struct pm_message) \
					{ .event = PM_EVENT_USER_SUSPEND, })
#define PMSG_USER_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_USER_RESUME, })
#define PMSG_REMOTE_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_REMOTE_RESUME, })
#define PMSG_AUTO_SUSPEND	((struct pm_message) \
					{ .event = PM_EVENT_AUTO_SUSPEND, })
#define PMSG_AUTO_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_AUTO_RESUME, })

#define PMSG_IS_AUTO(msg)	(((msg).event & PM_EVENT_AUTO) != 0)
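
/*
 * Illustrative sketch (not part of the original header): subsystems that pass
 * a pm_message_t down to their drivers can use PMSG_IS_AUTO() to tell an
 * automatic (runtime) suspend apart from a system sleep transition.  The
 * function and names below are hypothetical.
 *
 *	static int foo_suspend_common(struct device *dev, pm_message_t msg)
 *	{
 *		if (PMSG_IS_AUTO(msg)) {
 *			(runtime suspend: be conservative, e.g. refuse and
 *			 return -EBUSY if the device is still in use)
 *		} else {
 *			(system sleep: quiesce the device unconditionally)
 *		}
 *		return 0;
 *	}
 */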

/**
 * Device run-time power management status.
 *
 * These status labels are used internally by the PM core to indicate the
 * current status of a device with respect to the PM core operations.  They do
 * not reflect the actual power state of the device or its status as seen by
 * the driver.
 *
 * RPM_ACTIVE		Device is fully operational.  Indicates that the device
 *			bus type's ->runtime_resume() callback has completed
 *			successfully.
 *
 * RPM_SUSPENDED	Device bus type's ->runtime_suspend() callback has
 *			completed successfully.  The device is regarded as
 *			suspended.
 *
 * RPM_RESUMING		Device bus type's ->runtime_resume() callback is being
 *			executed.
 *
 * RPM_SUSPENDING	Device bus type's ->runtime_suspend() callback is being
 *			executed.
 */

enum rpm_status {
	RPM_ACTIVE = 0,
	RPM_RESUMING,
	RPM_SUSPENDED,
	RPM_SUSPENDING,
};

/**
 * Device run-time power management request types.
 *
 * RPM_REQ_NONE		Do nothing.
 *
 * RPM_REQ_IDLE		Run the device bus type's ->runtime_idle() callback
 *
 * RPM_REQ_SUSPEND	Run the device bus type's ->runtime_suspend() callback
 *
 * RPM_REQ_AUTOSUSPEND	Same as RPM_REQ_SUSPEND, but not until the device has
 *			been inactive for as long as power.autosuspend_delay
 *
 * RPM_REQ_RESUME	Run the device bus type's ->runtime_resume() callback
 */

enum rpm_request {
	RPM_REQ_NONE = 0,
	RPM_REQ_IDLE,
	RPM_REQ_SUSPEND,
	RPM_REQ_AUTOSUSPEND,
	RPM_REQ_RESUME,
};

struct wakeup_source;

struct pm_domain_data {
	struct list_head list_node;
	struct device *dev;
};

struct pm_subsys_data {
	spinlock_t lock;
	unsigned int refcount;
#ifdef CONFIG_PM_CLK
	struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
	struct pm_domain_data *domain_data;
#endif
};

struct dev_pm_info {
	pm_message_t		power_state;
	unsigned int		can_wakeup:1;
	unsigned int		async_suspend:1;
	bool			is_prepared:1;	/* Owned by the PM core */
	bool			is_suspended:1;	/* Ditto */
	bool			ignore_children:1;
	bool			early_init:1;	/* Owned by the PM core */
	spinlock_t		lock;
#ifdef CONFIG_PM_SLEEP
	struct list_head	entry;
	struct completion	completion;
	struct wakeup_source	*wakeup;
	bool			wakeup_path:1;
	bool			syscore:1;
#else
	unsigned int		should_wakeup:1;
#endif
#ifdef CONFIG_PM_RUNTIME
	struct timer_list	suspend_timer;
	unsigned long		timer_expires;
	struct work_struct	work;
	wait_queue_head_t	wait_queue;
	atomic_t		usage_count;
	atomic_t		child_count;
	unsigned int		disable_depth:3;
	unsigned int		idle_notification:1;
	unsigned int		request_pending:1;
	unsigned int		deferred_resume:1;
	unsigned int		run_wake:1;
	unsigned int		runtime_auto:1;
	unsigned int		no_callbacks:1;
	unsigned int		irq_safe:1;
	unsigned int		use_autosuspend:1;
	unsigned int		timer_autosuspends:1;
	unsigned int		memalloc_noio:1;
	enum rpm_request	request;
	enum rpm_status		runtime_status;
	int			runtime_error;
	int			autosuspend_delay;
	unsigned long		last_busy;
	unsigned long		active_jiffies;
	unsigned long		suspended_jiffies;
	unsigned long		accounting_timestamp;
#endif
	struct pm_subsys_data	*subsys_data;	/* Owned by the subsystem. */
	struct dev_pm_qos	*qos;
};

extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
extern int dev_pm_put_subsys_data(struct device *dev);

/*
 * Power domains provide callbacks that are executed during system suspend,
 * hibernation, system resume and during runtime PM transitions along with
 * subsystem-level and driver-level callbacks.
 */
struct dev_pm_domain {
	struct dev_pm_ops	ops;
};
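
/*
 * Illustrative sketch (not part of the original header): platform code can
 * provide a set of PM callbacks that the PM core uses in place of the
 * subsystem-level ones by filling in a dev_pm_domain and pointing the
 * device's pm_domain field at it, typically before the device is registered.
 * The foo_domain_* names below are hypothetical.
 *
 *	static struct dev_pm_domain foo_pm_domain = {
 *		.ops = {
 *			.runtime_suspend = foo_domain_runtime_suspend,
 *			.runtime_resume = foo_domain_runtime_resume,
 *			SET_SYSTEM_SLEEP_PM_OPS(foo_domain_suspend,
 *						foo_domain_resume)
 *		},
 *	};
 *
 *	dev->pm_domain = &foo_pm_domain;
 */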

/*
 * The PM_EVENT_ messages are also used by drivers implementing the legacy
 * suspend framework, based on the ->suspend() and ->resume() callbacks common
 * for suspend and hibernation transitions, according to the rules below.
 */

/* Necessary, because several drivers use PM_EVENT_PRETHAW */
#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE

/*
 * One transition is triggered by resume(), after a suspend() call; the
 * message is implicit:
 *
 * ON		Driver starts working again, responding to hardware events
 *		and software requests.  The hardware may have gone through
 *		a power-off reset, or it may have maintained state from the
 *		previous suspend() which the driver will rely on while
 *		resuming.  On most platforms, there are no restrictions on
 *		availability of resources like clocks during resume().
 *
 * Other transitions are triggered by messages sent using suspend().  All
 * these transitions quiesce the driver, so that I/O queues are inactive.
 * That commonly entails turning off IRQs and DMA; there may be rules
 * about how to quiesce that are specific to the bus or the device's type.
 * (For example, network drivers mark the link state.)  Other details may
 * differ according to the message:
 *
 * SUSPEND	Quiesce, enter a low power device state appropriate for
 *		the upcoming system state (such as PCI_D3hot), and enable
 *		wakeup events as appropriate.
 *
 * HIBERNATE	Enter a low power device state appropriate for the hibernation
 *		state (eg. ACPI S4) and enable wakeup events as appropriate.
 *
 * FREEZE	Quiesce operations so that a consistent image can be saved;
 *		but do NOT otherwise enter a low power device state, and do
 *		NOT emit system wakeup events.
 *
 * PRETHAW	Quiesce as if for FREEZE; additionally, prepare for restoring
 *		the system from a snapshot taken after an earlier FREEZE.
 *		Some drivers will need to reset their hardware state instead
 *		of preserving it, to ensure that it's never mistaken for the
 *		state which that earlier snapshot had set up.
 *
 * A minimally power-aware driver treats all messages as SUSPEND, fully
 * reinitializes its device during resume() -- whether or not it was reset
 * during the suspend/resume cycle -- and can't issue wakeup events.
 *
 * More power-aware drivers may also use low power states at runtime as
 * well as during system sleep states like PM_SUSPEND_STANDBY.  They may
 * be able to use wakeup events to exit from runtime low-power states,
 * or from system low-power states such as standby or suspend-to-RAM.
 */
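
/*
 * Illustrative sketch (not part of the original header): a legacy ->suspend()
 * implementation distinguishing the messages described above.  The driver and
 * helper names are hypothetical; the callback signature shown is the legacy
 * platform driver one.
 *
 *	static int foo_suspend(struct platform_device *pdev, pm_message_t state)
 *	{
 *		struct foo *priv = platform_get_drvdata(pdev);
 *
 *		foo_quiesce(priv);			(common to all messages)
 *
 *		switch (state.event) {
 *		case PM_EVENT_SUSPEND:
 *		case PM_EVENT_HIBERNATE:
 *			foo_enter_low_power(priv);	(and enable wakeup)
 *			break;
 *		case PM_EVENT_FREEZE:
 *		case PM_EVENT_PRETHAW:
 *			break;				(stay at full power)
 *		}
 *		return 0;
 *	}
 */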

#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void dpm_resume(pm_message_t state);
extern void dpm_complete(pm_message_t state);

extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);

extern void __suspend_report_result(const char *function, void *fn, int ret);

#define suspend_report_result(fn, ret)					\
	do {								\
		__suspend_report_result(__func__, fn, ret);		\
	} while (0)

extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *));

extern int pm_generic_prepare(struct device *dev);
extern int pm_generic_suspend_late(struct device *dev);
extern int pm_generic_suspend_noirq(struct device *dev);
extern int pm_generic_suspend(struct device *dev);
extern int pm_generic_resume_early(struct device *dev);
extern int pm_generic_resume_noirq(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze_noirq(struct device *dev);
extern int pm_generic_freeze_late(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw_noirq(struct device *dev);
extern int pm_generic_thaw_early(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore_noirq(struct device *dev);
extern int pm_generic_restore_early(struct device *dev);
extern int pm_generic_restore(struct device *dev);
extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);

#else /* !CONFIG_PM_SLEEP */

#define device_pm_lock() do {} while (0)
#define device_pm_unlock() do {} while (0)

static inline int dpm_suspend_start(pm_message_t state)
{
	return 0;
}

#define suspend_report_result(fn, ret)		do {} while (0)

static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
	return 0;
}

static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
}

#define pm_generic_prepare	NULL
#define pm_generic_suspend	NULL
#define pm_generic_resume	NULL
#define pm_generic_freeze	NULL
#define pm_generic_thaw		NULL
#define pm_generic_restore	NULL
#define pm_generic_poweroff	NULL
#define pm_generic_complete	NULL
#endif /* !CONFIG_PM_SLEEP */

/* How to reorder dpm_list after device_move() */
enum dpm_order {
	DPM_ORDER_NONE,
	DPM_ORDER_DEV_AFTER_PARENT,
	DPM_ORDER_PARENT_BEFORE_DEV,
	DPM_ORDER_DEV_LAST,
};

#endif /* _LINUX_PM_H */