PM / Runtime: Merge synchronous and async runtime routines

This patch (as1423) merges the asynchronous routines
__pm_request_idle(), __pm_request_suspend(), and __pm_request_resume()
with their synchronous counterparts. The RPM_ASYNC bitflag argument
serves to indicate what sort of operation to perform.

In the course of performing this merger, it became apparent that the
various functions don't all behave consistenly with regard to error
reporting and cancellation of outstanding requests. A new routine,
rpm_check_suspend_allowed(), was written to centralize much of the
testing, and the other functions were revised to follow a simple
algorithm:

If the operation is disallowed because of the device's
settings or current state, return an error.

Cancel pending or scheduled requests of lower priority.

Schedule, queue, or perform the desired operation.

A few special cases and exceptions are noted in comments.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>

authored by Alan Stern and committed by Rafael J. Wysocki 1bfee5bc 3f9af051

+142 -237
drivers/base/power/runtime.c
··· 2 * drivers/base/power/runtime.c - Helper functions for device run-time PM 3 * 4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 5 * 6 * This file is released under the GPLv2. 7 */ ··· 12 #include <linux/jiffies.h> 13 14 static int __pm_runtime_resume(struct device *dev, int rpmflags); 15 - static int __pm_request_idle(struct device *dev); 16 - static int __pm_request_resume(struct device *dev); 17 18 /** 19 * update_pm_runtime_accounting - Update the time accounting of power states ··· 78 } 79 80 /** 81 - * __pm_runtime_idle - Notify device bus type if the device can be suspended. 82 - * @dev: Device to notify the bus type about. 83 - * 84 - * This function must be called under dev->power.lock with interrupts disabled. 85 */ 86 - static int __pm_runtime_idle(struct device *dev) 87 - __releases(&dev->power.lock) __acquires(&dev->power.lock) 88 { 89 int retval = 0; 90 91 if (dev->power.runtime_error) 92 retval = -EINVAL; 93 - else if (dev->power.idle_notification) 94 - retval = -EINPROGRESS; 95 else if (atomic_read(&dev->power.usage_count) > 0 96 - || dev->power.disable_depth > 0 97 - || dev->power.runtime_status != RPM_ACTIVE) 98 retval = -EAGAIN; 99 else if (!pm_children_suspended(dev)) 100 retval = -EBUSY; 101 if (retval) 102 goto out; 103 104 - if (dev->power.request_pending) { 105 - /* 106 - * If an idle notification request is pending, cancel it. Any 107 - * other pending request takes precedence over us. 108 - */ 109 - if (dev->power.request == RPM_REQ_IDLE) { 110 - dev->power.request = RPM_REQ_NONE; 111 - } else if (dev->power.request != RPM_REQ_NONE) { 112 - retval = -EAGAIN; 113 - goto out; 114 } 115 } 116 117 dev->power.idle_notification = true; ··· 197 int retval; 198 199 spin_lock_irq(&dev->power.lock); 200 - retval = __pm_runtime_idle(dev); 201 spin_unlock_irq(&dev->power.lock); 202 203 return retval; ··· 209 * @dev: Device to suspend. 210 * @rpmflags: Flag bits. 
211 * 212 - * Check if the device can be suspended and run the ->runtime_suspend() callback 213 - * provided by its bus type. If another suspend has been started earlier, 214 - * either return immediately or wait for it to finish, depending on the 215 - * RPM_NOWAIT flag. If an idle notification or suspend request is pending or 216 - * scheduled, cancel it. 217 * 218 * This function must be called under dev->power.lock with interrupts disabled. 219 */ ··· 225 { 226 struct device *parent = NULL; 227 bool notify = false; 228 - int retval = 0; 229 230 dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); 231 232 repeat: 233 - if (dev->power.runtime_error) { 234 - retval = -EINVAL; 235 - goto out; 236 - } 237 238 - /* Pending resume requests take precedence over us. */ 239 - if (dev->power.request_pending 240 - && dev->power.request == RPM_REQ_RESUME) { 241 retval = -EAGAIN; 242 goto out; 243 - } 244 245 /* Other scheduled or pending requests need to be canceled. */ 246 pm_runtime_cancel_pending(dev); 247 248 - if (dev->power.runtime_status == RPM_SUSPENDED) 249 - retval = 1; 250 - else if (dev->power.runtime_status == RPM_RESUMING 251 - || dev->power.disable_depth > 0 252 - || atomic_read(&dev->power.usage_count) > 0) 253 - retval = -EAGAIN; 254 - else if (!pm_children_suspended(dev)) 255 - retval = -EBUSY; 256 - if (retval) 257 - goto out; 258 - 259 if (dev->power.runtime_status == RPM_SUSPENDING) { 260 DEFINE_WAIT(wait); 261 262 - if (rpmflags & RPM_NOWAIT) { 263 retval = -EINPROGRESS; 264 goto out; 265 } ··· 268 } 269 finish_wait(&dev->power.wait_queue, &wait); 270 goto repeat; 271 } 272 273 __update_runtime_status(dev, RPM_SUSPENDING); ··· 312 313 if (retval) { 314 __update_runtime_status(dev, RPM_ACTIVE); 315 if (retval == -EAGAIN || retval == -EBUSY) { 316 if (dev->power.timer_expires == 0) 317 notify = true; ··· 338 } 339 340 if (notify) 341 - __pm_runtime_idle(dev); 342 343 if (parent && !parent->power.ignore_children) { 344 spin_unlock_irq(&dev->power.lock); 
··· 375 * @dev: Device to resume. 376 * @rpmflags: Flag bits. 377 * 378 - * Check if the device can be woken up and run the ->runtime_resume() callback 379 - * provided by its bus type. If another resume has been started earlier, 380 - * either return imediately or wait for it to finish, depending on the 381 - * RPM_NOWAIT flag. If there's a suspend running in parallel with this 382 - * function, either tell the other process to resume after suspending 383 - * (deferred_resume) or wait for it to finish, depending on the RPM_NOWAIT 384 - * flag. Cancel any scheduled or pending requests. 385 * 386 * This function must be called under dev->power.lock with interrupts disabled. 387 */ ··· 396 dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); 397 398 repeat: 399 - if (dev->power.runtime_error) { 400 retval = -EINVAL; 401 - goto out; 402 - } 403 - 404 - pm_runtime_cancel_pending(dev); 405 - 406 - if (dev->power.runtime_status == RPM_ACTIVE) 407 - retval = 1; 408 else if (dev->power.disable_depth > 0) 409 retval = -EAGAIN; 410 if (retval) 411 goto out; 412 413 if (dev->power.runtime_status == RPM_RESUMING 414 || dev->power.runtime_status == RPM_SUSPENDING) { 415 DEFINE_WAIT(wait); 416 417 - if (rpmflags & RPM_NOWAIT) { 418 if (dev->power.runtime_status == RPM_SUSPENDING) 419 dev->power.deferred_resume = true; 420 - retval = -EINPROGRESS; 421 goto out; 422 } 423 ··· 439 } 440 finish_wait(&dev->power.wait_queue, &wait); 441 goto repeat; 442 } 443 444 if (!parent && dev->parent) { ··· 521 wake_up_all(&dev->power.wait_queue); 522 523 if (!retval) 524 - __pm_request_idle(dev); 525 526 out: 527 if (parent) { ··· 578 case RPM_REQ_NONE: 579 break; 580 case RPM_REQ_IDLE: 581 - __pm_runtime_idle(dev); 582 break; 583 case RPM_REQ_SUSPEND: 584 __pm_runtime_suspend(dev, RPM_NOWAIT); ··· 593 } 594 595 /** 596 - * __pm_request_idle - Submit an idle notification request for given device. 597 - * @dev: Device to handle. 
598 - * 599 - * Check if the device's run-time PM status is correct for suspending the device 600 - * and queue up a request to run __pm_runtime_idle() for it. 601 - * 602 - * This function must be called under dev->power.lock with interrupts disabled. 603 - */ 604 - static int __pm_request_idle(struct device *dev) 605 - { 606 - int retval = 0; 607 - 608 - if (dev->power.runtime_error) 609 - retval = -EINVAL; 610 - else if (atomic_read(&dev->power.usage_count) > 0 611 - || dev->power.disable_depth > 0 612 - || dev->power.runtime_status == RPM_SUSPENDED 613 - || dev->power.runtime_status == RPM_SUSPENDING) 614 - retval = -EAGAIN; 615 - else if (!pm_children_suspended(dev)) 616 - retval = -EBUSY; 617 - if (retval) 618 - return retval; 619 - 620 - if (dev->power.request_pending) { 621 - /* Any requests other then RPM_REQ_IDLE take precedence. */ 622 - if (dev->power.request == RPM_REQ_NONE) 623 - dev->power.request = RPM_REQ_IDLE; 624 - else if (dev->power.request != RPM_REQ_IDLE) 625 - retval = -EAGAIN; 626 - return retval; 627 - } 628 - 629 - dev->power.request = RPM_REQ_IDLE; 630 - dev->power.request_pending = true; 631 - queue_work(pm_wq, &dev->power.work); 632 - 633 - return retval; 634 - } 635 - 636 - /** 637 * pm_request_idle - Submit an idle notification request for given device. 638 * @dev: Device to handle. 639 */ ··· 602 int retval; 603 604 spin_lock_irqsave(&dev->power.lock, flags); 605 - retval = __pm_request_idle(dev); 606 spin_unlock_irqrestore(&dev->power.lock, flags); 607 608 return retval; ··· 610 EXPORT_SYMBOL_GPL(pm_request_idle); 611 612 /** 613 - * __pm_request_suspend - Submit a suspend request for given device. 614 - * @dev: Device to suspend. 615 - * 616 - * This function must be called under dev->power.lock with interrupts disabled. 
617 - */ 618 - static int __pm_request_suspend(struct device *dev) 619 - { 620 - int retval = 0; 621 - 622 - if (dev->power.runtime_error) 623 - return -EINVAL; 624 - 625 - if (dev->power.runtime_status == RPM_SUSPENDED) 626 - retval = 1; 627 - else if (atomic_read(&dev->power.usage_count) > 0 628 - || dev->power.disable_depth > 0) 629 - retval = -EAGAIN; 630 - else if (dev->power.runtime_status == RPM_SUSPENDING) 631 - retval = -EINPROGRESS; 632 - else if (!pm_children_suspended(dev)) 633 - retval = -EBUSY; 634 - if (retval < 0) 635 - return retval; 636 - 637 - pm_runtime_deactivate_timer(dev); 638 - 639 - if (dev->power.request_pending) { 640 - /* 641 - * Pending resume requests take precedence over us, but we can 642 - * overtake any other pending request. 643 - */ 644 - if (dev->power.request == RPM_REQ_RESUME) 645 - retval = -EAGAIN; 646 - else if (dev->power.request != RPM_REQ_SUSPEND) 647 - dev->power.request = retval ? 648 - RPM_REQ_NONE : RPM_REQ_SUSPEND; 649 - return retval; 650 - } else if (retval) { 651 - return retval; 652 - } 653 - 654 - dev->power.request = RPM_REQ_SUSPEND; 655 - dev->power.request_pending = true; 656 - queue_work(pm_wq, &dev->power.work); 657 - 658 - return 0; 659 - } 660 - 661 - /** 662 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend(). 663 * @data: Device pointer passed by pm_schedule_suspend(). 664 * 665 - * Check if the time is right and execute __pm_request_suspend() in that case. 666 */ 667 static void pm_suspend_timer_fn(unsigned long data) 668 { ··· 627 /* If 'expire' is after 'jiffies' we've been called too early. 
*/ 628 if (expires > 0 && !time_after(expires, jiffies)) { 629 dev->power.timer_expires = 0; 630 - __pm_request_suspend(dev); 631 } 632 633 spin_unlock_irqrestore(&dev->power.lock, flags); ··· 641 int pm_schedule_suspend(struct device *dev, unsigned int delay) 642 { 643 unsigned long flags; 644 - int retval = 0; 645 646 spin_lock_irqsave(&dev->power.lock, flags); 647 648 - if (dev->power.runtime_error) { 649 - retval = -EINVAL; 650 - goto out; 651 - } 652 - 653 if (!delay) { 654 - retval = __pm_request_suspend(dev); 655 goto out; 656 } 657 658 - pm_runtime_deactivate_timer(dev); 659 - 660 - if (dev->power.request_pending) { 661 - /* 662 - * Pending resume requests take precedence over us, but any 663 - * other pending requests have to be canceled. 664 - */ 665 - if (dev->power.request == RPM_REQ_RESUME) { 666 - retval = -EAGAIN; 667 - goto out; 668 - } 669 - dev->power.request = RPM_REQ_NONE; 670 - } 671 - 672 - if (dev->power.runtime_status == RPM_SUSPENDED) 673 - retval = 1; 674 - else if (atomic_read(&dev->power.usage_count) > 0 675 - || dev->power.disable_depth > 0) 676 - retval = -EAGAIN; 677 - else if (!pm_children_suspended(dev)) 678 - retval = -EBUSY; 679 if (retval) 680 goto out; 681 682 dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); 683 - if (!dev->power.timer_expires) 684 - dev->power.timer_expires = 1; 685 mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); 686 687 out: ··· 671 /** 672 * pm_request_resume - Submit a resume request for given device. 673 * @dev: Device to resume. 674 - * 675 - * This function must be called under dev->power.lock with interrupts disabled. 
676 - */ 677 - static int __pm_request_resume(struct device *dev) 678 - { 679 - int retval = 0; 680 - 681 - if (dev->power.runtime_error) 682 - return -EINVAL; 683 - 684 - if (dev->power.runtime_status == RPM_ACTIVE) 685 - retval = 1; 686 - else if (dev->power.runtime_status == RPM_RESUMING) 687 - retval = -EINPROGRESS; 688 - else if (dev->power.disable_depth > 0) 689 - retval = -EAGAIN; 690 - if (retval < 0) 691 - return retval; 692 - 693 - pm_runtime_deactivate_timer(dev); 694 - 695 - if (dev->power.runtime_status == RPM_SUSPENDING) { 696 - dev->power.deferred_resume = true; 697 - return retval; 698 - } 699 - if (dev->power.request_pending) { 700 - /* If non-resume request is pending, we can overtake it. */ 701 - dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME; 702 - return retval; 703 - } 704 - if (retval) 705 - return retval; 706 - 707 - dev->power.request = RPM_REQ_RESUME; 708 - dev->power.request_pending = true; 709 - queue_work(pm_wq, &dev->power.work); 710 - 711 - return retval; 712 - } 713 - 714 - /** 715 - * pm_request_resume - Submit a resume request for given device. 716 - * @dev: Device to resume. 717 */ 718 int pm_request_resume(struct device *dev) 719 { ··· 678 int retval; 679 680 spin_lock_irqsave(&dev->power.lock, flags); 681 - retval = __pm_request_resume(dev); 682 spin_unlock_irqrestore(&dev->power.lock, flags); 683 684 return retval; ··· 993 994 dev->power.runtime_auto = true; 995 if (atomic_dec_and_test(&dev->power.usage_count)) 996 - __pm_runtime_idle(dev); 997 998 out: 999 spin_unlock_irq(&dev->power.lock);
··· 2 * drivers/base/power/runtime.c - Helper functions for device run-time PM 3 * 4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 5 + * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu> 6 * 7 * This file is released under the GPLv2. 8 */ ··· 11 #include <linux/jiffies.h> 12 13 static int __pm_runtime_resume(struct device *dev, int rpmflags); 14 15 /** 16 * update_pm_runtime_accounting - Update the time accounting of power states ··· 79 } 80 81 /** 82 + * rpm_check_suspend_allowed - Test whether a device may be suspended. 83 + * @dev: Device to test. 84 */ 85 + static int rpm_check_suspend_allowed(struct device *dev) 86 { 87 int retval = 0; 88 89 if (dev->power.runtime_error) 90 retval = -EINVAL; 91 else if (atomic_read(&dev->power.usage_count) > 0 92 + || dev->power.disable_depth > 0) 93 retval = -EAGAIN; 94 else if (!pm_children_suspended(dev)) 95 retval = -EBUSY; 96 + 97 + /* Pending resume requests take precedence over suspends. */ 98 + else if ((dev->power.deferred_resume 99 + && dev->power.status == RPM_SUSPENDING) 100 + || (dev->power.request_pending 101 + && dev->power.request == RPM_REQ_RESUME)) 102 + retval = -EAGAIN; 103 + else if (dev->power.runtime_status == RPM_SUSPENDED) 104 + retval = 1; 105 + 106 + return retval; 107 + } 108 + 109 + 110 + /** 111 + * __pm_runtime_idle - Notify device bus type if the device can be suspended. 112 + * @dev: Device to notify the bus type about. 113 + * @rpmflags: Flag bits. 114 + * 115 + * Check if the device's run-time PM status allows it to be suspended. If 116 + * another idle notification has been started earlier, return immediately. If 117 + * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise 118 + * run the ->runtime_idle() callback directly. 119 + * 120 + * This function must be called under dev->power.lock with interrupts disabled. 
121 + */ 122 + static int __pm_runtime_idle(struct device *dev, int rpmflags) 123 + __releases(&dev->power.lock) __acquires(&dev->power.lock) 124 + { 125 + int retval; 126 + 127 + retval = rpm_check_suspend_allowed(dev); 128 + if (retval < 0) 129 + ; /* Conditions are wrong. */ 130 + 131 + /* Idle notifications are allowed only in the RPM_ACTIVE state. */ 132 + else if (dev->power.runtime_status != RPM_ACTIVE) 133 + retval = -EAGAIN; 134 + 135 + /* 136 + * Any pending request other than an idle notification takes 137 + * precedence over us, except that the timer may be running. 138 + */ 139 + else if (dev->power.request_pending && 140 + dev->power.request > RPM_REQ_IDLE) 141 + retval = -EAGAIN; 142 + 143 + /* Act as though RPM_NOWAIT is always set. */ 144 + else if (dev->power.idle_notification) 145 + retval = -EINPROGRESS; 146 if (retval) 147 goto out; 148 149 + /* Pending requests need to be canceled. */ 150 + dev->power.request = RPM_REQ_NONE; 151 + 152 + /* Carry out an asynchronous or a synchronous idle notification. */ 153 + if (rpmflags & RPM_ASYNC) { 154 + dev->power.request = RPM_REQ_IDLE; 155 + if (!dev->power.request_pending) { 156 + dev->power.request_pending = true; 157 + queue_work(pm_wq, &dev->power.work); 158 } 159 + goto out; 160 } 161 162 dev->power.idle_notification = true; ··· 154 int retval; 155 156 spin_lock_irq(&dev->power.lock); 157 + retval = __pm_runtime_idle(dev, 0); 158 spin_unlock_irq(&dev->power.lock); 159 160 return retval; ··· 166 * @dev: Device to suspend. 167 * @rpmflags: Flag bits. 168 * 169 + * Check if the device's run-time PM status allows it to be suspended. If 170 + * another suspend has been started earlier, either return immediately or wait 171 + * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a 172 + * pending idle notification. If the RPM_ASYNC flag is set then queue a 173 + * suspend request; otherwise run the ->runtime_suspend() callback directly. 
174 + * If a deferred resume was requested while the callback was running then carry 175 + * it out; otherwise send an idle notification for the device (if the suspend 176 + * failed) or for its parent (if the suspend succeeded). 177 * 178 * This function must be called under dev->power.lock with interrupts disabled. 179 */ ··· 179 { 180 struct device *parent = NULL; 181 bool notify = false; 182 + int retval; 183 184 dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); 185 186 repeat: 187 + retval = rpm_check_suspend_allowed(dev); 188 189 + if (retval < 0) 190 + ; /* Conditions are wrong. */ 191 + 192 + /* Synchronous suspends are not allowed in the RPM_RESUMING state. */ 193 + else if (dev->power.runtime_status == RPM_RESUMING && 194 + !(rpmflags & RPM_ASYNC)) 195 retval = -EAGAIN; 196 + if (retval) 197 goto out; 198 199 /* Other scheduled or pending requests need to be canceled. */ 200 pm_runtime_cancel_pending(dev); 201 202 if (dev->power.runtime_status == RPM_SUSPENDING) { 203 DEFINE_WAIT(wait); 204 205 + if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { 206 retval = -EINPROGRESS; 207 goto out; 208 } ··· 233 } 234 finish_wait(&dev->power.wait_queue, &wait); 235 goto repeat; 236 + } 237 + 238 + /* Carry out an asynchronous or a synchronous suspend. */ 239 + if (rpmflags & RPM_ASYNC) { 240 + dev->power.request = RPM_REQ_SUSPEND; 241 + if (!dev->power.request_pending) { 242 + dev->power.request_pending = true; 243 + queue_work(pm_wq, &dev->power.work); 244 + } 245 + goto out; 246 } 247 248 __update_runtime_status(dev, RPM_SUSPENDING); ··· 267 268 if (retval) { 269 __update_runtime_status(dev, RPM_ACTIVE); 270 + dev->power.deferred_resume = 0; 271 if (retval == -EAGAIN || retval == -EBUSY) { 272 if (dev->power.timer_expires == 0) 273 notify = true; ··· 292 } 293 294 if (notify) 295 + __pm_runtime_idle(dev, 0); 296 297 if (parent && !parent->power.ignore_children) { 298 spin_unlock_irq(&dev->power.lock); ··· 329 * @dev: Device to resume. 330 * @rpmflags: Flag bits. 
331 * 332 + * Check if the device's run-time PM status allows it to be resumed. Cancel 333 + * any scheduled or pending requests. If another resume has been started 334 + * earlier, either return imediately or wait for it to finish, depending on the 335 + * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in 336 + * parallel with this function, either tell the other process to resume after 337 + * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC 338 + * flag is set then queue a resume request; otherwise run the 339 + * ->runtime_resume() callback directly. Queue an idle notification for the 340 + * device if the resume succeeded. 341 * 342 * This function must be called under dev->power.lock with interrupts disabled. 343 */ ··· 348 dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); 349 350 repeat: 351 + if (dev->power.runtime_error) 352 retval = -EINVAL; 353 else if (dev->power.disable_depth > 0) 354 retval = -EAGAIN; 355 if (retval) 356 goto out; 357 358 + /* Other scheduled or pending requests need to be canceled. */ 359 + pm_runtime_cancel_pending(dev); 360 + 361 + if (dev->power.runtime_status == RPM_ACTIVE) { 362 + retval = 1; 363 + goto out; 364 + } 365 + 366 if (dev->power.runtime_status == RPM_RESUMING 367 || dev->power.runtime_status == RPM_SUSPENDING) { 368 DEFINE_WAIT(wait); 369 370 + if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { 371 if (dev->power.runtime_status == RPM_SUSPENDING) 372 dev->power.deferred_resume = true; 373 + else 374 + retval = -EINPROGRESS; 375 goto out; 376 } 377 ··· 389 } 390 finish_wait(&dev->power.wait_queue, &wait); 391 goto repeat; 392 + } 393 + 394 + /* Carry out an asynchronous or a synchronous resume. 
*/ 395 + if (rpmflags & RPM_ASYNC) { 396 + dev->power.request = RPM_REQ_RESUME; 397 + if (!dev->power.request_pending) { 398 + dev->power.request_pending = true; 399 + queue_work(pm_wq, &dev->power.work); 400 + } 401 + retval = 0; 402 + goto out; 403 } 404 405 if (!parent && dev->parent) { ··· 460 wake_up_all(&dev->power.wait_queue); 461 462 if (!retval) 463 + __pm_runtime_idle(dev, RPM_ASYNC); 464 465 out: 466 if (parent) { ··· 517 case RPM_REQ_NONE: 518 break; 519 case RPM_REQ_IDLE: 520 + __pm_runtime_idle(dev, RPM_NOWAIT); 521 break; 522 case RPM_REQ_SUSPEND: 523 __pm_runtime_suspend(dev, RPM_NOWAIT); ··· 532 } 533 534 /** 535 * pm_request_idle - Submit an idle notification request for given device. 536 * @dev: Device to handle. 537 */ ··· 582 int retval; 583 584 spin_lock_irqsave(&dev->power.lock, flags); 585 + retval = __pm_runtime_idle(dev, RPM_ASYNC); 586 spin_unlock_irqrestore(&dev->power.lock, flags); 587 588 return retval; ··· 590 EXPORT_SYMBOL_GPL(pm_request_idle); 591 592 /** 593 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend(). 594 * @data: Device pointer passed by pm_schedule_suspend(). 595 * 596 + * Check if the time is right and queue a suspend request. 597 */ 598 static void pm_suspend_timer_fn(unsigned long data) 599 { ··· 656 /* If 'expire' is after 'jiffies' we've been called too early. */ 657 if (expires > 0 && !time_after(expires, jiffies)) { 658 dev->power.timer_expires = 0; 659 + __pm_runtime_suspend(dev, RPM_ASYNC); 660 } 661 662 spin_unlock_irqrestore(&dev->power.lock, flags); ··· 670 int pm_schedule_suspend(struct device *dev, unsigned int delay) 671 { 672 unsigned long flags; 673 + int retval; 674 675 spin_lock_irqsave(&dev->power.lock, flags); 676 677 if (!delay) { 678 + retval = __pm_runtime_suspend(dev, RPM_ASYNC); 679 goto out; 680 } 681 682 + retval = rpm_check_suspend_allowed(dev); 683 if (retval) 684 goto out; 685 686 + /* Other scheduled or pending requests need to be canceled. 
*/ 687 + pm_runtime_cancel_pending(dev); 688 + 689 dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); 690 + dev->power.timer_expires += !dev->power.timer_expires; 691 mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); 692 693 out: ··· 723 /** 724 * pm_request_resume - Submit a resume request for given device. 725 * @dev: Device to resume. 726 */ 727 int pm_request_resume(struct device *dev) 728 { ··· 773 int retval; 774 775 spin_lock_irqsave(&dev->power.lock, flags); 776 + retval = __pm_runtime_resume(dev, RPM_ASYNC); 777 spin_unlock_irqrestore(&dev->power.lock, flags); 778 779 return retval; ··· 1088 1089 dev->power.runtime_auto = true; 1090 if (atomic_dec_and_test(&dev->power.usage_count)) 1091 + __pm_runtime_idle(dev, 0); 1092 1093 out: 1094 spin_unlock_irq(&dev->power.lock);