PM / Runtime: Combine runtime PM entry points

This patch (as1424) combines the various public entry points for the
runtime PM routines into three simple functions: one for idle, one for
suspend, and one for resume. A new bitflag, RPM_GET_PUT, specifies
whether the usage_count field should be incremented or decremented.

The new entry points are named __pm_runtime_idle,
__pm_runtime_suspend, and __pm_runtime_resume, to reflect that they
are trampolines. Simultaneously, the corresponding internal routines
are renamed to rpm_idle, rpm_suspend, and rpm_resume.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>

Authored by Alan Stern and committed by Rafael J. Wysocki (140a6c94, 1bfee5bc)
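For quick orientation (an editorial sketch, not part of the patch itself): after this change, every old public helper collapses to a one-line wrapper that selects its behavior through flag bits. The two wrappers below are condensed from the include/linux/pm_runtime.h hunk at the end of this page.

static inline int pm_runtime_get(struct device *dev)
{
	/* RPM_GET_PUT: bump usage_count; RPM_ASYNC: queue the resume */
	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}

static inline int pm_runtime_put_sync(struct device *dev)
{
	/* drop usage_count; if it reaches zero, idle-notify synchronously */
	return __pm_runtime_idle(dev, RPM_GET_PUT);
}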

2 files changed, 120 insertions(+), 140 deletions(-)

drivers/base/power/runtime.c (+74 -120)
···
 #include <linux/pm_runtime.h>
 #include <linux/jiffies.h>

-static int __pm_runtime_resume(struct device *dev, int rpmflags);
+static int rpm_resume(struct device *dev, int rpmflags);

 /**
  * update_pm_runtime_accounting - Update the time accounting of power states
···
 /**
- * __pm_runtime_idle - Notify device bus type if the device can be suspended.
+ * rpm_idle - Notify device bus type if the device can be suspended.
  * @dev: Device to notify the bus type about.
  * @rpmflags: Flag bits.
  *
···
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_idle(struct device *dev, int rpmflags)
+static int rpm_idle(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	int retval;
···
 }

 /**
- * pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
- */
-int pm_runtime_idle(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_idle(dev, 0);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_idle);
-
-/**
- * __pm_runtime_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
  * @rpmflags: Flag bits.
  *
···
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_suspend(struct device *dev, int rpmflags)
+static int rpm_suspend(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	struct device *parent = NULL;
···
 	wake_up_all(&dev->power.wait_queue);

 	if (dev->power.deferred_resume) {
-		__pm_runtime_resume(dev, 0);
+		rpm_resume(dev, 0);
 		retval = -EAGAIN;
 		goto out;
 	}

 	if (notify)
-		__pm_runtime_idle(dev, 0);
+		rpm_idle(dev, 0);

 	if (parent && !parent->power.ignore_children) {
 		spin_unlock_irq(&dev->power.lock);
···
 }

 /**
- * pm_runtime_suspend - Carry out run-time suspend of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_suspend(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_suspend(dev, 0);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_suspend);
-
-/**
- * __pm_runtime_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out run-time resume of given device.
  * @dev: Device to resume.
  * @rpmflags: Flag bits.
  *
···
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_resume(struct device *dev, int rpmflags)
+static int rpm_resume(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	struct device *parent = NULL;
···
 	 */
 	if (!parent->power.disable_depth
 	    && !parent->power.ignore_children) {
-		__pm_runtime_resume(parent, 0);
+		rpm_resume(parent, 0);
 		if (parent->power.runtime_status != RPM_ACTIVE)
 			retval = -EBUSY;
 	}
···
 	wake_up_all(&dev->power.wait_queue);

 	if (!retval)
-		__pm_runtime_idle(dev, RPM_ASYNC);
+		rpm_idle(dev, RPM_ASYNC);

 out:
 	if (parent) {
···

 	return retval;
 }
-
-/**
- * pm_runtime_resume - Carry out run-time resume of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_resume(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_resume(dev, 0);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_resume);

 /**
  * pm_runtime_work - Universal run-time PM work function.
···
 	case RPM_REQ_NONE:
 		break;
 	case RPM_REQ_IDLE:
-		__pm_runtime_idle(dev, RPM_NOWAIT);
+		rpm_idle(dev, RPM_NOWAIT);
 		break;
 	case RPM_REQ_SUSPEND:
-		__pm_runtime_suspend(dev, RPM_NOWAIT);
+		rpm_suspend(dev, RPM_NOWAIT);
 		break;
 	case RPM_REQ_RESUME:
-		__pm_runtime_resume(dev, RPM_NOWAIT);
+		rpm_resume(dev, RPM_NOWAIT);
 		break;
 	}

 out:
 	spin_unlock_irq(&dev->power.lock);
 }
-
-/**
- * pm_request_idle - Submit an idle notification request for given device.
- * @dev: Device to handle.
- */
-int pm_request_idle(struct device *dev)
-{
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_runtime_idle(dev, RPM_ASYNC);
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_request_idle);

 /**
  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
···
 	/* If 'expire' is after 'jiffies' we've been called too early. */
 	if (expires > 0 && !time_after(expires, jiffies)) {
 		dev->power.timer_expires = 0;
-		__pm_runtime_suspend(dev, RPM_ASYNC);
+		rpm_suspend(dev, RPM_ASYNC);
 	}

 	spin_unlock_irqrestore(&dev->power.lock, flags);
···
 	spin_lock_irqsave(&dev->power.lock, flags);

 	if (!delay) {
-		retval = __pm_runtime_suspend(dev, RPM_ASYNC);
+		retval = rpm_suspend(dev, RPM_ASYNC);
 		goto out;
 	}
···
 EXPORT_SYMBOL_GPL(pm_schedule_suspend);

 /**
- * pm_request_resume - Submit a resume request for given device.
- * @dev: Device to resume.
+ * __pm_runtime_idle - Entry point for run-time idle operations.
+ * @dev: Device to send idle notification for.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero.  Then carry out an idle
+ * notification, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int pm_request_resume(struct device *dev)
+int __pm_runtime_idle(struct device *dev, int rpmflags)
+{
+	unsigned long flags;
+	int retval;
+
+	if (rpmflags & RPM_GET_PUT) {
+		if (!atomic_dec_and_test(&dev->power.usage_count))
+			return 0;
+	}
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_idle(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_idle);
+
+/**
+ * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * Carry out a suspend, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ */
+int __pm_runtime_suspend(struct device *dev, int rpmflags)
 {
 	unsigned long flags;
 	int retval;

 	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_runtime_resume(dev, RPM_ASYNC);
+	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);

 	return retval;
 }
-EXPORT_SYMBOL_GPL(pm_request_resume);
+EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

 /**
- * __pm_runtime_get - Reference count a device and wake it up, if necessary.
- * @dev: Device to handle.
+ * __pm_runtime_resume - Entry point for run-time resume operations.
+ * @dev: Device to resume.
  * @rpmflags: Flag bits.
  *
- * Increment the usage count of the device and resume it or submit a resume
- * request for it, depending on the RPM_ASYNC flag bit.
+ * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
+ * carry out a resume, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int __pm_runtime_get(struct device *dev, int rpmflags)
+int __pm_runtime_resume(struct device *dev, int rpmflags)
 {
+	unsigned long flags;
 	int retval;

-	atomic_inc(&dev->power.usage_count);
-	retval = (rpmflags & RPM_ASYNC) ?
-	    pm_request_resume(dev) : pm_runtime_resume(dev);
+	if (rpmflags & RPM_GET_PUT)
+		atomic_inc(&dev->power.usage_count);
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_resume(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);

 	return retval;
 }
-EXPORT_SYMBOL_GPL(__pm_runtime_get);
-
-/**
- * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
- * @dev: Device to handle.
- * @rpmflags: Flag bits.
- *
- * Decrement the usage count of the device and if it reaches zero, carry out a
- * synchronous idle notification or submit an idle notification request for it,
- * depending on the RPM_ASYNC flag bit.
- */
-int __pm_runtime_put(struct device *dev, int rpmflags)
-{
-	int retval = 0;
-
-	if (atomic_dec_and_test(&dev->power.usage_count))
-		retval = (rpmflags & RPM_ASYNC) ?
-		    pm_request_idle(dev) : pm_runtime_idle(dev);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(__pm_runtime_put);
+EXPORT_SYMBOL_GPL(__pm_runtime_resume);

 /**
  * __pm_runtime_set_status - Set run-time PM status of a device.
···
 	if (dev->power.request_pending
 	    && dev->power.request == RPM_REQ_RESUME) {
-		__pm_runtime_resume(dev, 0);
+		rpm_resume(dev, 0);
 		retval = 1;
 	}
···
 	 */
 	pm_runtime_get_noresume(dev);

-	__pm_runtime_resume(dev, 0);
+	rpm_resume(dev, 0);

 	pm_runtime_put_noidle(dev);
 }
···
 	dev->power.runtime_auto = false;
 	atomic_inc(&dev->power.usage_count);
-	__pm_runtime_resume(dev, 0);
+	rpm_resume(dev, 0);

 out:
 	spin_unlock_irq(&dev->power.lock);
···
 	dev->power.runtime_auto = true;
 	if (atomic_dec_and_test(&dev->power.usage_count))
-		__pm_runtime_idle(dev, 0);
+		rpm_idle(dev, 0);

 out:
 	spin_unlock_irq(&dev->power.lock);
include/linux/pm_runtime.h (+46 -20)
···
 #define RPM_ASYNC		0x01	/* Request is asynchronous */
 #define RPM_NOWAIT		0x02	/* Don't wait for concurrent
					   state change */
+#define RPM_GET_PUT		0x04	/* Increment/decrement the
+					   usage_count */

 #ifdef CONFIG_PM_RUNTIME

 extern struct workqueue_struct *pm_wq;

-extern int pm_runtime_idle(struct device *dev);
-extern int pm_runtime_suspend(struct device *dev);
-extern int pm_runtime_resume(struct device *dev);
-extern int pm_request_idle(struct device *dev);
+extern int __pm_runtime_idle(struct device *dev, int rpmflags);
+extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
+extern int __pm_runtime_resume(struct device *dev, int rpmflags);
 extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
-extern int pm_request_resume(struct device *dev);
-extern int __pm_runtime_get(struct device *dev, int rpmflags);
-extern int __pm_runtime_put(struct device *dev, int rpmflags);
 extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
 extern int pm_runtime_barrier(struct device *dev);
 extern void pm_runtime_enable(struct device *dev);
···
 #else /* !CONFIG_PM_RUNTIME */

-static inline int pm_runtime_idle(struct device *dev) { return -ENOSYS; }
-static inline int pm_runtime_suspend(struct device *dev) { return -ENOSYS; }
-static inline int pm_runtime_resume(struct device *dev) { return 0; }
-static inline int pm_request_idle(struct device *dev) { return -ENOSYS; }
+static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
+{
+	return -ENOSYS;
+}
+static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
+{
+	return -ENOSYS;
+}
+static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
+{
+	return 1;
+}
 static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
 	return -ENOSYS;
 }
-static inline int pm_request_resume(struct device *dev) { return 0; }
-static inline int __pm_runtime_get(struct device *dev, int rpmflags)
-	{ return 1; }
-static inline int __pm_runtime_put(struct device *dev, int rpmflags)
-	{ return 0; }
 static inline int __pm_runtime_set_status(struct device *dev,
					    unsigned int status) { return 0; }
 static inline int pm_runtime_barrier(struct device *dev) { return 0; }
···
 #endif /* !CONFIG_PM_RUNTIME */

+static inline int pm_runtime_idle(struct device *dev)
+{
+	return __pm_runtime_idle(dev, 0);
+}
+
+static inline int pm_runtime_suspend(struct device *dev)
+{
+	return __pm_runtime_suspend(dev, 0);
+}
+
+static inline int pm_runtime_resume(struct device *dev)
+{
+	return __pm_runtime_resume(dev, 0);
+}
+
+static inline int pm_request_idle(struct device *dev)
+{
+	return __pm_runtime_idle(dev, RPM_ASYNC);
+}
+
+static inline int pm_request_resume(struct device *dev)
+{
+	return __pm_runtime_resume(dev, RPM_ASYNC);
+}
+
 static inline int pm_runtime_get(struct device *dev)
 {
-	return __pm_runtime_get(dev, RPM_ASYNC);
+	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
 }

 static inline int pm_runtime_get_sync(struct device *dev)
 {
-	return __pm_runtime_get(dev, 0);
+	return __pm_runtime_resume(dev, RPM_GET_PUT);
 }

 static inline int pm_runtime_put(struct device *dev)
 {
-	return __pm_runtime_put(dev, RPM_ASYNC);
+	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
 }

 static inline int pm_runtime_put_sync(struct device *dev)
 {
-	return __pm_runtime_put(dev, 0);
+	return __pm_runtime_idle(dev, RPM_GET_PUT);
 }

 static inline int pm_runtime_set_active(struct device *dev)
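Driver-facing behavior is unchanged by the consolidation. A minimal, hypothetical consumer (example_xfer() and its surrounding driver are illustrative, not part of this patch) keeps calling the familiar helpers, which now funnel into the trampolines with RPM_GET_PUT set:

#include <linux/pm_runtime.h>

/* Hypothetical driver I/O path: pm_runtime_get_sync() now resolves to
 * __pm_runtime_resume(dev, RPM_GET_PUT) and pm_runtime_put_sync() to
 * __pm_runtime_idle(dev, RPM_GET_PUT). */
static int example_xfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* inc usage_count, resume now */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop the reference taken above */
		return ret;
	}

	/* ... access the hardware while the device is RPM_ACTIVE ... */

	return pm_runtime_put_sync(dev);	/* dec usage_count, maybe idle */
}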