PM / Runtime: Combine runtime PM entry points

This patch (as1424) combines the various public entry points for the
runtime PM routines into three simple functions: one for idle, one for
suspend, and one for resume. A new bitflag, RPM_GET_PUT, specifies
whether the entry point should also increment (on resume) or decrement
(on idle) the usage_count field.

The new entry points are named __pm_runtime_idle,
__pm_runtime_suspend, and __pm_runtime_resume, to reflect that they
are trampolines. Simultaneously, the corresponding internal routines
are renamed to rpm_idle, rpm_suspend, and rpm_resume.
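
As a usage sketch (hypothetical foo driver, not part of this patch), the
helper wrappers now funnel into the trampolines with RPM_GET_PUT:

	#include <linux/pm_runtime.h>

	static int foo_do_io(struct device *dev)
	{
		int ret;

		/* Expands to __pm_runtime_resume(dev, RPM_GET_PUT):
		 * increment usage_count, then resume synchronously. */
		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/* usage_count was incremented even on failure. */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... talk to the hardware while it is RPM_ACTIVE ... */

		/* Expands to __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC):
		 * decrement usage_count and, if it reaches zero, queue an
		 * idle notification. */
		pm_runtime_put(dev);
		return 0;
	}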

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>

Authored by Alan Stern, committed by Rafael J. Wysocki (140a6c94, 1bfee5bc)

 drivers/base/power/runtime.c | +74 -120
 include/linux/pm_runtime.h   | +46 -20
 2 files changed, 120 insertions(+), 140 deletions(-)

drivers/base/power/runtime.c:
···
 #include <linux/pm_runtime.h>
 #include <linux/jiffies.h>
 
-static int __pm_runtime_resume(struct device *dev, int rpmflags);
+static int rpm_resume(struct device *dev, int rpmflags);
 
 /**
  * update_pm_runtime_accounting - Update the time accounting of power states
···
 
 
 /**
- * __pm_runtime_idle - Notify device bus type if the device can be suspended.
+ * rpm_idle - Notify device bus type if the device can be suspended.
  * @dev: Device to notify the bus type about.
  * @rpmflags: Flag bits.
  *
···
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_idle(struct device *dev, int rpmflags)
+static int rpm_idle(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	int retval;
···
 }
 
 /**
- * pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
- */
-int pm_runtime_idle(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_idle(dev, 0);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_idle);
-
-/**
- * __pm_runtime_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
  * @rpmflags: Flag bits.
  *
···
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_suspend(struct device *dev, int rpmflags)
+static int rpm_suspend(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	struct device *parent = NULL;
···
 	wake_up_all(&dev->power.wait_queue);
 
 	if (dev->power.deferred_resume) {
-		__pm_runtime_resume(dev, 0);
+		rpm_resume(dev, 0);
 		retval = -EAGAIN;
 		goto out;
 	}
 
 	if (notify)
-		__pm_runtime_idle(dev, 0);
+		rpm_idle(dev, 0);
 
 	if (parent && !parent->power.ignore_children) {
 		spin_unlock_irq(&dev->power.lock);
···
 }
 
 /**
- * pm_runtime_suspend - Carry out run-time suspend of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_suspend(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_suspend(dev, 0);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_suspend);
-
-/**
- * __pm_runtime_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out run-time resume of given device.
  * @dev: Device to resume.
  * @rpmflags: Flag bits.
  *
···
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_resume(struct device *dev, int rpmflags)
+static int rpm_resume(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	struct device *parent = NULL;
···
 	 */
 	if (!parent->power.disable_depth
 	    && !parent->power.ignore_children) {
-		__pm_runtime_resume(parent, 0);
+		rpm_resume(parent, 0);
 		if (parent->power.runtime_status != RPM_ACTIVE)
 			retval = -EBUSY;
 	}
···
 	wake_up_all(&dev->power.wait_queue);
 
 	if (!retval)
-		__pm_runtime_idle(dev, RPM_ASYNC);
+		rpm_idle(dev, RPM_ASYNC);
 
 out:
 	if (parent) {
···
 
 	return retval;
 }
-
-/**
- * pm_runtime_resume - Carry out run-time resume of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_resume(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_resume(dev, 0);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_resume);
 
 /**
  * pm_runtime_work - Universal run-time PM work function.
···
 	case RPM_REQ_NONE:
 		break;
 	case RPM_REQ_IDLE:
-		__pm_runtime_idle(dev, RPM_NOWAIT);
+		rpm_idle(dev, RPM_NOWAIT);
 		break;
 	case RPM_REQ_SUSPEND:
-		__pm_runtime_suspend(dev, RPM_NOWAIT);
+		rpm_suspend(dev, RPM_NOWAIT);
 		break;
 	case RPM_REQ_RESUME:
-		__pm_runtime_resume(dev, RPM_NOWAIT);
+		rpm_resume(dev, RPM_NOWAIT);
 		break;
 	}
 
 out:
 	spin_unlock_irq(&dev->power.lock);
 }
-
-/**
- * pm_request_idle - Submit an idle notification request for given device.
- * @dev: Device to handle.
- */
-int pm_request_idle(struct device *dev)
-{
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_runtime_idle(dev, RPM_ASYNC);
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_request_idle);
 
 /**
  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
···
 	/* If 'expire' is after 'jiffies' we've been called too early. */
 	if (expires > 0 && !time_after(expires, jiffies)) {
 		dev->power.timer_expires = 0;
-		__pm_runtime_suspend(dev, RPM_ASYNC);
+		rpm_suspend(dev, RPM_ASYNC);
 	}
 
 	spin_unlock_irqrestore(&dev->power.lock, flags);
···
 	spin_lock_irqsave(&dev->power.lock, flags);
 
 	if (!delay) {
-		retval = __pm_runtime_suspend(dev, RPM_ASYNC);
+		retval = rpm_suspend(dev, RPM_ASYNC);
 		goto out;
 	}
 
···
 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 
 /**
- * pm_request_resume - Submit a resume request for given device.
- * @dev: Device to resume.
+ * __pm_runtime_idle - Entry point for run-time idle operations.
+ * @dev: Device to send idle notification for.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero.  Then carry out an idle
+ * notification, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int pm_request_resume(struct device *dev)
+int __pm_runtime_idle(struct device *dev, int rpmflags)
+{
+	unsigned long flags;
+	int retval;
+
+	if (rpmflags & RPM_GET_PUT) {
+		if (!atomic_dec_and_test(&dev->power.usage_count))
+			return 0;
+	}
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_idle(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_idle);
+
+/**
+ * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * Carry out a suspend, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ */
+int __pm_runtime_suspend(struct device *dev, int rpmflags)
 {
 	unsigned long flags;
 	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_runtime_resume(dev, RPM_ASYNC);
+	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(pm_request_resume);
+EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
 
 /**
- * __pm_runtime_get - Reference count a device and wake it up, if necessary.
- * @dev: Device to handle.
+ * __pm_runtime_resume - Entry point for run-time resume operations.
+ * @dev: Device to resume.
  * @rpmflags: Flag bits.
  *
- * Increment the usage count of the device and resume it or submit a resume
- * request for it, depending on the RPM_ASYNC flag bit.
+ * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
+ * carry out a resume, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int __pm_runtime_get(struct device *dev, int rpmflags)
+int __pm_runtime_resume(struct device *dev, int rpmflags)
 {
+	unsigned long flags;
 	int retval;
 
-	atomic_inc(&dev->power.usage_count);
-	retval = (rpmflags & RPM_ASYNC) ?
-		pm_request_resume(dev) : pm_runtime_resume(dev);
+	if (rpmflags & RPM_GET_PUT)
+		atomic_inc(&dev->power.usage_count);
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_resume(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(__pm_runtime_get);
-
-/**
- * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
- * @dev: Device to handle.
- * @rpmflags: Flag bits.
- *
- * Decrement the usage count of the device and if it reaches zero, carry out a
- * synchronous idle notification or submit an idle notification request for it,
- * depending on the RPM_ASYNC flag bit.
- */
-int __pm_runtime_put(struct device *dev, int rpmflags)
-{
-	int retval = 0;
-
-	if (atomic_dec_and_test(&dev->power.usage_count))
-		retval = (rpmflags & RPM_ASYNC) ?
-			pm_request_idle(dev) : pm_runtime_idle(dev);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(__pm_runtime_put);
+EXPORT_SYMBOL_GPL(__pm_runtime_resume);
 
 /**
  * __pm_runtime_set_status - Set run-time PM status of a device.
···
 
 	if (dev->power.request_pending
 	    && dev->power.request == RPM_REQ_RESUME) {
-		__pm_runtime_resume(dev, 0);
+		rpm_resume(dev, 0);
 		retval = 1;
 	}
 
···
 	 */
 	pm_runtime_get_noresume(dev);
 
-	__pm_runtime_resume(dev, 0);
+	rpm_resume(dev, 0);
 
 	pm_runtime_put_noidle(dev);
 }
···
 
 	dev->power.runtime_auto = false;
 	atomic_inc(&dev->power.usage_count);
-	__pm_runtime_resume(dev, 0);
+	rpm_resume(dev, 0);
 
 out:
 	spin_unlock_irq(&dev->power.lock);
···
 
 	dev->power.runtime_auto = true;
 	if (atomic_dec_and_test(&dev->power.usage_count))
-		__pm_runtime_idle(dev, 0);
+		rpm_idle(dev, 0);
 
 out:
 	spin_unlock_irq(&dev->power.lock);
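The new kerneldoc above notes that the trampolines may be called in atomic
context when RPM_ASYNC is set: they only take dev->power.lock with irqsave
and queue the actual state change. A minimal sketch of what that permits,
assuming a hypothetical foo driver's interrupt handler (not part of this
patch):

	#include <linux/interrupt.h>
	#include <linux/pm_runtime.h>

	static irqreturn_t foo_irq_handler(int irq, void *data)
	{
		struct device *dev = data;

		/* Expands to __pm_runtime_resume(dev, RPM_ASYNC): queues a
		 * resume request instead of sleeping, so it is safe here. */
		pm_request_resume(dev);
		return IRQ_HANDLED;
	}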
include/linux/pm_runtime.h:
···
 #define RPM_ASYNC		0x01	/* Request is asynchronous */
 #define RPM_NOWAIT		0x02	/* Don't wait for concurrent
 					    state change */
+#define RPM_GET_PUT		0x04	/* Increment/decrement the
+					    usage_count */
 
 #ifdef CONFIG_PM_RUNTIME
 
 extern struct workqueue_struct *pm_wq;
 
-extern int pm_runtime_idle(struct device *dev);
-extern int pm_runtime_suspend(struct device *dev);
-extern int pm_runtime_resume(struct device *dev);
-extern int pm_request_idle(struct device *dev);
+extern int __pm_runtime_idle(struct device *dev, int rpmflags);
+extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
+extern int __pm_runtime_resume(struct device *dev, int rpmflags);
 extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
-extern int pm_request_resume(struct device *dev);
-extern int __pm_runtime_get(struct device *dev, int rpmflags);
-extern int __pm_runtime_put(struct device *dev, int rpmflags);
 extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
 extern int pm_runtime_barrier(struct device *dev);
 extern void pm_runtime_enable(struct device *dev);
···
 
 #else /* !CONFIG_PM_RUNTIME */
 
-static inline int pm_runtime_idle(struct device *dev) { return -ENOSYS; }
-static inline int pm_runtime_suspend(struct device *dev) { return -ENOSYS; }
-static inline int pm_runtime_resume(struct device *dev) { return 0; }
-static inline int pm_request_idle(struct device *dev) { return -ENOSYS; }
+static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
+{
+	return -ENOSYS;
+}
+static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
+{
+	return -ENOSYS;
+}
+static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
+{
+	return 1;
+}
 static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
 	return -ENOSYS;
 }
-static inline int pm_request_resume(struct device *dev) { return 0; }
-static inline int __pm_runtime_get(struct device *dev, int rpmflags)
-{ return 1; }
-static inline int __pm_runtime_put(struct device *dev, int rpmflags)
-{ return 0; }
 static inline int __pm_runtime_set_status(struct device *dev,
 					    unsigned int status) { return 0; }
 static inline int pm_runtime_barrier(struct device *dev) { return 0; }
···
 
 #endif /* !CONFIG_PM_RUNTIME */
 
+static inline int pm_runtime_idle(struct device *dev)
+{
+	return __pm_runtime_idle(dev, 0);
+}
+
+static inline int pm_runtime_suspend(struct device *dev)
+{
+	return __pm_runtime_suspend(dev, 0);
+}
+
+static inline int pm_runtime_resume(struct device *dev)
+{
+	return __pm_runtime_resume(dev, 0);
+}
+
+static inline int pm_request_idle(struct device *dev)
+{
+	return __pm_runtime_idle(dev, RPM_ASYNC);
+}
+
+static inline int pm_request_resume(struct device *dev)
+{
+	return __pm_runtime_resume(dev, RPM_ASYNC);
+}
+
 static inline int pm_runtime_get(struct device *dev)
 {
-	return __pm_runtime_get(dev, RPM_ASYNC);
+	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
 }
 
 static inline int pm_runtime_get_sync(struct device *dev)
 {
-	return __pm_runtime_get(dev, 0);
+	return __pm_runtime_resume(dev, RPM_GET_PUT);
 }
 
 static inline int pm_runtime_put(struct device *dev)
 {
-	return __pm_runtime_put(dev, RPM_ASYNC);
+	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
 }
 
 static inline int pm_runtime_put_sync(struct device *dev)
 {
-	return __pm_runtime_put(dev, 0);
+	return __pm_runtime_idle(dev, RPM_GET_PUT);
 }
 
 static inline int pm_runtime_set_active(struct device *dev)
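
Taken together, the public helpers now expand onto the three trampolines as
follows (summarized from the pm_runtime.h hunks above):

	pm_runtime_idle(dev)      -> __pm_runtime_idle(dev, 0)
	pm_runtime_suspend(dev)   -> __pm_runtime_suspend(dev, 0)
	pm_runtime_resume(dev)    -> __pm_runtime_resume(dev, 0)
	pm_request_idle(dev)      -> __pm_runtime_idle(dev, RPM_ASYNC)
	pm_request_resume(dev)    -> __pm_runtime_resume(dev, RPM_ASYNC)
	pm_runtime_get(dev)       -> __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
	pm_runtime_get_sync(dev)  -> __pm_runtime_resume(dev, RPM_GET_PUT)
	pm_runtime_put(dev)       -> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
	pm_runtime_put_sync(dev)  -> __pm_runtime_idle(dev, RPM_GET_PUT)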