PM / Runtime: Replace boolean arguments with bitflags

The "from_wq" argument in __pm_runtime_suspend() and
__pm_runtime_resume() supposedly indicates whether or not the function
was called by the PM workqueue thread, but in fact it isn't always
used this way. It really indicates whether or not the function should
return early if the requested operation is already in progress.

Along with this badly-named boolean argument, later patches in this
series will add several other boolean arguments to these functions and
others. Therefore this patch (as1422) begins the conversion process
by replacing from_wq with a bitflag argument. The same bitflags are
also used in __pm_runtime_get() and __pm_runtime_put(), where they
indicate whether or not the operation should be asynchronous.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>

authored by Alan Stern and committed by Rafael J. Wysocki 3f9af051 4769373c

+54 -44
+39 -36
drivers/base/power/runtime.c
··· 10 #include <linux/pm_runtime.h> 11 #include <linux/jiffies.h> 12 13 - static int __pm_runtime_resume(struct device *dev, bool from_wq); 14 static int __pm_request_idle(struct device *dev); 15 static int __pm_request_resume(struct device *dev); 16 ··· 164 /** 165 * __pm_runtime_suspend - Carry out run-time suspend of given device. 166 * @dev: Device to suspend. 167 - * @from_wq: If set, the function has been called via pm_wq. 168 * 169 * Check if the device can be suspended and run the ->runtime_suspend() callback 170 - * provided by its bus type. If another suspend has been started earlier, wait 171 - * for it to finish. If an idle notification or suspend request is pending or 172 * scheduled, cancel it. 173 * 174 * This function must be called under dev->power.lock with interrupts disabled. 175 */ 176 - int __pm_runtime_suspend(struct device *dev, bool from_wq) 177 __releases(&dev->power.lock) __acquires(&dev->power.lock) 178 { 179 struct device *parent = NULL; 180 bool notify = false; 181 int retval = 0; 182 183 - dev_dbg(dev, "__pm_runtime_suspend()%s!\n", 184 - from_wq ? " from workqueue" : ""); 185 186 repeat: 187 if (dev->power.runtime_error) { ··· 213 if (dev->power.runtime_status == RPM_SUSPENDING) { 214 DEFINE_WAIT(wait); 215 216 - if (from_wq) { 217 retval = -EINPROGRESS; 218 goto out; 219 } ··· 286 wake_up_all(&dev->power.wait_queue); 287 288 if (dev->power.deferred_resume) { 289 - __pm_runtime_resume(dev, false); 290 retval = -EAGAIN; 291 goto out; 292 } ··· 303 } 304 305 out: 306 - dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval); 307 308 return retval; 309 } ··· 317 int retval; 318 319 spin_lock_irq(&dev->power.lock); 320 - retval = __pm_runtime_suspend(dev, false); 321 spin_unlock_irq(&dev->power.lock); 322 323 return retval; ··· 327 /** 328 * __pm_runtime_resume - Carry out run-time resume of given device. 329 * @dev: Device to resume. 330 - * @from_wq: If set, the function has been called via pm_wq. 
331 * 332 * Check if the device can be woken up and run the ->runtime_resume() callback 333 - * provided by its bus type. If another resume has been started earlier, wait 334 - * for it to finish. If there's a suspend running in parallel with this 335 - * function, wait for it to finish and resume the device. Cancel any scheduled 336 - * or pending requests. 337 * 338 * This function must be called under dev->power.lock with interrupts disabled. 339 */ 340 - int __pm_runtime_resume(struct device *dev, bool from_wq) 341 __releases(&dev->power.lock) __acquires(&dev->power.lock) 342 { 343 struct device *parent = NULL; 344 int retval = 0; 345 346 - dev_dbg(dev, "__pm_runtime_resume()%s!\n", 347 - from_wq ? " from workqueue" : ""); 348 349 repeat: 350 if (dev->power.runtime_error) { ··· 366 || dev->power.runtime_status == RPM_SUSPENDING) { 367 DEFINE_WAIT(wait); 368 369 - if (from_wq) { 370 if (dev->power.runtime_status == RPM_SUSPENDING) 371 dev->power.deferred_resume = true; 372 retval = -EINPROGRESS; ··· 408 */ 409 if (!parent->power.disable_depth 410 && !parent->power.ignore_children) { 411 - __pm_runtime_resume(parent, false); 412 if (parent->power.runtime_status != RPM_ACTIVE) 413 retval = -EBUSY; 414 } ··· 471 spin_lock_irq(&dev->power.lock); 472 } 473 474 - dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval); 475 476 return retval; 477 } ··· 485 int retval; 486 487 spin_lock_irq(&dev->power.lock); 488 - retval = __pm_runtime_resume(dev, false); 489 spin_unlock_irq(&dev->power.lock); 490 491 return retval; ··· 520 __pm_runtime_idle(dev); 521 break; 522 case RPM_REQ_SUSPEND: 523 - __pm_runtime_suspend(dev, true); 524 break; 525 case RPM_REQ_RESUME: 526 - __pm_runtime_resume(dev, true); 527 break; 528 } 529 ··· 783 /** 784 * __pm_runtime_get - Reference count a device and wake it up, if necessary. 785 * @dev: Device to handle. 786 - * @sync: If set and the device is suspended, resume it synchronously. 
787 * 788 * Increment the usage count of the device and resume it or submit a resume 789 - * request for it, depending on the value of @sync. 790 */ 791 - int __pm_runtime_get(struct device *dev, bool sync) 792 { 793 int retval; 794 795 atomic_inc(&dev->power.usage_count); 796 - retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); 797 798 return retval; 799 } ··· 803 /** 804 * __pm_runtime_put - Decrement the device's usage counter and notify its bus. 805 * @dev: Device to handle. 806 - * @sync: If the device's bus type is to be notified, do that synchronously. 807 * 808 * Decrement the usage count of the device and if it reaches zero, carry out a 809 * synchronous idle notification or submit an idle notification request for it, 810 - * depending on the value of @sync. 811 */ 812 - int __pm_runtime_put(struct device *dev, bool sync) 813 { 814 int retval = 0; 815 816 if (atomic_dec_and_test(&dev->power.usage_count)) 817 - retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev); 818 819 return retval; 820 } ··· 970 971 if (dev->power.request_pending 972 && dev->power.request == RPM_REQ_RESUME) { 973 - __pm_runtime_resume(dev, false); 974 retval = 1; 975 } 976 ··· 1019 */ 1020 pm_runtime_get_noresume(dev); 1021 1022 - __pm_runtime_resume(dev, false); 1023 1024 pm_runtime_put_noidle(dev); 1025 } ··· 1067 1068 dev->power.runtime_auto = false; 1069 atomic_inc(&dev->power.usage_count); 1070 - __pm_runtime_resume(dev, false); 1071 1072 out: 1073 spin_unlock_irq(&dev->power.lock);
··· 10 #include <linux/pm_runtime.h> 11 #include <linux/jiffies.h> 12 13 + static int __pm_runtime_resume(struct device *dev, int rpmflags); 14 static int __pm_request_idle(struct device *dev); 15 static int __pm_request_resume(struct device *dev); 16 ··· 164 /** 165 * __pm_runtime_suspend - Carry out run-time suspend of given device. 166 * @dev: Device to suspend. 167 + * @rpmflags: Flag bits. 168 * 169 * Check if the device can be suspended and run the ->runtime_suspend() callback 170 + * provided by its bus type. If another suspend has been started earlier, 171 + * either return immediately or wait for it to finish, depending on the 172 + * RPM_NOWAIT flag. If an idle notification or suspend request is pending or 173 * scheduled, cancel it. 174 * 175 * This function must be called under dev->power.lock with interrupts disabled. 176 */ 177 + static int __pm_runtime_suspend(struct device *dev, int rpmflags) 178 __releases(&dev->power.lock) __acquires(&dev->power.lock) 179 { 180 struct device *parent = NULL; 181 bool notify = false; 182 int retval = 0; 183 184 + dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); 185 186 repeat: 187 if (dev->power.runtime_error) { ··· 213 if (dev->power.runtime_status == RPM_SUSPENDING) { 214 DEFINE_WAIT(wait); 215 216 + if (rpmflags & RPM_NOWAIT) { 217 retval = -EINPROGRESS; 218 goto out; 219 } ··· 286 wake_up_all(&dev->power.wait_queue); 287 288 if (dev->power.deferred_resume) { 289 + __pm_runtime_resume(dev, 0); 290 retval = -EAGAIN; 291 goto out; 292 } ··· 303 } 304 305 out: 306 + dev_dbg(dev, "%s returns %d\n", __func__, retval); 307 308 return retval; 309 } ··· 317 int retval; 318 319 spin_lock_irq(&dev->power.lock); 320 + retval = __pm_runtime_suspend(dev, 0); 321 spin_unlock_irq(&dev->power.lock); 322 323 return retval; ··· 327 /** 328 * __pm_runtime_resume - Carry out run-time resume of given device. 329 * @dev: Device to resume. 330 + * @rpmflags: Flag bits. 
331 * 332 * Check if the device can be woken up and run the ->runtime_resume() callback 333 + * provided by its bus type. If another resume has been started earlier, 334 + * either return immediately or wait for it to finish, depending on the 335 + * RPM_NOWAIT flag. If there's a suspend running in parallel with this 336 + * function, either tell the other process to resume after suspending 337 + * (deferred_resume) or wait for it to finish, depending on the RPM_NOWAIT 338 + * flag. Cancel any scheduled or pending requests. 339 * 340 * This function must be called under dev->power.lock with interrupts disabled. 341 */ 342 + static int __pm_runtime_resume(struct device *dev, int rpmflags) 343 __releases(&dev->power.lock) __acquires(&dev->power.lock) 344 { 345 struct device *parent = NULL; 346 int retval = 0; 347 348 + dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); 349 350 repeat: 351 if (dev->power.runtime_error) { ··· 365 || dev->power.runtime_status == RPM_SUSPENDING) { 366 DEFINE_WAIT(wait); 367 368 + if (rpmflags & RPM_NOWAIT) { 369 if (dev->power.runtime_status == RPM_SUSPENDING) 370 dev->power.deferred_resume = true; 371 retval = -EINPROGRESS; ··· 407 */ 408 if (!parent->power.disable_depth 409 && !parent->power.ignore_children) { 410 + __pm_runtime_resume(parent, 0); 411 if (parent->power.runtime_status != RPM_ACTIVE) 412 retval = -EBUSY; 413 } ··· 470 spin_lock_irq(&dev->power.lock); 471 } 472 473 + dev_dbg(dev, "%s returns %d\n", __func__, retval); 474 475 return retval; 476 } ··· 484 int retval; 485 486 spin_lock_irq(&dev->power.lock); 487 + retval = __pm_runtime_resume(dev, 0); 488 spin_unlock_irq(&dev->power.lock); 489 490 return retval; ··· 519 __pm_runtime_idle(dev); 520 break; 521 case RPM_REQ_SUSPEND: 522 + __pm_runtime_suspend(dev, RPM_NOWAIT); 523 break; 524 case RPM_REQ_RESUME: 525 + __pm_runtime_resume(dev, RPM_NOWAIT); 526 break; 527 } 528 ··· 782 /** 783 * __pm_runtime_get - Reference count a device and wake it up, if necessary. 
784 * @dev: Device to handle. 785 + * @rpmflags: Flag bits. 786 * 787 * Increment the usage count of the device and resume it or submit a resume 788 + * request for it, depending on the RPM_ASYNC flag bit. 789 */ 790 + int __pm_runtime_get(struct device *dev, int rpmflags) 791 { 792 int retval; 793 794 atomic_inc(&dev->power.usage_count); 795 + retval = (rpmflags & RPM_ASYNC) ? 796 + pm_request_resume(dev) : pm_runtime_resume(dev); 797 798 return retval; 799 } ··· 801 /** 802 * __pm_runtime_put - Decrement the device's usage counter and notify its bus. 803 * @dev: Device to handle. 804 + * @rpmflags: Flag bits. 805 * 806 * Decrement the usage count of the device and if it reaches zero, carry out a 807 * synchronous idle notification or submit an idle notification request for it, 808 + * depending on the RPM_ASYNC flag bit. 809 */ 810 + int __pm_runtime_put(struct device *dev, int rpmflags) 811 { 812 int retval = 0; 813 814 if (atomic_dec_and_test(&dev->power.usage_count)) 815 + retval = (rpmflags & RPM_ASYNC) ? 816 + pm_request_idle(dev) : pm_runtime_idle(dev); 817 818 return retval; 819 } ··· 967 968 if (dev->power.request_pending 969 && dev->power.request == RPM_REQ_RESUME) { 970 + __pm_runtime_resume(dev, 0); 971 retval = 1; 972 } 973 ··· 1016 */ 1017 pm_runtime_get_noresume(dev); 1018 1019 + __pm_runtime_resume(dev, 0); 1020 1021 pm_runtime_put_noidle(dev); 1022 } ··· 1064 1065 dev->power.runtime_auto = false; 1066 atomic_inc(&dev->power.usage_count); 1067 + __pm_runtime_resume(dev, 0); 1068 1069 out: 1070 spin_unlock_irq(&dev->power.lock);
+15 -8
include/linux/pm_runtime.h
··· 12 #include <linux/device.h> 13 #include <linux/pm.h> 14 15 #ifdef CONFIG_PM_RUNTIME 16 17 extern struct workqueue_struct *pm_wq; ··· 27 extern int pm_request_idle(struct device *dev); 28 extern int pm_schedule_suspend(struct device *dev, unsigned int delay); 29 extern int pm_request_resume(struct device *dev); 30 - extern int __pm_runtime_get(struct device *dev, bool sync); 31 - extern int __pm_runtime_put(struct device *dev, bool sync); 32 extern int __pm_runtime_set_status(struct device *dev, unsigned int status); 33 extern int pm_runtime_barrier(struct device *dev); 34 extern void pm_runtime_enable(struct device *dev); ··· 86 return -ENOSYS; 87 } 88 static inline int pm_request_resume(struct device *dev) { return 0; } 89 - static inline int __pm_runtime_get(struct device *dev, bool sync) { return 1; } 90 - static inline int __pm_runtime_put(struct device *dev, bool sync) { return 0; } 91 static inline int __pm_runtime_set_status(struct device *dev, 92 unsigned int status) { return 0; } 93 static inline int pm_runtime_barrier(struct device *dev) { return 0; } ··· 114 115 static inline int pm_runtime_get(struct device *dev) 116 { 117 - return __pm_runtime_get(dev, false); 118 } 119 120 static inline int pm_runtime_get_sync(struct device *dev) 121 { 122 - return __pm_runtime_get(dev, true); 123 } 124 125 static inline int pm_runtime_put(struct device *dev) 126 { 127 - return __pm_runtime_put(dev, false); 128 } 129 130 static inline int pm_runtime_put_sync(struct device *dev) 131 { 132 - return __pm_runtime_put(dev, true); 133 } 134 135 static inline int pm_runtime_set_active(struct device *dev)
··· 12 #include <linux/device.h> 13 #include <linux/pm.h> 14 15 + /* Runtime PM flag argument bits */ 16 + #define RPM_ASYNC 0x01 /* Request is asynchronous */ 17 + #define RPM_NOWAIT 0x02 /* Don't wait for concurrent 18 + state change */ 19 + 20 #ifdef CONFIG_PM_RUNTIME 21 22 extern struct workqueue_struct *pm_wq; ··· 22 extern int pm_request_idle(struct device *dev); 23 extern int pm_schedule_suspend(struct device *dev, unsigned int delay); 24 extern int pm_request_resume(struct device *dev); 25 + extern int __pm_runtime_get(struct device *dev, int rpmflags); 26 + extern int __pm_runtime_put(struct device *dev, int rpmflags); 27 extern int __pm_runtime_set_status(struct device *dev, unsigned int status); 28 extern int pm_runtime_barrier(struct device *dev); 29 extern void pm_runtime_enable(struct device *dev); ··· 81 return -ENOSYS; 82 } 83 static inline int pm_request_resume(struct device *dev) { return 0; } 84 + static inline int __pm_runtime_get(struct device *dev, int rpmflags) 85 + { return 1; } 86 + static inline int __pm_runtime_put(struct device *dev, int rpmflags) 87 + { return 0; } 88 static inline int __pm_runtime_set_status(struct device *dev, 89 unsigned int status) { return 0; } 90 static inline int pm_runtime_barrier(struct device *dev) { return 0; } ··· 107 108 static inline int pm_runtime_get(struct device *dev) 109 { 110 + return __pm_runtime_get(dev, RPM_ASYNC); 111 } 112 113 static inline int pm_runtime_get_sync(struct device *dev) 114 { 115 + return __pm_runtime_get(dev, 0); 116 } 117 118 static inline int pm_runtime_put(struct device *dev) 119 { 120 + return __pm_runtime_put(dev, RPM_ASYNC); 121 } 122 123 static inline int pm_runtime_put_sync(struct device *dev) 124 { 125 + return __pm_runtime_put(dev, 0); 126 } 127 128 static inline int pm_runtime_set_active(struct device *dev)