PM / Runtime: Replace boolean arguments with bitflags

The "from_wq" argument in __pm_runtime_suspend() and
__pm_runtime_resume() supposedly indicates whether or not the function
was called by the PM workqueue thread, but in fact it isn't always
used this way. It really indicates whether or not the function should
return early if the requested operation is already in progress.
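As a minimal before/after sketch of the rename (the call shown is the
pm_wq work routine's suspend request, taken from the diff below):

	/*
	 * Old: "true" reads as "called from the PM workqueue", but the
	 * callee really treats it as "don't wait for an operation that
	 * is already in progress".
	 */
	__pm_runtime_suspend(dev, true);

	/* New: the flag name states the actual intent. */
	__pm_runtime_suspend(dev, RPM_NOWAIT);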

In addition to this badly named boolean argument, later patches in this
series will add several other boolean arguments to these functions and
others. Therefore this patch (as1422) begins the conversion process
by replacing from_wq with a bitflag argument. The same bitflags are
also used in __pm_runtime_get() and __pm_runtime_put(), where they
indicate whether or not the operation should be asynchronous.
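For instance, the existing synchronous/asynchronous helper pairs then
reduce to setting or clearing a single flag bit (these wrappers appear
in the include/linux/pm_runtime.h hunks below):

	static inline int pm_runtime_get_sync(struct device *dev)
	{
		return __pm_runtime_get(dev, 0);	/* resume synchronously */
	}

	static inline int pm_runtime_get(struct device *dev)
	{
		return __pm_runtime_get(dev, RPM_ASYNC);	/* queue a resume request */
	}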

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>

Authored by Alan Stern and committed by Rafael J. Wysocki (3f9af051, 4769373c)

2 files changed, 54 insertions(+), 44 deletions(-)

drivers/base/power/runtime.c (+39 -36)
···
 #include <linux/pm_runtime.h>
 #include <linux/jiffies.h>
 
-static int __pm_runtime_resume(struct device *dev, bool from_wq);
+static int __pm_runtime_resume(struct device *dev, int rpmflags);
 static int __pm_request_idle(struct device *dev);
 static int __pm_request_resume(struct device *dev);
···
 /**
  * __pm_runtime_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
  *
  * Check if the device can be suspended and run the ->runtime_suspend() callback
- * provided by its bus type. If another suspend has been started earlier, wait
- * for it to finish. If an idle notification or suspend request is pending or
+ * provided by its bus type. If another suspend has been started earlier,
+ * either return immediately or wait for it to finish, depending on the
+ * RPM_NOWAIT flag. If an idle notification or suspend request is pending or
  * scheduled, cancel it.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-int __pm_runtime_suspend(struct device *dev, bool from_wq)
+static int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	struct device *parent = NULL;
 	bool notify = false;
 	int retval = 0;
 
-	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
-		from_wq ? " from workqueue" : "");
+	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 
  repeat:
 	if (dev->power.runtime_error) {
···
 	if (dev->power.runtime_status == RPM_SUSPENDING) {
 		DEFINE_WAIT(wait);
 
-		if (from_wq) {
+		if (rpmflags & RPM_NOWAIT) {
 			retval = -EINPROGRESS;
 			goto out;
 		}
···
 	wake_up_all(&dev->power.wait_queue);
 
 	if (dev->power.deferred_resume) {
-		__pm_runtime_resume(dev, false);
+		__pm_runtime_resume(dev, 0);
 		retval = -EAGAIN;
 		goto out;
 	}
···
 	}
 
  out:
-	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);
+	dev_dbg(dev, "%s returns %d\n", __func__, retval);
 
 	return retval;
 }
···
 	int retval;
 
 	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_suspend(dev, false);
+	retval = __pm_runtime_suspend(dev, 0);
 	spin_unlock_irq(&dev->power.lock);
 
 	return retval;
···
 /**
  * __pm_runtime_resume - Carry out run-time resume of given device.
  * @dev: Device to resume.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
  *
  * Check if the device can be woken up and run the ->runtime_resume() callback
- * provided by its bus type. If another resume has been started earlier, wait
- * for it to finish. If there's a suspend running in parallel with this
- * function, wait for it to finish and resume the device. Cancel any scheduled
- * or pending requests.
+ * provided by its bus type. If another resume has been started earlier,
+ * either return immediately or wait for it to finish, depending on the
+ * RPM_NOWAIT flag. If there's a suspend running in parallel with this
+ * function, either tell the other process to resume after suspending
+ * (deferred_resume) or wait for it to finish, depending on the RPM_NOWAIT
+ * flag. Cancel any scheduled or pending requests.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-int __pm_runtime_resume(struct device *dev, bool from_wq)
+static int __pm_runtime_resume(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	struct device *parent = NULL;
 	int retval = 0;
 
-	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
-		from_wq ? " from workqueue" : "");
+	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 
  repeat:
 	if (dev->power.runtime_error) {
···
 	    || dev->power.runtime_status == RPM_SUSPENDING) {
 		DEFINE_WAIT(wait);
 
-		if (from_wq) {
+		if (rpmflags & RPM_NOWAIT) {
 			if (dev->power.runtime_status == RPM_SUSPENDING)
 				dev->power.deferred_resume = true;
 			retval = -EINPROGRESS;
···
 	 */
 	if (!parent->power.disable_depth
 	    && !parent->power.ignore_children) {
-		__pm_runtime_resume(parent, false);
+		__pm_runtime_resume(parent, 0);
 		if (parent->power.runtime_status != RPM_ACTIVE)
 			retval = -EBUSY;
 	}
···
 		spin_lock_irq(&dev->power.lock);
 	}
 
-	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);
+	dev_dbg(dev, "%s returns %d\n", __func__, retval);
 
 	return retval;
 }
···
 	int retval;
 
 	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_resume(dev, false);
+	retval = __pm_runtime_resume(dev, 0);
 	spin_unlock_irq(&dev->power.lock);
 
 	return retval;
···
 		__pm_runtime_idle(dev);
 		break;
 	case RPM_REQ_SUSPEND:
-		__pm_runtime_suspend(dev, true);
+		__pm_runtime_suspend(dev, RPM_NOWAIT);
 		break;
 	case RPM_REQ_RESUME:
-		__pm_runtime_resume(dev, true);
+		__pm_runtime_resume(dev, RPM_NOWAIT);
 		break;
 	}
 
···
 /**
  * __pm_runtime_get - Reference count a device and wake it up, if necessary.
  * @dev: Device to handle.
- * @sync: If set and the device is suspended, resume it synchronously.
+ * @rpmflags: Flag bits.
  *
  * Increment the usage count of the device and resume it or submit a resume
- * request for it, depending on the value of @sync.
+ * request for it, depending on the RPM_ASYNC flag bit.
  */
-int __pm_runtime_get(struct device *dev, bool sync)
+int __pm_runtime_get(struct device *dev, int rpmflags)
 {
 	int retval;
 
 	atomic_inc(&dev->power.usage_count);
-	retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
+	retval = (rpmflags & RPM_ASYNC) ?
+	    pm_request_resume(dev) : pm_runtime_resume(dev);
 
 	return retval;
 }
···
 /**
  * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
  * @dev: Device to handle.
- * @sync: If the device's bus type is to be notified, do that synchronously.
+ * @rpmflags: Flag bits.
  *
  * Decrement the usage count of the device and if it reaches zero, carry out a
  * synchronous idle notification or submit an idle notification request for it,
- * depending on the value of @sync.
+ * depending on the RPM_ASYNC flag bit.
  */
-int __pm_runtime_put(struct device *dev, bool sync)
+int __pm_runtime_put(struct device *dev, int rpmflags)
 {
 	int retval = 0;
 
 	if (atomic_dec_and_test(&dev->power.usage_count))
-		retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
+		retval = (rpmflags & RPM_ASYNC) ?
+		    pm_request_idle(dev) : pm_runtime_idle(dev);
 
 	return retval;
 }
···
 
 	if (dev->power.request_pending
 	    && dev->power.request == RPM_REQ_RESUME) {
-		__pm_runtime_resume(dev, false);
+		__pm_runtime_resume(dev, 0);
 		retval = 1;
 	}
···
 	 */
 	pm_runtime_get_noresume(dev);
 
-	__pm_runtime_resume(dev, false);
+	__pm_runtime_resume(dev, 0);
 
 	pm_runtime_put_noidle(dev);
 }
···
 
 	dev->power.runtime_auto = false;
 	atomic_inc(&dev->power.usage_count);
-	__pm_runtime_resume(dev, false);
+	__pm_runtime_resume(dev, 0);
 
  out:
 	spin_unlock_irq(&dev->power.lock);
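Condensed from the __pm_runtime_resume() hunks above, the contract that
RPM_NOWAIT encodes: a caller running on pm_wq must not sleep waiting for
a concurrent state change, so the function defers the resume and bails
out instead:

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		if (rpmflags & RPM_NOWAIT) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;	/* don't block the workqueue */
			goto out;
		}
		/* otherwise: sleep on dev->power.wait_queue and retry */
	}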
include/linux/pm_runtime.h (+15 -8)
···
 #include <linux/device.h>
 #include <linux/pm.h>
 
+/* Runtime PM flag argument bits */
+#define RPM_ASYNC		0x01	/* Request is asynchronous */
+#define RPM_NOWAIT		0x02	/* Don't wait for concurrent
+					   state change */
+
 #ifdef CONFIG_PM_RUNTIME
 
 extern struct workqueue_struct *pm_wq;
···
 extern int pm_request_idle(struct device *dev);
 extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
 extern int pm_request_resume(struct device *dev);
-extern int __pm_runtime_get(struct device *dev, bool sync);
-extern int __pm_runtime_put(struct device *dev, bool sync);
+extern int __pm_runtime_get(struct device *dev, int rpmflags);
+extern int __pm_runtime_put(struct device *dev, int rpmflags);
 extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
 extern int pm_runtime_barrier(struct device *dev);
 extern void pm_runtime_enable(struct device *dev);
···
 	return -ENOSYS;
 }
 static inline int pm_request_resume(struct device *dev) { return 0; }
-static inline int __pm_runtime_get(struct device *dev, bool sync) { return 1; }
-static inline int __pm_runtime_put(struct device *dev, bool sync) { return 0; }
+static inline int __pm_runtime_get(struct device *dev, int rpmflags)
+{ return 1; }
+static inline int __pm_runtime_put(struct device *dev, int rpmflags)
+{ return 0; }
 static inline int __pm_runtime_set_status(struct device *dev,
 					    unsigned int status) { return 0; }
 static inline int pm_runtime_barrier(struct device *dev) { return 0; }
···
 
 static inline int pm_runtime_get(struct device *dev)
 {
-	return __pm_runtime_get(dev, false);
+	return __pm_runtime_get(dev, RPM_ASYNC);
 }
 
 static inline int pm_runtime_get_sync(struct device *dev)
 {
-	return __pm_runtime_get(dev, true);
+	return __pm_runtime_get(dev, 0);
 }
 
 static inline int pm_runtime_put(struct device *dev)
 {
-	return __pm_runtime_put(dev, false);
+	return __pm_runtime_put(dev, RPM_ASYNC);
 }
 
 static inline int pm_runtime_put_sync(struct device *dev)
 {
-	return __pm_runtime_put(dev, true);
+	return __pm_runtime_put(dev, 0);
 }
 
 static inline int pm_runtime_set_active(struct device *dev)
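For context, a hypothetical driver-side user of these wrappers
(foo_do_io() and its surrounding driver are invented for illustration;
the PM calls are the ones declared above):

	/*
	 * Take a usage reference and resume the device synchronously
	 * before touching the hardware, then drop the reference with an
	 * asynchronous idle notification.
	 */
	static int foo_do_io(struct device *dev)
	{
		int error;

		error = pm_runtime_get_sync(dev);	/* __pm_runtime_get(dev, 0) */
		if (error < 0) {
			pm_runtime_put(dev);	/* usage count was still raised */
			return error;
		}

		/* ... program the device ... */

		pm_runtime_put(dev);	/* __pm_runtime_put(dev, RPM_ASYNC) */
		return 0;
	}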