Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 */
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
        pm_callback_t cb;
        const struct dev_pm_ops *ops;

        if (dev->pm_domain)
                ops = &dev->pm_domain->ops;
        else if (dev->type && dev->type->pm)
                ops = dev->type->pm;
        else if (dev->class && dev->class->pm)
                ops = dev->class->pm;
        else if (dev->bus && dev->bus->pm)
                ops = dev->bus->pm;
        else
                ops = NULL;

        if (ops)
                cb = *(pm_callback_t *)((void *)ops + cb_offset);
        else
                cb = NULL;

        if (!cb && dev->driver && dev->driver->pm)
                cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

        return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
                __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
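
/*
 * Illustrative sketch (not part of this file): __rpm_get_callback() walks the
 * PM domain, type, class and bus before falling back to the driver, so a
 * driver only needs to publish its callbacks in a struct dev_pm_ops for the
 * lookup above to find them. The foo_* names below are hypothetical.
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *                                 foo_runtime_resume,
 *                                 foo_runtime_idle)
 *      };
 *
 *      static struct platform_driver foo_driver = {
 *              .driver = {
 *                      .name = "foo",
 *                      .pm = &foo_pm_ops,
 *              },
 *      };
 */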

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
        u64 now, last, delta;

        if (dev->power.disable_depth > 0)
                return;

        last = dev->power.accounting_timestamp;

        now = ktime_get_mono_fast_ns();
        dev->power.accounting_timestamp = now;

        /*
         * Because ktime_get_mono_fast_ns() is not monotonic during
         * timekeeping updates, ensure that 'now' is after the last saved
         * timestamp.
         */
        if (now < last)
                return;

        delta = now - last;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_time += delta;
        else
                dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
        u64 time;
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        update_pm_runtime_accounting(dev);
        time = suspended ? dev->power.suspended_time : dev->power.active_time;

        spin_unlock_irqrestore(&dev->power.lock, flags);

        return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
        return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
        return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
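
/*
 * Illustrative sketch (not part of this file): pm_runtime_suspended_time()
 * and pm_runtime_active_time() return monotonically growing nanosecond
 * counters, so callers typically sample them twice and look at the delta.
 * foo_suspended_ns_since() is a hypothetical helper.
 *
 *      static u64 foo_suspended_ns_since(struct device *dev, u64 last_sample)
 *      {
 *              return pm_runtime_suspended_time(dev) - last_sample;
 *      }
 */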

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                hrtimer_try_to_cancel(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        u64 expires;

        if (!dev->power.use_autosuspend)
                return 0;

        autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                return 0;

        expires = READ_ONCE(dev->power.last_busy);
        expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
        if (expires > ktime_get_mono_fast_ns())
                return expires; /* Expires in the future */

        return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
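
/*
 * Illustrative sketch (not part of this file): the last_busy timestamp that
 * the computation above is based on is refreshed by drivers on each use of
 * the device, typically right before dropping the usage count.
 * foo_io_done() is a hypothetical completion handler.
 *
 *      static void foo_io_done(struct device *dev)
 *      {
 *              pm_runtime_mark_last_busy(dev);
 *              pm_runtime_put_autosuspend(dev);
 *      }
 */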

static int dev_memalloc_noio(struct device *dev, void *data)
{
        return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers for solving the deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL happens inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or the
 * block device itself), a deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  This
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI-type situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
        static DEFINE_MUTEX(dev_hotplug_mutex);

        mutex_lock(&dev_hotplug_mutex);
        for (;;) {
                bool enabled;

                /* hold power lock since bitfield is not SMP-safe. */
                spin_lock_irq(&dev->power.lock);
                enabled = dev->power.memalloc_noio;
                dev->power.memalloc_noio = enable;
                spin_unlock_irq(&dev->power.lock);

                /*
                 * No need to enable ancestors any more if the device
                 * has been enabled already.
                 */
                if (enabled && enable)
                        break;

                dev = dev->parent;

                /*
                 * Clear the flag of the parent device only if none of
                 * its children has the flag set, because an ancestor's
                 * flag may have been set by any one of the descendants.
                 */
                if (!dev || (!enable &&
                             device_for_each_child(dev, NULL,
                                                   dev_memalloc_noio)))
                        break;
        }
        mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
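
/*
 * Illustrative sketch (not part of this file): a block or network driver
 * would bracket the lifetime of the flag with device registration, as the
 * comment above requires.  foo_register_disk_dev() is a hypothetical helper.
 *
 *      static int foo_register_disk_dev(struct device *dev)
 *      {
 *              int ret = device_add(dev);
 *
 *              if (ret)
 *                      return ret;
 *              pm_runtime_set_memalloc_noio(dev, true);
 *              return 0;
 *      }
 *
 * ...with pm_runtime_set_memalloc_noio(dev, false) before device_del().
 */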

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        else if (atomic_read(&dev->power.usage_count) > 0)
                retval = -EAGAIN;
        else if (!dev->power.ignore_children &&
                 atomic_read(&dev->power.child_count))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (__dev_pm_qos_resume_latency(dev) == 0)
                retval = -EPERM;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
        struct device_link *link;

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held()) {
                int retval;

                if (!(link->flags & DL_FLAG_PM_RUNTIME))
                        continue;

                retval = pm_runtime_get_sync(link->supplier);
                /* Ignore suppliers with disabled runtime PM. */
                if (retval < 0 && retval != -EACCES) {
                        pm_runtime_put_noidle(link->supplier);
                        return retval;
                }
                refcount_inc(&link->rpm_active);
        }
        return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
        struct device_link *link;

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held()) {

                while (refcount_dec_not_one(&link->rpm_active))
                        pm_runtime_put(link->supplier);
        }
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        bool use_links = dev->power.links_count > 0;
        bool get = false;
        int retval, idx;
        bool put;

        if (dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);
        } else if (!use_links) {
                spin_unlock_irq(&dev->power.lock);
        } else {
                get = dev->power.runtime_status == RPM_RESUMING;

                spin_unlock_irq(&dev->power.lock);

                /* Resume suppliers if necessary. */
                if (get) {
                        idx = device_links_read_lock();

                        retval = rpm_get_suppliers(dev);
                        if (retval)
                                goto fail;

                        device_links_read_unlock(idx);
                }
        }

        retval = cb(dev);

        if (dev->power.irq_safe) {
                spin_lock(&dev->power.lock);
                return retval;
        }

        spin_lock_irq(&dev->power.lock);

        if (!use_links)
                return retval;

        /*
         * If the device is suspending and the callback has returned success,
         * drop the usage counters of the suppliers that have been reference
         * counted on its resume.
         *
         * Do that if the resume fails too.
         */
        put = dev->power.runtime_status == RPM_SUSPENDING && !retval;
        if (put)
                __update_runtime_status(dev, RPM_SUSPENDED);
        else
                put = get && retval;

        if (put) {
                spin_unlock_irq(&dev->power.lock);

                idx = device_links_read_lock();

fail:
                rpm_put_suppliers(dev);

                device_links_read_unlock(idx);

                spin_lock_irq(&dev->power.lock);
        }

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        trace_rpm_idle_rcuidle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks)
                goto out;

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
                return 0;
        }

        dev->power.idle_notification = true;

        callback = RPM_GET_CALLBACK(dev, runtime_idle);

        if (callback)
                retval = __rpm_callback(callback, dev);

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
        return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
        int retval;

        if (!cb)
                return -ENOSYS;

        if (dev->power.memalloc_noio) {
                unsigned int noio_flag;

                /*
                 * Deadlock might be caused if memory allocation with
                 * GFP_KERNEL happens inside the runtime_suspend and
                 * runtime_resume callbacks of one block device's
                 * ancestor or the block device itself.  A network
                 * device might be thought of as part of an iSCSI block
                 * device, so network devices and their ancestors should
                 * be marked as memalloc_noio too.
                 */
                noio_flag = memalloc_noio_save();
                retval = __rpm_callback(cb, dev);
                memalloc_noio_restore(noio_flag);
        } else {
                retval = __rpm_callback(cb, dev);
        }

        dev->power.runtime_error = retval;
        return retval != -EACCES ? retval : -EIO;
}
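
/*
 * Illustrative sketch (not part of this file): the same scoped-GFP pattern
 * used above is available to any code that must not recurse into page I/O.
 * foo_alloc_in_pm_path() is a hypothetical helper.
 *
 *      static void *foo_alloc_in_pm_path(size_t size)
 *      {
 *              unsigned int noio_flag = memalloc_noio_save();
 *              void *p = kmalloc(size, GFP_KERNEL);
 *
 *              memalloc_noio_restore(noio_flag);
 *              return p;
 *      }
 */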

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry it out; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and
 * neither ignore_children of parent->power nor irq_safe of dev->power is
 * set).  If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the
 * RPM_AUTO flag is set and the next autosuspend-delay expiration time is in
 * the future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval;

        trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                goto out;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                u64 expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it.  Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires &&
                                        dev->power.timer_expires <= expires)) {
                                /*
                                 * We add a slack of 25% to gather wakeups
                                 * without sacrificing the granularity.
                                 */
                                u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
                                                    (NSEC_PER_MSEC >> 2);

                                dev->power.timer_expires = expires;
                                hrtimer_start_range_ns(&dev->power.suspend_timer,
                                                ns_to_ktime(expires),
                                                slack,
                                                HRTIMER_MODE_ABS);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        callback = RPM_GET_CALLBACK(dev, runtime_suspend);

        dev_pm_enable_wake_irq_check(dev, true);
        retval = rpm_callback(callback, dev);
        if (retval)
                goto fail;

 no_callback:
        __update_runtime_status(dev, RPM_SUSPENDED);
        pm_runtime_deactivate_timer(dev);

        if (dev->parent) {
                parent = dev->parent;
                atomic_add_unless(&parent->power.child_count, -1, 0);
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                dev->power.deferred_resume = false;
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

 out:
        trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

        return retval;

 fail:
        dev_pm_disable_wake_irq_check(dev);
        __update_runtime_status(dev, RPM_ACTIVE);
        dev->power.deferred_resume = false;
        wake_up_all(&dev->power.wait_queue);

        if (retval == -EAGAIN || retval == -EBUSY) {
                dev->power.runtime_error = 0;

                /*
                 * If the callback routine failed an autosuspend, and
                 * if the last_busy time has been updated so that there
                 * is a new autosuspend expiration time, automatically
                 * reschedule another autosuspend.
                 */
                if ((rpmflags & RPM_AUTO) &&
                    pm_runtime_autosuspend_expiration(dev) != 0)
                        goto repeat;
        } else {
                pm_runtime_cancel_pending(dev);
        }
        goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval = 0;

        trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth == 1 && dev->power.is_suspended
            && dev->power.runtime_status == RPM_ACTIVE)
                retval = 1;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        if (retval)
                goto out;

        /*
         * Other scheduled or pending requests need to be canceled.  Small
         * optimization: If an autosuspend timer is running, leave it running
         * rather than cancelling it now only to restart it again in the near
         * future.
         */
        dev->power.request = RPM_REQ_NONE;
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        retval = 1;
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's usage counter and resume it if
                 * necessary.  Not needed if dev is irq-safe; then the
                 * parent is permanently resumed.
                 */
                parent = dev->parent;
                if (dev->power.irq_safe)
                        goto skip_parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * Resume the parent if it has runtime PM enabled and has not
                 * been set to ignore its children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }
 skip_parent:

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        callback = RPM_GET_CALLBACK(dev, runtime_resume);

        dev_pm_disable_wake_irq_check(dev);
        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
                dev_pm_enable_wake_irq_check(dev, false);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                pm_runtime_mark_last_busy(dev);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (retval >= 0)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent && !dev->power.irq_safe) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

        return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_AUTOSUSPEND:
                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
        struct device *dev = container_of(timer, struct device, power.suspend_timer);
        unsigned long flags;
        u64 expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /*
         * If 'expires' is after the current time, we've been called
         * too early.
         */
        if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);

        return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        u64 expires;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
        dev->power.timer_expires = expires;
        dev->power.timer_autosuspends = 0;
        hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count)) {
                        trace_rpm_usage_rcuidle(dev, rpmflags);
                        return 0;
                }
        }

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
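
/*
 * Illustrative note (not part of this file): drivers rarely call
 * __pm_runtime_idle() directly; the static inline wrappers in
 * include/linux/pm_runtime.h pick the flags, along the lines of:
 *
 *      pm_runtime_idle(dev);     // __pm_runtime_idle(dev, 0)
 *      pm_request_idle(dev);     // __pm_runtime_idle(dev, RPM_ASYNC)
 *      pm_runtime_put(dev);      // __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *      pm_runtime_put_sync(dev); // __pm_runtime_idle(dev, RPM_GET_PUT)
 */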

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count)) {
                        trace_rpm_usage_rcuidle(dev, rpmflags);
                        return 0;
                }
        }

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
                        dev->power.runtime_status != RPM_ACTIVE);

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
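
/*
 * Illustrative sketch (not part of this file): the canonical way a driver
 * brackets device use with the entry points above, via the pm_runtime.h
 * wrappers.  foo_do_io() is a hypothetical I/O routine.
 *
 *      static int foo_do_io(struct device *dev)
 *      {
 *              int ret = pm_runtime_get_sync(dev);     // resume, count++
 *
 *              if (ret < 0) {
 *                      pm_runtime_put_noidle(dev);
 *                      return ret;
 *              }
 *              // ... talk to the hardware ...
 *              pm_runtime_put(dev);                    // count--, async idle
 *              return 0;
 *      }
 */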

/**
 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
 * zero, increment the usage counter of @dev and return 1.  Otherwise, return 0
 * without changing the usage counter.
 *
 * If @ign_usage_count is %true, this function can be used to prevent suspending
 * the device when its runtime PM status is %RPM_ACTIVE.
 *
 * If @ign_usage_count is %false, this function can be used to prevent
 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
 * runtime PM usage counter is not zero.
 *
 * The caller is responsible for decrementing the runtime PM usage counter of
 * @dev after this function has returned a positive value for it.
 */
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);
        if (dev->power.disable_depth > 0) {
                retval = -EINVAL;
        } else if (dev->power.runtime_status != RPM_ACTIVE) {
                retval = 0;
        } else if (ign_usage_count) {
                retval = 1;
                atomic_inc(&dev->power.usage_count);
        } else {
                retval = atomic_inc_not_zero(&dev->power.usage_count);
        }
        trace_rpm_usage_rcuidle(dev, 0);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
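
/*
 * Illustrative sketch (not part of this file): a typical use is to touch the
 * hardware only when it is already powered up, without resuming it.
 * foo_flush_cached_state() is a hypothetical helper.
 *
 *      static void foo_flush_cached_state(struct device *dev)
 *      {
 *              if (pm_runtime_get_if_active(dev, false) <= 0)
 *                      return; // device not active (or runtime PM disabled)
 *              // ... write cached registers back ...
 *              pm_runtime_put(dev);
 *      }
 */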

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irq(&dev->power.lock);

        /*
         * Prevent PM-runtime from being enabled for the device or return an
         * error if it is enabled already and working.
         */
        if (dev->power.runtime_error || dev->power.disable_depth)
                dev->power.disable_depth++;
        else
                error = -EAGAIN;

        spin_unlock_irq(&dev->power.lock);

        if (error)
                return error;

        /*
         * If the new status is RPM_ACTIVE, the suppliers can be activated
         * upfront regardless of the current status, because next time
         * rpm_put_suppliers() runs, the rpm_active refcounts of the links
         * involved will be dropped down to one anyway.
         */
        if (status == RPM_ACTIVE) {
                int idx = device_links_read_lock();

                error = rpm_get_suppliers(dev);
                if (error)
                        status = RPM_SUSPENDED;

                device_links_read_unlock(idx);
        }

        spin_lock_irq(&dev->power.lock);

        if (dev->power.runtime_status == status || !parent)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                atomic_add_unless(&parent->power.child_count, -1, 0);
                notify_parent = !parent->power.ignore_children;
        } else {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has runtime PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE) {
                        dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
                                dev_name(dev),
                                dev_name(parent));
                        error = -EBUSY;
                } else if (dev->power.runtime_status == RPM_SUSPENDED) {
                        atomic_inc(&parent->power.child_count);
                }

                spin_unlock(&parent->power.lock);

                if (error) {
                        status = RPM_SUSPENDED;
                        goto out;
                }
        }

 out_set:
        __update_runtime_status(dev, status);
        if (!error)
                dev->power.runtime_error = 0;

 out:
        spin_unlock_irq(&dev->power.lock);

        if (notify_parent)
                pm_request_idle(parent);

        if (status == RPM_SUSPENDED) {
                int idx = device_links_read_lock();

                rpm_put_suppliers(dev);

                device_links_read_unlock(idx);
        }

        pm_runtime_enable(dev);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
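
/*
 * Illustrative sketch (not part of this file): a probe routine that leaves
 * the hardware powered up typically records that fact before enabling
 * runtime PM, using the pm_runtime.h wrappers around this function.
 * foo_probe() is hypothetical.
 *
 *      static int foo_probe(struct device *dev)
 *      {
 *              // ... power up and initialize the hardware ...
 *              pm_runtime_set_active(dev); // __pm_runtime_set_status(dev, RPM_ACTIVE)
 *              pm_runtime_enable(dev);
 *              return 0;
 *      }
 */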

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling runtime PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        /* Update time accounting before disabling PM-runtime. */
        update_pm_runtime_accounting(dev);

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth--;

                /* About to enable runtime PM, set accounting_timestamp to now */
                if (!dev->power.disable_depth)
                        dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
        } else {
                dev_warn(dev, "Unbalanced %s!\n", __func__);
        }

        WARN(!dev->power.disable_depth &&
             dev->power.runtime_status == RPM_SUSPENDED &&
             !dev->power.ignore_children &&
             atomic_read(&dev->power.child_count) > 0,
             "Enabling runtime PM for inactive device (%s) with active children\n",
             dev_name(dev));

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
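
/*
 * Illustrative sketch (not part of this file): disable/enable must stay
 * balanced, which is why system sleep callbacks typically pair them.
 * foo_suspend()/foo_resume() are hypothetical dev_pm_ops callbacks.
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              pm_runtime_disable(dev); // __pm_runtime_disable(dev, true)
 *              // ... save state, cut power ...
 *              return 0;
 *      }
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              // ... restore power and state ...
 *              pm_runtime_enable(dev);
 *              return 0;
 *      }
 */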

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
        else
                trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);
        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
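
/*
 * Illustrative sketch (not part of this file): after pm_runtime_irq_safe(),
 * the device may be resumed from atomic context.  foo_irq_handler() is a
 * hypothetical interrupt handler relying on that.
 *
 *      static irqreturn_t foo_irq_handler(int irq, void *data)
 *      {
 *              struct device *dev = data;
 *
 *              pm_runtime_get_sync(dev);       // legal in atomic context only
 *                                              // because of pm_runtime_irq_safe()
 *              // ... acknowledge the hardware ...
 *              pm_runtime_put(dev);
 *              return IRQ_HANDLED;
 *      }
 */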

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                } else {
                        trace_rpm_usage_rcuidle(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
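
/*
 * Illustrative sketch (not part of this file): the usual probe-time sequence
 * that turns the two knobs above on together.  The 2000 ms delay and
 * foo_probe() are hypothetical.
 *
 *      static int foo_probe(struct device *dev)
 *      {
 *              pm_runtime_set_autosuspend_delay(dev, 2000);
 *              pm_runtime_use_autosuspend(dev); // __pm_runtime_use_autosuspend(dev, true)
 *              pm_runtime_set_active(dev);
 *              pm_runtime_enable(dev);
 *              return 0;
 *      }
 */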

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        dev->power.suspend_timer.function = pm_suspend_timer_fn;

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
        if (!pm_runtime_enabled(dev)) {
                if (dev->power.runtime_status == RPM_ACTIVE)
                        pm_runtime_set_suspended(dev);
                if (dev->power.irq_safe) {
                        spin_lock_irq(&dev->power.lock);
                        dev->power.irq_safe = 0;
                        spin_unlock_irq(&dev->power.lock);
                        if (dev->parent)
                                pm_runtime_put(dev->parent);
                }
        }
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);
        pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held())
                if (link->flags & DL_FLAG_PM_RUNTIME) {
                        link->supplier_preactivated = true;
                        refcount_inc(&link->rpm_active);
                        pm_runtime_get_sync(link->supplier);
                }

        device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held())
                if (link->supplier_preactivated) {
                        link->supplier_preactivated = false;
                        if (refcount_dec_not_one(&link->rpm_active))
                                pm_runtime_put(link->supplier);
                }

        device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.links_count++;
        spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        WARN_ON(dev->power.links_count == 0);
        dev->power.links_count--;
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * PM-runtime references to it from the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
        if (!(link->flags & DL_FLAG_PM_RUNTIME))
                return;

        pm_runtime_drop_link_count(link->consumer);

        while (refcount_dec_not_one(&link->rpm_active))
                pm_runtime_put(link->supplier);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
        return atomic_read(&dev->power.usage_count) <= 1 &&
                (atomic_read(&dev->power.child_count) == 0 ||
                 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).  Keep runtime PM disabled to preserve the state
 * unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into a low power state.  It should only be used
 * during system-wide PM transitions to sleep states, and it assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
        int (*callback)(struct device *);
        int ret;

        pm_runtime_disable(dev);
        if (pm_runtime_status_suspended(dev))
                return 0;

        callback = RPM_GET_CALLBACK(dev, runtime_suspend);

        ret = callback ? callback(dev) : 0;
        if (ret)
                goto err;

        /*
         * If the device can stay in suspend after the system-wide transition
         * to the working state that will follow, drop the children counter of
         * its parent, but set its status to RPM_SUSPENDED anyway in case this
         * function will be called again for it in the meantime.
         */
        if (pm_runtime_need_not_resume(dev))
                pm_runtime_set_suspended(dev);
        else
                __update_runtime_status(dev, RPM_SUSPENDED);

        return 0;

err:
        pm_runtime_enable(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device into full power, if it is
 * expected to be used on system resume.  In the other case, we defer the
 * resume to be managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
        int (*callback)(struct device *);
        int ret = 0;

        if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
                goto out;

        /*
         * The value of the parent's children counter is correct already, so
         * just update the status of the device.
         */
        __update_runtime_status(dev, RPM_ACTIVE);

        callback = RPM_GET_CALLBACK(dev, runtime_resume);

        ret = callback ? callback(dev) : 0;
        if (ret) {
                pm_runtime_set_suspended(dev);
                goto out;
        }

        pm_runtime_mark_last_busy(dev);
out:
        pm_runtime_enable(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
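
/*
 * Illustrative sketch (not part of this file): drivers whose runtime PM
 * callbacks already do everything needed for system sleep commonly reuse
 * them through the pair of functions above.  foo_pm_ops and the foo_runtime_*
 * callbacks are hypothetical.
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *                                      pm_runtime_force_resume)
 *              SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *                                 foo_runtime_resume, NULL)
 *      };
 */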