/*
 * Source: Linux kernel mirror (for testing)
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * Tags: kernel, os, linux
 */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * drivers/base/power/domain.c - Common code related to device power domains.
4 *
5 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6 */
7#define pr_fmt(fmt) "PM: " fmt
8
9#include <linux/delay.h>
10#include <linux/idr.h>
11#include <linux/kernel.h>
12#include <linux/io.h>
13#include <linux/platform_device.h>
14#include <linux/pm_opp.h>
15#include <linux/pm_runtime.h>
16#include <linux/pm_domain.h>
17#include <linux/pm_qos.h>
18#include <linux/pm_clock.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/sched.h>
22#include <linux/suspend.h>
23#include <linux/export.h>
24#include <linux/cpu.h>
25#include <linux/debugfs.h>
26
/* Provides a unique ID for each genpd device (used in the device name). */
static DEFINE_IDA(genpd_ida);

/* The bus for genpd_providers. */
static const struct bus_type genpd_provider_bus_type = {
	.name = "genpd_provider",
};

/* The parent for genpd_provider devices. */
static struct device genpd_provider_bus = {
	.init_name = "genpd_provider",
};
39
#define GENPD_RETRY_MAX_MS	250		/* Approximate */

/*
 * Invoke the per-device genpd callback @callback for @dev, if one has been
 * assigned in genpd->dev_ops. Evaluates to the callback's return value, or
 * to (type)0 when no callback is installed. @dev is evaluated at most once.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
53
/* List of registered generic PM domains, protected by gpd_list_lock. */
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

/*
 * Per-domain locking operations. A genpd is protected by a mutex, a
 * spinlock or a raw spinlock depending on its flags; these ops hide the
 * difference behind the genpd_lock*() macros below.
 */
struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};
63
/* Lock a sleepable (non-IRQ-safe) genpd via its mutex. */
static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

/* As genpd_lock_mtx(), with a lockdep subclass for parent/child nesting. */
static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

/* Interruptible lock; propagates mutex_lock_interruptible()'s return value. */
static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}
79
80static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
81{
82 return mutex_unlock(&genpd->mlock);
83}
84
/* Lock ops for domains whose callbacks may sleep (default). */
static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};
91
/*
 * Lock an IRQ-safe genpd: take the spinlock with IRQs disabled and stash
 * the saved flags in the genpd for the matching unlock.
 */
static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

/* As genpd_lock_spin(), with a lockdep subclass for parent/child nesting. */
static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

/* Spinlocks cannot sleep, so the "interruptible" variant always returns 0. */
static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

/* Release the spinlock, restoring the IRQ flags saved at lock time. */
static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

/* Lock ops for GENPD_FLAG_IRQ_SAFE domains. */
static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};
133
/* Raw-spinlock variants: as the *_spin ops, but usable on PREEMPT_RT paths
 * where the regular spinlock would be a sleeping lock.
 */
static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
	genpd->raw_lock_flags = flags;
}

/* As genpd_lock_raw_spin(), with a lockdep subclass for nesting. */
static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
	genpd->raw_lock_flags = flags;
}

/* Raw spinlocks cannot sleep, so this always succeeds and returns 0. */
static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
	genpd->raw_lock_flags = flags;
	return 0;
}

/* Release the raw spinlock, restoring the IRQ flags saved at lock time. */
static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->raw_slock)
{
	raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
}

static const struct genpd_lock_ops genpd_raw_spin_ops = {
	.lock = genpd_lock_raw_spin,
	.lock_nested = genpd_lock_nested_raw_spin,
	.lock_interruptible = genpd_lock_interruptible_raw_spin,
	.unlock = genpd_unlock_raw_spin,
};
175
/* Dispatch locking through the ops selected for this genpd's lock type. */
#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)

/* Helpers testing the GENPD_FLAG_* configuration bits of a domain. */
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
#define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)
#define genpd_is_no_sync_state(genpd)	(genpd->flags & GENPD_FLAG_NO_SYNC_STATE)
#define genpd_is_no_stay_on(genpd)	(genpd->flags & GENPD_FLAG_NO_STAY_ON)
191
/*
 * irq_safe_dev_in_sleep_domain - Check for a suboptimal IRQ-safe setup.
 * @dev: The device to check.
 * @genpd: The PM domain @dev is attached to.
 *
 * Returns true when @dev is IRQ safe while @genpd's callbacks may sleep;
 * in that configuration the domain cannot be powered off on behalf of @dev.
 */
static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain, which
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				dev_name(&genpd->dev));

	return ret;
}
213
static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}
233
/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 *
 * Returns the genpd, or ERR_PTR(-EINVAL) when @dev has no valid pm_domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}
245
246struct device *dev_to_genpd_dev(struct device *dev)
247{
248 struct generic_pm_domain *genpd = dev_to_genpd(dev);
249
250 if (IS_ERR(genpd))
251 return ERR_CAST(genpd);
252
253 return &genpd->dev;
254}
255
/* Invoke the optional per-device ->stop() callback for @dev. */
static int genpd_stop_dev(const struct generic_pm_domain *genpd,
				struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

/* Invoke the optional per-device ->start() callback for @dev. */
static int genpd_start_dev(const struct generic_pm_domain *genpd,
				struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
267
268static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
269{
270 bool ret = false;
271
272 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
273 ret = !!atomic_dec_and_test(&genpd->sd_count);
274
275 return ret;
276}
277
/*
 * Increment the subdomain counter of @genpd. The barrier orders the
 * increment before subsequent accesses, since sd_count is also read
 * locklessly (see the atomic_read() checks in genpd_power_off()).
 */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}
283
#ifdef CONFIG_DEBUG_FS
/* Root debugfs directory for genpd; NULL until it has been created. */
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

/* Remove the per-domain debugfs entry, if the root directory exists. */
static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	if (!genpd_debugfs_dir)
		return;

	debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
}
296
/*
 * genpd_update_accounting - Credit elapsed time to the on/idle counters.
 * @genpd: The PM domain whose accounting to update.
 *
 * Called right after @genpd's status has changed. The guard below skips
 * the update when the fast monotonic clock has not advanced past the last
 * recorded accounting time.
 */
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is active, it means we are just
	 * out of off and so update the idle time and vice
	 * versa.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
319
320static void genpd_reflect_residency(struct generic_pm_domain *genpd)
321{
322 struct genpd_governor_data *gd = genpd->gd;
323 struct genpd_power_state *state, *next_state;
324 unsigned int state_idx;
325 s64 sleep_ns, target_ns;
326
327 if (!gd || !gd->reflect_residency)
328 return;
329
330 sleep_ns = ktime_to_ns(ktime_sub(ktime_get(), gd->last_enter));
331 state_idx = genpd->state_idx;
332 state = &genpd->states[state_idx];
333 target_ns = state->power_off_latency_ns + state->residency_ns;
334
335 if (sleep_ns < target_ns) {
336 state->above++;
337 } else if (state_idx < (genpd->state_count -1)) {
338 next_state = &genpd->states[state_idx + 1];
339 target_ns = next_state->power_off_latency_ns +
340 next_state->residency_ns;
341
342 if (sleep_ns >= target_ns)
343 state->below++;
344 }
345
346 gd->reflect_residency = false;
347}
#else
/* No-op stubs used when CONFIG_DEBUG_FS is disabled. */
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
static inline void genpd_reflect_residency(struct generic_pm_domain *genpd) {}
#endif
354
/*
 * _genpd_reeval_performance_state - Aggregate the target performance state.
 * @genpd: The PM domain to re-evaluate.
 * @state: The newly requested performance state of one device/subdomain.
 *
 * Returns the highest performance state required across all devices and
 * subdomains of @genpd, given that one vote has changed to @state. The
 * fast paths below rely on genpd->performance_state being the current max.
 */
static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
		unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}
399
/*
 * genpd_xlate_performance_state - Translate a performance state for a parent.
 * @genpd: The child PM domain.
 * @parent: The parent PM domain.
 * @pstate: The child's performance state to translate.
 *
 * When the parent controls performance states itself, translate @pstate
 * into the parent's scale via the OPP tables; otherwise pass it through.
 */
static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
		struct generic_pm_domain *parent,
		unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
			parent->opp_table,
			pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
		unsigned int state, int depth);
414
415static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
416{
417 struct generic_pm_domain *parent = link->parent;
418 int parent_state;
419
420 genpd_lock_nested(parent, depth + 1);
421
422 parent_state = link->prev_performance_state;
423 link->performance_state = parent_state;
424
425 parent_state = _genpd_reeval_performance_state(parent, parent_state);
426 if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
427 pr_err("%s: Failed to roll back to %d performance state\n",
428 parent->name, parent_state);
429 }
430
431 genpd_unlock(parent);
432}
433
/*
 * _genpd_set_parent_state - Propagate a performance state to one parent.
 * @genpd: The child PM domain.
 * @link: The link to the parent to update.
 * @state: The child's target performance state.
 * @depth: Lockdep nesting depth for taking the parent's lock.
 *
 * Translates @state into the parent's scale, records the previous vote in
 * link->prev_performance_state (for possible rollback) and applies the
 * re-aggregated state to the parent. Restores the old vote on failure.
 */
static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
		struct gpd_link *link,
		unsigned int state, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state, ret;

	/* Find parent's performance state */
	ret = genpd_xlate_performance_state(genpd, parent, state);
	if (unlikely(ret < 0))
		return ret;

	parent_state = ret;

	genpd_lock_nested(parent, depth + 1);

	link->prev_performance_state = link->performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
	if (ret)
		link->performance_state = link->prev_performance_state;

	genpd_unlock(parent);

	return ret;
}
462
/*
 * _genpd_set_performance_state - Change the performance state of @genpd.
 * @genpd: The PM domain whose state is to change.
 * @state: The aggregated target performance state.
 * @depth: Lockdep nesting depth for parent locks.
 *
 * Propagates the requirement to parent domains before the local change
 * when scaling up, and after it when scaling down, with the domain's own
 * ->set_performance_state() in between. On failure, parents that were
 * already updated are rolled back. Caller holds @genpd's lock.
 */
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
		unsigned int state, int depth)
{
	struct gpd_link *link = NULL;
	int ret;

	if (state == genpd->performance_state)
		return 0;

	/* When scaling up, propagate to parents first in normal order */
	if (state > genpd->performance_state) {
		list_for_each_entry(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_up;
		}
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret) {
			/* link != NULL means parents were already updated. */
			if (link)
				goto rollback_parents_up;
			return ret;
		}
	}

	/* When scaling down, propagate to parents last in reverse order */
	if (state < genpd->performance_state) {
		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_down;
		}
	}

	genpd->performance_state = state;
	return 0;

rollback_parents_up:
	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
rollback_parents_down:
	list_for_each_entry_continue(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
}
511
/*
 * genpd_set_performance_state - Update @dev's vote and apply the aggregate.
 * @dev: The device whose performance-state vote changes.
 * @state: The new vote (0 drops the vote).
 *
 * Records the new per-device vote, re-aggregates the domain's target state
 * and applies it. The previous vote is restored on failure. Called with
 * the genpd's lock held (see genpd_dev_pm_set_performance_state()).
 */
static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}
532
533static int genpd_drop_performance_state(struct device *dev)
534{
535 unsigned int prev_state = dev_gpd_data(dev)->performance_state;
536
537 if (!genpd_set_performance_state(dev, 0))
538 return prev_state;
539
540 return 0;
541}
542
/* Re-apply a non-zero vote saved by genpd_drop_performance_state(). */
static void genpd_restore_performance_state(struct device *dev,
		unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}
549
/*
 * genpd_dev_pm_set_performance_state - Locked helper for the exported API.
 *
 * While @dev is runtime suspended, only cache the requested state in
 * rpm_pstate (presumably applied by the runtime PM callbacks on resume —
 * not visible in this file section); otherwise apply it immediately and
 * clear the cached value on success.
 */
static int genpd_dev_pm_set_performance_state(struct device *dev,
		unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	int ret = 0;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}
568
/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when the
 *	   device doesn't have any performance state constraints left (And so
 *	   the device wouldn't participate anymore to find the target
 *	   performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
599
/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. It's assumed that the users
 * guarantee that the genpd wouldn't be detached while this routine is getting
 * called. Additionally, it's also assumed that @dev isn't runtime suspended
 * (RPM_SUSPENDED).
 *
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	/* Timing data is only present when a governor is in use. */
	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
629
/**
 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 * @dev: A device that is attached to the genpd.
 *
 * This routine should typically be called for a device, at the point of when a
 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 *
 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 * valid value has been set.
 */
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return KTIME_MAX;

	/* The value is only tracked when governor data is present. */
	if (genpd->gd)
		return genpd->gd->next_hrtimer;

	return KTIME_MAX;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
654
/**
 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
 *
 * @dev: A device that is attached to the genpd.
 *
 * Allows a consumer of the genpd to notify the provider that the next power off
 * should be synchronous.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 */
void dev_pm_genpd_synced_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	genpd_lock(genpd);
	genpd->synced_poweroff = true;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
679
/**
 * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
 *
 * @dev: Device for which the HW-mode should be changed.
 * @enable: Value to set or unset the HW-mode.
 *
 * Some PM domains can rely on HW signals to control the power for a device. To
 * allow a consumer driver to switch the behaviour for its device in runtime,
 * which may be beneficial from a latency or energy point of view, this function
 * may be called.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (!genpd->set_hwmode_dev)
		return -EOPNOTSUPP;

	genpd_lock(genpd);

	/* Nothing to do if the requested mode is already set. */
	if (dev_gpd_data(dev)->hw_mode == enable)
		goto out;

	ret = genpd->set_hwmode_dev(genpd, dev, enable);
	if (!ret)
		dev_gpd_data(dev)->hw_mode = enable;

out:
	genpd_unlock(genpd);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
722
/**
 * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
 *
 * @dev: Device for which the current HW-mode setting should be fetched.
 *
 * This helper function allows consumer drivers to fetch the current HW mode
 * setting of the device.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns the HW mode setting of device from SW cached hw_mode.
 */
bool dev_pm_genpd_get_hwmode(struct device *dev)
{
	return dev_gpd_data(dev)->hw_mode;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);
741
/**
 * dev_pm_genpd_rpm_always_on() - Control if the PM domain can be powered off.
 *
 * @dev: Device for which the PM domain may need to stay on for.
 * @on: Value to set or unset for the condition.
 *
 * For some usecases a consumer driver requires its device to remain power-on
 * from the PM domain perspective during runtime. This function allows the
 * behaviour to be dynamically controlled for a device attached to a genpd.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	/* The flag is checked under the genpd lock in genpd_power_off(). */
	genpd_lock(genpd);
	dev_gpd_data(dev)->rpm_always_on = on;
	genpd_unlock(genpd);

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);
772
/**
 * dev_pm_genpd_is_on() - Get device's current power domain status
 *
 * @dev: Device to get the current power status
 *
 * This function checks whether the generic power domain associated with the
 * given device is on or not by verifying if genpd_status_on equals
 * GENPD_STATE_ON.
 *
 * Note: this function returns the power status of the genpd at the time of the
 * call. The power status may change after due to activity from other devices
 * sharing the same genpd. Therefore, this information should not be relied for
 * long-term decisions about the device power state.
 *
 * Return: 'true' if the device's power domain is on, 'false' otherwise.
 */
bool dev_pm_genpd_is_on(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool is_on;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return false;

	genpd_lock(genpd);
	is_on = genpd_status_on(genpd);
	genpd_unlock(genpd);

	return is_on;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_is_on);
805
806/**
807 * pm_genpd_inc_rejected() - Adjust the rejected/usage counts for an idle-state.
808 *
809 * @genpd: The PM domain the idle-state belongs to.
810 * @state_idx: The index of the idle-state that failed.
811 *
812 * In some special cases the ->power_off() callback is asynchronously powering
813 * off the PM domain, leading to that it may return zero to indicate success,
814 * even though the actual power-off could fail. To account for this correctly in
815 * the rejected/usage counts for the idle-state statistics, users can call this
816 * function to adjust the values.
817 *
818 * It is assumed that the users guarantee that the genpd doesn't get removed
819 * while this routine is getting called.
820 */
821void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
822 unsigned int state_idx)
823{
824 genpd_lock(genpd);
825 genpd->states[genpd->state_idx].rejected++;
826 genpd->states[genpd->state_idx].usage--;
827 genpd_unlock(genpd);
828}
829EXPORT_SYMBOL_GPL(pm_genpd_inc_rejected);
830
/*
 * _genpd_power_on - Execute the ->power_on() callback for @genpd.
 * @genpd: The PM domain to power on.
 * @timed: When true (and governor data exists without fwnode-described
 *	   states), measure the power-on latency and record a new worst case.
 *
 * Sends GENPD_NOTIFY_PRE_ON before and GENPD_NOTIFY_ON after a successful
 * power-on; on a ->power_on() failure the notifiers are informed with
 * GENPD_NOTIFY_OFF instead. Returns 0 on success or a negative error.
 */
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	/* A new worst-case latency invalidates the governor's cached data. */
	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 dev_name(&genpd->dev), "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	genpd->synced_poweroff = false;
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}
881
/*
 * _genpd_power_off - Execute the ->power_off() callback for @genpd.
 * @genpd: The PM domain to power off.
 * @timed: When true (and governor data exists without fwnode-described
 *	   states), measure the power-off latency and record a new worst case.
 *
 * Sends GENPD_NOTIFY_PRE_OFF before and GENPD_NOTIFY_OFF after a successful
 * power-off; when ->power_off() fails ("busy"), the notifiers are informed
 * with GENPD_NOTIFY_ON. Returns 0 on success or a negative error.
 */
static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	/* A new worst-case latency invalidates the governor's cached data. */
	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 dev_name(&genpd->dev), "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}
931
/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}
943
/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd. Caller holds @genpd's lock.
 */
static void genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			    unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * The domain is already in the "power off" state.
	 * System suspend is in progress.
	 * The domain is configured as always on.
	 * The domain was on at boot and still need to stay on.
	 * The domain has a subdomain being powered on.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0 ||
	    genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd) ||
	    genpd->stay_on || atomic_read(&genpd->sd_count) > 0)
		return;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that, there's no need for
	 * additional locking, as powering on a child, requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;

		/* The device may need its PM domain to stay powered on. */
		if (to_gpd_data(pdd)->rpm_always_on)
			return;
	}

	/* At most the single in-transition device (@one_dev_on) may be busy. */
	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return;

	if (_genpd_power_off(genpd, true)) {
		genpd->states[genpd->state_idx].rejected++;
		return;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	/* Propagate the power-off upwards, as parents may now be idle too. */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}
}
1034
/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it. Caller holds @genpd's lock.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/* Reflect over the entered idle-states residency for debugfs. */
	genpd_reflect_residency(genpd);

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		/* Pin the parent on before taking its lock. */
		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	/* Unwind the parents already powered on, in reverse order. */
	list_for_each_entry_continue_reverse(link,
					     &genpd->child_links,
					     child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}
1095
/* Start @dev via the ->start() callback of the genpd it is attached to. */
static int genpd_dev_pm_start(struct device *dev)
{
	return genpd_start_dev(dev_to_genpd(dev), dev);
}
1102
/*
 * genpd_dev_pm_qos_notifier - DEV_PM_QOS_RESUME_LATENCY constraint notifier.
 *
 * Called when the resume-latency QoS constraint of a device changes. Walks up
 * the device hierarchy, flagging the timing data and governor data of each
 * affected genpd so the next power-off decision re-evaluates the constraints.
 */
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		/* ERR_PTR() means "no genpd found for this device yet". */
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		/* Propagate upwards, unless the parent ignores children. */
		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}
1144
1145/**
1146 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
1147 * @work: Work structure used for scheduling the execution of this function.
1148 */
1149static void genpd_power_off_work_fn(struct work_struct *work)
1150{
1151 struct generic_pm_domain *genpd;
1152
1153 genpd = container_of(work, struct generic_pm_domain, power_off_work);
1154
1155 genpd_lock(genpd);
1156 genpd_power_off(genpd, false, 0);
1157 genpd_unlock(genpd);
1158}
1159
1160/**
1161 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
1162 * @dev: Device to handle.
1163 */
1164static int __genpd_runtime_suspend(struct device *dev)
1165{
1166 int (*cb)(struct device *__dev);
1167
1168 if (dev->type && dev->type->pm)
1169 cb = dev->type->pm->runtime_suspend;
1170 else if (dev->class && dev->class->pm)
1171 cb = dev->class->pm->runtime_suspend;
1172 else if (dev->bus && dev->bus->pm)
1173 cb = dev->bus->pm->runtime_suspend;
1174 else
1175 cb = NULL;
1176
1177 if (!cb && dev->driver && dev->driver->pm)
1178 cb = dev->driver->pm->runtime_suspend;
1179
1180 return cb ? cb(dev) : 0;
1181}
1182
1183/**
1184 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
1185 * @dev: Device to handle.
1186 */
1187static int __genpd_runtime_resume(struct device *dev)
1188{
1189 int (*cb)(struct device *__dev);
1190
1191 if (dev->type && dev->type->pm)
1192 cb = dev->type->pm->runtime_resume;
1193 else if (dev->class && dev->class->pm)
1194 cb = dev->class->pm->runtime_resume;
1195 else if (dev->bus && dev->bus->pm)
1196 cb = dev->bus->pm->runtime_resume;
1197 else
1198 cb = NULL;
1199
1200 if (!cb && dev->driver && dev->driver->pm)
1201 cb = dev->driver->pm->runtime_resume;
1202
1203 return cb ? cb(dev) : 0;
1204}
1205
1206/**
1207 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
1208 * @dev: Device to suspend.
1209 *
1210 * Carry out a runtime suspend of a device under the assumption that its
1211 * pm_domain field points to the domain member of an object of type
1212 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1213 */
1214static int genpd_runtime_suspend(struct device *dev)
1215{
1216 struct generic_pm_domain *genpd;
1217 bool (*suspend_ok)(struct device *__dev);
1218 struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1219 struct gpd_timing_data *td = gpd_data->td;
1220 bool runtime_pm = pm_runtime_enabled(dev);
1221 ktime_t time_start = 0;
1222 s64 elapsed_ns;
1223 int ret;
1224
1225 dev_dbg(dev, "%s()\n", __func__);
1226
1227 genpd = dev_to_genpd(dev);
1228 if (IS_ERR(genpd))
1229 return -EINVAL;
1230
1231 /*
1232 * A runtime PM centric subsystem/driver may re-use the runtime PM
1233 * callbacks for other purposes than runtime PM. In those scenarios
1234 * runtime PM is disabled. Under these circumstances, we shall skip
1235 * validating/measuring the PM QoS latency.
1236 */
1237 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
1238 if (runtime_pm && suspend_ok && !suspend_ok(dev))
1239 return -EBUSY;
1240
1241 /* Measure suspend latency. */
1242 if (td && runtime_pm)
1243 time_start = ktime_get();
1244
1245 ret = __genpd_runtime_suspend(dev);
1246 if (ret)
1247 return ret;
1248
1249 ret = genpd_stop_dev(genpd, dev);
1250 if (ret) {
1251 __genpd_runtime_resume(dev);
1252 return ret;
1253 }
1254
1255 /* Update suspend latency value if the measured time exceeds it. */
1256 if (td && runtime_pm) {
1257 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1258 if (elapsed_ns > td->suspend_latency_ns) {
1259 td->suspend_latency_ns = elapsed_ns;
1260 dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
1261 elapsed_ns);
1262 genpd->gd->max_off_time_changed = true;
1263 td->constraint_changed = true;
1264 }
1265 }
1266
1267 /*
1268 * If power.irq_safe is set, this routine may be run with
1269 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
1270 */
1271 if (irq_safe_dev_in_sleep_domain(dev, genpd))
1272 return 0;
1273
1274 genpd_lock(genpd);
1275 genpd_power_off(genpd, true, 0);
1276 gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1277 genpd_unlock(genpd);
1278
1279 return 0;
1280}
1281
1282/**
1283 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
1284 * @dev: Device to resume.
1285 *
1286 * Carry out a runtime resume of a device under the assumption that its
1287 * pm_domain field points to the domain member of an object of type
1288 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1289 */
1290static int genpd_runtime_resume(struct device *dev)
1291{
1292 struct generic_pm_domain *genpd;
1293 struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1294 struct gpd_timing_data *td = gpd_data->td;
1295 bool timed = td && pm_runtime_enabled(dev);
1296 ktime_t time_start = 0;
1297 s64 elapsed_ns;
1298 int ret;
1299
1300 dev_dbg(dev, "%s()\n", __func__);
1301
1302 genpd = dev_to_genpd(dev);
1303 if (IS_ERR(genpd))
1304 return -EINVAL;
1305
1306 /*
1307 * As we don't power off a non IRQ safe domain, which holds
1308 * an IRQ safe device, we don't need to restore power to it.
1309 */
1310 if (irq_safe_dev_in_sleep_domain(dev, genpd))
1311 goto out;
1312
1313 genpd_lock(genpd);
1314 genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
1315 ret = genpd_power_on(genpd, 0);
1316 genpd_unlock(genpd);
1317
1318 if (ret)
1319 return ret;
1320
1321 out:
1322 /* Measure resume latency. */
1323 if (timed)
1324 time_start = ktime_get();
1325
1326 ret = genpd_start_dev(genpd, dev);
1327 if (ret)
1328 goto err_poweroff;
1329
1330 ret = __genpd_runtime_resume(dev);
1331 if (ret)
1332 goto err_stop;
1333
1334 /* Update resume latency value if the measured time exceeds it. */
1335 if (timed) {
1336 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1337 if (elapsed_ns > td->resume_latency_ns) {
1338 td->resume_latency_ns = elapsed_ns;
1339 dev_dbg(dev, "resume latency exceeded, %lld ns\n",
1340 elapsed_ns);
1341 genpd->gd->max_off_time_changed = true;
1342 td->constraint_changed = true;
1343 }
1344 }
1345
1346 return 0;
1347
1348err_stop:
1349 genpd_stop_dev(genpd, dev);
1350err_poweroff:
1351 if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1352 genpd_lock(genpd);
1353 genpd_power_off(genpd, true, 0);
1354 gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1355 genpd_unlock(genpd);
1356 }
1357
1358 return ret;
1359}
1360
/* Set by the "pd_ignore_unused" kernel command line option. */
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1; /* 1 tells __setup() the option was consumed */
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
1368
1369/**
1370 * genpd_power_off_unused - Power off all PM domains with no devices in use.
1371 */
1372static int __init genpd_power_off_unused(void)
1373{
1374 struct generic_pm_domain *genpd;
1375
1376 if (pd_ignore_unused) {
1377 pr_warn("genpd: Not disabling unused power domains\n");
1378 return 0;
1379 }
1380
1381 pr_info("genpd: Disabling unused power domains\n");
1382 mutex_lock(&gpd_list_lock);
1383
1384 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
1385 genpd_queue_power_off_work(genpd);
1386 }
1387
1388 mutex_unlock(&gpd_list_lock);
1389
1390 return 0;
1391}
1392late_initcall_sync(genpd_power_off_unused);
1393
1394#ifdef CONFIG_PM_SLEEP
1395
1396/**
1397 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1398 * @genpd: PM domain to power off, if possible.
1399 * @use_lock: use the lock.
1400 * @depth: nesting count for lockdep.
1401 *
1402 * Check if the given PM domain can be powered off (during system suspend or
1403 * hibernation) and do that if so. Also, in that case propagate to its parents.
1404 *
1405 * This function is only called in "noirq" and "syscore" stages of system power
1406 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1407 * these cases the lock must be held.
1408 */
1409static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1410 unsigned int depth)
1411{
1412 struct gpd_link *link;
1413
1414 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1415 return;
1416
1417 if (genpd->suspended_count != genpd->device_count
1418 || atomic_read(&genpd->sd_count) > 0)
1419 return;
1420
1421 /* Check that the children are in their deepest (powered-off) state. */
1422 list_for_each_entry(link, &genpd->parent_links, parent_node) {
1423 struct generic_pm_domain *child = link->child;
1424 if (child->state_idx < child->state_count - 1)
1425 return;
1426 }
1427
1428 if (genpd->gov && genpd->gov->system_power_down_ok) {
1429 if (!genpd->gov->system_power_down_ok(&genpd->domain))
1430 return;
1431 } else {
1432 /* Default to the deepest state. */
1433 genpd->state_idx = genpd->state_count - 1;
1434 }
1435
1436 if (_genpd_power_off(genpd, false)) {
1437 genpd->states[genpd->state_idx].rejected++;
1438 return;
1439 } else {
1440 genpd->states[genpd->state_idx].usage++;
1441 }
1442
1443 genpd->status = GENPD_STATE_OFF;
1444
1445 list_for_each_entry(link, &genpd->child_links, child_node) {
1446 genpd_sd_counter_dec(link->parent);
1447
1448 if (use_lock)
1449 genpd_lock_nested(link->parent, depth + 1);
1450
1451 genpd_sync_power_off(link->parent, use_lock, depth + 1);
1452
1453 if (use_lock)
1454 genpd_unlock(link->parent);
1455 }
1456}
1457
1458/**
1459 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1460 * @genpd: PM domain to power on.
1461 * @use_lock: use the lock.
1462 * @depth: nesting count for lockdep.
1463 *
1464 * This function is only called in "noirq" and "syscore" stages of system power
1465 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1466 * these cases the lock must be held.
1467 */
1468static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1469 unsigned int depth)
1470{
1471 struct gpd_link *link;
1472
1473 if (genpd_status_on(genpd))
1474 return;
1475
1476 list_for_each_entry(link, &genpd->child_links, child_node) {
1477 genpd_sd_counter_inc(link->parent);
1478
1479 if (use_lock)
1480 genpd_lock_nested(link->parent, depth + 1);
1481
1482 genpd_sync_power_on(link->parent, use_lock, depth + 1);
1483
1484 if (use_lock)
1485 genpd_unlock(link->parent);
1486 }
1487
1488 _genpd_power_on(genpd, false);
1489 genpd->status = GENPD_STATE_ON;
1490}
1491
1492/**
1493 * genpd_prepare - Start power transition of a device in a PM domain.
1494 * @dev: Device to start the transition of.
1495 *
1496 * Start a power transition of a device (during a system-wide power transition)
1497 * under the assumption that its pm_domain field points to the domain member of
1498 * an object of type struct generic_pm_domain representing a PM domain
1499 * consisting of I/O devices.
1500 */
1501static int genpd_prepare(struct device *dev)
1502{
1503 struct generic_pm_domain *genpd;
1504 int ret;
1505
1506 dev_dbg(dev, "%s()\n", __func__);
1507
1508 genpd = dev_to_genpd(dev);
1509 if (IS_ERR(genpd))
1510 return -EINVAL;
1511
1512 genpd_lock(genpd);
1513 genpd->prepared_count++;
1514 genpd_unlock(genpd);
1515
1516 ret = pm_generic_prepare(dev);
1517 if (ret < 0) {
1518 genpd_lock(genpd);
1519
1520 genpd->prepared_count--;
1521
1522 genpd_unlock(genpd);
1523 }
1524
1525 /* Never return 1, as genpd don't cope with the direct_complete path. */
1526 return ret >= 0 ? 0 : ret;
1527}
1528
1529/**
1530 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1531 * I/O pm domain.
1532 * @dev: Device to suspend.
1533 * @suspend_noirq: Generic suspend_noirq callback.
1534 * @resume_noirq: Generic resume_noirq callback.
1535 *
1536 * Stop the device and remove power from the domain if all devices in it have
1537 * been stopped.
1538 */
1539static int genpd_finish_suspend(struct device *dev,
1540 int (*suspend_noirq)(struct device *dev),
1541 int (*resume_noirq)(struct device *dev))
1542{
1543 struct generic_pm_domain *genpd;
1544 int ret = 0;
1545
1546 genpd = dev_to_genpd(dev);
1547 if (IS_ERR(genpd))
1548 return -EINVAL;
1549
1550 ret = suspend_noirq(dev);
1551 if (ret)
1552 return ret;
1553
1554 if (device_awake_path(dev) && genpd_is_active_wakeup(genpd) &&
1555 !device_out_band_wakeup(dev))
1556 return 0;
1557
1558 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1559 !pm_runtime_status_suspended(dev)) {
1560 ret = genpd_stop_dev(genpd, dev);
1561 if (ret) {
1562 resume_noirq(dev);
1563 return ret;
1564 }
1565 }
1566
1567 genpd_lock(genpd);
1568 genpd->suspended_count++;
1569 genpd_sync_power_off(genpd, true, 0);
1570 genpd_unlock(genpd);
1571
1572 return 0;
1573}
1574
1575/**
1576 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1577 * @dev: Device to suspend.
1578 *
1579 * Stop the device and remove power from the domain if all devices in it have
1580 * been stopped.
1581 */
1582static int genpd_suspend_noirq(struct device *dev)
1583{
1584 dev_dbg(dev, "%s()\n", __func__);
1585
1586 return genpd_finish_suspend(dev,
1587 pm_generic_suspend_noirq,
1588 pm_generic_resume_noirq);
1589}
1590
1591/**
1592 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
1593 * @dev: Device to resume.
1594 * @resume_noirq: Generic resume_noirq callback.
1595 *
1596 * Restore power to the device's PM domain, if necessary, and start the device.
1597 */
1598static int genpd_finish_resume(struct device *dev,
1599 int (*resume_noirq)(struct device *dev))
1600{
1601 struct generic_pm_domain *genpd;
1602 int ret;
1603
1604 dev_dbg(dev, "%s()\n", __func__);
1605
1606 genpd = dev_to_genpd(dev);
1607 if (IS_ERR(genpd))
1608 return -EINVAL;
1609
1610 if (device_awake_path(dev) && genpd_is_active_wakeup(genpd) &&
1611 !device_out_band_wakeup(dev))
1612 return resume_noirq(dev);
1613
1614 genpd_lock(genpd);
1615 genpd_sync_power_on(genpd, true, 0);
1616 genpd->suspended_count--;
1617 genpd_unlock(genpd);
1618
1619 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1620 !pm_runtime_status_suspended(dev)) {
1621 ret = genpd_start_dev(genpd, dev);
1622 if (ret)
1623 return ret;
1624 }
1625
1626 return pm_generic_resume_noirq(dev);
1627}
1628
1629/**
1630 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1631 * @dev: Device to resume.
1632 *
1633 * Restore power to the device's PM domain, if necessary, and start the device.
1634 */
1635static int genpd_resume_noirq(struct device *dev)
1636{
1637 dev_dbg(dev, "%s()\n", __func__);
1638
1639 return genpd_finish_resume(dev, pm_generic_resume_noirq);
1640}
1641
1642/**
1643 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1644 * @dev: Device to freeze.
1645 *
1646 * Carry out a late freeze of a device under the assumption that its
1647 * pm_domain field points to the domain member of an object of type
1648 * struct generic_pm_domain representing a power domain consisting of I/O
1649 * devices.
1650 */
1651static int genpd_freeze_noirq(struct device *dev)
1652{
1653 dev_dbg(dev, "%s()\n", __func__);
1654
1655 return genpd_finish_suspend(dev,
1656 pm_generic_freeze_noirq,
1657 pm_generic_thaw_noirq);
1658}
1659
1660/**
1661 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1662 * @dev: Device to thaw.
1663 *
1664 * Start the device, unless power has been removed from the domain already
1665 * before the system transition.
1666 */
1667static int genpd_thaw_noirq(struct device *dev)
1668{
1669 dev_dbg(dev, "%s()\n", __func__);
1670
1671 return genpd_finish_resume(dev, pm_generic_thaw_noirq);
1672}
1673
1674/**
1675 * genpd_poweroff_noirq - Completion of hibernation of device in an
1676 * I/O PM domain.
1677 * @dev: Device to poweroff.
1678 *
1679 * Stop the device and remove power from the domain if all devices in it have
1680 * been stopped.
1681 */
1682static int genpd_poweroff_noirq(struct device *dev)
1683{
1684 dev_dbg(dev, "%s()\n", __func__);
1685
1686 return genpd_finish_suspend(dev,
1687 pm_generic_poweroff_noirq,
1688 pm_generic_restore_noirq);
1689}
1690
1691/**
1692 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1693 * @dev: Device to resume.
1694 *
1695 * Make sure the domain will be in the same power state as before the
1696 * hibernation the system is resuming from and start the device if necessary.
1697 */
1698static int genpd_restore_noirq(struct device *dev)
1699{
1700 dev_dbg(dev, "%s()\n", __func__);
1701
1702 return genpd_finish_resume(dev, pm_generic_restore_noirq);
1703}
1704
1705/**
1706 * genpd_complete - Complete power transition of a device in a power domain.
1707 * @dev: Device to complete the transition of.
1708 *
1709 * Complete a power transition of a device (during a system-wide power
1710 * transition) under the assumption that its pm_domain field points to the
1711 * domain member of an object of type struct generic_pm_domain representing
1712 * a power domain consisting of I/O devices.
1713 */
1714static void genpd_complete(struct device *dev)
1715{
1716 struct generic_pm_domain *genpd;
1717
1718 dev_dbg(dev, "%s()\n", __func__);
1719
1720 genpd = dev_to_genpd(dev);
1721 if (IS_ERR(genpd))
1722 return;
1723
1724 pm_generic_complete(dev);
1725
1726 genpd_lock(genpd);
1727
1728 genpd->prepared_count--;
1729 if (!genpd->prepared_count)
1730 genpd_queue_power_off_work(genpd);
1731
1732 genpd_unlock(genpd);
1733}
1734
/*
 * genpd_switch_state - Synchronously power the genpd of @dev off or on.
 * @suspend: true powers the domain off (if possible), false powers it on.
 *
 * Backend for dev_pm_genpd_suspend()/dev_pm_genpd_resume(). The lock is only
 * taken for IRQ safe domains; NOTE(review): presumably the syscore/noirq
 * callers provide the needed exclusion for non-IRQ-safe domains - confirm.
 */
static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		/* Count the device as suspended before trying to power off. */
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}
1760
1761/**
1762 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1763 * @dev: The device that is attached to the genpd, that can be suspended.
1764 *
1765 * This routine should typically be called for a device that needs to be
1766 * suspended during the syscore suspend phase. It may also be called during
1767 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1768 * genpd.
1769 */
1770void dev_pm_genpd_suspend(struct device *dev)
1771{
1772 genpd_switch_state(dev, true);
1773}
1774EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1775
1776/**
1777 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1778 * @dev: The device that is attached to the genpd, which needs to be resumed.
1779 *
1780 * This routine should typically be called for a device that needs to be resumed
1781 * during the syscore resume phase. It may also be called during suspend-to-idle
1782 * to resume a corresponding CPU device that is attached to a genpd.
1783 */
1784void dev_pm_genpd_resume(struct device *dev)
1785{
1786 genpd_switch_state(dev, false);
1787}
1788EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
1789
1790#else /* !CONFIG_PM_SLEEP */
1791
1792#define genpd_prepare NULL
1793#define genpd_suspend_noirq NULL
1794#define genpd_resume_noirq NULL
1795#define genpd_freeze_noirq NULL
1796#define genpd_thaw_noirq NULL
1797#define genpd_poweroff_noirq NULL
1798#define genpd_restore_noirq NULL
1799#define genpd_complete NULL
1800
1801#endif /* CONFIG_PM_SLEEP */
1802
/*
 * genpd_alloc_dev_data - Allocate the per-device genpd data for @dev.
 * @has_governor: also allocate the timing data used by a governor.
 *
 * Returns the new data on success, or an ERR_PTR() on failure. Fails with
 * -EINVAL if @dev already has domain data attached.
 */
static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							   bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		/* Start with no constraints and no pending wakeup. */
		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	/* Attach, unless some other domain data got there first. */
	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

 err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}
1858
/* Detach and free the per-device genpd data allocated by genpd_alloc_dev_data(). */
static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	/* Detach under the lock first, then free outside of it. */
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	dev_pm_opp_clear_config(gpd_data->opp_token);
	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}
1873
/*
 * genpd_update_cpumask - Set or clear @cpu in the cpumask of a CPU domain.
 *
 * Recurses to the parents first (with nested locking for lockdep), so the
 * whole chain of CPU domains tracks the same CPU. No-op for non-CPU domains.
 */
static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}
1895
1896static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1897{
1898 if (cpu >= 0)
1899 genpd_update_cpumask(genpd, cpu, true, 0);
1900}
1901
1902static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1903{
1904 if (cpu >= 0)
1905 genpd_update_cpumask(genpd, cpu, false, 0);
1906}
1907
1908static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1909{
1910 int cpu;
1911
1912 if (!genpd_is_cpu_domain(genpd))
1913 return -1;
1914
1915 for_each_possible_cpu(cpu) {
1916 if (get_cpu_device(cpu) == dev)
1917 return cpu;
1918 }
1919
1920 return -1;
1921}
1922
/*
 * genpd_add_device - Attach @dev to @genpd.
 * @base_dev: device used to resolve the CPU number for CPU domains (may
 *            differ from @dev for virtual devices).
 *
 * Allocates the per-device data, runs the domain's ->attach_dev() callback and
 * links the device into the domain's device list. On success a resume-latency
 * QoS notifier is registered for the device.
 */
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	/* Timing data is only needed when the domain has a governor. */
	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false;

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
	dev_pm_domain_set(dev, &genpd->domain);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}
1965
1966/**
1967 * pm_genpd_add_device - Add a device to an I/O PM domain.
1968 * @genpd: PM domain to add the device to.
1969 * @dev: Device to be added.
1970 */
1971int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1972{
1973 int ret;
1974
1975 if (!genpd || !dev)
1976 return -EINVAL;
1977
1978 mutex_lock(&gpd_list_lock);
1979 ret = genpd_add_device(genpd, dev, dev);
1980 mutex_unlock(&gpd_list_lock);
1981
1982 return ret;
1983}
1984EXPORT_SYMBOL_GPL(pm_genpd_add_device);
1985
/*
 * genpd_remove_device - Detach @dev from @genpd.
 *
 * Fails with -EAGAIN while a system-wide power transition is in progress
 * (prepared_count > 0). On success the per-device data is freed and the
 * domain's ->detach_dev() callback is invoked.
 */
static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	/* Stop QoS updates while the device is being detached. */
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	if (genpd->gd)
		genpd->gd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	dev_pm_domain_set(dev, NULL);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	/* The device stays in the domain - re-add the notifier removed above. */
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}
2032
2033/**
2034 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
2035 * @dev: Device to be removed.
2036 */
2037int pm_genpd_remove_device(struct device *dev)
2038{
2039 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
2040
2041 if (!genpd)
2042 return -EINVAL;
2043
2044 return genpd_remove_device(genpd, dev);
2045}
2046EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
2047
2048/**
2049 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
2050 *
2051 * @dev: Device that should be associated with the notifier
2052 * @nb: The notifier block to register
2053 *
2054 * Users may call this function to add a genpd power on/off notifier for an
2055 * attached @dev. Only one notifier per device is allowed. The notifier is
2056 * sent when genpd is powering on/off the PM domain.
2057 *
2058 * It is assumed that the user guarantee that the genpd wouldn't be detached
2059 * while this routine is getting called.
2060 *
2061 * Returns 0 on success and negative error values on failures.
2062 */
2063int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
2064{
2065 struct generic_pm_domain *genpd;
2066 struct generic_pm_domain_data *gpd_data;
2067 int ret;
2068
2069 genpd = dev_to_genpd_safe(dev);
2070 if (!genpd)
2071 return -ENODEV;
2072
2073 if (WARN_ON(!dev->power.subsys_data ||
2074 !dev->power.subsys_data->domain_data))
2075 return -EINVAL;
2076
2077 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2078 if (gpd_data->power_nb)
2079 return -EEXIST;
2080
2081 genpd_lock(genpd);
2082 ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
2083 genpd_unlock(genpd);
2084
2085 if (ret) {
2086 dev_warn(dev, "failed to add notifier for PM domain %s\n",
2087 dev_name(&genpd->dev));
2088 return ret;
2089 }
2090
2091 gpd_data->power_nb = nb;
2092 return 0;
2093}
2094EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
2095
2096/**
2097 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
2098 *
2099 * @dev: Device that is associated with the notifier
2100 *
2101 * Users may call this function to remove a genpd power on/off notifier for an
2102 * attached @dev.
2103 *
2104 * It is assumed that the user guarantee that the genpd wouldn't be detached
2105 * while this routine is getting called.
2106 *
2107 * Returns 0 on success and negative error values on failures.
2108 */
2109int dev_pm_genpd_remove_notifier(struct device *dev)
2110{
2111 struct generic_pm_domain *genpd;
2112 struct generic_pm_domain_data *gpd_data;
2113 int ret;
2114
2115 genpd = dev_to_genpd_safe(dev);
2116 if (!genpd)
2117 return -ENODEV;
2118
2119 if (WARN_ON(!dev->power.subsys_data ||
2120 !dev->power.subsys_data->domain_data))
2121 return -EINVAL;
2122
2123 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2124 if (!gpd_data->power_nb)
2125 return -ENODEV;
2126
2127 genpd_lock(genpd);
2128 ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
2129 gpd_data->power_nb);
2130 genpd_unlock(genpd);
2131
2132 if (ret) {
2133 dev_warn(dev, "failed to remove notifier for PM domain %s\n",
2134 dev_name(&genpd->dev));
2135 return ret;
2136 }
2137
2138 gpd_data->power_nb = NULL;
2139 return 0;
2140}
2141EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
2142
/*
 * genpd_add_subdomain - Link @subdomain as a child of @genpd.
 *
 * Lock order: the subdomain's lock is taken first, then the parent's, nested.
 * Fails if the link would power a subdomain from an off parent, or if the
 * same parent/child link already exists.
 */
static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				dev_name(&genpd->dev), subdomain->name);
		return -EINVAL;
	}

	/* Allocate before taking the locks; freed again on any failure. */
	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	/* An on subdomain must not hang off an off parent. */
	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	/* Reject duplicate parent/child links. */
	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}
2197
2198/**
2199 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2200 * @genpd: Leader PM domain to add the subdomain to.
2201 * @subdomain: Subdomain to be added.
2202 */
2203int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
2204 struct generic_pm_domain *subdomain)
2205{
2206 int ret;
2207
2208 mutex_lock(&gpd_list_lock);
2209 ret = genpd_add_subdomain(genpd, subdomain);
2210 mutex_unlock(&gpd_list_lock);
2211
2212 return ret;
2213}
2214EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
2215
2216/**
2217 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2218 * @genpd: Leader PM domain to remove the subdomain from.
2219 * @subdomain: Subdomain to be removed.
2220 */
2221int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
2222 struct generic_pm_domain *subdomain)
2223{
2224 struct gpd_link *l, *link;
2225 int ret = -EINVAL;
2226
2227 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
2228 return -EINVAL;
2229
2230 genpd_lock(subdomain);
2231 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2232
2233 if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
2234 pr_warn("%s: unable to remove subdomain %s\n",
2235 dev_name(&genpd->dev), subdomain->name);
2236 ret = -EBUSY;
2237 goto out;
2238 }
2239
2240 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
2241 if (link->child != subdomain)
2242 continue;
2243
2244 list_del(&link->parent_node);
2245 list_del(&link->child_node);
2246 kfree(link);
2247 if (genpd_status_on(subdomain))
2248 genpd_sd_counter_dec(genpd);
2249
2250 ret = 0;
2251 break;
2252 }
2253
2254out:
2255 genpd_unlock(genpd);
2256 genpd_unlock(subdomain);
2257
2258 return ret;
2259}
2260EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
2261
/* ->free_states() callback for the single default state allocated below. */
static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}
2267
2268static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
2269{
2270 struct genpd_power_state *state;
2271
2272 state = kzalloc(sizeof(*state), GFP_KERNEL);
2273 if (!state)
2274 return -ENOMEM;
2275
2276 genpd->states = state;
2277 genpd->state_count = 1;
2278 genpd->free_states = genpd_free_default_power_state;
2279
2280 return 0;
2281}
2282
/*
 * ->release() for the provider device embedded in struct
 * generic_pm_domain; its storage is freed with the genpd itself.
 */
static void genpd_provider_release(struct device *dev)
{
	/* nothing to be done here */
}
2287
/*
 * Allocate the per-domain data a genpd needs before registration:
 * optional cpumask, optional governor data, a default power state and
 * the provider device (named from genpd->name, with an IDA-based
 * suffix for firmware-provided names).
 */
static int genpd_alloc_data(struct generic_pm_domain *genpd)
{
	struct genpd_governor_data *gd = NULL;
	int ret;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	if (genpd->gov) {
		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
		if (!gd) {
			ret = -ENOMEM;
			goto free;
		}

		/* Sentinels: no off-time limit, no wakeup/hrtimer pending. */
		gd->max_off_time_ns = -1;
		gd->max_off_time_changed = true;
		gd->next_wakeup = KTIME_MAX;
		gd->next_hrtimer = KTIME_MAX;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			goto free;
	}

	genpd->gd = gd;
	device_initialize(&genpd->dev);
	genpd->dev.release = genpd_provider_release;
	genpd->dev.bus = &genpd_provider_bus_type;
	genpd->dev.parent = &genpd_provider_bus;

	/*
	 * Firmware-provided names get an IDA-allocated suffix.
	 * NOTE(review): presumably to guarantee uniqueness -- confirm.
	 */
	if (!genpd_is_dev_name_fw(genpd)) {
		dev_set_name(&genpd->dev, "%s", genpd->name);
	} else {
		ret = ida_alloc(&genpd_ida, GFP_KERNEL);
		if (ret < 0)
			goto put;

		genpd->device_id = ret;
		dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
	}

	return 0;
put:
	put_device(&genpd->dev);
	/* Only undo the default state table if it was allocated above. */
	if (genpd->free_states == genpd_free_default_power_state) {
		kfree(genpd->states);
		genpd->states = NULL;
	}
free:
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	kfree(gd);
	return ret;
}
2347
/* Release everything genpd_alloc_data() set up. */
static void genpd_free_data(struct generic_pm_domain *genpd)
{
	put_device(&genpd->dev);
	/* device_id stays -ENXIO unless an IDA id was allocated for it. */
	if (genpd->device_id != -ENXIO)
		ida_free(&genpd_ida, genpd->device_id);
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);
	kfree(genpd->gd);
}
2359
2360static void genpd_lock_init(struct generic_pm_domain *genpd)
2361{
2362 if (genpd_is_cpu_domain(genpd)) {
2363 raw_spin_lock_init(&genpd->raw_slock);
2364 genpd->lock_ops = &genpd_raw_spin_ops;
2365 } else if (genpd_is_irq_safe(genpd)) {
2366 spin_lock_init(&genpd->slock);
2367 genpd->lock_ops = &genpd_spin_ops;
2368 } else {
2369 mutex_init(&genpd->mlock);
2370 genpd->lock_ops = &genpd_mtx_ops;
2371 }
2372}
2373
2374#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2375static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off)
2376{
2377 genpd->stay_on = !genpd_is_no_stay_on(genpd) && !is_off;
2378}
2379#else
2380static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off)
2381{
2382 genpd->stay_on = false;
2383}
2384#endif
2385
2386/**
2387 * pm_genpd_init - Initialize a generic I/O PM domain object.
2388 * @genpd: PM domain object to initialize.
2389 * @gov: PM domain governor to associate with the domain (may be NULL).
2390 * @is_off: Initial value of the domain's power_is_off field.
2391 *
2392 * Returns 0 on successful initialization, else a negative error code.
2393 */
2394int pm_genpd_init(struct generic_pm_domain *genpd,
2395 struct dev_power_governor *gov, bool is_off)
2396{
2397 int ret;
2398
2399 if (IS_ERR_OR_NULL(genpd))
2400 return -EINVAL;
2401
2402 INIT_LIST_HEAD(&genpd->parent_links);
2403 INIT_LIST_HEAD(&genpd->child_links);
2404 INIT_LIST_HEAD(&genpd->dev_list);
2405 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2406 genpd_lock_init(genpd);
2407 genpd->gov = gov;
2408 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2409 atomic_set(&genpd->sd_count, 0);
2410 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2411 genpd_set_stay_on(genpd, is_off);
2412 genpd->sync_state = GENPD_SYNC_STATE_OFF;
2413 genpd->device_count = 0;
2414 genpd->provider = NULL;
2415 genpd->device_id = -ENXIO;
2416 genpd->has_provider = false;
2417 genpd->opp_table = NULL;
2418 genpd->accounting_time = ktime_get_mono_fast_ns();
2419 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2420 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2421 genpd->domain.ops.prepare = genpd_prepare;
2422 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2423 genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2424 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2425 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2426 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2427 genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2428 genpd->domain.ops.complete = genpd_complete;
2429 genpd->domain.start = genpd_dev_pm_start;
2430 genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;
2431
2432 if (genpd->flags & GENPD_FLAG_PM_CLK) {
2433 genpd->dev_ops.stop = pm_clk_suspend;
2434 genpd->dev_ops.start = pm_clk_resume;
2435 }
2436
2437 /* The always-on governor works better with the corresponding flag. */
2438 if (gov == &pm_domain_always_on_gov)
2439 genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2440
2441 /* Always-on domains must be powered on at initialization. */
2442 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2443 !genpd_status_on(genpd)) {
2444 pr_err("always-on PM domain %s is not on\n", genpd->name);
2445 return -EINVAL;
2446 }
2447
2448 /* Multiple states but no governor doesn't make sense. */
2449 if (!gov && genpd->state_count > 1)
2450 pr_warn("%s: no governor for states\n", genpd->name);
2451
2452 ret = genpd_alloc_data(genpd);
2453 if (ret)
2454 return ret;
2455
2456 mutex_lock(&gpd_list_lock);
2457 list_add(&genpd->gpd_list_node, &gpd_list);
2458 mutex_unlock(&gpd_list_lock);
2459 genpd_debug_add(genpd);
2460
2461 return 0;
2462}
2463EXPORT_SYMBOL_GPL(pm_genpd_init);
2464
/*
 * Tear down @genpd. Caller holds gpd_list_lock. Fails with -EBUSY while
 * the provider still exists, the domain parents other domains or has
 * devices attached.
 */
static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev));
		return -EBUSY;
	}

	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev));
		return -EBUSY;
	}

	/* Detach this domain from every parent it was a child of. */
	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	genpd_debug_remove(genpd);
	/* Flush a possibly pending power-off before freeing the data. */
	cancel_work_sync(&genpd->power_off_work);
	genpd_free_data(genpd);

	pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));

	return 0;
}
2502
2503/**
2504 * pm_genpd_remove - Remove a generic I/O PM domain
2505 * @genpd: Pointer to PM domain that is to be removed.
2506 *
2507 * To remove the PM domain, this function:
2508 * - Removes the PM domain as a subdomain to any parent domains,
2509 * if it was added.
2510 * - Removes the PM domain from the list of registered PM domains.
2511 *
2512 * The PM domain will only be removed, if the associated provider has
2513 * been removed, it is not a parent to any other PM domain and has no
2514 * devices associated with it.
2515 */
2516int pm_genpd_remove(struct generic_pm_domain *genpd)
2517{
2518 int ret;
2519
2520 mutex_lock(&gpd_list_lock);
2521 ret = genpd_remove(genpd);
2522 mutex_unlock(&gpd_list_lock);
2523
2524 return ret;
2525}
2526EXPORT_SYMBOL_GPL(pm_genpd_remove);
2527
2528#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2529
2530/*
2531 * Device Tree based PM domain providers.
2532 *
2533 * The code below implements generic device tree based PM domain providers that
2534 * bind device tree nodes with generic PM domains registered in the system.
2535 *
2536 * Any driver that registers generic PM domains and needs to support binding of
2537 * devices to these domains is supposed to register a PM domain provider, which
2538 * maps a PM domain specifier retrieved from the device tree to a PM domain.
2539 *
2540 * Two simple mapping functions have been provided for convenience:
2541 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2542 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2543 * index.
2544 */
2545
2546/**
2547 * struct of_genpd_provider - PM domain provider registration structure
2548 * @link: Entry in global list of PM domain providers
2549 * @node: Pointer to device tree node of PM domain provider
2550 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2551 * into a PM domain.
2552 * @data: context pointer to be passed into @xlate callback
2553 */
2554struct of_genpd_provider {
2555 struct list_head link;
2556 struct device_node *node;
2557 genpd_xlate_t xlate;
2558 void *data;
2559};
2560
/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above (also held across ->xlate() calls). */
static DEFINE_MUTEX(of_genpd_mutex);
/* Used to prevent registering devices before the bus. */
static bool genpd_bus_registered;
2567
2568/**
2569 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2570 * @genpdspec: OF phandle args to map into a PM domain
2571 * @data: xlate function private data - pointer to struct generic_pm_domain
2572 *
2573 * This is a generic xlate function that can be used to model PM domains that
2574 * have their own device tree nodes. The private data of xlate function needs
2575 * to be a valid pointer to struct generic_pm_domain.
2576 */
2577static struct generic_pm_domain *genpd_xlate_simple(
2578 const struct of_phandle_args *genpdspec,
2579 void *data)
2580{
2581 return data;
2582}
2583
2584/**
2585 * genpd_xlate_onecell() - Xlate function using a single index.
2586 * @genpdspec: OF phandle args to map into a PM domain
2587 * @data: xlate function private data - pointer to struct genpd_onecell_data
2588 *
2589 * This is a generic xlate function that can be used to model simple PM domain
2590 * controllers that have one device tree node and provide multiple PM domains.
2591 * A single cell is used as an index into an array of PM domains specified in
2592 * the genpd_onecell_data struct when registering the provider.
2593 */
2594static struct generic_pm_domain *genpd_xlate_onecell(
2595 const struct of_phandle_args *genpdspec,
2596 void *data)
2597{
2598 struct genpd_onecell_data *genpd_data = data;
2599 unsigned int idx = genpdspec->args[0];
2600
2601 if (genpdspec->args_count != 1)
2602 return ERR_PTR(-EINVAL);
2603
2604 if (idx >= genpd_data->num_domains) {
2605 pr_err("%s: invalid domain index %u\n", __func__, idx);
2606 return ERR_PTR(-EINVAL);
2607 }
2608
2609 if (!genpd_data->domains[idx])
2610 return ERR_PTR(-ENOENT);
2611
2612 return genpd_data->domains[idx];
2613}
2614
2615/**
2616 * genpd_add_provider() - Register a PM domain provider for a node
2617 * @np: Device node pointer associated with the PM domain provider.
2618 * @xlate: Callback for decoding PM domain from phandle arguments.
2619 * @data: Context pointer for @xlate callback.
2620 */
2621static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2622 void *data)
2623{
2624 struct of_genpd_provider *cp;
2625
2626 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2627 if (!cp)
2628 return -ENOMEM;
2629
2630 cp->node = of_node_get(np);
2631 cp->data = data;
2632 cp->xlate = xlate;
2633 fwnode_dev_initialized(of_fwnode_handle(np), true);
2634
2635 mutex_lock(&of_genpd_mutex);
2636 list_add(&cp->link, &of_genpd_providers);
2637 mutex_unlock(&of_genpd_mutex);
2638 pr_debug("Added domain provider from %pOF\n", np);
2639
2640 return 0;
2641}
2642
2643static bool genpd_present(const struct generic_pm_domain *genpd)
2644{
2645 bool ret = false;
2646 const struct generic_pm_domain *gpd;
2647
2648 mutex_lock(&gpd_list_lock);
2649 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2650 if (gpd == genpd) {
2651 ret = true;
2652 break;
2653 }
2654 }
2655 mutex_unlock(&gpd_list_lock);
2656
2657 return ret;
2658}
2659
2660static void genpd_sync_state(struct device *dev)
2661{
2662 return of_genpd_sync_state(dev->of_node);
2663}
2664
2665/**
2666 * of_genpd_add_provider_simple() - Register a simple PM domain provider
2667 * @np: Device node pointer associated with the PM domain provider.
2668 * @genpd: Pointer to PM domain associated with the PM domain provider.
2669 */
2670int of_genpd_add_provider_simple(struct device_node *np,
2671 struct generic_pm_domain *genpd)
2672{
2673 struct fwnode_handle *fwnode;
2674 struct device *dev;
2675 int ret;
2676
2677 if (!np || !genpd)
2678 return -EINVAL;
2679
2680 if (!genpd_bus_registered)
2681 return -ENODEV;
2682
2683 if (!genpd_present(genpd))
2684 return -EINVAL;
2685
2686 genpd->dev.of_node = np;
2687
2688 fwnode = of_fwnode_handle(np);
2689 dev = get_dev_from_fwnode(fwnode);
2690 if (!dev && !genpd_is_no_sync_state(genpd)) {
2691 genpd->sync_state = GENPD_SYNC_STATE_SIMPLE;
2692 device_set_node(&genpd->dev, fwnode);
2693 } else {
2694 dev_set_drv_sync_state(dev, genpd_sync_state);
2695 }
2696
2697 put_device(dev);
2698
2699 ret = device_add(&genpd->dev);
2700 if (ret)
2701 return ret;
2702
2703 /* Parse genpd OPP table */
2704 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2705 ret = dev_pm_opp_of_add_table(&genpd->dev);
2706 if (ret) {
2707 dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2708 goto err_del;
2709 }
2710
2711 /*
2712 * Save table for faster processing while setting performance
2713 * state.
2714 */
2715 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2716 WARN_ON(IS_ERR(genpd->opp_table));
2717 }
2718
2719 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2720 if (ret)
2721 goto err_opp;
2722
2723 genpd->provider = fwnode;
2724 genpd->has_provider = true;
2725
2726 return 0;
2727
2728err_opp:
2729 if (genpd->opp_table) {
2730 dev_pm_opp_put_opp_table(genpd->opp_table);
2731 dev_pm_opp_of_remove_table(&genpd->dev);
2732 }
2733err_del:
2734 device_del(&genpd->dev);
2735 return ret;
2736}
2737EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
2738
2739/**
2740 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2741 * @np: Device node pointer associated with the PM domain provider.
2742 * @data: Pointer to the data associated with the PM domain provider.
2743 */
2744int of_genpd_add_provider_onecell(struct device_node *np,
2745 struct genpd_onecell_data *data)
2746{
2747 struct generic_pm_domain *genpd;
2748 struct fwnode_handle *fwnode;
2749 struct device *dev;
2750 unsigned int i;
2751 int ret = -EINVAL;
2752 bool sync_state = false;
2753
2754 if (!np || !data)
2755 return -EINVAL;
2756
2757 if (!genpd_bus_registered)
2758 return -ENODEV;
2759
2760 if (!data->xlate)
2761 data->xlate = genpd_xlate_onecell;
2762
2763 fwnode = of_fwnode_handle(np);
2764 dev = get_dev_from_fwnode(fwnode);
2765 if (!dev)
2766 sync_state = true;
2767 else
2768 dev_set_drv_sync_state(dev, genpd_sync_state);
2769
2770 put_device(dev);
2771
2772 for (i = 0; i < data->num_domains; i++) {
2773 genpd = data->domains[i];
2774
2775 if (!genpd)
2776 continue;
2777 if (!genpd_present(genpd))
2778 goto error;
2779
2780 genpd->dev.of_node = np;
2781
2782 if (sync_state && !genpd_is_no_sync_state(genpd)) {
2783 genpd->sync_state = GENPD_SYNC_STATE_ONECELL;
2784 device_set_node(&genpd->dev, fwnode);
2785 sync_state = false;
2786 }
2787
2788 ret = device_add(&genpd->dev);
2789 if (ret)
2790 goto error;
2791
2792 /* Parse genpd OPP table */
2793 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2794 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2795 if (ret) {
2796 dev_err_probe(&genpd->dev, ret,
2797 "Failed to add OPP table for index %d\n", i);
2798 device_del(&genpd->dev);
2799 goto error;
2800 }
2801
2802 /*
2803 * Save table for faster processing while setting
2804 * performance state.
2805 */
2806 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2807 WARN_ON(IS_ERR(genpd->opp_table));
2808 }
2809
2810 genpd->provider = fwnode;
2811 genpd->has_provider = true;
2812 }
2813
2814 ret = genpd_add_provider(np, data->xlate, data);
2815 if (ret < 0)
2816 goto error;
2817
2818 return 0;
2819
2820error:
2821 while (i--) {
2822 genpd = data->domains[i];
2823
2824 if (!genpd)
2825 continue;
2826
2827 genpd->provider = NULL;
2828 genpd->has_provider = false;
2829
2830 if (genpd->opp_table) {
2831 dev_pm_opp_put_opp_table(genpd->opp_table);
2832 dev_pm_opp_of_remove_table(&genpd->dev);
2833 }
2834
2835 device_del(&genpd->dev);
2836 }
2837
2838 return ret;
2839}
2840EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
2841
2842/**
2843 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2844 * @np: Device node pointer associated with the PM domain provider
2845 */
2846void of_genpd_del_provider(struct device_node *np)
2847{
2848 struct of_genpd_provider *cp, *tmp;
2849 struct generic_pm_domain *gpd;
2850
2851 mutex_lock(&gpd_list_lock);
2852 mutex_lock(&of_genpd_mutex);
2853 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2854 if (cp->node == np) {
2855 /*
2856 * For each PM domain associated with the
2857 * provider, set the 'has_provider' to false
2858 * so that the PM domain can be safely removed.
2859 */
2860 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2861 if (gpd->provider == of_fwnode_handle(np)) {
2862 gpd->has_provider = false;
2863
2864 if (gpd->opp_table) {
2865 dev_pm_opp_put_opp_table(gpd->opp_table);
2866 dev_pm_opp_of_remove_table(&gpd->dev);
2867 }
2868
2869 device_del(&gpd->dev);
2870 }
2871 }
2872
2873 fwnode_dev_initialized(of_fwnode_handle(cp->node), false);
2874 list_del(&cp->link);
2875 of_node_put(cp->node);
2876 kfree(cp);
2877 break;
2878 }
2879 }
2880 mutex_unlock(&of_genpd_mutex);
2881 mutex_unlock(&gpd_list_lock);
2882}
2883EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2884
2885/**
2886 * genpd_get_from_provider() - Look-up PM domain
2887 * @genpdspec: OF phandle args to use for look-up
2888 *
2889 * Looks for a PM domain provider under the node specified by @genpdspec and if
2890 * found, uses xlate function of the provider to map phandle args to a PM
2891 * domain.
2892 *
2893 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2894 * on failure.
2895 */
2896static struct generic_pm_domain *genpd_get_from_provider(
2897 const struct of_phandle_args *genpdspec)
2898{
2899 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2900 struct of_genpd_provider *provider;
2901
2902 if (!genpdspec)
2903 return ERR_PTR(-EINVAL);
2904
2905 mutex_lock(&of_genpd_mutex);
2906
2907 /* Check if we have such a provider in our array */
2908 list_for_each_entry(provider, &of_genpd_providers, link) {
2909 if (provider->node == genpdspec->np)
2910 genpd = provider->xlate(genpdspec, provider->data);
2911 if (!IS_ERR(genpd))
2912 break;
2913 }
2914
2915 mutex_unlock(&of_genpd_mutex);
2916
2917 return genpd;
2918}
2919
2920/**
2921 * of_genpd_add_device() - Add a device to an I/O PM domain
2922 * @genpdspec: OF phandle args to use for look-up PM domain
2923 * @dev: Device to be added.
2924 *
2925 * Looks-up an I/O PM domain based upon phandle args provided and adds
2926 * the device to the PM domain. Returns a negative error code on failure.
2927 */
2928int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
2929{
2930 struct generic_pm_domain *genpd;
2931 int ret;
2932
2933 if (!dev)
2934 return -EINVAL;
2935
2936 mutex_lock(&gpd_list_lock);
2937
2938 genpd = genpd_get_from_provider(genpdspec);
2939 if (IS_ERR(genpd)) {
2940 ret = PTR_ERR(genpd);
2941 goto out;
2942 }
2943
2944 ret = genpd_add_device(genpd, dev, dev);
2945
2946out:
2947 mutex_unlock(&gpd_list_lock);
2948
2949 return ret;
2950}
2951EXPORT_SYMBOL_GPL(of_genpd_add_device);
2952
2953/**
2954 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2955 * @parent_spec: OF phandle args to use for parent PM domain look-up
2956 * @subdomain_spec: OF phandle args to use for subdomain look-up
2957 *
2958 * Looks-up a parent PM domain and subdomain based upon phandle args
2959 * provided and adds the subdomain to the parent PM domain. Returns a
2960 * negative error code on failure.
2961 */
2962int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
2963 const struct of_phandle_args *subdomain_spec)
2964{
2965 struct generic_pm_domain *parent, *subdomain;
2966 int ret;
2967
2968 mutex_lock(&gpd_list_lock);
2969
2970 parent = genpd_get_from_provider(parent_spec);
2971 if (IS_ERR(parent)) {
2972 ret = PTR_ERR(parent);
2973 goto out;
2974 }
2975
2976 subdomain = genpd_get_from_provider(subdomain_spec);
2977 if (IS_ERR(subdomain)) {
2978 ret = PTR_ERR(subdomain);
2979 goto out;
2980 }
2981
2982 ret = genpd_add_subdomain(parent, subdomain);
2983
2984out:
2985 mutex_unlock(&gpd_list_lock);
2986
2987 return ret == -ENOENT ? -EPROBE_DEFER : ret;
2988}
2989EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2990
2991/**
2992 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2993 * @parent_spec: OF phandle args to use for parent PM domain look-up
2994 * @subdomain_spec: OF phandle args to use for subdomain look-up
2995 *
2996 * Looks-up a parent PM domain and subdomain based upon phandle args
2997 * provided and removes the subdomain from the parent PM domain. Returns a
2998 * negative error code on failure.
2999 */
3000int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
3001 const struct of_phandle_args *subdomain_spec)
3002{
3003 struct generic_pm_domain *parent, *subdomain;
3004 int ret;
3005
3006 mutex_lock(&gpd_list_lock);
3007
3008 parent = genpd_get_from_provider(parent_spec);
3009 if (IS_ERR(parent)) {
3010 ret = PTR_ERR(parent);
3011 goto out;
3012 }
3013
3014 subdomain = genpd_get_from_provider(subdomain_spec);
3015 if (IS_ERR(subdomain)) {
3016 ret = PTR_ERR(subdomain);
3017 goto out;
3018 }
3019
3020 ret = pm_genpd_remove_subdomain(parent, subdomain);
3021
3022out:
3023 mutex_unlock(&gpd_list_lock);
3024
3025 return ret;
3026}
3027EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
3028
3029/**
3030 * of_genpd_remove_last - Remove the last PM domain registered for a provider
3031 * @np: Pointer to device node associated with provider
3032 *
3033 * Find the last PM domain that was added by a particular provider and
3034 * remove this PM domain from the list of PM domains. The provider is
3035 * identified by the 'provider' device structure that is passed. The PM
3036 * domain will only be removed, if the provider associated with domain
3037 * has been removed.
3038 *
3039 * Returns a valid pointer to struct generic_pm_domain on success or
3040 * ERR_PTR() on failure.
3041 */
3042struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
3043{
3044 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
3045 int ret;
3046
3047 if (IS_ERR_OR_NULL(np))
3048 return ERR_PTR(-EINVAL);
3049
3050 mutex_lock(&gpd_list_lock);
3051 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
3052 if (gpd->provider == of_fwnode_handle(np)) {
3053 ret = genpd_remove(gpd);
3054 genpd = ret ? ERR_PTR(ret) : gpd;
3055 break;
3056 }
3057 }
3058 mutex_unlock(&gpd_list_lock);
3059
3060 return genpd;
3061}
3062EXPORT_SYMBOL_GPL(of_genpd_remove_last);
3063
/*
 * ->release() for virtual devices created on the genpd bus; drops the
 * of_node reference and frees the kzalloc'ed device.
 */
static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}
3069
/* Bus for the virtual devices created by the attach-by-id path. */
static const struct bus_type genpd_bus_type = {
	.name		= "genpd",
};
3073
3074/**
3075 * genpd_dev_pm_detach - Detach a device from its PM domain.
3076 * @dev: Device to detach.
3077 * @power_off: Currently not used
3078 *
3079 * Try to locate a corresponding generic PM domain, which the device was
3080 * attached to previously. If such is found, the device is detached from it.
3081 */
3082static void genpd_dev_pm_detach(struct device *dev, bool power_off)
3083{
3084 struct generic_pm_domain *pd;
3085 unsigned int i;
3086 int ret = 0;
3087
3088 pd = dev_to_genpd(dev);
3089 if (IS_ERR(pd))
3090 return;
3091
3092 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
3093
3094 /* Drop the default performance state */
3095 if (dev_gpd_data(dev)->default_pstate) {
3096 dev_pm_genpd_set_performance_state(dev, 0);
3097 dev_gpd_data(dev)->default_pstate = 0;
3098 }
3099
3100 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
3101 ret = genpd_remove_device(pd, dev);
3102 if (ret != -EAGAIN)
3103 break;
3104
3105 mdelay(i);
3106 cond_resched();
3107 }
3108
3109 if (ret < 0) {
3110 dev_err(dev, "failed to remove from PM domain %s: %d",
3111 pd->name, ret);
3112 return;
3113 }
3114
3115 /* Check if PM domain can be powered off after removing this device. */
3116 genpd_queue_power_off_work(pd);
3117
3118 /* Unregister the device if it was created by genpd. */
3119 if (dev->bus == &genpd_bus_type)
3120 device_unregister(dev);
3121}
3122
/*
 * ->sync() hook: give the device's PM domain a chance to power off once
 * runtime PM state has settled.
 */
static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd = dev_to_genpd(dev);

	if (!IS_ERR(pd))
		genpd_queue_power_off_work(pd);
}
3133
/*
 * Register @dev as the "required dev" in @base_dev's OPP configuration,
 * when the base device has required-opps. Returns 0 when nothing needs
 * doing, or a negative error code.
 */
static int genpd_set_required_opp_dev(struct device *dev,
				      struct device *base_dev)
{
	struct dev_pm_opp_config config = {
		.required_dev = dev,
	};
	int ret;

	/* Limit support to non-providers for now. */
	if (of_property_present(base_dev->of_node, "#power-domain-cells"))
		return 0;

	if (!dev_pm_opp_of_has_required_opp(base_dev))
		return 0;

	ret = dev_pm_opp_set_config(base_dev, &config);
	if (ret < 0)
		return ret;

	/* Positive return is a token used to undo the configuration later. */
	dev_gpd_data(dev)->opp_token = ret;
	return 0;
}
3156
/*
 * Apply the default performance state the DT requires for @dev's
 * power-domain at @index, if any. -ENODEV/-EOPNOTSUPP from the lookup
 * mean "no required OPP" and are not treated as errors.
 */
static int genpd_set_required_opp(struct device *dev, unsigned int index)
{
	int ret, pstate;

	/* Set the default performance state */
	pstate = of_get_required_opp_performance_state(dev->of_node, index);
	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
		ret = pstate;
		goto err;
	} else if (pstate > 0) {
		ret = dev_pm_genpd_set_performance_state(dev, pstate);
		if (ret)
			goto err;
		/* Remember it so detach can drop the state later. */
		dev_gpd_data(dev)->default_pstate = pstate;
	}

	return 0;
err:
	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
		dev_to_genpd(dev)->name, ret);
	return ret;
}
3179
/*
 * Attach @dev to the PM domain referenced by the "power-domains"
 * specifier at @index in @dev's DT node. @base_dev is the real device
 * behind a possibly virtual @dev; @num_domains is the total specifier
 * count; @power_on requests powering the domain on after attaching.
 *
 * Returns 1 on success, 0/negative from the phandle parse when there is
 * nothing to attach, -EPROBE_DEFER when the domain is not ready, or a
 * negative error code.
 */
static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, unsigned int num_domains,
				 bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		/* Provider not registered (yet): possibly defer probing. */
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	/*
	 * For a single PM domain the index of the required OPP must be zero, so
	 * let's try to assign a required dev in that case. In the multiple PM
	 * domains case, we need platform code to specify the index.
	 */
	if (num_domains == 1) {
		ret = genpd_set_required_opp_dev(dev, base_dev);
		if (ret)
			goto err;
	}

	ret = genpd_set_required_opp(dev, index);
	if (ret)
		goto err;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	/* A failed power-on is reported as a probe deferral to the caller. */
	if (ret) {
		/* Drop the default performance state */
		if (dev_gpd_data(dev)->default_pstate) {
			dev_pm_genpd_set_performance_state(dev, 0);
			dev_gpd_data(dev)->default_pstate = 0;
		}

		genpd_remove_device(pd, dev);
		return -EPROBE_DEFER;
	}

	return 1;

err:
	genpd_remove_device(pd, dev);
	return ret;
}
3252
3253/**
3254 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
3255 * @dev: Device to attach.
3256 *
3257 * Parse device's OF node to find a PM domain specifier. If such is found,
3258 * attaches the device to retrieved pm_domain ops.
3259 *
3260 * Returns 1 on successfully attached PM domain, 0 when the device don't need a
3261 * PM domain or when multiple power-domains exists for it, else a negative error
3262 * code. Note that if a power-domain exists for the device, but it cannot be
3263 * found or turned on, then return -EPROBE_DEFER to ensure that the device is
3264 * not probed and to re-try again later.
3265 */
3266int genpd_dev_pm_attach(struct device *dev)
3267{
3268 if (!dev->of_node)
3269 return 0;
3270
3271 /*
3272 * Devices with multiple PM domains must be attached separately, as we
3273 * can only attach one PM domain per device.
3274 */
3275 if (of_count_phandle_with_args(dev->of_node, "power-domains",
3276 "#power-domain-cells") != 1)
3277 return 0;
3278
3279 return __genpd_dev_pm_attach(dev, dev, 0, 1, true);
3280}
3281EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
3282
3283/**
3284 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
3285 * @dev: The device used to lookup the PM domain.
3286 * @index: The index of the PM domain.
3287 *
3288 * Parse device's OF node to find a PM domain specifier at the provided @index.
3289 * If such is found, creates a virtual device and attaches it to the retrieved
3290 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
3291 * callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach().
3292 *
3293 * Returns the created virtual device if successfully attached PM domain, NULL
3294 * when the device don't need a PM domain, else an ERR_PTR() in case of
3295 * failures. If a power-domain exists for the device, but cannot be found or
3296 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
3297 * is not probed and to re-try again later.
3298 */
3299struct device *genpd_dev_pm_attach_by_id(struct device *dev,
3300 unsigned int index)
3301{
3302 struct device *virt_dev;
3303 int num_domains;
3304 int ret;
3305
3306 if (!dev->of_node)
3307 return NULL;
3308
3309 /* Verify that the index is within a valid range. */
3310 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
3311 "#power-domain-cells");
3312 if (num_domains < 0 || index >= num_domains)
3313 return NULL;
3314
3315 if (!genpd_bus_registered)
3316 return ERR_PTR(-ENODEV);
3317
3318 /* Allocate and register device on the genpd bus. */
3319 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
3320 if (!virt_dev)
3321 return ERR_PTR(-ENOMEM);
3322
3323 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
3324 virt_dev->bus = &genpd_bus_type;
3325 virt_dev->release = genpd_release_dev;
3326 virt_dev->of_node = of_node_get(dev->of_node);
3327
3328 ret = device_register(virt_dev);
3329 if (ret) {
3330 put_device(virt_dev);
3331 return ERR_PTR(ret);
3332 }
3333
3334 /* Try to attach the device to the PM domain at the specified index. */
3335 ret = __genpd_dev_pm_attach(virt_dev, dev, index, num_domains, false);
3336 if (ret < 1) {
3337 device_unregister(virt_dev);
3338 return ret ? ERR_PTR(ret) : NULL;
3339 }
3340
3341 pm_runtime_enable(virt_dev);
3342 genpd_queue_power_off_work(dev_to_genpd(virt_dev));
3343
3344 return virt_dev;
3345}
3346EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
3347
3348/**
3349 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
3350 * @dev: The device used to lookup the PM domain.
3351 * @name: The name of the PM domain.
3352 *
3353 * Parse device's OF node to find a PM domain specifier using the
3354 * power-domain-names DT property. For further description see
3355 * genpd_dev_pm_attach_by_id().
3356 */
3357struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
3358{
3359 int index;
3360
3361 if (!dev->of_node)
3362 return NULL;
3363
3364 index = of_property_match_string(dev->of_node, "power-domain-names",
3365 name);
3366 if (index < 0)
3367 return NULL;
3368
3369 return genpd_dev_pm_attach_by_id(dev, index);
3370}
3371
/* Matches DT nodes that describe a domain idle state. */
static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};
3376
3377static int genpd_parse_state(struct genpd_power_state *genpd_state,
3378 struct device_node *state_node)
3379{
3380 int err;
3381 u32 residency;
3382 u32 entry_latency, exit_latency;
3383
3384 err = of_property_read_u32(state_node, "entry-latency-us",
3385 &entry_latency);
3386 if (err) {
3387 pr_debug(" * %pOF missing entry-latency-us property\n",
3388 state_node);
3389 return -EINVAL;
3390 }
3391
3392 err = of_property_read_u32(state_node, "exit-latency-us",
3393 &exit_latency);
3394 if (err) {
3395 pr_debug(" * %pOF missing exit-latency-us property\n",
3396 state_node);
3397 return -EINVAL;
3398 }
3399
3400 err = of_property_read_u32(state_node, "min-residency-us", &residency);
3401 if (!err)
3402 genpd_state->residency_ns = 1000LL * residency;
3403
3404 of_property_read_string(state_node, "idle-state-name", &genpd_state->name);
3405
3406 genpd_state->power_on_latency_ns = 1000LL * exit_latency;
3407 genpd_state->power_off_latency_ns = 1000LL * entry_latency;
3408 genpd_state->fwnode = of_fwnode_handle(state_node);
3409
3410 return 0;
3411}
3412
/*
 * Walk the "domain-idle-states" phandles of @dn and, when @states is
 * non-NULL, parse each compatible and available state into @states.
 *
 * Returns the number of matching idle states found, or a negative error
 * code. A missing "domain-idle-states" property is not an error and
 * yields 0.
 */
static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret == -ENOENT ? 0 : ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		/* Silently skip nodes that aren't compatible idle states. */
		if (!of_match_node(idle_state_match, np))
			continue;

		if (!of_device_is_available(np))
			continue;

		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				/* Drop the node reference held by the iterator. */
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}
3448
3449/**
3450 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
3451 *
3452 * @dn: The genpd device node
3453 * @states: The pointer to which the state array will be saved.
3454 * @n: The count of elements in the array returned from this function.
3455 *
3456 * Returns the device states parsed from the OF node. The memory for the states
3457 * is allocated by this function and is the responsibility of the caller to
3458 * free the memory after use. If any or zero compatible domain idle states is
3459 * found it returns 0 and in case of errors, a negative error code is returned.
3460 */
3461int of_genpd_parse_idle_states(struct device_node *dn,
3462 struct genpd_power_state **states, int *n)
3463{
3464 struct genpd_power_state *st;
3465 int ret;
3466
3467 ret = genpd_iterate_idle_states(dn, NULL);
3468 if (ret < 0)
3469 return ret;
3470
3471 if (!ret) {
3472 *states = NULL;
3473 *n = 0;
3474 return 0;
3475 }
3476
3477 st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
3478 if (!st)
3479 return -ENOMEM;
3480
3481 ret = genpd_iterate_idle_states(dn, st);
3482 if (ret <= 0) {
3483 kfree(st);
3484 return ret < 0 ? ret : -EINVAL;
3485 }
3486
3487 *states = st;
3488 *n = ret;
3489
3490 return 0;
3491}
3492EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
3493
3494/**
3495 * of_genpd_sync_state() - A common sync_state function for genpd providers
3496 * @np: The device node the genpd provider is associated with.
3497 *
3498 * The @np that corresponds to a genpd provider may provide one or multiple
3499 * genpds. This function makes use @np to find the genpds that belongs to the
3500 * provider. For each genpd we try a power-off.
3501 */
3502void of_genpd_sync_state(struct device_node *np)
3503{
3504 struct generic_pm_domain *genpd;
3505
3506 if (!np)
3507 return;
3508
3509 mutex_lock(&gpd_list_lock);
3510 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3511 if (genpd->provider == of_fwnode_handle(np)) {
3512 genpd_lock(genpd);
3513 genpd->stay_on = false;
3514 genpd_power_off(genpd, false, 0);
3515 genpd_unlock(genpd);
3516 }
3517 }
3518 mutex_unlock(&gpd_list_lock);
3519}
3520EXPORT_SYMBOL_GPL(of_genpd_sync_state);
3521
/*
 * Probe stub for genpd provider devices. There is nothing to set up; a
 * successful bind is only needed so the driver core delivers the driver's
 * ->sync_state() callback for the provider device.
 */
static int genpd_provider_probe(struct device *dev)
{
	return 0;
}
3526
3527static void genpd_provider_sync_state(struct device *dev)
3528{
3529 struct generic_pm_domain *genpd = container_of(dev, struct generic_pm_domain, dev);
3530
3531 switch (genpd->sync_state) {
3532 case GENPD_SYNC_STATE_OFF:
3533 break;
3534
3535 case GENPD_SYNC_STATE_ONECELL:
3536 of_genpd_sync_state(dev->of_node);
3537 break;
3538
3539 case GENPD_SYNC_STATE_SIMPLE:
3540 genpd_lock(genpd);
3541 genpd->stay_on = false;
3542 genpd_power_off(genpd, false, 0);
3543 genpd_unlock(genpd);
3544 break;
3545
3546 default:
3547 break;
3548 }
3549}
3550
/*
 * Driver bound to every genpd provider device. It exists only to receive
 * the ->sync_state() callback; binding from sysfs is suppressed.
 */
static struct device_driver genpd_provider_drv = {
	.name = "genpd_provider",
	.bus = &genpd_provider_bus_type,
	.probe = genpd_provider_probe,
	.sync_state = genpd_provider_sync_state,
	.suppress_bind_attrs = true,
};
3558
/*
 * Register the parent device, the two buses and the driver used for genpd
 * providers. On failure, everything registered so far is unwound in reverse
 * order via the goto chain.
 */
static int __init genpd_bus_init(void)
{
	int ret;

	ret = device_register(&genpd_provider_bus);
	if (ret) {
		/* A failed device_register() still requires a put_device(). */
		put_device(&genpd_provider_bus);
		return ret;
	}

	ret = bus_register(&genpd_provider_bus_type);
	if (ret)
		goto err_dev;

	ret = bus_register(&genpd_bus_type);
	if (ret)
		goto err_prov_bus;

	ret = driver_register(&genpd_provider_drv);
	if (ret)
		goto err_bus;

	/* From here on genpd_dev_pm_attach_by_id() may create virtual devices. */
	genpd_bus_registered = true;
	return 0;

err_bus:
	bus_unregister(&genpd_bus_type);
err_prov_bus:
	bus_unregister(&genpd_provider_bus_type);
err_dev:
	device_unregister(&genpd_provider_bus);
	return ret;
}
core_initcall(genpd_bus_init);
3593
3594#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3595
3596
3597/*** debugfs support ***/
3598
3599#ifdef CONFIG_DEBUG_FS
3600/*
3601 * TODO: This function is a slightly modified version of rtpm_status_show
3602 * from sysfs.c, so generalize it.
3603 */
3604static void rtpm_status_str(struct seq_file *s, struct device *dev)
3605{
3606 static const char * const status_lookup[] = {
3607 [RPM_ACTIVE] = "active",
3608 [RPM_RESUMING] = "resuming",
3609 [RPM_SUSPENDED] = "suspended",
3610 [RPM_SUSPENDING] = "suspending"
3611 };
3612 const char *p = "";
3613
3614 if (dev->power.runtime_error)
3615 p = "error";
3616 else if (dev->power.disable_depth)
3617 p = "unsupported";
3618 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3619 p = status_lookup[dev->power.runtime_status];
3620 else
3621 WARN_ON(1);
3622
3623 seq_printf(s, "%-26s ", p);
3624}
3625
3626static void perf_status_str(struct seq_file *s, struct device *dev)
3627{
3628 struct generic_pm_domain_data *gpd_data;
3629
3630 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3631
3632 seq_printf(s, "%-10u ", gpd_data->performance_state);
3633}
3634
3635static void mode_status_str(struct seq_file *s, struct device *dev)
3636{
3637 struct generic_pm_domain_data *gpd_data;
3638
3639 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3640
3641 seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW");
3642}
3643
/*
 * Emit one domain's row of the pm_genpd_summary file: the domain's name,
 * state (with idle-state index when off) and performance state, followed by
 * its child domains and each attached device's runtime PM status.
 *
 * Returns 0 on success (also after a WARN'ed bogus status, so the summary
 * continues with the next domain) or -ERESTARTSYS if interrupted while
 * waiting for the domain lock.
 */
static int genpd_summary_one(struct seq_file *s,
			     struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	/* When off, append the index of the idle state that was entered. */
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s %-30s %u", dev_name(&genpd->dev), state, genpd->performance_state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also the device name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (list_is_first(&link->parent_node, &genpd->parent_links))
			seq_printf(s, "\n%48s", " ");
		seq_printf(s, "%s", link->child->name);
		if (!list_is_last(&link->parent_node, &genpd->parent_links))
			seq_puts(s, ", ");
	}

	/* One row per attached device: name, runtime status, perf, HW/SW. */
	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		seq_printf(s, "\n %-30s ", dev_name(pm_data->dev));
		rtpm_status_str(s, pm_data->dev);
		perf_status_str(s, pm_data->dev);
		mode_status_str(s, pm_data->dev);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}
3696
3697static int summary_show(struct seq_file *s, void *data)
3698{
3699 struct generic_pm_domain *genpd;
3700 int ret = 0;
3701
3702 seq_puts(s, "domain status children performance\n");
3703 seq_puts(s, " /device runtime status managed by\n");
3704 seq_puts(s, "------------------------------------------------------------------------------\n");
3705
3706 ret = mutex_lock_interruptible(&gpd_list_lock);
3707 if (ret)
3708 return -ERESTARTSYS;
3709
3710 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3711 ret = genpd_summary_one(s, genpd);
3712 if (ret)
3713 break;
3714 }
3715 mutex_unlock(&gpd_list_lock);
3716
3717 return ret;
3718}
3719
3720static int status_show(struct seq_file *s, void *data)
3721{
3722 static const char * const status_lookup[] = {
3723 [GENPD_STATE_ON] = "on",
3724 [GENPD_STATE_OFF] = "off"
3725 };
3726
3727 struct generic_pm_domain *genpd = s->private;
3728 int ret = 0;
3729
3730 ret = genpd_lock_interruptible(genpd);
3731 if (ret)
3732 return -ERESTARTSYS;
3733
3734 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3735 goto exit;
3736
3737 if (genpd->status == GENPD_STATE_OFF)
3738 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3739 genpd->state_idx);
3740 else
3741 seq_printf(s, "%s\n", status_lookup[genpd->status]);
3742exit:
3743 genpd_unlock(genpd);
3744 return ret;
3745}
3746
3747static int sub_domains_show(struct seq_file *s, void *data)
3748{
3749 struct generic_pm_domain *genpd = s->private;
3750 struct gpd_link *link;
3751 int ret = 0;
3752
3753 ret = genpd_lock_interruptible(genpd);
3754 if (ret)
3755 return -ERESTARTSYS;
3756
3757 list_for_each_entry(link, &genpd->parent_links, parent_node)
3758 seq_printf(s, "%s\n", link->child->name);
3759
3760 genpd_unlock(genpd);
3761 return ret;
3762}
3763
3764static int idle_states_show(struct seq_file *s, void *data)
3765{
3766 struct generic_pm_domain *genpd = s->private;
3767 u64 now, delta, idle_time = 0;
3768 unsigned int i;
3769 int ret = 0;
3770
3771 ret = genpd_lock_interruptible(genpd);
3772 if (ret)
3773 return -ERESTARTSYS;
3774
3775 seq_puts(s, "State Time Spent(ms) Usage Rejected Above Below\n");
3776
3777 for (i = 0; i < genpd->state_count; i++) {
3778 struct genpd_power_state *state = &genpd->states[i];
3779 char state_name[15];
3780
3781 idle_time += state->idle_time;
3782
3783 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3784 now = ktime_get_mono_fast_ns();
3785 if (now > genpd->accounting_time) {
3786 delta = now - genpd->accounting_time;
3787 idle_time += delta;
3788 }
3789 }
3790
3791 if (!state->name)
3792 snprintf(state_name, ARRAY_SIZE(state_name), "S%-13d", i);
3793
3794 do_div(idle_time, NSEC_PER_MSEC);
3795 seq_printf(s, "%-14s %-14llu %-10llu %-10llu %-10llu %llu\n",
3796 state->name ?: state_name, idle_time,
3797 state->usage, state->rejected, state->above,
3798 state->below);
3799 }
3800
3801 genpd_unlock(genpd);
3802 return ret;
3803}
3804
3805static int active_time_show(struct seq_file *s, void *data)
3806{
3807 struct generic_pm_domain *genpd = s->private;
3808 u64 now, on_time, delta = 0;
3809 int ret = 0;
3810
3811 ret = genpd_lock_interruptible(genpd);
3812 if (ret)
3813 return -ERESTARTSYS;
3814
3815 if (genpd->status == GENPD_STATE_ON) {
3816 now = ktime_get_mono_fast_ns();
3817 if (now > genpd->accounting_time)
3818 delta = now - genpd->accounting_time;
3819 }
3820
3821 on_time = genpd->on_time + delta;
3822 do_div(on_time, NSEC_PER_MSEC);
3823 seq_printf(s, "%llu ms\n", on_time);
3824
3825 genpd_unlock(genpd);
3826 return ret;
3827}
3828
3829static int total_idle_time_show(struct seq_file *s, void *data)
3830{
3831 struct generic_pm_domain *genpd = s->private;
3832 u64 now, delta, total = 0;
3833 unsigned int i;
3834 int ret = 0;
3835
3836 ret = genpd_lock_interruptible(genpd);
3837 if (ret)
3838 return -ERESTARTSYS;
3839
3840 for (i = 0; i < genpd->state_count; i++) {
3841 total += genpd->states[i].idle_time;
3842
3843 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3844 now = ktime_get_mono_fast_ns();
3845 if (now > genpd->accounting_time) {
3846 delta = now - genpd->accounting_time;
3847 total += delta;
3848 }
3849 }
3850 }
3851
3852 do_div(total, NSEC_PER_MSEC);
3853 seq_printf(s, "%llu ms\n", total);
3854
3855 genpd_unlock(genpd);
3856 return ret;
3857}
3858
3859
3860static int devices_show(struct seq_file *s, void *data)
3861{
3862 struct generic_pm_domain *genpd = s->private;
3863 struct pm_domain_data *pm_data;
3864 int ret = 0;
3865
3866 ret = genpd_lock_interruptible(genpd);
3867 if (ret)
3868 return -ERESTARTSYS;
3869
3870 list_for_each_entry(pm_data, &genpd->dev_list, list_node)
3871 seq_printf(s, "%s\n", dev_name(pm_data->dev));
3872
3873 genpd_unlock(genpd);
3874 return ret;
3875}
3876
3877static int perf_state_show(struct seq_file *s, void *data)
3878{
3879 struct generic_pm_domain *genpd = s->private;
3880
3881 if (genpd_lock_interruptible(genpd))
3882 return -ERESTARTSYS;
3883
3884 seq_printf(s, "%u\n", genpd->performance_state);
3885
3886 genpd_unlock(genpd);
3887 return 0;
3888}
3889
/* seq_file boilerplate: generates the <name>_fops used by the debugfs files
 * below from each of the *_show() functions above. */
DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);
3898
/*
 * Create the per-domain debugfs directory and its read-only files. A no-op
 * until genpd_debug_init() has created the parent directory. The dentry
 * returned by debugfs_create_dir() is deliberately not error-checked;
 * debugfs creation helpers are documented to tolerate error-valued parents.
 */
static void genpd_debug_add(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir);

	debugfs_create_file("current_state", 0444,
			    d, genpd, &status_fops);
	debugfs_create_file("sub_domains", 0444,
			    d, genpd, &sub_domains_fops);
	debugfs_create_file("idle_states", 0444,
			    d, genpd, &idle_states_fops);
	debugfs_create_file("active_time", 0444,
			    d, genpd, &active_time_fops);
	debugfs_create_file("total_idle_time", 0444,
			    d, genpd, &total_idle_time_fops);
	debugfs_create_file("devices", 0444,
			    d, genpd, &devices_fops);
	/* perf_state only makes sense for domains with performance scaling. */
	if (genpd->set_performance_state)
		debugfs_create_file("perf_state", 0444,
				    d, genpd, &perf_state_fops);
}
3924
/*
 * Create the pm_genpd debugfs root, the summary file, and a directory for
 * every genpd registered so far; later additions go through genpd_debug_add().
 *
 * NOTE(review): gpd_list is walked without gpd_list_lock here -- presumably
 * safe at late_initcall time, but worth confirming against concurrent
 * provider registration.
 */
static int __init genpd_debug_init(void)
{
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
			    NULL, &summary_fops);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_debug_add(genpd);

	return 0;
}
late_initcall(genpd_debug_init);
3940
static void __exit genpd_debug_exit(void)
{
	/* Tear down the whole pm_genpd tree, including per-domain dirs. */
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
3946#endif /* CONFIG_DEBUG_FS */