// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain_governor.c - Governors for device PM domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/ktime.h>

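/*
 * dev_update_qos_constraint - device_for_each_child() callback used by
 * default_suspend_ok() below to fold each child's effective resume-latency
 * constraint (in ns) into the tightest value seen so far, passed via @data.
 */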
static int dev_update_qos_constraint(struct device *dev, void *data)
{
	s64 *constraint_ns_p = data;
	s64 constraint_ns;

	if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
		struct gpd_timing_data *td = dev_gpd_data(dev)->td;

		/*
		 * Only take suspend-time QoS constraints of devices into
		 * account, because constraints updated after the device has
		 * been suspended are not guaranteed to be taken into account
		 * anyway. In order for them to take effect, the device has to
		 * be resumed and suspended again.
		 */
		constraint_ns = td ? td->effective_constraint_ns :
				PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	} else {
		/*
		 * The child is not in a domain and there's no info on its
		 * suspend/resume latencies, so assume them to be negligible and
		 * take its current PM QoS constraint (that's the only thing
		 * known at this point anyway).
		 */
		constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
		constraint_ns *= NSEC_PER_USEC;
	}

	if (constraint_ns < *constraint_ns_p)
		*constraint_ns_p = constraint_ns;

	return 0;
}

/**
 * default_suspend_ok - Default PM domain governor routine to suspend devices.
 * @dev: Device to check.
 *
 * Returns: true if OK to suspend, false if not OK to suspend
 */
static bool default_suspend_ok(struct device *dev)
{
	struct gpd_timing_data *td = dev_gpd_data(dev)->td;
	unsigned long flags;
	s64 constraint_ns;

	dev_dbg(dev, "%s()\n", __func__);

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!td->constraint_changed) {
		bool ret = td->cached_suspend_ok;

		spin_unlock_irqrestore(&dev->power.lock, flags);
		return ret;
	}
	td->constraint_changed = false;
	td->cached_suspend_ok = false;
	td->effective_constraint_ns = 0;
	constraint_ns = __dev_pm_qos_resume_latency(dev);

	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (constraint_ns == 0)
		return false;

	constraint_ns *= NSEC_PER_USEC;
	/*
	 * We can walk the children without any additional locking, because
	 * they all have been suspended at this point and their
	 * effective_constraint_ns fields won't be modified in parallel with us.
	 */
	if (!dev->power.ignore_children)
		device_for_each_child(dev, &constraint_ns,
				      dev_update_qos_constraint);

	if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
		/* "No restriction", so the device is allowed to suspend. */
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->cached_suspend_ok = true;
	} else if (constraint_ns == 0) {
		/*
		 * This triggers if one of the children that don't belong to a
		 * domain has a zero PM QoS constraint, in which case it is
		 * better not to suspend. effective_constraint_ns is zero
		 * already and cached_suspend_ok is false, so bail out.
		 */
		return false;
	} else {
		constraint_ns -= td->suspend_latency_ns +
				 td->resume_latency_ns;
		/*
		 * effective_constraint_ns is zero already and cached_suspend_ok
		 * is false, so if the computed value is not positive, return
		 * right away.
		 */
		if (constraint_ns <= 0)
			return false;

		td->effective_constraint_ns = constraint_ns;
		td->cached_suspend_ok = true;
	}

	/*
	 * The children have been suspended already, so we don't need to take
	 * their suspend latencies into account here.
	 */
	return td->cached_suspend_ok;
}

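/**
 * update_domain_next_wakeup - Find the earliest next wakeup for @genpd.
 * @genpd: PM domain to look at.
 * @now: Current time.
 *
 * Walk the devices attached to @genpd and its subdomains, pick the earliest
 * next wakeup that is still in the future and store it in
 * genpd->gd->next_wakeup. Only relevant for domains using
 * GENPD_FLAG_MIN_RESIDENCY.
 */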
static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now)
{
	ktime_t domain_wakeup = KTIME_MAX;
	ktime_t next_wakeup;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
		return;

	/*
	 * Devices that have a predictable wakeup pattern may specify
	 * their next wakeup. Let's find the next wakeup from all the
	 * devices attached to this domain and from all the sub-domains.
	 * It is possible that a component's next wakeup has become stale
	 * by the time we read it here. Ignore such stale values to ensure
	 * the domain is able to enter its optimal idle state.
	 */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		next_wakeup = to_gpd_data(pdd)->td->next_wakeup;
		if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
			if (ktime_before(next_wakeup, domain_wakeup))
				domain_wakeup = next_wakeup;
	}

	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct genpd_governor_data *cgd = link->child->gd;

		next_wakeup = cgd ? cgd->next_wakeup : KTIME_MAX;
		if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
			if (ktime_before(next_wakeup, domain_wakeup))
				domain_wakeup = next_wakeup;
	}

	genpd->gd->next_wakeup = domain_wakeup;
}

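/*
 * next_wakeup_allows_state - Check whether the time until the domain's next
 * wakeup is long enough to cover the power-off latency plus the required
 * residency of the given idle state.
 */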
static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
				     unsigned int state, ktime_t now)
{
	ktime_t domain_wakeup = genpd->gd->next_wakeup;
	s64 idle_time_ns, min_sleep_ns;

	min_sleep_ns = genpd->states[state].power_off_latency_ns +
		       genpd->states[state].residency_ns;

	idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));

	return idle_time_ns >= min_sleep_ns;
}

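/*
 * __default_power_down_ok - Check whether idle state @state of the domain is
 * acceptable to its subdomains and devices, i.e. whether they can all afford
 * the state's combined power-off/power-on latency, and update
 * genpd->gd->max_off_time_ns accordingly.
 */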
static bool __default_power_down_ok(struct dev_pm_domain *pd,
				    unsigned int state)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct gpd_link *link;
	struct pm_domain_data *pdd;
	s64 min_off_time_ns;
	s64 off_on_time_ns;

	off_on_time_ns = genpd->states[state].power_off_latency_ns +
			 genpd->states[state].power_on_latency_ns;

	min_off_time_ns = -1;
	/*
	 * Check if subdomains can be off for enough time.
	 *
	 * All subdomains have been powered off already at this point.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct genpd_governor_data *cgd = link->child->gd;

		s64 sd_max_off_ns = cgd ? cgd->max_off_time_ns : -1;

		if (sd_max_off_ns < 0)
			continue;

		/*
		 * Check if the subdomain is allowed to be off long enough for
		 * the current domain to turn off and on (that's how much time
		 * it will have to wait worst case).
		 */
		if (sd_max_off_ns <= off_on_time_ns)
			return false;

		if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
			min_off_time_ns = sd_max_off_ns;
	}

	/*
	 * Check if the devices in the domain can be off for enough time.
	 */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		struct gpd_timing_data *td;
		s64 constraint_ns;

		/*
		 * Check if the device is allowed to be off long enough for the
		 * domain to turn off and on (that's how much time it will
		 * have to wait worst case).
		 */
		td = to_gpd_data(pdd)->td;
		constraint_ns = td->effective_constraint_ns;
		/*
		 * Zero means "no suspend at all" and this runs only when all
		 * devices in the domain are suspended, so it must be positive.
		 */
		if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
			continue;

		if (constraint_ns <= off_on_time_ns)
			return false;

		if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
			min_off_time_ns = constraint_ns;
	}

	/*
	 * If the computed minimum device off time is negative, there are no
	 * latency constraints, so the domain can spend arbitrary time in the
	 * "off" state.
	 */
	if (min_off_time_ns < 0)
		return true;

	/*
	 * The difference between the computed minimum subdomain or device off
	 * time and the time needed to turn the domain on is the maximum
	 * theoretical time this domain can spend in the "off" state.
	 */
	genpd->gd->max_off_time_ns = min_off_time_ns -
		genpd->states[state].power_on_latency_ns;
	return true;
}

/**
 * _default_power_down_ok - Default generic PM domain power off governor routine.
 * @pd: PM domain to check.
 * @now: current ktime.
 *
 * This routine must be executed under the PM domain's lock.
 *
 * Returns: true if OK to power down, false if not OK to power down
 */
static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct genpd_governor_data *gd = genpd->gd;
	int state_idx = genpd->state_count - 1;
	struct gpd_link *link;

	/*
	 * Find the next wakeup from devices that can determine their own wakeup
	 * to find when the domain would wake up, and do it for every device
	 * down the hierarchy. It is not worthwhile to sleep if the state's
	 * residency cannot be met.
	 */
	update_domain_next_wakeup(genpd, now);
	if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (gd->next_wakeup != KTIME_MAX)) {
		/* Let's find the deepest domain idle state the devices prefer. */
		while (state_idx >= 0) {
			if (next_wakeup_allows_state(genpd, state_idx, now)) {
				gd->max_off_time_changed = true;
				break;
			}
			state_idx--;
		}

		if (state_idx < 0) {
			state_idx = 0;
			gd->cached_power_down_ok = false;
			goto done;
		}
	}

	if (!gd->max_off_time_changed) {
		genpd->state_idx = gd->cached_power_down_state_idx;
		return gd->cached_power_down_ok;
	}

	/*
	 * We have to invalidate the cached results for the parents, so
	 * use the observation that default_power_down_ok() is not
	 * going to be called for any parent until this instance
	 * returns.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct genpd_governor_data *pgd = link->parent->gd;

		if (pgd)
			pgd->max_off_time_changed = true;
	}

	gd->max_off_time_ns = -1;
	gd->max_off_time_changed = false;
	gd->cached_power_down_ok = true;

	/*
	 * Find a state to power down to, starting from the state
	 * determined by the next wakeup.
	 */
	while (!__default_power_down_ok(pd, state_idx)) {
		if (state_idx == 0) {
			gd->cached_power_down_ok = false;
			break;
		}
		state_idx--;
	}

done:
	genpd->state_idx = state_idx;
	gd->cached_power_down_state_idx = genpd->state_idx;
	return gd->cached_power_down_ok;
}

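/* ->power_down_ok() callback of simple_qos_governor (defined below). */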
static bool default_power_down_ok(struct dev_pm_domain *pd)
{
	return _default_power_down_ok(pd, ktime_get());
}

#ifdef CONFIG_CPU_IDLE
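/*
 * cpu_power_down_ok - ->power_down_ok() callback for CPU PM domains.
 *
 * On top of the default dev PM QoS checks, verify that the expected idle
 * duration (until the next timer event of any online CPU in the domain) and
 * the CPU latency QoS limits allow the selected domain idle state.
 */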
static bool cpu_power_down_ok(struct dev_pm_domain *pd)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct cpuidle_device *dev;
	ktime_t domain_wakeup, next_hrtimer;
	ktime_t now = ktime_get();
	struct device *cpu_dev;
	s64 cpu_constraint, global_constraint, wakeup_constraint;
	s64 idle_duration_ns;
	int cpu, i;

	/* Validate dev PM QoS constraints. */
	if (!_default_power_down_ok(pd, now))
		return false;

	if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
		return true;

	wakeup_constraint = cpu_wakeup_latency_qos_limit();
	global_constraint = cpu_latency_qos_limit();
	if (global_constraint > wakeup_constraint)
		global_constraint = wakeup_constraint;

	/*
	 * Find the next wakeup for any of the online CPUs within the PM domain
	 * and its subdomains. Note that we only need genpd->cpus, as it
	 * already contains a mask of all CPUs from subdomains.
	 */
	domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
	for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
		dev = per_cpu(cpuidle_devices, cpu);
		if (dev) {
			next_hrtimer = READ_ONCE(dev->next_hrtimer);
			if (ktime_before(next_hrtimer, domain_wakeup))
				domain_wakeup = next_hrtimer;
		}

		cpu_dev = get_cpu_device(cpu);
		if (cpu_dev) {
			cpu_constraint = dev_pm_qos_raw_resume_latency(cpu_dev);
			if (cpu_constraint < global_constraint)
				global_constraint = cpu_constraint;
		}
	}

	global_constraint *= NSEC_PER_USEC;
	/* The minimum idle duration is from now until the next wakeup. */
	idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
	if (idle_duration_ns <= 0)
		return false;

	/* Store the next domain_wakeup to allow consumers to use it. */
	genpd->gd->next_hrtimer = domain_wakeup;

	/*
	 * Find the deepest idle state that has its residency value satisfied,
	 * while also taking into account the power-off latency for the state.
	 * Start at the state picked by the dev PM QoS constraint validation.
	 */
	i = genpd->state_idx;
	do {
		if ((idle_duration_ns >= (genpd->states[i].residency_ns +
		    genpd->states[i].power_off_latency_ns)) &&
		    (global_constraint >= (genpd->states[i].power_on_latency_ns +
		    genpd->states[i].power_off_latency_ns)))
			break;

	} while (--i >= 0);

	if (i < 0)
		return false;

	if (cpus_peek_for_pending_ipi(genpd->cpus))
		return false;

	genpd->state_idx = i;
	genpd->gd->last_enter = now;
	genpd->gd->reflect_residency = true;
	return true;
}

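/*
 * cpu_system_power_down_ok - ->system_power_down_ok() callback for CPU PM
 * domains: pick the deepest domain idle state whose combined power-off and
 * power-on latencies fit within the CPU wakeup latency QoS limit.
 */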
static bool cpu_system_power_down_ok(struct dev_pm_domain *pd)
{
	s64 constraint_ns = cpu_wakeup_latency_qos_limit() * NSEC_PER_USEC;
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	int state_idx = genpd->state_count - 1;

	if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN)) {
		genpd->state_idx = state_idx;
		return true;
	}

	/* Find the deepest state for the latency constraint. */
	while (state_idx >= 0) {
		s64 latency_ns = genpd->states[state_idx].power_off_latency_ns +
				 genpd->states[state_idx].power_on_latency_ns;

		if (latency_ns <= constraint_ns) {
			genpd->state_idx = state_idx;
			return true;
		}
		state_idx--;
	}

	return false;
}

struct dev_power_governor pm_domain_cpu_gov = {
	.suspend_ok = default_suspend_ok,
	.power_down_ok = cpu_power_down_ok,
	.system_power_down_ok = cpu_system_power_down_ok,
};
#endif

struct dev_power_governor simple_qos_governor = {
	.suspend_ok = default_suspend_ok,
	.power_down_ok = default_power_down_ok,
};

/*
 * pm_domain_always_on_gov - A governor implementing an always-on policy
 */
struct dev_power_governor pm_domain_always_on_gov = {
	.suspend_ok = default_suspend_ok,
};
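
/*
 * Usage sketch (illustrative only; "my_pd" and its callbacks are hypothetical
 * and not part of this file): a genpd provider picks one of the governors
 * above when registering its domain with pm_genpd_init(). For example:
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name		= "my_pd",
 *		.power_on	= my_pd_power_on,
 *		.power_off	= my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, &simple_qos_governor, false);
 *
 * CPU PM domains built with CONFIG_CPU_IDLE would pass &pm_domain_cpu_gov
 * instead, so that cpu_power_down_ok() and cpu_system_power_down_ok() are
 * consulted in addition to the device PM QoS checks.
 */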