// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include "gdsc.h"

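/*
 * A GDSC (globally distributed switch controller) is the headswitch for a
 * power domain inside a Qualcomm SoC. Each GDSC is driven through a small
 * register interface (the GDSCR and friends, masks below) and is exposed to
 * the rest of the kernel as a generic power domain (genpd).
 */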
#define PWR_ON_MASK		BIT(31)
#define EN_REST_WAIT_MASK	GENMASK(23, 20)
#define EN_FEW_WAIT_MASK	GENMASK(19, 16)
#define CLK_DIS_WAIT_MASK	GENMASK(15, 12)
#define SW_OVERRIDE_MASK	BIT(2)
#define HW_CONTROL_MASK		BIT(1)
#define SW_COLLAPSE_MASK	BIT(0)
#define GMEM_CLAMP_IO_MASK	BIT(0)
#define GMEM_RESET_MASK		BIT(4)

/* CFG_GDSCR */
#define GDSC_POWER_UP_COMPLETE		BIT(16)
#define GDSC_POWER_DOWN_COMPLETE	BIT(15)
#define GDSC_RETAIN_FF_ENABLE		BIT(11)
#define CFG_GDSCR_OFFSET		0x4

/* Wait 2^n CXO cycles between state changes; the fields below hold n */
#define EN_REST_WAIT_VAL	0x2
#define EN_FEW_WAIT_VAL		0x8
#define CLK_DIS_WAIT_VAL	0x2

/* Transition delay shifts */
#define EN_REST_WAIT_SHIFT	20
#define EN_FEW_WAIT_SHIFT	16
#define CLK_DIS_WAIT_SHIFT	12

#define RETAIN_MEM	BIT(14)
#define RETAIN_PERIPH	BIT(13)

#define TIMEOUT_US	500

#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)

enum gdsc_status {
	GDSC_OFF,
	GDSC_ON
};

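/*
 * sc->dev is only set when runtime PM is enabled on the provider device (see
 * gdsc_register()), so these helpers quietly do nothing for all other GDSCs.
 */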
static int gdsc_pm_runtime_get(struct gdsc *sc)
{
	if (!sc->dev)
		return 0;

	return pm_runtime_resume_and_get(sc->dev);
}

static int gdsc_pm_runtime_put(struct gdsc *sc)
{
	if (!sc->dev)
		return 0;

	return pm_runtime_put_sync(sc->dev);
}

/* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{
	unsigned int reg;
	u32 val;
	int ret;

	if (sc->flags & POLL_CFG_GDSCR)
		reg = sc->gdscr + CFG_GDSCR_OFFSET;
	else if (sc->gds_hw_ctrl)
		reg = sc->gds_hw_ctrl;
	else
		reg = sc->gdscr;

	ret = regmap_read(sc->regmap, reg, &val);
	if (ret)
		return ret;

	if (sc->flags & POLL_CFG_GDSCR) {
		switch (status) {
		case GDSC_ON:
			return !!(val & GDSC_POWER_UP_COMPLETE);
		case GDSC_OFF:
			return !!(val & GDSC_POWER_DOWN_COMPLETE);
		}
	}

	switch (status) {
	case GDSC_ON:
		return !!(val & PWR_ON_MASK);
	case GDSC_OFF:
		return !(val & PWR_ON_MASK);
	}

	return -EINVAL;
}

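/* Switch the GDSC between software control and hardware ("HW trigger") control */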
static int gdsc_hwctrl(struct gdsc *sc, bool en)
{
	u32 val = en ? HW_CONTROL_MASK : 0;

	return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
}

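/*
 * Poll for up to TIMEOUT_US for the GDSC to reach @status. The extra check
 * after the loop catches the case where this thread was scheduled out long
 * enough for the timeout to expire between two successive reads.
 */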
static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
{
	ktime_t start;

	start = ktime_get();
	do {
		if (gdsc_check_status(sc, status))
			return 0;
	} while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US);

	if (gdsc_check_status(sc, status))
		return 0;

	return -ETIMEDOUT;
}

static int gdsc_update_collapse_bit(struct gdsc *sc, bool val)
{
	u32 reg, mask;
	int ret;

	if (sc->collapse_mask) {
		reg = sc->collapse_ctrl;
		mask = sc->collapse_mask;
	} else {
		reg = sc->gdscr;
		mask = SW_COLLAPSE_MASK;
	}

	ret = regmap_update_bits(sc->regmap, reg, mask, val ? mask : 0);
	if (ret)
		return ret;

	return 0;
}

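/*
 * Sequence a full power-state change: raise the optional parent supply before
 * powering on, flip the (software or external) collapse vote, poll for
 * completion and, on a successful power-off, drop the supply again.
 */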
static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
{
	int ret;

	if (status == GDSC_ON && sc->rsupply) {
		ret = regulator_enable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF);

	/* If disabling votable gdscs, don't poll on status */
	if ((sc->flags & VOTABLE) && status == GDSC_OFF) {
		/*
		 * Add a short delay here to ensure that an enable
		 * right after it was disabled does not put it in an
		 * unknown state
		 */
		udelay(TIMEOUT_US);
		return 0;
	}

	if (sc->gds_hw_ctrl) {
		/*
		 * The gds hw controller asserts/de-asserts the status bit soon
		 * after it receives a power on/off request from a master.
		 * The controller then takes around 8 xo cycles to start its
		 * internal state machine and update the status bit. During
		 * this time, the status bit does not reflect the true status
		 * of the core.
		 * Add a delay of 1 us between writing to the SW_COLLAPSE bit
		 * and polling the status bit.
		 */
		udelay(1);
	}

	ret = gdsc_poll_status(sc, status);
	WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n");

	if (!ret && status == GDSC_OFF && sc->rsupply) {
		ret = regulator_disable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	return ret;
}

static inline int gdsc_deassert_reset(struct gdsc *sc)
{
	int i;

	for (i = 0; i < sc->reset_count; i++)
		sc->rcdev->ops->deassert(sc->rcdev, sc->resets[i]);
	return 0;
}

static inline int gdsc_assert_reset(struct gdsc *sc)
{
	int i;

	for (i = 0; i < sc->reset_count; i++)
		sc->rcdev->ops->assert(sc->rcdev, sc->resets[i]);
	return 0;
}

static inline void gdsc_force_mem_on(struct gdsc *sc)
{
	int i;
	u32 mask = RETAIN_MEM;

	if (!(sc->flags & NO_RET_PERIPH))
		mask |= RETAIN_PERIPH;

	for (i = 0; i < sc->cxc_count; i++)
		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
}

static inline void gdsc_clear_mem_on(struct gdsc *sc)
{
	int i;
	u32 mask = RETAIN_MEM;

	if (!(sc->flags & NO_RET_PERIPH))
		mask |= RETAIN_PERIPH;

	for (i = 0; i < sc->cxc_count; i++)
		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
}

static inline void gdsc_deassert_clamp_io(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_CLAMP_IO_MASK, 0);
}

static inline void gdsc_assert_clamp_io(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_CLAMP_IO_MASK, 1);
}

static inline void gdsc_assert_reset_aon(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_RESET_MASK, 1);
	udelay(1);
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_RESET_MASK, 0);
}

static void gdsc_retain_ff_on(struct gdsc *sc)
{
	u32 mask = GDSC_RETAIN_FF_ENABLE;

	regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
}

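/*
 * Power-on sequence: (optionally) cycle the SW reset and remove the IO clamps,
 * un-collapse the core, force memory/peripheral retention while the domain is
 * up, then hand the GDSC to hardware control where that is supported.
 */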
static int _gdsc_enable(struct gdsc *sc)
{
	int ret;

	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_deassert_reset(sc);

	if (sc->flags & SW_RESET) {
		gdsc_assert_reset(sc);
		udelay(1);
		gdsc_deassert_reset(sc);
	}

	if (sc->flags & CLAMP_IO) {
		if (sc->flags & AON_RESET)
			gdsc_assert_reset_aon(sc);
		gdsc_deassert_clamp_io(sc);
	}

	ret = gdsc_toggle_logic(sc, GDSC_ON);
	if (ret)
		return ret;

	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_force_mem_on(sc);

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the power domain is
	 * enabled. Delay to account for this. A delay is also needed to ensure
	 * clocks are not enabled within 400ns of enabling power to the
	 * memories.
	 */
	udelay(1);

	/* Turn on HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, true);
		if (ret)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle. If firmware ends up polling status
		 * bits for the gdsc, it might read an 'on' status before
		 * the GDSC can finish the power cycle.
		 * We wait 1us before returning to ensure the firmware
		 * can't immediately poll the status bits.
		 */
		udelay(1);
	}

	if (sc->flags & RETAIN_FF_ENABLE)
		gdsc_retain_ff_on(sc);

	return 0;
}

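/*
 * genpd ->power_on()/->power_off() hooks: the provider device is kept
 * runtime-resumed for as long as the domain is powered.
 */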
static int gdsc_enable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	ret = gdsc_pm_runtime_get(sc);
	if (ret)
		return ret;

	return _gdsc_enable(sc);
}

static int _gdsc_disable(struct gdsc *sc)
{
	int ret;

	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_assert_reset(sc);

	/* Turn off HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, false);
		if (ret < 0)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle. If we poll the status bits for the gdsc
		 * before the power cycle is complete, we might wrongly
		 * read an 'on' status.
		 */
		udelay(1);

		ret = gdsc_poll_status(sc, GDSC_ON);
		if (ret)
			return ret;
	}

	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_clear_mem_on(sc);

	ret = gdsc_toggle_logic(sc, GDSC_OFF);
	if (ret)
		return ret;

	if (sc->flags & CLAMP_IO)
		gdsc_assert_clamp_io(sc);

	return 0;
}

static int gdsc_disable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	ret = _gdsc_disable(sc);

	gdsc_pm_runtime_put(sc);

	return ret;
}

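/*
 * One-time setup for a GDSC: program the state-machine wait times, sync the
 * kernel's view (regulator, runtime PM and collapse vote) with whatever state
 * the bootloader left the GDSC in, then register it as a generic power domain.
 */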
static int gdsc_init(struct gdsc *sc)
{
	u32 mask, val;
	int on, ret;

	/*
	 * Disable HW trigger: collapse/restore occur based on register writes.
	 * Disable SW override: use the hardware state-machine for sequencing.
	 * Configure wait time between states.
	 */
	mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
	       EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;

	if (!sc->en_rest_wait_val)
		sc->en_rest_wait_val = EN_REST_WAIT_VAL;
	if (!sc->en_few_wait_val)
		sc->en_few_wait_val = EN_FEW_WAIT_VAL;
	if (!sc->clk_dis_wait_val)
		sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;

	val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT |
	      sc->en_few_wait_val << EN_FEW_WAIT_SHIFT |
	      sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;

	ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
	if (ret)
		return ret;

	/* Force gdsc ON if only ON state is supported */
	if (sc->pwrsts == PWRSTS_ON) {
		ret = gdsc_toggle_logic(sc, GDSC_ON);
		if (ret)
			return ret;
	}

	on = gdsc_check_status(sc, GDSC_ON);
	if (on < 0)
		return on;

	if (on) {
		/* The regulator must be on, sync the kernel state */
		if (sc->rsupply) {
			ret = regulator_enable(sc->rsupply);
			if (ret < 0)
				return ret;
		}

		/* ...and the power-domain */
		ret = gdsc_pm_runtime_get(sc);
		if (ret) {
			if (sc->rsupply)
				regulator_disable(sc->rsupply);
			return ret;
		}

		/*
		 * Votable GDSCs can be ON due to a vote from another master.
		 * If a votable GDSC is ON, make sure we have a vote.
		 */
		if (sc->flags & VOTABLE) {
			ret = gdsc_update_collapse_bit(sc, false);
			if (ret)
				return ret;
		}

		/* Turn on HW trigger mode if supported */
		if (sc->flags & HW_CTRL) {
			ret = gdsc_hwctrl(sc, true);
			if (ret < 0)
				return ret;
		}

		/*
		 * Make sure the retain bit is set if the GDSC is already on,
		 * otherwise we end up turning off the GDSC and destroying all
		 * the register contents that we thought we were saving.
		 */
		if (sc->flags & RETAIN_FF_ENABLE)
			gdsc_retain_ff_on(sc);
	} else if (sc->flags & ALWAYS_ON) {
		/* If an ALWAYS_ON GDSC is not ON, turn it ON */
		gdsc_enable(&sc->pd);
		on = true;
	}

	if (on || (sc->pwrsts & PWRSTS_RET))
		gdsc_force_mem_on(sc);
	else
		gdsc_clear_mem_on(sc);

	if (sc->flags & ALWAYS_ON)
		sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
	if (!sc->pd.power_off)
		sc->pd.power_off = gdsc_disable;
	if (!sc->pd.power_on)
		sc->pd.power_on = gdsc_enable;
	pm_genpd_init(&sc->pd, NULL, !on);

	return 0;
}

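/*
 * gdsc_register() - register a block of GDSCs as a genpd provider
 *
 * Grabs the regulator supplies (where declared), initializes each GDSC, wires
 * up parent/child domain relationships and publishes the lot through a
 * onecell genpd provider for the controller's OF node.
 *
 * Typical usage from a clock-controller probe (a sketch; the descriptor,
 * regmap and reset-controller names here are illustrative):
 *
 *	static struct gdsc *my_cc_gdscs[] = { &my_gdsc };
 *
 *	static struct gdsc_desc my_cc_desc = {
 *		.scs = my_cc_gdscs,
 *		.num = ARRAY_SIZE(my_cc_gdscs),
 *	};
 *
 *	my_cc_desc.dev = &pdev->dev;
 *	ret = gdsc_register(&my_cc_desc, &my_reset_rcdev, my_regmap);
 */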
int gdsc_register(struct gdsc_desc *desc,
		  struct reset_controller_dev *rcdev, struct regmap *regmap)
{
	int i, ret;
	struct genpd_onecell_data *data;
	struct device *dev = desc->dev;
	struct gdsc **scs = desc->scs;
	size_t num = desc->num;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
				     GFP_KERNEL);
	if (!data->domains)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		if (!scs[i] || !scs[i]->supply)
			continue;

		scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply);
		if (IS_ERR(scs[i]->rsupply))
			return PTR_ERR(scs[i]->rsupply);
	}

	data->num_domains = num;
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (pm_runtime_enabled(dev))
			scs[i]->dev = dev;
		scs[i]->regmap = regmap;
		scs[i]->rcdev = rcdev;
		ret = gdsc_init(scs[i]);
		if (ret)
			return ret;
		data->domains[i] = &scs[i]->pd;
	}

	/* Add subdomains */
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (scs[i]->parent)
			pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
		else if (!IS_ERR_OR_NULL(dev->pm_domain))
			pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
	}

	return of_genpd_add_provider_onecell(dev->of_node, data);
}

void gdsc_unregister(struct gdsc_desc *desc)
{
	int i;
	struct device *dev = desc->dev;
	struct gdsc **scs = desc->scs;
	size_t num = desc->num;

	/* Remove subdomains */
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (scs[i]->parent)
			pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
		else if (!IS_ERR_OR_NULL(dev->pm_domain))
			pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
	}
	of_genpd_del_provider(dev->of_node);
}

/*
 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
 * running in the CX domain so the CPU doesn't need to know anything about the
 * GX domain EXCEPT....
 *
 * Hardware constraints dictate that the GX be powered down before the CX. If
 * the GMU crashes it could leave the GX on. In order to successfully bring
 * back the device the CPU needs to disable the GX headswitch. Since there is
 * no sane way to reach in and touch that register from deep inside the GPU
 * driver, we need infrastructure that lets the GPU driver guarantee that the
 * GX is off during this super special case. We do this by defining a GX gdsc
 * with a dummy enable function and a "default" disable function.
 *
 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
 * driver. During power up, nothing will happen from the CPU (and the GMU will
 * power up normally), but during power down this will ensure that the GX
 * domain is *really* off - this gives us a semi-standard way of doing what we
 * need.
 */
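/*
 * For example (a sketch; the register offset and power states below are
 * illustrative, not taken from any particular SoC), a GPU clock controller
 * would hook this up as:
 *
 *	static struct gdsc gx_gdsc = {
 *		.gdscr = 0x100c,
 *		.pd = {
 *			.name = "gx_gdsc",
 *			.power_on = gdsc_gx_do_nothing_enable,
 *		},
 *		.pwrsts = PWRSTS_OFF_ON,
 *	};
 */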
int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
{
	/* Do nothing but give genpd the impression that we were successful */
	return 0;
}
EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable);