Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2018 MediaTek Inc.
4 * Author: Owen Chen <owen.chen@mediatek.com>
5 */
6
7#include <linux/clk.h>
8#include <linux/clk-provider.h>
9#include <linux/compiler_types.h>
10#include <linux/container_of.h>
11#include <linux/dev_printk.h>
12#include <linux/err.h>
13#include <linux/mfd/syscon.h>
14#include <linux/module.h>
15#include <linux/regmap.h>
16#include <linux/spinlock.h>
17#include <linux/slab.h>
18
19#include "clk-mtk.h"
20#include "clk-mux.h"
21
22#define MTK_WAIT_FENC_DONE_US 30
23
/*
 * struct mtk_clk_mux - runtime state for one MediaTek mux clock
 * @hw:		clk_hw handle registered with the common clock framework
 * @regmap:	regmap for the mux/gate/update registers
 * @regmap_hwv:	regmap for the hardware-voter registers (NULL when unused)
 * @data:	static register layout and bit positions for this mux
 * @lock:	optional spinlock shared across clocks of the same unit;
 *		may be NULL, in which case no locking is performed
 * @reparent:	set when the parent selection changed so that the next
 *		enable re-triggers the update bit (see enable_setclr)
 */
struct mtk_clk_mux {
	struct clk_hw hw;
	struct regmap *regmap;
	struct regmap *regmap_hwv;
	const struct mtk_mux *data;
	spinlock_t *lock;
	bool reparent;
};
32
/* Convert a clk_hw pointer back to its enclosing mtk_clk_mux. */
static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
{
	return container_of(hw, struct mtk_clk_mux, hw);
}
37
/*
 * Ungate the clock by clearing the gate bit through the CLR register, then
 * poll the fenc status monitor until the corresponding bit asserts.
 *
 * Returns 0 on success, or the poll-timeout errno if the fenc bit does not
 * assert within MTK_WAIT_FENC_DONE_US.
 */
static int mtk_clk_mux_fenc_enable_setclr(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	unsigned long flags;
	u32 val;
	int ret;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock); /* sparse annotation only, no real lock */

	/* Writing the gate bit to the CLR offset ungates the clock. */
	regmap_write(mux->regmap, mux->data->clr_ofs,
		     BIT(mux->data->gate_shift));

	/* Wait for the hardware to report the clock as running. */
	ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
					      val, val & BIT(mux->data->fenc_shift), 1,
					      MTK_WAIT_FENC_DONE_US);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return ret;
}
64
/*
 * Ungate the clock by clearing the gate bit through the CLR register.
 * Unlike the fenc variant, no status polling is done; always returns 0.
 */
static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	unsigned long flags = 0;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock); /* sparse annotation only, no real lock */

	regmap_write(mux->regmap, mux->data->clr_ofs,
		     BIT(mux->data->gate_shift));

	/*
	 * If the parent has been changed when the clock was disabled, it will
	 * not be effective yet. Set the update bit to ensure the mux gets
	 * updated.
	 */
	if (mux->reparent && mux->data->upd_shift >= 0) {
		regmap_write(mux->regmap, mux->data->upd_ofs,
			     BIT(mux->data->upd_shift));
		mux->reparent = false;
	}

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}
96
97static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
98{
99 struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
100
101 regmap_write(mux->regmap, mux->data->set_ofs,
102 BIT(mux->data->gate_shift));
103}
104
105static int mtk_clk_mux_fenc_is_enabled(struct clk_hw *hw)
106{
107 struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
108 u32 val;
109
110 regmap_read(mux->regmap, mux->data->fenc_sta_mon_ofs, &val);
111
112 return !!(val & BIT(mux->data->fenc_shift));
113}
114
115static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
116{
117 struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
118 u32 val;
119
120 regmap_read(mux->regmap, mux->data->mux_ofs, &val);
121
122 return (val & BIT(mux->data->gate_shift)) == 0;
123}
124
/*
 * Enable the clock through the hardware voter: cast a vote via the HWV SET
 * register, wait for the vote to be acknowledged in the HWV status register,
 * then wait for the fenc status monitor to report the clock running.
 *
 * Returns 0 on success or a poll-timeout errno from either wait stage.
 */
static int mtk_clk_mux_hwv_fenc_enable(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 val;
	int ret;

	regmap_write(mux->regmap_hwv, mux->data->hwv_set_ofs,
		     BIT(mux->data->gate_shift));

	/* First wait for the hardware voter to acknowledge the vote. */
	ret = regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
					      val, val & BIT(mux->data->gate_shift), 0,
					      MTK_WAIT_HWV_DONE_US);
	if (ret)
		return ret;

	/* Then wait for the clock itself to come up. */
	ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
					      val, val & BIT(mux->data->fenc_shift), 1,
					      MTK_WAIT_FENC_DONE_US);

	return ret;
}
146
/*
 * Drop this master's vote through the hardware voter CLR register and wait
 * for the HWV status register to settle. The poll result is intentionally
 * ignored: disable cannot fail in the clk framework.
 *
 * NOTE(review): the poll condition waits for the gate bit to be *set* in the
 * status register, which is the same condition the enable path waits for —
 * confirm against the HWV status register semantics (done-flag vs. vote
 * state) that this is intended.
 */
static void mtk_clk_mux_hwv_disable(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 val;

	regmap_write(mux->regmap_hwv, mux->data->hwv_clr_ofs,
		     BIT(mux->data->gate_shift));

	regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
					val, (val & BIT(mux->data->gate_shift)),
					0, MTK_WAIT_HWV_DONE_US);
}
159
160static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
161{
162 struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
163 u32 mask = GENMASK(mux->data->mux_width - 1, 0);
164 u32 val;
165
166 regmap_read(mux->regmap, mux->data->mux_ofs, &val);
167 val = (val >> mux->data->mux_shift) & mask;
168
169 if (mux->data->parent_index) {
170 int i;
171
172 for (i = 0; i < mux->data->num_parents; i++)
173 if (mux->data->parent_index[i] == val)
174 return i;
175
176 /* Not found: return an impossible index to generate error */
177 return mux->data->num_parents + 1;
178 }
179
180 return val;
181}
182
/*
 * Select a new parent for the mux. The framework index is first translated
 * through the optional parent_index table to the hardware value. Registers
 * are only touched when the selection actually changes: the mux field is
 * cleared via the CLR register, the new value written via the SET register,
 * and, when an update bit exists, the update bit is triggered and the mux
 * is flagged as reparented so the next enable re-triggers it (the update
 * may not take effect while the clock is gated).
 *
 * Always returns 0.
 */
static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 mask = GENMASK(mux->data->mux_width - 1, 0);
	u32 val, orig;
	unsigned long flags = 0;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock); /* sparse annotation only, no real lock */

	/* Translate the framework index to the hardware mux value. */
	if (mux->data->parent_index)
		index = mux->data->parent_index[index];

	regmap_read(mux->regmap, mux->data->mux_ofs, &orig);
	val = (orig & ~(mask << mux->data->mux_shift))
			| (index << mux->data->mux_shift);

	/* Skip the register writes when the parent is already selected. */
	if (val != orig) {
		regmap_write(mux->regmap, mux->data->clr_ofs,
				mask << mux->data->mux_shift);
		regmap_write(mux->regmap, mux->data->set_ofs,
				index << mux->data->mux_shift);

		if (mux->data->upd_shift >= 0) {
			regmap_write(mux->regmap, mux->data->upd_ofs,
					BIT(mux->data->upd_shift));
			mux->reparent = true;
		}
	}

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}
222
/* Delegate rate determination to the generic mux helper with no flags. */
static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
228
229static bool mtk_clk_mux_uses_hwv(const struct clk_ops *ops)
230{
231 if (ops == &mtk_mux_gate_hwv_fenc_clr_set_upd_ops)
232 return true;
233
234 return false;
235}
236
/* Plain mux: parent selection via SET/CLR/UPD registers, no gate control. */
const struct clk_ops mtk_mux_clr_set_upd_ops = {
	.get_parent = mtk_clk_mux_get_parent,
	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
	.determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_clr_set_upd_ops);
243
/* Gated mux: SET/CLR/UPD parent selection plus a gate bit, no fenc status. */
const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
	.enable = mtk_clk_mux_enable_setclr,
	.disable = mtk_clk_mux_disable_setclr,
	.is_enabled = mtk_clk_mux_is_enabled,
	.get_parent = mtk_clk_mux_get_parent,
	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
	.determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
253
/* Gated mux with fenc: enable and is_enabled use the fenc status monitor. */
const struct clk_ops mtk_mux_gate_fenc_clr_set_upd_ops = {
	.enable = mtk_clk_mux_fenc_enable_setclr,
	.disable = mtk_clk_mux_disable_setclr,
	.is_enabled = mtk_clk_mux_fenc_is_enabled,
	.get_parent = mtk_clk_mux_get_parent,
	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
	.determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_fenc_clr_set_upd_ops);
263
/* Hardware-voter gated mux with fenc: gate votes go through regmap_hwv. */
const struct clk_ops mtk_mux_gate_hwv_fenc_clr_set_upd_ops = {
	.enable = mtk_clk_mux_hwv_fenc_enable,
	.disable = mtk_clk_mux_hwv_disable,
	.is_enabled = mtk_clk_mux_fenc_is_enabled,
	.get_parent = mtk_clk_mux_get_parent,
	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
	.determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_hwv_fenc_clr_set_upd_ops);
273
274static struct clk_hw *mtk_clk_register_mux(struct device *dev,
275 const struct mtk_mux *mux,
276 struct regmap *regmap,
277 struct regmap *regmap_hwv,
278 spinlock_t *lock)
279{
280 struct mtk_clk_mux *clk_mux;
281 struct clk_init_data init = {};
282 int ret;
283
284 clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);
285 if (!clk_mux)
286 return ERR_PTR(-ENOMEM);
287
288 init.name = mux->name;
289 init.flags = mux->flags;
290 init.parent_names = mux->parent_names;
291 init.num_parents = mux->num_parents;
292 init.ops = mux->ops;
293 if (mtk_clk_mux_uses_hwv(init.ops) && !regmap_hwv)
294 return dev_err_ptr_probe(
295 dev, -ENXIO,
296 "regmap not found for hardware voter clocks\n");
297
298 clk_mux->regmap = regmap;
299 clk_mux->regmap_hwv = regmap_hwv;
300 clk_mux->data = mux;
301 clk_mux->lock = lock;
302 clk_mux->hw.init = &init;
303
304 ret = clk_hw_register(dev, &clk_mux->hw);
305 if (ret) {
306 kfree(clk_mux);
307 return ERR_PTR(ret);
308 }
309
310 return &clk_mux->hw;
311}
312
/* Unregister a mux previously created by mtk_clk_register_mux and free it. */
static void mtk_clk_unregister_mux(struct clk_hw *hw)
{
	if (!hw)
		return;

	clk_hw_unregister(hw);
	kfree(to_mtk_clk_mux(hw));
}
324
/*
 * Register an array of mtk_mux clocks against the regmap of @node and store
 * the resulting clk_hw pointers in @clk_data by mux ID. Slots already
 * holding a valid clk_hw are skipped with a warning. On failure, every mux
 * registered by this call is unregistered again (in reverse order) and its
 * slot reset to ERR_PTR(-ENOENT).
 *
 * Returns 0 on success or a negative errno.
 */
int mtk_clk_register_muxes(struct device *dev,
			   const struct mtk_mux *muxes,
			   int num, struct device_node *node,
			   spinlock_t *lock,
			   struct clk_hw_onecell_data *clk_data)
{
	struct regmap *regmap;
	struct regmap *regmap_hwv;
	struct clk_hw *hw;
	int i;

	regmap = device_node_to_regmap(node);
	if (IS_ERR(regmap)) {
		pr_err("Cannot find regmap for %pOF: %pe\n", node, regmap);
		return PTR_ERR(regmap);
	}

	/* The hardware-voter regmap is optional; only its lookup may fail. */
	regmap_hwv = mtk_clk_get_hwv_regmap(node);
	if (IS_ERR(regmap_hwv))
		return dev_err_probe(
			dev, PTR_ERR(regmap_hwv),
			"Cannot find hardware voter regmap for %pOF\n", node);

	for (i = 0; i < num; i++) {
		const struct mtk_mux *mux = &muxes[i];

		if (!IS_ERR_OR_NULL(clk_data->hws[mux->id])) {
			pr_warn("%pOF: Trying to register duplicate clock ID: %d\n",
				node, mux->id);
			continue;
		}

		hw = mtk_clk_register_mux(dev, mux, regmap, regmap_hwv, lock);

		if (IS_ERR(hw)) {
			pr_err("Failed to register clk %s: %pe\n", mux->name,
			       hw);
			goto err;
		}

		clk_data->hws[mux->id] = hw;
	}

	return 0;

err:
	/* Unwind only the entries registered by this call, newest first. */
	while (--i >= 0) {
		const struct mtk_mux *mux = &muxes[i];

		if (IS_ERR_OR_NULL(clk_data->hws[mux->id]))
			continue;

		mtk_clk_unregister_mux(clk_data->hws[mux->id]);
		clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
	}

	return PTR_ERR(hw);
}
EXPORT_SYMBOL_GPL(mtk_clk_register_muxes);
383EXPORT_SYMBOL_GPL(mtk_clk_register_muxes);
384
385void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
386 struct clk_hw_onecell_data *clk_data)
387{
388 int i;
389
390 if (!clk_data)
391 return;
392
393 for (i = num; i > 0; i--) {
394 const struct mtk_mux *mux = &muxes[i - 1];
395
396 if (IS_ERR_OR_NULL(clk_data->hws[mux->id]))
397 continue;
398
399 mtk_clk_unregister_mux(clk_data->hws[mux->id]);
400 clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
401 }
402}
403EXPORT_SYMBOL_GPL(mtk_clk_unregister_muxes);
404
/*
 * This clock notifier is called when the frequency of the parent
 * PLL clock is to be changed. The idea is to switch the parent to a
 * stable clock, such as the main oscillator, while the PLL frequency
 * stabilizes.
 */
static int mtk_clk_mux_notifier_cb(struct notifier_block *nb,
				   unsigned long event, void *_data)
{
	struct clk_notifier_data *data = _data;
	struct clk_hw *hw = __clk_get_hw(data->clk);
	struct mtk_mux_nb *mux_nb = to_mtk_mux_nb(nb);
	int ret = 0;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* Remember the current parent, then park on the bypass. */
		mux_nb->original_index = mux_nb->ops->get_parent(hw);
		ret = mux_nb->ops->set_parent(hw, mux_nb->bypass_index);
		break;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
		/* Rate change finished or aborted: restore the old parent. */
		ret = mux_nb->ops->set_parent(hw, mux_nb->original_index);
		break;
	}

	return notifier_from_errno(ret);
}
432
/*
 * Hook up the mux bypass notifier for @clk, managed by @dev's devres.
 * Returns the result of devm_clk_notifier_register().
 */
int devm_mtk_clk_mux_notifier_register(struct device *dev, struct clk *clk,
				       struct mtk_mux_nb *mux_nb)
{
	mux_nb->nb.notifier_call = mtk_clk_mux_notifier_cb;

	return devm_clk_notifier_register(dev, clk, &mux_nb->nb);
}
EXPORT_SYMBOL_GPL(devm_mtk_clk_mux_notifier_register);
441
442MODULE_LICENSE("GPL");