Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

clk: ti: dpll: convert DPLL support code to use clk_hw instead of clk ptrs

Convert DPLL support code to use clk_hw pointers for reference and bypass
clocks. This allows us to use clk_hw_* APIs for accessing any required
parameters for these clocks, avoiding some locking problems at least with
DPLL enable code: this previously used clk_get_rate, which takes a mutex
and is therefore not safe to call under clk_enable / clk_disable.

Signed-off-by: Tero Kristo <t-kristo@ti.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>

Authored by Tero Kristo and committed by Stephen Boyd
b6f51284 1e594039

+56 -32
+17 -5
drivers/clk/ti/apll.c
··· 140 140 struct dpll_data *ad = clk_hw->dpll_data; 141 141 struct clk *clk; 142 142 143 - ad->clk_ref = of_clk_get(node, 0); 144 - ad->clk_bypass = of_clk_get(node, 1); 145 - 146 - if (IS_ERR(ad->clk_ref) || IS_ERR(ad->clk_bypass)) { 147 - pr_debug("clk-ref or clk-bypass for %s not ready, retry\n", 143 + clk = of_clk_get(node, 0); 144 + if (IS_ERR(clk)) { 145 + pr_debug("clk-ref for %s not ready, retry\n", 148 146 node->name); 149 147 if (!ti_clk_retry_init(node, hw, omap_clk_register_apll)) 150 148 return; 151 149 152 150 goto cleanup; 153 151 } 152 + 153 + ad->clk_ref = __clk_get_hw(clk); 154 + 155 + clk = of_clk_get(node, 1); 156 + if (IS_ERR(clk)) { 157 + pr_debug("clk-bypass for %s not ready, retry\n", 158 + node->name); 159 + if (!ti_clk_retry_init(node, hw, omap_clk_register_apll)) 160 + return; 161 + 162 + goto cleanup; 163 + } 164 + 165 + ad->clk_bypass = __clk_get_hw(clk); 154 166 155 167 clk = clk_register(NULL, &clk_hw->hw); 156 168 if (!IS_ERR(clk)) {
+3 -3
drivers/clk/ti/clkt_dpll.c
··· 254 254 v >>= __ffs(dd->enable_mask); 255 255 256 256 if (_omap2_dpll_is_in_bypass(v)) 257 - return clk_get_rate(dd->clk_bypass); 257 + return clk_hw_get_rate(dd->clk_bypass); 258 258 259 259 v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg); 260 260 dpll_mult = v & dd->mult_mask; ··· 262 262 dpll_div = v & dd->div1_mask; 263 263 dpll_div >>= __ffs(dd->div1_mask); 264 264 265 - dpll_clk = (u64)clk_get_rate(dd->clk_ref) * dpll_mult; 265 + dpll_clk = (u64)clk_hw_get_rate(dd->clk_ref) * dpll_mult; 266 266 do_div(dpll_clk, dpll_div + 1); 267 267 268 268 return dpll_clk; ··· 301 301 302 302 dd = clk->dpll_data; 303 303 304 - ref_rate = clk_get_rate(dd->clk_ref); 304 + ref_rate = clk_hw_get_rate(dd->clk_ref); 305 305 clk_name = clk_hw_get_name(hw); 306 306 pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n", 307 307 clk_name, target_rate);
+20 -7
drivers/clk/ti/dpll.c
··· 147 147 struct dpll_data *dd = clk_hw->dpll_data; 148 148 struct clk *clk; 149 149 150 - dd->clk_ref = of_clk_get(node, 0); 151 - dd->clk_bypass = of_clk_get(node, 1); 152 - 153 - if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) { 154 - pr_debug("clk-ref or clk-bypass missing for %s, retry later\n", 150 + clk = of_clk_get(node, 0); 151 + if (IS_ERR(clk)) { 152 + pr_debug("clk-ref missing for %s, retry later\n", 155 153 node->name); 156 154 if (!ti_clk_retry_init(node, hw, _register_dpll)) 157 155 return; 158 156 159 157 goto cleanup; 160 158 } 159 + 160 + dd->clk_ref = __clk_get_hw(clk); 161 + 162 + clk = of_clk_get(node, 1); 163 + 164 + if (IS_ERR(clk)) { 165 + pr_debug("clk-bypass missing for %s, retry later\n", 166 + node->name); 167 + if (!ti_clk_retry_init(node, hw, _register_dpll)) 168 + return; 169 + 170 + goto cleanup; 171 + } 172 + 173 + dd->clk_bypass = __clk_get_hw(clk); 161 174 162 175 /* register the clock */ 163 176 clk = clk_register(NULL, &clk_hw->hw); ··· 264 251 dd->recal_en_bit = dpll->recal_en_bit; 265 252 dd->recal_st_bit = dpll->recal_st_bit; 266 253 267 - dd->clk_ref = clk_ref; 268 - dd->clk_bypass = clk_bypass; 254 + dd->clk_ref = __clk_get_hw(clk_ref); 255 + dd->clk_bypass = __clk_get_hw(clk_bypass); 269 256 270 257 if (dpll->flags & CLKF_CORE) 271 258 ops = &omap3_dpll_core_ck_ops;
+8 -9
drivers/clk/ti/dpll3xxx.c
··· 98 98 unsigned long fint; 99 99 u16 f = 0; 100 100 101 - fint = clk_get_rate(clk->dpll_data->clk_ref) / n; 101 + fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n; 102 102 103 103 pr_debug("clock: fint is %lu\n", fint); 104 104 ··· 460 460 461 461 parent = clk_hw_get_parent(hw); 462 462 463 - if (clk_hw_get_rate(hw) == 464 - clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) { 465 - WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); 463 + if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) { 464 + WARN_ON(parent != dd->clk_bypass); 466 465 r = _omap3_noncore_dpll_bypass(clk); 467 466 } else { 468 - WARN_ON(parent != __clk_get_hw(dd->clk_ref)); 467 + WARN_ON(parent != dd->clk_ref); 469 468 r = _omap3_noncore_dpll_lock(clk); 470 469 } 471 470 ··· 512 513 if (!dd) 513 514 return -EINVAL; 514 515 515 - if (clk_get_rate(dd->clk_bypass) == req->rate && 516 + if (clk_hw_get_rate(dd->clk_bypass) == req->rate && 516 517 (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { 517 - req->best_parent_hw = __clk_get_hw(dd->clk_bypass); 518 + req->best_parent_hw = dd->clk_bypass; 518 519 } else { 519 520 req->rate = omap2_dpll_round_rate(hw, req->rate, 520 521 &req->best_parent_rate); 521 - req->best_parent_hw = __clk_get_hw(dd->clk_ref); 522 + req->best_parent_hw = dd->clk_ref; 522 523 } 523 524 524 525 req->best_parent_rate = req->rate; ··· 576 577 if (!dd) 577 578 return -EINVAL; 578 579 579 - if (clk_hw_get_parent(hw) != __clk_get_hw(dd->clk_ref)) 580 + if (clk_hw_get_parent(hw) != dd->clk_ref) 580 581 return -EINVAL; 581 582 582 583 if (dd->last_rounded_rate == 0)
+4 -4
drivers/clk/ti/dpll44xx.c
··· 94 94 { 95 95 long fint, fout; 96 96 97 - fint = clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1); 97 + fint = clk_hw_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1); 98 98 fout = fint * dd->last_rounded_m; 99 99 100 100 if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX)) ··· 212 212 if (!dd) 213 213 return -EINVAL; 214 214 215 - if (clk_get_rate(dd->clk_bypass) == req->rate && 215 + if (clk_hw_get_rate(dd->clk_bypass) == req->rate && 216 216 (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { 217 - req->best_parent_hw = __clk_get_hw(dd->clk_bypass); 217 + req->best_parent_hw = dd->clk_bypass; 218 218 } else { 219 219 req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate, 220 220 &req->best_parent_rate); 221 - req->best_parent_hw = __clk_get_hw(dd->clk_ref); 221 + req->best_parent_hw = dd->clk_ref; 222 222 } 223 223 224 224 req->best_parent_rate = req->rate;
+4 -4
include/linux/clk/ti.h
··· 23 23 * @mult_div1_reg: register containing the DPLL M and N bitfields 24 24 * @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg 25 25 * @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg 26 - * @clk_bypass: struct clk pointer to the clock's bypass clock input 27 - * @clk_ref: struct clk pointer to the clock's reference clock input 26 + * @clk_bypass: struct clk_hw pointer to the clock's bypass clock input 27 + * @clk_ref: struct clk_hw pointer to the clock's reference clock input 28 28 * @control_reg: register containing the DPLL mode bitfield 29 29 * @enable_mask: mask of the DPLL mode bitfield in @control_reg 30 30 * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate() ··· 69 69 void __iomem *mult_div1_reg; 70 70 u32 mult_mask; 71 71 u32 div1_mask; 72 - struct clk *clk_bypass; 73 - struct clk *clk_ref; 72 + struct clk_hw *clk_bypass; 73 + struct clk_hw *clk_ref; 74 74 void __iomem *control_reg; 75 75 u32 enable_mask; 76 76 unsigned long last_rounded_rate;