Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

clk: rockchip: add support for half divider

The new Rockchip socs have optional half divider:
The formula is shown as:
freq_out = 2*freq_in / (2*div + 3)
This is the same for all of the new SoCs.

So we use "branch_half_divider" + "COMPOSITE_NOMUX_HALFDIV \
DIV_HALF \ COMPOSITE_HALFDIV \ COMPOSITE_NOGATE_HALFDIV"
to hook that special divider clock-type into our clock-tree.

Signed-off-by: Elaine Zhang <zhangqing@rock-chips.com>
Signed-off-by: Heiko Stuebner <heiko@sntech.de>

authored by

Elaine Zhang and committed by
Heiko Stuebner
956060a5 d409d59f

+323
+1
drivers/clk/rockchip/Makefile
··· 6 6 obj-y += clk.o 7 7 obj-y += clk-pll.o 8 8 obj-y += clk-cpu.o 9 + obj-y += clk-half-divider.o 9 10 obj-y += clk-inverter.o 10 11 obj-y += clk-mmc-phase.o 11 12 obj-y += clk-muxgrf.o
+227
drivers/clk/rockchip/clk-half-divider.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd 4 + */ 5 + 6 + #include <linux/slab.h> 7 + #include <linux/clk-provider.h> 8 + #include "clk.h" 9 + 10 + #define div_mask(width) ((1 << (width)) - 1) 11 + 12 + static bool _is_best_half_div(unsigned long rate, unsigned long now, 13 + unsigned long best, unsigned long flags) 14 + { 15 + if (flags & CLK_DIVIDER_ROUND_CLOSEST) 16 + return abs(rate - now) < abs(rate - best); 17 + 18 + return now <= rate && now > best; 19 + } 20 + 21 + static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw, 22 + unsigned long parent_rate) 23 + { 24 + struct clk_divider *divider = to_clk_divider(hw); 25 + unsigned int val; 26 + 27 + val = clk_readl(divider->reg) >> divider->shift; 28 + val &= div_mask(divider->width); 29 + val = val * 2 + 3; 30 + 31 + return DIV_ROUND_UP_ULL(((u64)parent_rate * 2), val); 32 + } 33 + 34 + static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate, 35 + unsigned long *best_parent_rate, u8 width, 36 + unsigned long flags) 37 + { 38 + unsigned int i, bestdiv = 0; 39 + unsigned long parent_rate, best = 0, now, maxdiv; 40 + unsigned long parent_rate_saved = *best_parent_rate; 41 + 42 + if (!rate) 43 + rate = 1; 44 + 45 + maxdiv = div_mask(width); 46 + 47 + if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) { 48 + parent_rate = *best_parent_rate; 49 + bestdiv = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate); 50 + if (bestdiv < 3) 51 + bestdiv = 0; 52 + else 53 + bestdiv = (bestdiv - 3) / 2; 54 + bestdiv = bestdiv > maxdiv ? 
maxdiv : bestdiv; 55 + return bestdiv; 56 + } 57 + 58 + /* 59 + * The maximum divider we can use without overflowing 60 + * unsigned long in rate * i below 61 + */ 62 + maxdiv = min(ULONG_MAX / rate, maxdiv); 63 + 64 + for (i = 0; i <= maxdiv; i++) { 65 + if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) { 66 + /* 67 + * It's the most ideal case if the requested rate can be 68 + * divided from parent clock without needing to change 69 + * parent rate, so return the divider immediately. 70 + */ 71 + *best_parent_rate = parent_rate_saved; 72 + return i; 73 + } 74 + parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 75 + ((u64)rate * (i * 2 + 3)) / 2); 76 + now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), 77 + (i * 2 + 3)); 78 + 79 + if (_is_best_half_div(rate, now, best, flags)) { 80 + bestdiv = i; 81 + best = now; 82 + *best_parent_rate = parent_rate; 83 + } 84 + } 85 + 86 + if (!bestdiv) { 87 + bestdiv = div_mask(width); 88 + *best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1); 89 + } 90 + 91 + return bestdiv; 92 + } 93 + 94 + static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate, 95 + unsigned long *prate) 96 + { 97 + struct clk_divider *divider = to_clk_divider(hw); 98 + int div; 99 + 100 + div = clk_half_divider_bestdiv(hw, rate, prate, 101 + divider->width, 102 + divider->flags); 103 + 104 + return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3); 105 + } 106 + 107 + static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate, 108 + unsigned long parent_rate) 109 + { 110 + struct clk_divider *divider = to_clk_divider(hw); 111 + unsigned int value; 112 + unsigned long flags = 0; 113 + u32 val; 114 + 115 + value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate); 116 + value = (value - 3) / 2; 117 + value = min_t(unsigned int, value, div_mask(divider->width)); 118 + 119 + if (divider->lock) 120 + spin_lock_irqsave(divider->lock, flags); 121 + else 122 + __acquire(divider->lock); 123 + 124 + if 
(divider->flags & CLK_DIVIDER_HIWORD_MASK) { 125 + val = div_mask(divider->width) << (divider->shift + 16); 126 + } else { 127 + val = clk_readl(divider->reg); 128 + val &= ~(div_mask(divider->width) << divider->shift); 129 + } 130 + val |= value << divider->shift; 131 + clk_writel(val, divider->reg); 132 + 133 + if (divider->lock) 134 + spin_unlock_irqrestore(divider->lock, flags); 135 + else 136 + __release(divider->lock); 137 + 138 + return 0; 139 + } 140 + 141 + const struct clk_ops clk_half_divider_ops = { 142 + .recalc_rate = clk_half_divider_recalc_rate, 143 + .round_rate = clk_half_divider_round_rate, 144 + .set_rate = clk_half_divider_set_rate, 145 + }; 146 + EXPORT_SYMBOL_GPL(clk_half_divider_ops); 147 + 148 + /** 149 + * Register a clock branch. 150 + * Most clock branches have a form like 151 + * 152 + * src1 --|--\ 153 + * |M |--[GATE]-[DIV]- 154 + * src2 --|--/ 155 + * 156 + * sometimes without one of those components. 157 + */ 158 + struct clk *rockchip_clk_register_halfdiv(const char *name, 159 + const char *const *parent_names, 160 + u8 num_parents, void __iomem *base, 161 + int muxdiv_offset, u8 mux_shift, 162 + u8 mux_width, u8 mux_flags, 163 + u8 div_shift, u8 div_width, 164 + u8 div_flags, int gate_offset, 165 + u8 gate_shift, u8 gate_flags, 166 + unsigned long flags, 167 + spinlock_t *lock) 168 + { 169 + struct clk *clk; 170 + struct clk_mux *mux = NULL; 171 + struct clk_gate *gate = NULL; 172 + struct clk_divider *div = NULL; 173 + const struct clk_ops *mux_ops = NULL, *div_ops = NULL, 174 + *gate_ops = NULL; 175 + 176 + if (num_parents > 1) { 177 + mux = kzalloc(sizeof(*mux), GFP_KERNEL); 178 + if (!mux) 179 + return ERR_PTR(-ENOMEM); 180 + 181 + mux->reg = base + muxdiv_offset; 182 + mux->shift = mux_shift; 183 + mux->mask = BIT(mux_width) - 1; 184 + mux->flags = mux_flags; 185 + mux->lock = lock; 186 + mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? 
&clk_mux_ro_ops 187 + : &clk_mux_ops; 188 + } 189 + 190 + if (gate_offset >= 0) { 191 + gate = kzalloc(sizeof(*gate), GFP_KERNEL); 192 + if (!gate) 193 + goto err_gate; 194 + 195 + gate->flags = gate_flags; 196 + gate->reg = base + gate_offset; 197 + gate->bit_idx = gate_shift; 198 + gate->lock = lock; 199 + gate_ops = &clk_gate_ops; 200 + } 201 + 202 + if (div_width > 0) { 203 + div = kzalloc(sizeof(*div), GFP_KERNEL); 204 + if (!div) 205 + goto err_div; 206 + 207 + div->flags = div_flags; 208 + div->reg = base + muxdiv_offset; 209 + div->shift = div_shift; 210 + div->width = div_width; 211 + div->lock = lock; 212 + div_ops = &clk_half_divider_ops; 213 + } 214 + 215 + clk = clk_register_composite(NULL, name, parent_names, num_parents, 216 + mux ? &mux->hw : NULL, mux_ops, 217 + div ? &div->hw : NULL, div_ops, 218 + gate ? &gate->hw : NULL, gate_ops, 219 + flags); 220 + 221 + return clk; 222 + err_div: 223 + kfree(gate); 224 + err_gate: 225 + kfree(mux); 226 + return ERR_PTR(-ENOMEM); 227 + }
+10
drivers/clk/rockchip/clk.c
··· 492 492 list->gate_flags, flags, list->child, 493 493 &ctx->lock); 494 494 break; 495 + case branch_half_divider: 496 + clk = rockchip_clk_register_halfdiv(list->name, 497 + list->parent_names, list->num_parents, 498 + ctx->reg_base, list->muxdiv_offset, 499 + list->mux_shift, list->mux_width, 500 + list->mux_flags, list->div_shift, 501 + list->div_width, list->div_flags, 502 + list->gate_offset, list->gate_shift, 503 + list->gate_flags, flags, &ctx->lock); 504 + break; 495 505 case branch_gate: 496 506 flags |= CLK_SET_RATE_PARENT; 497 507
+85
drivers/clk/rockchip/clk.h
··· 354 354 branch_inverter, 355 355 branch_factor, 356 356 branch_ddrclk, 357 + branch_half_divider, 357 358 }; 358 359 359 360 struct rockchip_clk_branch { ··· 685 684 .gate_flags = gf, \ 686 685 } 687 686 687 + #define COMPOSITE_HALFDIV(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\ 688 + df, go, gs, gf) \ 689 + { \ 690 + .id = _id, \ 691 + .branch_type = branch_half_divider, \ 692 + .name = cname, \ 693 + .parent_names = pnames, \ 694 + .num_parents = ARRAY_SIZE(pnames), \ 695 + .flags = f, \ 696 + .muxdiv_offset = mo, \ 697 + .mux_shift = ms, \ 698 + .mux_width = mw, \ 699 + .mux_flags = mf, \ 700 + .div_shift = ds, \ 701 + .div_width = dw, \ 702 + .div_flags = df, \ 703 + .gate_offset = go, \ 704 + .gate_shift = gs, \ 705 + .gate_flags = gf, \ 706 + } 707 + 708 + #define COMPOSITE_NOGATE_HALFDIV(_id, cname, pnames, f, mo, ms, mw, mf, \ 709 + ds, dw, df) \ 710 + { \ 711 + .id = _id, \ 712 + .branch_type = branch_half_divider, \ 713 + .name = cname, \ 714 + .parent_names = pnames, \ 715 + .num_parents = ARRAY_SIZE(pnames), \ 716 + .flags = f, \ 717 + .muxdiv_offset = mo, \ 718 + .mux_shift = ms, \ 719 + .mux_width = mw, \ 720 + .mux_flags = mf, \ 721 + .div_shift = ds, \ 722 + .div_width = dw, \ 723 + .div_flags = df, \ 724 + .gate_offset = -1, \ 725 + } 726 + 727 + #define COMPOSITE_NOMUX_HALFDIV(_id, cname, pname, f, mo, ds, dw, df, \ 728 + go, gs, gf) \ 729 + { \ 730 + .id = _id, \ 731 + .branch_type = branch_half_divider, \ 732 + .name = cname, \ 733 + .parent_names = (const char *[]){ pname }, \ 734 + .num_parents = 1, \ 735 + .flags = f, \ 736 + .muxdiv_offset = mo, \ 737 + .div_shift = ds, \ 738 + .div_width = dw, \ 739 + .div_flags = df, \ 740 + .gate_offset = go, \ 741 + .gate_shift = gs, \ 742 + .gate_flags = gf, \ 743 + } 744 + 745 + #define DIV_HALF(_id, cname, pname, f, o, s, w, df) \ 746 + { \ 747 + .id = _id, \ 748 + .branch_type = branch_half_divider, \ 749 + .name = cname, \ 750 + .parent_names = (const char *[]){ pname }, \ 751 + .num_parents 
= 1, \ 752 + .flags = f, \ 753 + .muxdiv_offset = o, \ 754 + .div_shift = s, \ 755 + .div_width = w, \ 756 + .div_flags = df, \ 757 + .gate_offset = -1, \ 758 + } 759 + 688 760 struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np, 689 761 void __iomem *base, unsigned long nr_clks); 690 762 void rockchip_clk_of_add_provider(struct device_node *np, ··· 781 707 unsigned int reg, void (*cb)(void)); 782 708 783 709 #define ROCKCHIP_SOFTRST_HIWORD_MASK BIT(0) 710 + 711 + struct clk *rockchip_clk_register_halfdiv(const char *name, 712 + const char *const *parent_names, 713 + u8 num_parents, void __iomem *base, 714 + int muxdiv_offset, u8 mux_shift, 715 + u8 mux_width, u8 mux_flags, 716 + u8 div_shift, u8 div_width, 717 + u8 div_flags, int gate_offset, 718 + u8 gate_shift, u8 gate_flags, 719 + unsigned long flags, 720 + spinlock_t *lock); 784 721 785 722 #ifdef CONFIG_RESET_CONTROLLER 786 723 void rockchip_register_softrst(struct device_node *np,