
clk: intel: Add CGU clock driver for a new SoC

Clock Generation Unit (CGU) is a new clock controller IP on a
forthcoming Intel network processor SoC named Lightning Mountain (LGM).
It provides programming interfaces to control and configure all CPU and
peripheral clocks. Add a common clock framework based clock controller
driver for the CGU.

Signed-off-by: Rahul Tanwar <rahul.tanwar@linux.intel.com>
Link: https://lkml.kernel.org/r/42a4f71847714df482bacffdcd84341a4052800b.1587102634.git.rahul.tanwar@linux.intel.com
[sboyd@kernel.org: Kill init function to alloc and cleanup newline]
Signed-off-by: Stephen Boyd <sboyd@kernel.org>

Authored by: Rahul Tanwar
Committed by: Stephen Boyd
d058fd9e e2266f4c

7 files changed, 1612 insertions(+)

drivers/clk/Kconfig (+1)

 source "drivers/clk/tegra/Kconfig"
 source "drivers/clk/ti/Kconfig"
 source "drivers/clk/uniphier/Kconfig"
+source "drivers/clk/x86/Kconfig"
 source "drivers/clk/zynqmp/Kconfig"

 endmenu
drivers/clk/x86/Kconfig (new file, +8)

# SPDX-License-Identifier: GPL-2.0-only
config CLK_LGM_CGU
	depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST)
	select OF_EARLY_FLATTREE
	bool "Clock driver for Lightning Mountain(LGM) platform"
	help
	  Clock Generation Unit(CGU) driver for Intel Lightning Mountain(LGM)
	  network processor SoC.
drivers/clk/x86/Makefile (+1)

 obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE)	+= clk-st.o
 clk-x86-lpss-objs			:= clk-lpt.o
 obj-$(CONFIG_X86_INTEL_LPSS)		+= clk-x86-lpss.o
+obj-$(CONFIG_CLK_LGM_CGU)		+= clk-cgu.o clk-cgu-pll.o clk-lgm.o
drivers/clk/x86/clk-cgu-pll.c (new file, +156)

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>

#include "clk-cgu.h"

#define to_lgm_clk_pll(_hw)	container_of(_hw, struct lgm_clk_pll, hw)
#define PLL_REF_DIV(x)		((x) + 0x08)

/*
 * Calculate formula:
 * rate = (prate * mult + (prate * frac) / frac_div) / div
 */
static unsigned long
lgm_pll_calc_rate(unsigned long prate, unsigned int mult,
		  unsigned int div, unsigned int frac, unsigned int frac_div)
{
	u64 crate, frate, rate64;

	rate64 = prate;
	crate = rate64 * mult;
	frate = rate64 * frac;
	do_div(frate, frac_div);
	crate += frate;
	do_div(crate, div);

	return crate;
}

static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned int div, mult, frac;
	unsigned long flags;

	spin_lock_irqsave(&pll->lock, flags);
	mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
	div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
	frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
	spin_unlock_irqrestore(&pll->lock, flags);

	if (pll->type == TYPE_LJPLL)
		div *= 4;

	return lgm_pll_calc_rate(prate, mult, div, frac, BIT(24));
}

static int lgm_pll_is_enabled(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&pll->lock, flags);
	ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
	spin_unlock_irqrestore(&pll->lock, flags);

	return ret;
}

static int lgm_pll_enable(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned long flags;
	u32 val;
	int ret;

	spin_lock_irqsave(&pll->lock, flags);
	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
	ret = readl_poll_timeout_atomic(pll->membase + pll->reg,
					val, (val & 0x1), 1, 100);
	spin_unlock_irqrestore(&pll->lock, flags);

	return ret;
}

static void lgm_pll_disable(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned long flags;

	spin_lock_irqsave(&pll->lock, flags);
	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
	spin_unlock_irqrestore(&pll->lock, flags);
}

static const struct clk_ops lgm_pll_ops = {
	.recalc_rate = lgm_pll_recalc_rate,
	.is_enabled = lgm_pll_is_enabled,
	.enable = lgm_pll_enable,
	.disable = lgm_pll_disable,
};

static struct clk_hw *
lgm_clk_register_pll(struct lgm_clk_provider *ctx,
		     const struct lgm_pll_clk_data *list)
{
	struct clk_init_data init = {};
	struct lgm_clk_pll *pll;
	struct device *dev = ctx->dev;
	struct clk_hw *hw;
	int ret;

	init.ops = &lgm_pll_ops;
	init.name = list->name;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = list->num_parents;

	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	pll->membase = ctx->membase;
	pll->lock = ctx->lock;
	pll->reg = list->reg;
	pll->flags = list->flags;
	pll->type = list->type;
	pll->hw.init = &init;

	hw = &pll->hw;
	ret = clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw;
}

int lgm_clk_register_plls(struct lgm_clk_provider *ctx,
			  const struct lgm_pll_clk_data *list,
			  unsigned int nr_clk)
{
	struct clk_hw *hw;
	int i;

	for (i = 0; i < nr_clk; i++, list++) {
		hw = lgm_clk_register_pll(ctx, list);
		if (IS_ERR(hw)) {
			dev_err(ctx->dev, "failed to register pll: %s\n",
				list->name);
			return PTR_ERR(hw);
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}
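As a sanity check on the rate formula in lgm_pll_calc_rate() above, here is a stand-alone user-space sketch of the same arithmetic, using native 64-bit division in place of do_div(); the 40 MHz reference and the mult/div/frac field values are illustrative assumptions, not figures from LGM documentation:

#include <stdint.h>
#include <stdio.h>

/* User-space model of lgm_pll_calc_rate():
 * rate = (prate * mult + (prate * frac) / frac_div) / div
 * All input values below are made-up examples.
 */
static uint64_t pll_calc_rate(uint64_t prate, unsigned int mult,
			      unsigned int div, unsigned int frac,
			      uint64_t frac_div)
{
	return (prate * mult + prate * frac / frac_div) / div;
}

int main(void)
{
	uint64_t prate = 40000000;	/* assumed 40 MHz reference */

	/* integer-only: 40 MHz * 100 / 1 = 4000000000 (4 GHz) */
	printf("%llu\n", (unsigned long long)
	       pll_calc_rate(prate, 100, 1, 0, 1ULL << 24));
	/* with fraction: 40 MHz * (100 + 0x800000/2^24) / 2 = 2010000000 */
	printf("%llu\n", (unsigned long long)
	       pll_calc_rate(prate, 100, 2, 0x800000, 1ULL << 24));
	return 0;
}

The driver itself routes both divisions through do_div() so that 64-by-32 division also works on 32-bit targets, where the compiler cannot emit a plain 64-bit divide.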
drivers/clk/x86/clk-cgu.c (new file, +636)

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/of.h>

#include "clk-cgu.h"

#define GATE_HW_REG_STAT(reg)	((reg) + 0x0)
#define GATE_HW_REG_EN(reg)	((reg) + 0x4)
#define GATE_HW_REG_DIS(reg)	((reg) + 0x8)
#define MAX_DDIV_REG		8
#define MAX_DIVIDER_VAL		64

#define to_lgm_clk_mux(_hw)	container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw)	container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw)	container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw)	container_of(_hw, struct lgm_clk_ddiv, hw)

static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
					     const struct lgm_clk_branch *list)
{
	unsigned long flags;

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return clk_hw_register_fixed_rate(NULL, list->name,
					  list->parent_data[0].name,
					  list->flags, list->mux_flags);
}

static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mux->lock, flags);
	if (mux->flags & MUX_CLK_SW)
		val = mux->reg;
	else
		val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
				      mux->width);
	spin_unlock_irqrestore(&mux->lock, flags);
	return clk_mux_val_to_index(hw, NULL, mux->flags, val);
}

static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 val;

	val = clk_mux_index_to_val(NULL, mux->flags, index);
	spin_lock_irqsave(&mux->lock, flags);
	if (mux->flags & MUX_CLK_SW)
		mux->reg = val;
	else
		lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
				mux->width, val);
	spin_unlock_irqrestore(&mux->lock, flags);

	return 0;
}

static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

static const struct clk_ops lgm_clk_mux_ops = {
	.get_parent = lgm_clk_mux_get_parent,
	.set_parent = lgm_clk_mux_set_parent,
	.determine_rate = lgm_clk_mux_determine_rate,
};

static struct clk_hw *
lgm_clk_register_mux(struct lgm_clk_provider *ctx,
		     const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->mux_flags;
	struct device *dev = ctx->dev;
	u8 shift = list->mux_shift;
	u8 width = list->mux_width;
	struct clk_init_data init = {};
	struct lgm_clk_mux *mux;
	u32 reg = list->mux_off;
	struct clk_hw *hw;
	int ret;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_mux_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = list->num_parents;

	mux->membase = ctx->membase;
	mux->lock = ctx->lock;
	mux->reg = reg;
	mux->shift = shift;
	mux->width = width;
	mux->flags = cflags;
	mux->hw.init = &init;

	hw = &mux->hw;
	ret = clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&mux->lock, flags);
		lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
		spin_unlock_irqrestore(&mux->lock, flags);
	}

	return hw;
}

static unsigned long
lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(&divider->lock, flags);
	val = lgm_get_clk_val(divider->membase, divider->reg,
			      divider->shift, divider->width);
	spin_unlock_irqrestore(&divider->lock, flags);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static long
lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int
lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned long flags;
	int value;

	value = divider_get_val(rate, prate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(&divider->lock, flags);
	lgm_set_clk_val(divider->membase, divider->reg,
			divider->shift, divider->width, value);
	spin_unlock_irqrestore(&divider->lock, flags);

	return 0;
}

static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
{
	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
			div->width_gate, enable);
	spin_unlock_irqrestore(&div->lock, flags);
	return 0;
}

static int lgm_clk_divider_enable(struct clk_hw *hw)
{
	return lgm_clk_divider_enable_disable(hw, 1);
}

static void lgm_clk_divider_disable(struct clk_hw *hw)
{
	lgm_clk_divider_enable_disable(hw, 0);
}

static const struct clk_ops lgm_clk_divider_ops = {
	.recalc_rate = lgm_clk_divider_recalc_rate,
	.round_rate = lgm_clk_divider_round_rate,
	.set_rate = lgm_clk_divider_set_rate,
	.enable = lgm_clk_divider_enable,
	.disable = lgm_clk_divider_disable,
};

static struct clk_hw *
lgm_clk_register_divider(struct lgm_clk_provider *ctx,
			 const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->div_flags;
	struct device *dev = ctx->dev;
	struct lgm_clk_divider *div;
	struct clk_init_data init = {};
	u8 shift = list->div_shift;
	u8 width = list->div_width;
	u8 shift_gate = list->div_shift_gate;
	u8 width_gate = list->div_width_gate;
	u32 reg = list->div_off;
	struct clk_hw *hw;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_divider_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = 1;

	div->membase = ctx->membase;
	div->lock = ctx->lock;
	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->shift_gate = shift_gate;
	div->width_gate = width_gate;
	div->flags = cflags;
	div->table = list->div_table;
	div->hw.init = &init;

	hw = &div->hw;
	ret = clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&div->lock, flags);
		lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
		spin_unlock_irqrestore(&div->lock, flags);
	}

	return hw;
}

static struct clk_hw *
lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list)
{
	unsigned long flags;
	struct clk_hw *hw;

	hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
					  list->parent_data[0].name,
					  list->flags, list->mult, list->div);
	if (IS_ERR(hw))
		return ERR_CAST(hw);

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return hw;
}

static int lgm_clk_gate_enable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_EN(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return 0;
}

static void lgm_clk_gate_disable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_DIS(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);
}

static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned int reg, ret;
	unsigned long flags;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_STAT(gate->reg);
	ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return ret;
}

static const struct clk_ops lgm_clk_gate_ops = {
	.enable = lgm_clk_gate_enable,
	.disable = lgm_clk_gate_disable,
	.is_enabled = lgm_clk_gate_is_enabled,
};

static struct clk_hw *
lgm_clk_register_gate(struct lgm_clk_provider *ctx,
		      const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->gate_flags;
	const char *pname = list->parent_data[0].name;
	struct device *dev = ctx->dev;
	u8 shift = list->gate_shift;
	struct clk_init_data init = {};
	struct lgm_clk_gate *gate;
	u32 reg = list->gate_off;
	struct clk_hw *hw;
	int ret;

	gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_gate_ops;
	init.flags = list->flags;
	init.parent_names = pname ? &pname : NULL;
	init.num_parents = pname ? 1 : 0;

	gate->membase = ctx->membase;
	gate->lock = ctx->lock;
	gate->reg = reg;
	gate->shift = shift;
	gate->flags = cflags;
	gate->hw.init = &init;

	hw = &gate->hw;
	ret = clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&gate->lock, flags);
		lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
		spin_unlock_irqrestore(&gate->lock, flags);
	}

	return hw;
}

int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list,
			      unsigned int nr_clk)
{
	struct clk_hw *hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		switch (list->type) {
		case CLK_TYPE_FIXED:
			hw = lgm_clk_register_fixed(ctx, list);
			break;
		case CLK_TYPE_MUX:
			hw = lgm_clk_register_mux(ctx, list);
			break;
		case CLK_TYPE_DIVIDER:
			hw = lgm_clk_register_divider(ctx, list);
			break;
		case CLK_TYPE_FIXED_FACTOR:
			hw = lgm_clk_register_fixed_factor(ctx, list);
			break;
		case CLK_TYPE_GATE:
			hw = lgm_clk_register_gate(ctx, list);
			break;
		default:
			dev_err(ctx->dev, "invalid clk type\n");
			return -EINVAL;
		}

		if (IS_ERR(hw)) {
			dev_err(ctx->dev,
				"register clk: %s, type: %u failed!\n",
				list->name, list->type);
			return -EIO;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}

static unsigned long
lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned int div0, div1, exdiv;
	unsigned long flags;
	u64 prate;

	spin_lock_irqsave(&ddiv->lock, flags);
	div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift0, ddiv->width0) + 1;
	div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift1, ddiv->width1) + 1;
	exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
				ddiv->shift2, ddiv->width2);
	spin_unlock_irqrestore(&ddiv->lock, flags);

	prate = (u64)parent_rate;
	do_div(prate, div0);
	do_div(prate, div1);

	if (exdiv) {
		do_div(prate, ddiv->div);
		prate *= ddiv->mult;
	}

	return prate;
}

static int lgm_clk_ddiv_enable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 1);
	spin_unlock_irqrestore(&ddiv->lock, flags);
	return 0;
}

static void lgm_clk_ddiv_disable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 0);
	spin_unlock_irqrestore(&ddiv->lock, flags);
}

static int
lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
{
	u32 idx, temp;

	*ddiv1 = 1;
	*ddiv2 = 1;

	if (div > MAX_DIVIDER_VAL)
		div = MAX_DIVIDER_VAL;

	if (div > 1) {
		for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
			temp = DIV_ROUND_UP_ULL((u64)div, idx);
			if (div % idx == 0 && temp <= MAX_DDIV_REG)
				break;
		}

		if (idx > MAX_DDIV_REG)
			return -EINVAL;

		*ddiv1 = temp;
		*ddiv2 = idx;
	}

	return 0;
}

static int
lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
		      unsigned long prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;

	div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);

	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
		div = div * 2;
	}

	if (div <= 0) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
			ddiv1 - 1);

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
			ddiv2 - 1);
	spin_unlock_irqrestore(&ddiv->lock, flags);

	return 0;
}

static long
lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;
	u64 rate64 = rate;

	div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);

	/* if predivide bit is enabled, modify div by factor of 2.5 */
	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = div * 2;
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
	}

	if (div <= 0) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return *prate;
	}

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) {
		if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0) {
			spin_unlock_irqrestore(&ddiv->lock, flags);
			return -EINVAL;
		}
	}

	rate64 = *prate;
	do_div(rate64, ddiv1);
	do_div(rate64, ddiv2);

	/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		rate64 = rate64 * 2;
		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
	}
	spin_unlock_irqrestore(&ddiv->lock, flags);

	return rate64;
}

static const struct clk_ops lgm_clk_ddiv_ops = {
	.recalc_rate = lgm_clk_ddiv_recalc_rate,
	.enable = lgm_clk_ddiv_enable,
	.disable = lgm_clk_ddiv_disable,
	.set_rate = lgm_clk_ddiv_set_rate,
	.round_rate = lgm_clk_ddiv_round_rate,
};

int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
			  const struct lgm_clk_ddiv_data *list,
			  unsigned int nr_clk)
{
	struct device *dev = ctx->dev;
	struct clk_init_data init = {};
	struct lgm_clk_ddiv *ddiv;
	struct clk_hw *hw;
	unsigned int idx;
	int ret;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
		if (!ddiv)
			return -ENOMEM;

		memset(&init, 0, sizeof(init));
		init.name = list->name;
		init.ops = &lgm_clk_ddiv_ops;
		init.flags = list->flags;
		init.parent_data = list->parent_data;
		init.num_parents = 1;

		ddiv->membase = ctx->membase;
		ddiv->lock = ctx->lock;
		ddiv->reg = list->reg;
		ddiv->shift0 = list->shift0;
		ddiv->width0 = list->width0;
		ddiv->shift1 = list->shift1;
		ddiv->width1 = list->width1;
		ddiv->shift_gate = list->shift_gate;
		ddiv->width_gate = list->width_gate;
		ddiv->shift2 = list->ex_shift;
		ddiv->width2 = list->ex_width;
		ddiv->flags = list->div_flags;
		ddiv->mult = 2;
		ddiv->div = 5;
		ddiv->hw.init = &init;

		hw = &ddiv->hw;
		ret = clk_hw_register(dev, hw);
		if (ret) {
			dev_err(dev, "register clk: %s failed!\n", list->name);
			return ret;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}
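The least obvious part of clk-cgu.c is lgm_clk_get_ddiv_val(), which must express one overall divider as the product of two 3-bit hardware fields (each stored as value minus one, so each factor is at most MAX_DDIV_REG = 8). A minimal user-space model of that factorization, handy for eyeballing which dividers are reachable, might look like the sketch below; the clamp to MAX_DIVIDER_VAL mirrors the kernel code, everything else is illustrative:

#include <stdio.h>

#define MAX_DDIV_REG	8
#define MAX_DIVIDER_VAL	64

/* Model of lgm_clk_get_ddiv_val(): split div into d1 * d2 with both
 * factors small enough for a 3-bit "value minus one" register field.
 * Returns 0 on success, -1 if div has no such exact factorization.
 */
static int get_ddiv_val(unsigned int div, unsigned int *d1, unsigned int *d2)
{
	unsigned int idx;

	*d1 = 1;
	*d2 = 1;

	if (div > MAX_DIVIDER_VAL)
		div = MAX_DIVIDER_VAL;

	if (div <= 1)
		return 0;

	for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
		if (div % idx == 0 && div / idx <= MAX_DDIV_REG) {
			*d1 = div / idx;
			*d2 = idx;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int div, d1, d2;

	for (div = 1; div <= MAX_DIVIDER_VAL; div++)
		if (get_ddiv_val(div, &d1, &d2) == 0)
			printf("div %2u = %u x %u\n", div, d1, d2);
	return 0;
}

Dividers with a prime factor above 7 (11, 13, 22, ...) have no exact split, which is why lgm_clk_ddiv_round_rate() retries with div + 1 before giving up.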
drivers/clk/x86/clk-cgu.h (new file, +335)

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright(c) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */

#ifndef __CLK_CGU_H
#define __CLK_CGU_H

#include <linux/io.h>

struct lgm_clk_mux {
	struct clk_hw hw;
	void __iomem *membase;
	unsigned int reg;
	u8 shift;
	u8 width;
	unsigned long flags;
	spinlock_t lock;
};

struct lgm_clk_divider {
	struct clk_hw hw;
	void __iomem *membase;
	unsigned int reg;
	u8 shift;
	u8 width;
	u8 shift_gate;
	u8 width_gate;
	unsigned long flags;
	const struct clk_div_table *table;
	spinlock_t lock;
};

struct lgm_clk_ddiv {
	struct clk_hw hw;
	void __iomem *membase;
	unsigned int reg;
	u8 shift0;
	u8 width0;
	u8 shift1;
	u8 width1;
	u8 shift2;
	u8 width2;
	u8 shift_gate;
	u8 width_gate;
	unsigned int mult;
	unsigned int div;
	unsigned long flags;
	spinlock_t lock;
};

struct lgm_clk_gate {
	struct clk_hw hw;
	void __iomem *membase;
	unsigned int reg;
	u8 shift;
	unsigned long flags;
	spinlock_t lock;
};

enum lgm_clk_type {
	CLK_TYPE_FIXED,
	CLK_TYPE_MUX,
	CLK_TYPE_DIVIDER,
	CLK_TYPE_FIXED_FACTOR,
	CLK_TYPE_GATE,
	CLK_TYPE_NONE,
};

/**
 * struct lgm_clk_provider
 * @membase: IO mem base address for CGU.
 * @np: device node
 * @dev: device
 * @clk_data: array of hw clocks and clk number.
 */
struct lgm_clk_provider {
	void __iomem *membase;
	struct device_node *np;
	struct device *dev;
	struct clk_hw_onecell_data clk_data;
	spinlock_t lock;
};

enum pll_type {
	TYPE_ROPLL,
	TYPE_LJPLL,
	TYPE_NONE,
};

struct lgm_clk_pll {
	struct clk_hw hw;
	void __iomem *membase;
	unsigned int reg;
	unsigned long flags;
	enum pll_type type;
	spinlock_t lock;
};

/**
 * struct lgm_pll_clk_data
 * @id: platform specific id of the clock.
 * @name: name of this pll clock.
 * @parent_data: parent clock data.
 * @num_parents: number of parents.
 * @flags: optional flags for basic clock.
 * @type: platform type of pll.
 * @reg: offset of the register.
 */
struct lgm_pll_clk_data {
	unsigned int id;
	const char *name;
	const struct clk_parent_data *parent_data;
	u8 num_parents;
	unsigned long flags;
	enum pll_type type;
	int reg;
};

#define LGM_PLL(_id, _name, _pdata, _flags,		\
		_reg, _type)				\
	{						\
		.id = _id,				\
		.name = _name,				\
		.parent_data = _pdata,			\
		.num_parents = ARRAY_SIZE(_pdata),	\
		.flags = _flags,			\
		.reg = _reg,				\
		.type = _type,				\
	}

struct lgm_clk_ddiv_data {
	unsigned int id;
	const char *name;
	const struct clk_parent_data *parent_data;
	u8 flags;
	unsigned long div_flags;
	unsigned int reg;
	u8 shift0;
	u8 width0;
	u8 shift1;
	u8 width1;
	u8 shift_gate;
	u8 width_gate;
	u8 ex_shift;
	u8 ex_width;
};

#define LGM_DDIV(_id, _name, _pname, _flags, _reg,	\
		 _shft0, _wdth0, _shft1, _wdth1,	\
		 _shft_gate, _wdth_gate, _xshft, _df)	\
	{						\
		.id = _id,				\
		.name = _name,				\
		.parent_data = &(const struct clk_parent_data){	\
			.fw_name = _pname,		\
			.name = _pname,			\
		},					\
		.flags = _flags,			\
		.reg = _reg,				\
		.shift0 = _shft0,			\
		.width0 = _wdth0,			\
		.shift1 = _shft1,			\
		.width1 = _wdth1,			\
		.shift_gate = _shft_gate,		\
		.width_gate = _wdth_gate,		\
		.ex_shift = _xshft,			\
		.ex_width = 1,				\
		.div_flags = _df,			\
	}

struct lgm_clk_branch {
	unsigned int id;
	enum lgm_clk_type type;
	const char *name;
	const struct clk_parent_data *parent_data;
	u8 num_parents;
	unsigned long flags;
	unsigned int mux_off;
	u8 mux_shift;
	u8 mux_width;
	unsigned long mux_flags;
	unsigned int mux_val;
	unsigned int div_off;
	u8 div_shift;
	u8 div_width;
	u8 div_shift_gate;
	u8 div_width_gate;
	unsigned long div_flags;
	unsigned int div_val;
	const struct clk_div_table *div_table;
	unsigned int gate_off;
	u8 gate_shift;
	unsigned long gate_flags;
	unsigned int gate_val;
	unsigned int mult;
	unsigned int div;
};

/* clock flags definition */
#define CLOCK_FLAG_VAL_INIT	BIT(16)
#define MUX_CLK_SW		BIT(17)

#define LGM_MUX(_id, _name, _pdata, _f, _reg,		\
		_shift, _width, _cf, _v)		\
	{						\
		.id = _id,				\
		.type = CLK_TYPE_MUX,			\
		.name = _name,				\
		.parent_data = _pdata,			\
		.num_parents = ARRAY_SIZE(_pdata),	\
		.flags = _f,				\
		.mux_off = _reg,			\
		.mux_shift = _shift,			\
		.mux_width = _width,			\
		.mux_flags = _cf,			\
		.mux_val = _v,				\
	}

#define LGM_DIV(_id, _name, _pname, _f, _reg, _shift, _width,	\
		_shift_gate, _width_gate, _cf, _v, _dtable)	\
	{						\
		.id = _id,				\
		.type = CLK_TYPE_DIVIDER,		\
		.name = _name,				\
		.parent_data = &(const struct clk_parent_data){	\
			.fw_name = _pname,		\
			.name = _pname,			\
		},					\
		.num_parents = 1,			\
		.flags = _f,				\
		.div_off = _reg,			\
		.div_shift = _shift,			\
		.div_width = _width,			\
		.div_shift_gate = _shift_gate,		\
		.div_width_gate = _width_gate,		\
		.div_flags = _cf,			\
		.div_val = _v,				\
		.div_table = _dtable,			\
	}

#define LGM_GATE(_id, _name, _pname, _f, _reg,		\
		 _shift, _cf, _v)			\
	{						\
		.id = _id,				\
		.type = CLK_TYPE_GATE,			\
		.name = _name,				\
		.parent_data = &(const struct clk_parent_data){	\
			.fw_name = _pname,		\
			.name = _pname,			\
		},					\
		.num_parents = !_pname ? 0 : 1,		\
		.flags = _f,				\
		.gate_off = _reg,			\
		.gate_shift = _shift,			\
		.gate_flags = _cf,			\
		.gate_val = _v,				\
	}

#define LGM_FIXED(_id, _name, _pname, _f, _reg,		\
		  _shift, _width, _cf, _freq, _v)	\
	{						\
		.id = _id,				\
		.type = CLK_TYPE_FIXED,			\
		.name = _name,				\
		.parent_data = &(const struct clk_parent_data){	\
			.fw_name = _pname,		\
			.name = _pname,			\
		},					\
		.num_parents = !_pname ? 0 : 1,		\
		.flags = _f,				\
		.div_off = _reg,			\
		.div_shift = _shift,			\
		.div_width = _width,			\
		.div_flags = _cf,			\
		.div_val = _v,				\
		.mux_flags = _freq,			\
	}

#define LGM_FIXED_FACTOR(_id, _name, _pname, _f, _reg,	\
			 _shift, _width, _cf, _v, _m, _d)	\
	{						\
		.id = _id,				\
		.type = CLK_TYPE_FIXED_FACTOR,		\
		.name = _name,				\
		.parent_data = &(const struct clk_parent_data){	\
			.fw_name = _pname,		\
			.name = _pname,			\
		},					\
		.num_parents = 1,			\
		.flags = _f,				\
		.div_off = _reg,			\
		.div_shift = _shift,			\
		.div_width = _width,			\
		.div_flags = _cf,			\
		.div_val = _v,				\
		.mult = _m,				\
		.div = _d,				\
	}

static inline void lgm_set_clk_val(void __iomem *membase, u32 reg,
				   u8 shift, u8 width, u32 set_val)
{
	u32 mask = (GENMASK(width - 1, 0) << shift);
	u32 regval;

	regval = readl(membase + reg);
	regval = (regval & ~mask) | ((set_val << shift) & mask);
	writel(regval, membase + reg);
}

static inline u32 lgm_get_clk_val(void __iomem *membase, u32 reg,
				  u8 shift, u8 width)
{
	u32 mask = (GENMASK(width - 1, 0) << shift);
	u32 val;

	val = readl(membase + reg);
	val = (val & mask) >> shift;

	return val;
}

int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list,
			      unsigned int nr_clk);
int lgm_clk_register_plls(struct lgm_clk_provider *ctx,
			  const struct lgm_pll_clk_data *list,
			  unsigned int nr_clk);
int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
			  const struct lgm_clk_ddiv_data *list,
			  unsigned int nr_clk);
#endif /* __CLK_CGU_H */
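The lgm_set_clk_val()/lgm_get_clk_val() inlines above are the only register accessors in the driver; every mux, gate, divider and PLL field goes through the same shift/width read-modify-write. The following self-contained sketch shows the same masking logic on a plain word, with a uint32_t standing in for the __iomem register; the field positions match what lgm_pll_recalc_rate() reads (mult at shift 0, width 12; div at shift 18, width 6), while the values written are arbitrary examples:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg;	/* stands in for one 32-bit CGU register */

/* same read-modify-write as lgm_set_clk_val(), minus readl()/writel() */
static void set_val(uint8_t shift, uint8_t width, uint32_t val)
{
	uint32_t mask = ((1U << width) - 1) << shift;

	reg = (reg & ~mask) | ((val << shift) & mask);
}

/* same field extraction as lgm_get_clk_val() */
static uint32_t get_val(uint8_t shift, uint8_t width)
{
	return (reg >> shift) & ((1U << width) - 1);
}

int main(void)
{
	set_val(0, 12, 0x123);	/* 12-bit multiplier field, example value */
	set_val(18, 6, 0x2a);	/* 6-bit divider field, example value */
	printf("reg=0x%08x mult=0x%x div=0x%x\n",
	       reg, get_val(0, 12), get_val(18, 6));
	return 0;	/* prints reg=0x00a80123 mult=0x123 div=0x2a */
}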
drivers/clk/x86/clk-lgm.c (new file, +475)

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/intel,lgm-clk.h>
#include "clk-cgu.h"

#define PLL_DIV_WIDTH		4
#define PLL_DDIV_WIDTH		3

/* Gate0 clock shift */
#define G_C55_SHIFT		7
#define G_QSPI_SHIFT		9
#define G_EIP197_SHIFT		11
#define G_VAULT130_SHIFT	12
#define G_TOE_SHIFT		13
#define G_SDXC_SHIFT		14
#define G_EMMC_SHIFT		15
#define G_SPIDBG_SHIFT		17
#define G_DMA3_SHIFT		28

/* Gate1 clock shift */
#define G_DMA0_SHIFT		0
#define G_LEDC0_SHIFT		1
#define G_LEDC1_SHIFT		2
#define G_I2S0_SHIFT		3
#define G_I2S1_SHIFT		4
#define G_EBU_SHIFT		5
#define G_PWM_SHIFT		6
#define G_I2C0_SHIFT		7
#define G_I2C1_SHIFT		8
#define G_I2C2_SHIFT		9
#define G_I2C3_SHIFT		10

#define G_SSC0_SHIFT		12
#define G_SSC1_SHIFT		13
#define G_SSC2_SHIFT		14
#define G_SSC3_SHIFT		15

#define G_GPTC0_SHIFT		17
#define G_GPTC1_SHIFT		18
#define G_GPTC2_SHIFT		19
#define G_GPTC3_SHIFT		20

#define G_ASC0_SHIFT		22
#define G_ASC1_SHIFT		23
#define G_ASC2_SHIFT		24
#define G_ASC3_SHIFT		25

#define G_PCM0_SHIFT		27
#define G_PCM1_SHIFT		28
#define G_PCM2_SHIFT		29

/* Gate2 clock shift */
#define G_PCIE10_SHIFT		1
#define G_PCIE11_SHIFT		2
#define G_PCIE30_SHIFT		3
#define G_PCIE31_SHIFT		4
#define G_PCIE20_SHIFT		5
#define G_PCIE21_SHIFT		6
#define G_PCIE40_SHIFT		7
#define G_PCIE41_SHIFT		8

#define G_XPCS0_SHIFT		10
#define G_XPCS1_SHIFT		11
#define G_XPCS2_SHIFT		12
#define G_XPCS3_SHIFT		13
#define G_SATA0_SHIFT		14
#define G_SATA1_SHIFT		15
#define G_SATA2_SHIFT		16
#define G_SATA3_SHIFT		17

/* Gate3 clock shift */
#define G_ARCEM4_SHIFT		0
#define G_IDMAR1_SHIFT		2
#define G_IDMAT0_SHIFT		3
#define G_IDMAT1_SHIFT		4
#define G_IDMAT2_SHIFT		5

#define G_PPV4_SHIFT		8
#define G_GSWIPO_SHIFT		9
#define G_CQEM_SHIFT		10
#define G_XPCS5_SHIFT		14
#define G_USB1_SHIFT		25
#define G_USB2_SHIFT		26

/* Register definition */
#define CGU_PLL0CZ_CFG0		0x000
#define CGU_PLL0CM0_CFG0	0x020
#define CGU_PLL0CM1_CFG0	0x040
#define CGU_PLL0B_CFG0		0x060
#define CGU_PLL1_CFG0		0x080
#define CGU_PLL2_CFG0		0x0A0
#define CGU_PLLPP_CFG0		0x0C0
#define CGU_LJPLL3_CFG0		0x0E0
#define CGU_LJPLL4_CFG0		0x100
#define CGU_C55_PCMCR		0x18C
#define CGU_PCMCR		0x190
#define CGU_IF_CLK1		0x1A0
#define CGU_IF_CLK2		0x1A4
#define CGU_GATE0		0x300
#define CGU_GATE1		0x310
#define CGU_GATE2		0x320
#define CGU_GATE3		0x310

#define PLL_DIV(x)		((x) + 0x04)
#define PLL_SSC(x)		((x) + 0x10)

#define CLK_NR_CLKS		(LGM_GCLK_USB2 + 1)

/*
 * The table below defines the pairs of regval and effective divider.
 * It's more efficient to provide an explicit table due to the
 * non-linear relation between the values.
 */
static const struct clk_div_table pll_div[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 2 },
	{ .val = 2, .div = 3 },
	{ .val = 3, .div = 4 },
	{ .val = 4, .div = 5 },
	{ .val = 5, .div = 6 },
	{ .val = 6, .div = 8 },
	{ .val = 7, .div = 10 },
	{ .val = 8, .div = 12 },
	{ .val = 9, .div = 16 },
	{ .val = 10, .div = 20 },
	{ .val = 11, .div = 24 },
	{ .val = 12, .div = 32 },
	{ .val = 13, .div = 40 },
	{ .val = 14, .div = 48 },
	{ .val = 15, .div = 64 },
	{}
};

static const struct clk_div_table dcl_div[] = {
	{ .val = 0, .div = 6 },
	{ .val = 1, .div = 12 },
	{ .val = 2, .div = 24 },
	{ .val = 3, .div = 32 },
	{ .val = 4, .div = 48 },
	{ .val = 5, .div = 96 },
	{}
};

static const struct clk_parent_data pll_p[] = {
	{ .fw_name = "osc", .name = "osc" },
};
static const struct clk_parent_data pllcm_p[] = {
	{ .fw_name = "cpu_cm", .name = "cpu_cm" },
};
static const struct clk_parent_data emmc_p[] = {
	{ .fw_name = "emmc4", .name = "emmc4" },
	{ .fw_name = "noc4", .name = "noc4" },
};
static const struct clk_parent_data sdxc_p[] = {
	{ .fw_name = "sdxc3", .name = "sdxc3" },
	{ .fw_name = "sdxc2", .name = "sdxc2" },
};
static const struct clk_parent_data pcm_p[] = {
	{ .fw_name = "v_docsis", .name = "v_docsis" },
	{ .fw_name = "dcl", .name = "dcl" },
};
static const struct clk_parent_data cbphy_p[] = {
	{ .fw_name = "dd_serdes", .name = "dd_serdes" },
	{ .fw_name = "dd_pcie", .name = "dd_pcie" },
};

static const struct lgm_pll_clk_data lgm_pll_clks[] = {
	LGM_PLL(LGM_CLK_PLL0CZ, "pll0cz", pll_p, CLK_IGNORE_UNUSED,
		CGU_PLL0CZ_CFG0, TYPE_ROPLL),
	LGM_PLL(LGM_CLK_PLL0CM0, "pll0cm0", pllcm_p, CLK_IGNORE_UNUSED,
		CGU_PLL0CM0_CFG0, TYPE_ROPLL),
	LGM_PLL(LGM_CLK_PLL0CM1, "pll0cm1", pllcm_p, CLK_IGNORE_UNUSED,
		CGU_PLL0CM1_CFG0, TYPE_ROPLL),
	LGM_PLL(LGM_CLK_PLL0B, "pll0b", pll_p, CLK_IGNORE_UNUSED,
		CGU_PLL0B_CFG0, TYPE_ROPLL),
	LGM_PLL(LGM_CLK_PLL1, "pll1", pll_p, 0, CGU_PLL1_CFG0, TYPE_ROPLL),
	LGM_PLL(LGM_CLK_PLL2, "pll2", pll_p, CLK_IGNORE_UNUSED,
		CGU_PLL2_CFG0, TYPE_ROPLL),
	LGM_PLL(LGM_CLK_PLLPP, "pllpp", pll_p, 0, CGU_PLLPP_CFG0, TYPE_ROPLL),
	LGM_PLL(LGM_CLK_LJPLL3, "ljpll3", pll_p, 0, CGU_LJPLL3_CFG0, TYPE_LJPLL),
	LGM_PLL(LGM_CLK_LJPLL4, "ljpll4", pll_p, 0, CGU_LJPLL4_CFG0, TYPE_LJPLL),
};

static const struct lgm_clk_branch lgm_branch_clks[] = {
	LGM_DIV(LGM_CLK_PP_HW, "pp_hw", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
		0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
	LGM_DIV(LGM_CLK_PP_UC, "pp_uc", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
		4, PLL_DIV_WIDTH, 25, 1, 0, 0, pll_div),
	LGM_DIV(LGM_CLK_PP_FXD, "pp_fxd", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
		8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),
	LGM_DIV(LGM_CLK_PP_TBM, "pp_tbm", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
		12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div),
	LGM_DIV(LGM_CLK_DDR, "ddr", "pll2", CLK_IGNORE_UNUSED,
		PLL_DIV(CGU_PLL2_CFG0), 0, PLL_DIV_WIDTH, 24, 1, 0, 0,
		pll_div),
	LGM_DIV(LGM_CLK_CM, "cpu_cm", "pll0cz", 0, PLL_DIV(CGU_PLL0CZ_CFG0),
		0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),

	LGM_DIV(LGM_CLK_IC, "cpu_ic", "pll0cz", CLK_IGNORE_UNUSED,
		PLL_DIV(CGU_PLL0CZ_CFG0), 4, PLL_DIV_WIDTH, 25,
		1, 0, 0, pll_div),

	LGM_DIV(LGM_CLK_SDXC3, "sdxc3", "pll0cz", 0, PLL_DIV(CGU_PLL0CZ_CFG0),
		8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),

	LGM_DIV(LGM_CLK_CPU0, "cm0", "pll0cm0",
		CLK_IGNORE_UNUSED, PLL_DIV(CGU_PLL0CM0_CFG0),
		0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
	LGM_DIV(LGM_CLK_CPU1, "cm1", "pll0cm1",
		CLK_IGNORE_UNUSED, PLL_DIV(CGU_PLL0CM1_CFG0),
		0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),

	/*
	 * Marking ngi_clk (next generation interconnect) and noc_clk
	 * (network on chip peripheral clk) as critical clocks because
	 * these are shared parent clock sources for many different
	 * peripherals.
	 */
	LGM_DIV(LGM_CLK_NGI, "ngi", "pll0b",
		(CLK_IGNORE_UNUSED|CLK_IS_CRITICAL), PLL_DIV(CGU_PLL0B_CFG0),
		0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
	LGM_DIV(LGM_CLK_NOC4, "noc4", "pll0b",
		(CLK_IGNORE_UNUSED|CLK_IS_CRITICAL), PLL_DIV(CGU_PLL0B_CFG0),
		4, PLL_DIV_WIDTH, 25, 1, 0, 0, pll_div),
	LGM_DIV(LGM_CLK_SW, "switch", "pll0b", 0, PLL_DIV(CGU_PLL0B_CFG0),
		8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),
	LGM_DIV(LGM_CLK_QSPI, "qspi", "pll0b", 0, PLL_DIV(CGU_PLL0B_CFG0),
		12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div),
	LGM_DIV(LGM_CLK_CT, "v_ct", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0),
		0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
	LGM_DIV(LGM_CLK_DSP, "v_dsp", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0),
		8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),
	LGM_DIV(LGM_CLK_VIF, "v_ifclk", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0),
		12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div),

	LGM_FIXED_FACTOR(LGM_CLK_EMMC4, "emmc4", "sdxc3", 0, 0,
			 0, 0, 0, 0, 1, 4),
	LGM_FIXED_FACTOR(LGM_CLK_SDXC2, "sdxc2", "noc4", 0, 0,
			 0, 0, 0, 0, 1, 4),
	LGM_MUX(LGM_CLK_EMMC, "emmc", emmc_p, 0, CGU_IF_CLK1,
		0, 1, CLK_MUX_ROUND_CLOSEST, 0),
	LGM_MUX(LGM_CLK_SDXC, "sdxc", sdxc_p, 0, CGU_IF_CLK1,
		1, 1, CLK_MUX_ROUND_CLOSEST, 0),
	LGM_FIXED(LGM_CLK_OSC, "osc", NULL, 0, 0, 0, 0, 0, 40000000, 0),
	LGM_FIXED(LGM_CLK_SLIC, "slic", NULL, 0, CGU_IF_CLK1,
		  8, 2, CLOCK_FLAG_VAL_INIT, 8192000, 2),
	LGM_FIXED(LGM_CLK_DOCSIS, "v_docsis", NULL, 0, 0, 0, 0, 0, 16000000, 0),
	LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", 0, CGU_PCMCR,
		25, 3, 0, 0, 0, 0, dcl_div),
	LGM_MUX(LGM_CLK_PCM, "pcm", pcm_p, 0, CGU_C55_PCMCR,
		0, 1, CLK_MUX_ROUND_CLOSEST, 0),
	LGM_FIXED_FACTOR(LGM_CLK_DDR_PHY, "ddr_phy", "ddr",
			 CLK_IGNORE_UNUSED, 0,
			 0, 0, 0, 0, 2, 1),
	LGM_FIXED_FACTOR(LGM_CLK_PONDEF, "pondef", "dd_pool",
			 CLK_SET_RATE_PARENT, 0, 0, 0, 0, 0, 1, 2),
	LGM_MUX(LGM_CLK_CBPHY0, "cbphy0", cbphy_p, 0, 0,
		0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),
	LGM_MUX(LGM_CLK_CBPHY1, "cbphy1", cbphy_p, 0, 0,
		0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),
	LGM_MUX(LGM_CLK_CBPHY2, "cbphy2", cbphy_p, 0, 0,
		0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),
	LGM_MUX(LGM_CLK_CBPHY3, "cbphy3", cbphy_p, 0, 0,
		0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),

	LGM_GATE(LGM_GCLK_C55, "g_c55", NULL, 0, CGU_GATE0,
		 G_C55_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_QSPI, "g_qspi", "qspi", 0, CGU_GATE0,
		 G_QSPI_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_EIP197, "g_eip197", NULL, 0, CGU_GATE0,
		 G_EIP197_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_VAULT, "g_vault130", NULL, 0, CGU_GATE0,
		 G_VAULT130_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_TOE, "g_toe", NULL, 0, CGU_GATE0,
		 G_TOE_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_SDXC, "g_sdxc", "sdxc", 0, CGU_GATE0,
		 G_SDXC_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_EMMC, "g_emmc", "emmc", 0, CGU_GATE0,
		 G_EMMC_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_SPI_DBG, "g_spidbg", NULL, 0, CGU_GATE0,
		 G_SPIDBG_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_DMA3, "g_dma3", NULL, 0, CGU_GATE0,
		 G_DMA3_SHIFT, 0, 0),

	LGM_GATE(LGM_GCLK_DMA0, "g_dma0", NULL, 0, CGU_GATE1,
		 G_DMA0_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_LEDC0, "g_ledc0", NULL, 0, CGU_GATE1,
		 G_LEDC0_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_LEDC1, "g_ledc1", NULL, 0, CGU_GATE1,
		 G_LEDC1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_I2S0, "g_i2s0", NULL, 0, CGU_GATE1,
		 G_I2S0_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_I2S1, "g_i2s1", NULL, 0, CGU_GATE1,
		 G_I2S1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_EBU, "g_ebu", NULL, 0, CGU_GATE1,
		 G_EBU_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PWM, "g_pwm", NULL, 0, CGU_GATE1,
		 G_PWM_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_I2C0, "g_i2c0", NULL, 0, CGU_GATE1,
		 G_I2C0_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_I2C1, "g_i2c1", NULL, 0, CGU_GATE1,
		 G_I2C1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_I2C2, "g_i2c2", NULL, 0, CGU_GATE1,
		 G_I2C2_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_I2C3, "g_i2c3", NULL, 0, CGU_GATE1,
		 G_I2C3_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_SSC0, "g_ssc0", "noc4", 0, CGU_GATE1,
		 G_SSC0_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_SSC1, "g_ssc1", "noc4", 0, CGU_GATE1,
		 G_SSC1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_SSC2, "g_ssc2", "noc4", 0, CGU_GATE1,
		 G_SSC2_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_SSC3, "g_ssc3", "noc4", 0, CGU_GATE1,
		 G_SSC3_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_GPTC0, "g_gptc0", "noc4", 0, CGU_GATE1,
		 G_GPTC0_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_GPTC1, "g_gptc1", "noc4", 0, CGU_GATE1,
		 G_GPTC1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_GPTC2, "g_gptc2", "noc4", 0, CGU_GATE1,
		 G_GPTC2_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_GPTC3, "g_gptc3", "osc", 0, CGU_GATE1,
		 G_GPTC3_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_ASC0, "g_asc0", "noc4", 0, CGU_GATE1,
		 G_ASC0_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_ASC1, "g_asc1", "noc4", 0, CGU_GATE1,
		 G_ASC1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_ASC2, "g_asc2", "noc4", 0, CGU_GATE1,
		 G_ASC2_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_ASC3, "g_asc3", "osc", 0, CGU_GATE1,
		 G_ASC3_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PCM0, "g_pcm0", NULL, 0, CGU_GATE1,
		 G_PCM0_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PCM1, "g_pcm1", NULL, 0, CGU_GATE1,
		 G_PCM1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PCM2, "g_pcm2", NULL, 0, CGU_GATE1,
		 G_PCM2_SHIFT, 0, 0),

	LGM_GATE(LGM_GCLK_PCIE10, "g_pcie10", NULL, 0, CGU_GATE2,
		 G_PCIE10_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PCIE11, "g_pcie11", NULL, 0, CGU_GATE2,
		 G_PCIE11_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PCIE30, "g_pcie30", NULL, 0, CGU_GATE2,
		 G_PCIE30_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PCIE31, "g_pcie31", NULL, 0, CGU_GATE2,
		 G_PCIE31_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PCIE20, "g_pcie20", NULL, 0, CGU_GATE2,
		 G_PCIE20_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PCIE21, "g_pcie21", NULL, 0, CGU_GATE2,
		 G_PCIE21_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PCIE40, "g_pcie40", NULL, 0, CGU_GATE2,
		 G_PCIE40_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PCIE41, "g_pcie41", NULL, 0, CGU_GATE2,
		 G_PCIE41_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_XPCS0, "g_xpcs0", NULL, 0, CGU_GATE2,
		 G_XPCS0_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_XPCS1, "g_xpcs1", NULL, 0, CGU_GATE2,
		 G_XPCS1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_XPCS2, "g_xpcs2", NULL, 0, CGU_GATE2,
		 G_XPCS2_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_XPCS3, "g_xpcs3", NULL, 0, CGU_GATE2,
		 G_XPCS3_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_SATA0, "g_sata0", NULL, 0, CGU_GATE2,
		 G_SATA0_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_SATA1, "g_sata1", NULL, 0, CGU_GATE2,
		 G_SATA1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_SATA2, "g_sata2", NULL, 0, CGU_GATE2,
		 G_SATA2_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_SATA3, "g_sata3", NULL, 0, CGU_GATE2,
		 G_SATA3_SHIFT, 0, 0),

	LGM_GATE(LGM_GCLK_ARCEM4, "g_arcem4", NULL, 0, CGU_GATE3,
		 G_ARCEM4_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_IDMAR1, "g_idmar1", NULL, 0, CGU_GATE3,
		 G_IDMAR1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_IDMAT0, "g_idmat0", NULL, 0, CGU_GATE3,
		 G_IDMAT0_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_IDMAT1, "g_idmat1", NULL, 0, CGU_GATE3,
		 G_IDMAT1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_IDMAT2, "g_idmat2", NULL, 0, CGU_GATE3,
		 G_IDMAT2_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_PPV4, "g_ppv4", NULL, 0, CGU_GATE3,
		 G_PPV4_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_GSWIPO, "g_gswipo", "switch", 0, CGU_GATE3,
		 G_GSWIPO_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_CQEM, "g_cqem", "switch", 0, CGU_GATE3,
		 G_CQEM_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_XPCS5, "g_xpcs5", NULL, 0, CGU_GATE3,
		 G_XPCS5_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_USB1, "g_usb1", NULL, 0, CGU_GATE3,
		 G_USB1_SHIFT, 0, 0),
	LGM_GATE(LGM_GCLK_USB2, "g_usb2", NULL, 0, CGU_GATE3,
		 G_USB2_SHIFT, 0, 0),
};

static const struct lgm_clk_ddiv_data lgm_ddiv_clks[] = {
	LGM_DDIV(LGM_CLK_CML, "dd_cml", "ljpll3", 0,
		 PLL_DIV(CGU_LJPLL3_CFG0), 0, PLL_DDIV_WIDTH,
		 3, PLL_DDIV_WIDTH, 24, 1, 29, 0),
	LGM_DDIV(LGM_CLK_SERDES, "dd_serdes", "ljpll3", 0,
		 PLL_DIV(CGU_LJPLL3_CFG0), 6, PLL_DDIV_WIDTH,
		 9, PLL_DDIV_WIDTH, 25, 1, 28, 0),
	LGM_DDIV(LGM_CLK_POOL, "dd_pool", "ljpll3", 0,
		 PLL_DIV(CGU_LJPLL3_CFG0), 12, PLL_DDIV_WIDTH,
		 15, PLL_DDIV_WIDTH, 26, 1, 28, 0),
	LGM_DDIV(LGM_CLK_PTP, "dd_ptp", "ljpll3", 0,
		 PLL_DIV(CGU_LJPLL3_CFG0), 18, PLL_DDIV_WIDTH,
		 21, PLL_DDIV_WIDTH, 27, 1, 28, 0),
	LGM_DDIV(LGM_CLK_PCIE, "dd_pcie", "ljpll4", 0,
		 PLL_DIV(CGU_LJPLL4_CFG0), 0, PLL_DDIV_WIDTH,
		 3, PLL_DDIV_WIDTH, 24, 1, 29, 0),
};

static int lgm_cgu_probe(struct platform_device *pdev)
{
	struct lgm_clk_provider *ctx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;

	ctx = devm_kzalloc(dev, struct_size(ctx, clk_data.hws, CLK_NR_CLKS),
			   GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->clk_data.num = CLK_NR_CLKS;

	ctx->membase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctx->membase))
		return PTR_ERR(ctx->membase);

	ctx->np = np;
	ctx->dev = dev;
	spin_lock_init(&ctx->lock);

	ret = lgm_clk_register_plls(ctx, lgm_pll_clks,
				    ARRAY_SIZE(lgm_pll_clks));
	if (ret)
		return ret;

	ret = lgm_clk_register_branches(ctx, lgm_branch_clks,
					ARRAY_SIZE(lgm_branch_clks));
	if (ret)
		return ret;

	ret = lgm_clk_register_ddiv(ctx, lgm_ddiv_clks,
				    ARRAY_SIZE(lgm_ddiv_clks));
	if (ret)
		return ret;

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   &ctx->clk_data);
}

static const struct of_device_id of_lgm_cgu_match[] = {
	{ .compatible = "intel,cgu-lgm" },
	{}
};

static struct platform_driver lgm_cgu_driver = {
	.probe = lgm_cgu_probe,
	.driver = {
		.name = "cgu-lgm",
		.of_match_table = of_lgm_cgu_match,
	},
};
builtin_platform_driver(lgm_cgu_driver);
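For context on how these clocks get consumed: a peripheral driver on LGM would reference the CGU node by phandle plus an index from dt-bindings/clock/intel,lgm-clk.h, which of_clk_hw_onecell_get() resolves into ctx->clk_data.hws[]. A hypothetical consumer probe, sketched with the generic clk consumer API (the device and the clock choice are invented for illustration, not part of this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical LGM peripheral probe. Its DT node would carry
 * something like: clocks = <&cgu LGM_GCLK_I2C0>;
 */
static int lgm_peripheral_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);	/* first entry in "clocks" */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* ends up in lgm_clk_gate_enable(), setting a GATE_HW_REG_EN bit */
	return clk_prepare_enable(clk);
}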