
clk: mxl: Remove redundant spinlocks

Patch 1/4 of this series switches from direct readl/writel based
register access to regmap based register access. Instead of using
direct readl/writel, regmap APIs are used to read, write and
read-modify-write the clk registers. The regmap API already uses its
own spinlock to serialize register accesses across multiple cores,
which makes the additional driver spinlocks redundant.

Hence, remove the redundant spinlocks from the driver in this patch 2/4.
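
For illustration only (not part of the patch, and the helper names below
are made up), a minimal sketch of the two access patterns: with regmap,
the read-modify-write is already serialized by regmap's internal lock
(a spinlock for fast_io maps such as MMIO), so a second, driver-held
spinlock around the call adds nothing.

#include <linux/io.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>

/* Old pattern: the driver takes its own lock around a readl/writel RMW. */
static void clk_rmw_locked(void __iomem *base, spinlock_t *lock,
			   u32 reg, u32 mask, u32 val)
{
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(lock, flags);
	tmp = readl(base + reg);
	tmp = (tmp & ~mask) | (val & mask);
	writel(tmp, base + reg);
	spin_unlock_irqrestore(lock, flags);
}

/*
 * New pattern: regmap_update_bits() performs the read-modify-write under
 * regmap's own lock, so no driver lock is needed around it.
 */
static void clk_rmw_regmap(struct regmap *map, u32 reg, u32 mask, u32 val)
{
	regmap_update_bits(map, reg, mask, val);
}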

Reviewed-by: Yi xin Zhu <yzhu@maxlinear.com>
Signed-off-by: Rahul Tanwar <rtanwar@maxlinear.com>
Link: https://lore.kernel.org/r/a8a02c8773b88924503a9fdaacd37dd2e6488bf3.1665642720.git.rtanwar@maxlinear.com
Signed-off-by: Stephen Boyd <sboyd@kernel.org>

Authored by Rahul Tanwar, committed by Stephen Boyd
eaabee88 03617731

+9 -91
drivers/clk/x86/clk-cgu-pll.c  -13
···
 {
 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
 	unsigned int div, mult, frac;
-	unsigned long flags;

-	spin_lock_irqsave(&pll->lock, flags);
 	mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
 	div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
 	frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
-	spin_unlock_irqrestore(&pll->lock, flags);

 	if (pll->type == TYPE_LJPLL)
 		div *= 4;
···
 static int lgm_pll_is_enabled(struct clk_hw *hw)
 {
 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
-	unsigned long flags;
 	unsigned int ret;

-	spin_lock_irqsave(&pll->lock, flags);
 	ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
-	spin_unlock_irqrestore(&pll->lock, flags);

 	return ret;
 }
···
 static int lgm_pll_enable(struct clk_hw *hw)
 {
 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
-	unsigned long flags;
 	u32 val;
 	int ret;

-	spin_lock_irqsave(&pll->lock, flags);
 	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
 	ret = regmap_read_poll_timeout_atomic(pll->membase, pll->reg,
 					      val, (val & 0x1), 1, 100);

-	spin_unlock_irqrestore(&pll->lock, flags);

 	return ret;
 }
···
 static void lgm_pll_disable(struct clk_hw *hw)
 {
 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
-	unsigned long flags;

-	spin_lock_irqsave(&pll->lock, flags);
 	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
-	spin_unlock_irqrestore(&pll->lock, flags);
 }

 static const struct clk_ops lgm_pll_ops = {
···
 		return ERR_PTR(-ENOMEM);

 	pll->membase = ctx->membase;
-	pll->lock = ctx->lock;
 	pll->reg = list->reg;
 	pll->flags = list->flags;
 	pll->type = list->type;
drivers/clk/x86/clk-cgu.c  +9 -71
···
 static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
 					     const struct lgm_clk_branch *list)
 {
-	unsigned long flags;

-	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
-		spin_lock_irqsave(&ctx->lock, flags);
+	if (list->div_flags & CLOCK_FLAG_VAL_INIT)
 		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
 				list->div_width, list->div_val);
-		spin_unlock_irqrestore(&ctx->lock, flags);
-	}

 	return clk_hw_register_fixed_rate(NULL, list->name,
 					  list->parent_data[0].name,
···
 static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
 {
 	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
-	unsigned long flags;
 	u32 val;

-	spin_lock_irqsave(&mux->lock, flags);
 	if (mux->flags & MUX_CLK_SW)
 		val = mux->reg;
 	else
 		val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
 				      mux->width);
-	spin_unlock_irqrestore(&mux->lock, flags);
 	return clk_mux_val_to_index(hw, NULL, mux->flags, val);
 }

 static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
 {
 	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
-	unsigned long flags;
 	u32 val;

 	val = clk_mux_index_to_val(NULL, mux->flags, index);
-	spin_lock_irqsave(&mux->lock, flags);
 	if (mux->flags & MUX_CLK_SW)
 		mux->reg = val;
 	else
 		lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
 				mux->width, val);
-	spin_unlock_irqrestore(&mux->lock, flags);

 	return 0;
 }
···
 lgm_clk_register_mux(struct lgm_clk_provider *ctx,
 		     const struct lgm_clk_branch *list)
 {
-	unsigned long flags, cflags = list->mux_flags;
+	unsigned long cflags = list->mux_flags;
 	struct device *dev = ctx->dev;
 	u8 shift = list->mux_shift;
 	u8 width = list->mux_width;
···
 	init.num_parents = list->num_parents;

 	mux->membase = ctx->membase;
-	mux->lock = ctx->lock;
 	mux->reg = reg;
 	mux->shift = shift;
 	mux->width = width;
···
 	if (ret)
 		return ERR_PTR(ret);

-	if (cflags & CLOCK_FLAG_VAL_INIT) {
-		spin_lock_irqsave(&mux->lock, flags);
+	if (cflags & CLOCK_FLAG_VAL_INIT)
 		lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
-		spin_unlock_irqrestore(&mux->lock, flags);
-	}

 	return hw;
 }
···
 lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 {
 	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
-	unsigned long flags;
 	unsigned int val;

-	spin_lock_irqsave(&divider->lock, flags);
 	val = lgm_get_clk_val(divider->membase, divider->reg,
 			      divider->shift, divider->width);
-	spin_unlock_irqrestore(&divider->lock, flags);

 	return divider_recalc_rate(hw, parent_rate, val, divider->table,
 				   divider->flags, divider->width);
···
 			  unsigned long prate)
 {
 	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
-	unsigned long flags;
 	int value;

 	value = divider_get_val(rate, prate, divider->table,
···
 	if (value < 0)
 		return value;

-	spin_lock_irqsave(&divider->lock, flags);
 	lgm_set_clk_val(divider->membase, divider->reg,
 			divider->shift, divider->width, value);
-	spin_unlock_irqrestore(&divider->lock, flags);

 	return 0;
 }
···
 static int
 lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
 {
 	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
-	unsigned long flags;

-	spin_lock_irqsave(&div->lock, flags);
 	lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
 			div->width_gate, enable);
-	spin_unlock_irqrestore(&div->lock, flags);
 	return 0;
 }

···
 lgm_clk_register_divider(struct lgm_clk_provider *ctx,
 			 const struct lgm_clk_branch *list)
 {
-	unsigned long flags, cflags = list->div_flags;
+	unsigned long cflags = list->div_flags;
 	struct device *dev = ctx->dev;
 	struct lgm_clk_divider *div;
 	struct clk_init_data init = {};
···
 	init.num_parents = 1;

 	div->membase = ctx->membase;
-	div->lock = ctx->lock;
 	div->reg = reg;
 	div->shift = shift;
 	div->width = width;
···
 	if (ret)
 		return ERR_PTR(ret);

-	if (cflags & CLOCK_FLAG_VAL_INIT) {
-		spin_lock_irqsave(&div->lock, flags);
+	if (cflags & CLOCK_FLAG_VAL_INIT)
 		lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
-		spin_unlock_irqrestore(&div->lock, flags);
-	}

 	return hw;
 }
···
 lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
 			      const struct lgm_clk_branch *list)
 {
-	unsigned long flags;
 	struct clk_hw *hw;

 	hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
···
 	if (IS_ERR(hw))
 		return ERR_CAST(hw);

-	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
-		spin_lock_irqsave(&ctx->lock, flags);
+	if (list->div_flags & CLOCK_FLAG_VAL_INIT)
 		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
 				list->div_width, list->div_val);
-		spin_unlock_irqrestore(&ctx->lock, flags);
-	}

 	return hw;
 }
···
 static int lgm_clk_gate_enable(struct clk_hw *hw)
 {
 	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
-	unsigned long flags;
 	unsigned int reg;

-	spin_lock_irqsave(&gate->lock, flags);
 	reg = GATE_HW_REG_EN(gate->reg);
 	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
-	spin_unlock_irqrestore(&gate->lock, flags);

 	return 0;
 }
···
 static void lgm_clk_gate_disable(struct clk_hw *hw)
 {
 	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
-	unsigned long flags;
 	unsigned int reg;

-	spin_lock_irqsave(&gate->lock, flags);
 	reg = GATE_HW_REG_DIS(gate->reg);
 	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
-	spin_unlock_irqrestore(&gate->lock, flags);
 }

 static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
 {
 	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
 	unsigned int reg, ret;
-	unsigned long flags;

-	spin_lock_irqsave(&gate->lock, flags);
 	reg = GATE_HW_REG_STAT(gate->reg);
 	ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
-	spin_unlock_irqrestore(&gate->lock, flags);

 	return ret;
 }
···
 lgm_clk_register_gate(struct lgm_clk_provider *ctx,
 		      const struct lgm_clk_branch *list)
 {
-	unsigned long flags, cflags = list->gate_flags;
+	unsigned long cflags = list->gate_flags;
 	const char *pname = list->parent_data[0].name;
 	struct device *dev = ctx->dev;
 	u8 shift = list->gate_shift;
···
 	init.num_parents = pname ? 1 : 0;

 	gate->membase = ctx->membase;
-	gate->lock = ctx->lock;
 	gate->reg = reg;
 	gate->shift = shift;
 	gate->flags = cflags;
···
 		return ERR_PTR(ret);

 	if (cflags & CLOCK_FLAG_VAL_INIT) {
-		spin_lock_irqsave(&gate->lock, flags);
 		lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
-		spin_unlock_irqrestore(&gate->lock, flags);
 	}

 	return hw;
···
 static int lgm_clk_ddiv_enable(struct clk_hw *hw)
 {
 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
-	unsigned long flags;

-	spin_lock_irqsave(&ddiv->lock, flags);
 	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
 			ddiv->width_gate, 1);
-	spin_unlock_irqrestore(&ddiv->lock, flags);
 	return 0;
 }

 static void lgm_clk_ddiv_disable(struct clk_hw *hw)
 {
 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
-	unsigned long flags;

-	spin_lock_irqsave(&ddiv->lock, flags);
 	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
 			ddiv->width_gate, 0);
-	spin_unlock_irqrestore(&ddiv->lock, flags);
 }

 static int
···
 {
 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
 	u32 div, ddiv1, ddiv2;
-	unsigned long flags;

 	div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);

-	spin_lock_irqsave(&ddiv->lock, flags);
 	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
 		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
 		div = div * 2;
 	}

-	if (div <= 0) {
-		spin_unlock_irqrestore(&ddiv->lock, flags);
+	if (div <= 0)
 		return -EINVAL;
-	}

-	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
-		spin_unlock_irqrestore(&ddiv->lock, flags);
+	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2))
 		return -EINVAL;
-	}

 	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
 			ddiv1 - 1);

 	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
 			ddiv2 - 1);
-	spin_unlock_irqrestore(&ddiv->lock, flags);

 	return 0;
 }
···
 {
 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
 	u32 div, ddiv1, ddiv2;
-	unsigned long flags;
 	u64 rate64;

 	div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);

 	/* if predivide bit is enabled, modify div by factor of 2.5 */
-	spin_lock_irqsave(&ddiv->lock, flags);
 	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
 		div = div * 2;
 		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
 	}
-	spin_unlock_irqrestore(&ddiv->lock, flags);

 	if (div <= 0)
 		return *prate;
···
 	do_div(rate64, ddiv2);

 	/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
-	spin_lock_irqsave(&ddiv->lock, flags);
 	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
 		rate64 = rate64 * 2;
 		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
 	}
-	spin_unlock_irqrestore(&ddiv->lock, flags);

 	return rate64;
 }
···
 	init.num_parents = 1;

 	ddiv->membase = ctx->membase;
-	ddiv->lock = ctx->lock;
 	ddiv->reg = list->reg;
 	ddiv->shift0 = list->shift0;
 	ddiv->width0 = list->width0;
drivers/clk/x86/clk-cgu.h  -6
···
 	u8 shift;
 	u8 width;
 	unsigned long flags;
-	spinlock_t lock;
 };

 struct lgm_clk_divider {
···
 	u8 width_gate;
 	unsigned long flags;
 	const struct clk_div_table *table;
-	spinlock_t lock;
 };

 struct lgm_clk_ddiv {
···
 	unsigned int mult;
 	unsigned int div;
 	unsigned long flags;
-	spinlock_t lock;
 };

 struct lgm_clk_gate {
···
 	unsigned int reg;
 	u8 shift;
 	unsigned long flags;
-	spinlock_t lock;
 };

 enum lgm_clk_type {
···
 	struct device_node *np;
 	struct device *dev;
 	struct clk_hw_onecell_data clk_data;
-	spinlock_t lock;
 };

 enum pll_type {
···
 	unsigned int reg;
 	unsigned long flags;
 	enum pll_type type;
-	spinlock_t lock;
 };

 /**
drivers/clk/x86/clk-lgm.c  -1
···

 	ctx->np = np;
 	ctx->dev = dev;
-	spin_lock_init(&ctx->lock);

 	ret = lgm_clk_register_plls(ctx, lgm_pll_clks,
 				    ARRAY_SIZE(lgm_pll_clks));