Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'clk-round-rate-6.18' of https://github.com/masneyb/linux into clk-determine-rate

Pull clk_ops::round_rate() conversion to clk_ops::determine_rate() from Brian Masney:

The round_rate() clk ops is deprecated in the clk framework in favor
of the determine_rate() clk ops, so let's go ahead and convert the
various clk drivers using the Coccinelle semantic patch posted below.
I did some minor cosmetic cleanups of the code in some cases.

Coccinelle semantic patch:

virtual patch

// Look up the current name of the round_rate function
@ has_round_rate @
identifier round_rate_name =~ ".*_round_rate";
identifier hw_param, rate_param, parent_rate_param;
@@

long round_rate_name(struct clk_hw *hw_param, unsigned long rate_param,
unsigned long *parent_rate_param)
{
...
}

// Rename the round_rate function name to determine_rate()
@ script:python generate_name depends on has_round_rate @
round_rate_name << has_round_rate.round_rate_name;
new_name;
@@

coccinelle.new_name = round_rate_name.replace("_round_rate", "_determine_rate")

// Change rate to req->rate; also change occurrences of 'return XXX'.
@ chg_rate depends on generate_name @
identifier has_round_rate.round_rate_name;
identifier has_round_rate.hw_param;
identifier has_round_rate.rate_param;
identifier has_round_rate.parent_rate_param;
identifier ERR =~ "E.*";
expression E;
@@

long round_rate_name(struct clk_hw *hw_param, unsigned long rate_param,
unsigned long *parent_rate_param)
{
<...
(
-return -ERR;
+return -ERR;
|
- return rate_param;
+ return 0;
|
- return E;
+ req->rate = E;
+
+ return 0;
|
- rate_param
+ req->rate
)
...>
}

// Coccinelle only transforms the first occurrence of the rate parameter
// Run a second time. FIXME: Is there a better way to do this?
@ chg_rate2 depends on generate_name @
identifier has_round_rate.round_rate_name;
identifier has_round_rate.hw_param;
identifier has_round_rate.rate_param;
identifier has_round_rate.parent_rate_param;
@@

long round_rate_name(struct clk_hw *hw_param, unsigned long rate_param,
unsigned long *parent_rate_param)
{
<...
- rate_param
+ req->rate
...>
}

// Change parent_rate to req->best_parent_rate
@ chg_parent_rate depends on generate_name @
identifier has_round_rate.round_rate_name;
identifier has_round_rate.hw_param;
identifier has_round_rate.rate_param;
identifier has_round_rate.parent_rate_param;
@@

long round_rate_name(struct clk_hw *hw_param, unsigned long rate_param,
unsigned long *parent_rate_param)
{
<...
(
- *parent_rate_param
+ req->best_parent_rate
|
- parent_rate_param
+ &req->best_parent_rate
)
...>
}

// Convert the function definition from round_rate() to determine_rate()
@ func_definition depends on chg_rate @
identifier has_round_rate.round_rate_name;
identifier has_round_rate.hw_param;
identifier has_round_rate.rate_param;
identifier has_round_rate.parent_rate_param;
identifier generate_name.new_name;
@@

- long round_rate_name(struct clk_hw *hw_param, unsigned long rate_param,
- unsigned long *parent_rate_param)
+ int new_name(struct clk_hw *hw, struct clk_rate_request *req)
{
...
}

// Update the ops from round_rate() to determine_rate()
@ ops depends on func_definition @
identifier has_round_rate.round_rate_name;
identifier generate_name.new_name;
@@

{
...,
- .round_rate = round_rate_name,
+ .determine_rate = new_name,
...,
}

Note that I used coccinelle 1.2 instead of 1.3 since the newer version
adds unnecessary braces as described in this post.
https://lore.kernel.org/cocci/67642477-5f3e-4b2a-914d-579a54f48cbd@intel.com/

* tag 'clk-round-rate-6.18' of https://github.com/masneyb/linux: (118 commits)
clk: scmi: migrate round_rate() to determine_rate()
clk: ti: fapll: convert from round_rate() to determine_rate()
clk: ti: dra7-atl: convert from round_rate() to determine_rate()
clk: ti: divider: convert from round_rate() to determine_rate()
clk: ti: composite: convert from round_rate() to determine_rate()
clk: ti: dpll: convert from round_rate() to determine_rate()
clk: ti: dpll: change error return from ~0 to -EINVAL
clk: ti: dpll: remove round_rate() in favor of determine_rate()
clk: tegra: tegra210-emc: convert from round_rate() to determine_rate()
clk: tegra: super: convert from round_rate() to determine_rate()
clk: tegra: pll: convert from round_rate() to determine_rate()
clk: tegra: periph: divider: convert from round_rate() to determine_rate()
clk: tegra: divider: convert from round_rate() to determine_rate()
clk: tegra: audio-sync: convert from round_rate() to determine_rate()
clk: fixed-factor: drop round_rate() clk ops
clk: divider: remove round_rate() in favor of determine_rate()
clk: visconti: pll: convert from round_rate() to determine_rate()
clk: versatile: vexpress-osc: convert from round_rate() to determine_rate()
clk: versatile: icst: convert from round_rate() to determine_rate()
clk: versaclock7: convert from round_rate() to determine_rate()
...

+1521 -1188
+4 -4
drivers/clk/actions/owl-composite.c
··· 122 122 rate, parent_rate); 123 123 } 124 124 125 - static long owl_comp_fix_fact_round_rate(struct clk_hw *hw, unsigned long rate, 126 - unsigned long *parent_rate) 125 + static int owl_comp_fix_fact_determine_rate(struct clk_hw *hw, 126 + struct clk_rate_request *req) 127 127 { 128 128 struct owl_composite *comp = hw_to_owl_comp(hw); 129 129 struct clk_fixed_factor *fix_fact_hw = &comp->rate.fix_fact_hw; 130 130 131 - return comp->fix_fact_ops->round_rate(&fix_fact_hw->hw, rate, parent_rate); 131 + return comp->fix_fact_ops->determine_rate(&fix_fact_hw->hw, req); 132 132 } 133 133 134 134 static unsigned long owl_comp_fix_fact_recalc_rate(struct clk_hw *hw, ··· 193 193 .is_enabled = owl_comp_is_enabled, 194 194 195 195 /* fix_fact_ops */ 196 - .round_rate = owl_comp_fix_fact_round_rate, 196 + .determine_rate = owl_comp_fix_fact_determine_rate, 197 197 .recalc_rate = owl_comp_fix_fact_recalc_rate, 198 198 .set_rate = owl_comp_fix_fact_set_rate, 199 199 };
+8 -5
drivers/clk/actions/owl-divider.c
··· 23 23 div_hw->div_flags); 24 24 } 25 25 26 - static long owl_divider_round_rate(struct clk_hw *hw, unsigned long rate, 27 - unsigned long *parent_rate) 26 + static int owl_divider_determine_rate(struct clk_hw *hw, 27 + struct clk_rate_request *req) 28 28 { 29 29 struct owl_divider *div = hw_to_owl_divider(hw); 30 30 31 - return owl_divider_helper_round_rate(&div->common, &div->div_hw, 32 - rate, parent_rate); 31 + req->rate = owl_divider_helper_round_rate(&div->common, &div->div_hw, 32 + req->rate, 33 + &req->best_parent_rate); 34 + 35 + return 0; 33 36 } 34 37 35 38 unsigned long owl_divider_helper_recalc_rate(struct owl_clk_common *common, ··· 92 89 93 90 const struct clk_ops owl_divider_ops = { 94 91 .recalc_rate = owl_divider_recalc_rate, 95 - .round_rate = owl_divider_round_rate, 92 + .determine_rate = owl_divider_determine_rate, 96 93 .set_rate = owl_divider_set_rate, 97 94 };
+7 -5
drivers/clk/actions/owl-factor.c
··· 130 130 return *parent_rate * mul / div; 131 131 } 132 132 133 - static long owl_factor_round_rate(struct clk_hw *hw, unsigned long rate, 134 - unsigned long *parent_rate) 133 + static int owl_factor_determine_rate(struct clk_hw *hw, 134 + struct clk_rate_request *req) 135 135 { 136 136 struct owl_factor *factor = hw_to_owl_factor(hw); 137 137 struct owl_factor_hw *factor_hw = &factor->factor_hw; 138 138 139 - return owl_factor_helper_round_rate(&factor->common, factor_hw, 140 - rate, parent_rate); 139 + req->rate = owl_factor_helper_round_rate(&factor->common, factor_hw, 140 + req->rate, &req->best_parent_rate); 141 + 142 + return 0; 141 143 } 142 144 143 145 unsigned long owl_factor_helper_recalc_rate(struct owl_clk_common *common, ··· 216 214 } 217 215 218 216 const struct clk_ops owl_factor_ops = { 219 - .round_rate = owl_factor_round_rate, 217 + .determine_rate = owl_factor_determine_rate, 220 218 .recalc_rate = owl_factor_recalc_rate, 221 219 .set_rate = owl_factor_set_rate, 222 220 };
+16 -9
drivers/clk/actions/owl-pll.c
··· 56 56 return table; 57 57 } 58 58 59 - static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate, 60 - unsigned long *parent_rate) 59 + static int owl_pll_determine_rate(struct clk_hw *hw, 60 + struct clk_rate_request *req) 61 61 { 62 62 struct owl_pll *pll = hw_to_owl_pll(hw); 63 63 struct owl_pll_hw *pll_hw = &pll->pll_hw; ··· 65 65 u32 mul; 66 66 67 67 if (pll_hw->table) { 68 - clkt = _get_pll_table(pll_hw->table, rate); 69 - return clkt->rate; 68 + clkt = _get_pll_table(pll_hw->table, req->rate); 69 + req->rate = clkt->rate; 70 + 71 + return 0; 70 72 } 71 73 72 74 /* fixed frequency */ 73 - if (pll_hw->width == 0) 74 - return pll_hw->bfreq; 75 + if (pll_hw->width == 0) { 76 + req->rate = pll_hw->bfreq; 75 77 76 - mul = owl_pll_calculate_mul(pll_hw, rate); 78 + return 0; 79 + } 77 80 78 - return pll_hw->bfreq * mul; 81 + mul = owl_pll_calculate_mul(pll_hw, req->rate); 82 + 83 + req->rate = pll_hw->bfreq * mul; 84 + 85 + return 0; 79 86 } 80 87 81 88 static unsigned long owl_pll_recalc_rate(struct clk_hw *hw, ··· 195 188 .enable = owl_pll_enable, 196 189 .disable = owl_pll_disable, 197 190 .is_enabled = owl_pll_is_enabled, 198 - .round_rate = owl_pll_round_rate, 191 + .determine_rate = owl_pll_determine_rate, 199 192 .recalc_rate = owl_pll_recalc_rate, 200 193 .set_rate = owl_pll_set_rate, 201 194 };
+23 -19
drivers/clk/at91/clk-audio-pll.c
··· 270 270 return 0; 271 271 } 272 272 273 - static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate, 274 - unsigned long *parent_rate) 273 + static int clk_audio_pll_pad_determine_rate(struct clk_hw *hw, 274 + struct clk_rate_request *req) 275 275 { 276 276 struct clk_hw *pclk = clk_hw_get_parent(hw); 277 277 long best_rate = -EINVAL; ··· 283 283 int best_diff = -1; 284 284 285 285 pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__, 286 - rate, *parent_rate); 286 + req->rate, req->best_parent_rate); 287 287 288 288 /* 289 289 * Rate divisor is actually made of two different divisors, multiplied ··· 304 304 continue; 305 305 306 306 best_parent_rate = clk_hw_round_rate(pclk, 307 - rate * tmp_qd * div); 307 + req->rate * tmp_qd * div); 308 308 tmp_rate = best_parent_rate / (div * tmp_qd); 309 - tmp_diff = abs(rate - tmp_rate); 309 + tmp_diff = abs(req->rate - tmp_rate); 310 310 311 311 if (best_diff < 0 || best_diff > tmp_diff) { 312 - *parent_rate = best_parent_rate; 312 + req->best_parent_rate = best_parent_rate; 313 313 best_rate = tmp_rate; 314 314 best_diff = tmp_diff; 315 315 } ··· 318 318 pr_debug("A PLL/PAD: %s, best_rate = %ld, best_parent_rate = %lu\n", 319 319 __func__, best_rate, best_parent_rate); 320 320 321 - return best_rate; 321 + req->rate = best_rate; 322 + 323 + return 0; 322 324 } 323 325 324 - static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate, 325 - unsigned long *parent_rate) 326 + static int clk_audio_pll_pmc_determine_rate(struct clk_hw *hw, 327 + struct clk_rate_request *req) 326 328 { 327 329 struct clk_hw *pclk = clk_hw_get_parent(hw); 328 330 long best_rate = -EINVAL; ··· 335 333 int best_diff = -1; 336 334 337 335 pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__, 338 - rate, *parent_rate); 336 + req->rate, req->best_parent_rate); 339 337 340 - if (!rate) 338 + if (!req->rate) 341 339 return 0; 342 340 343 341 best_parent_rate = 
clk_round_rate(pclk->clk, 1); 344 - div = max(best_parent_rate / rate, 1UL); 342 + div = max(best_parent_rate / req->rate, 1UL); 345 343 for (; div <= AUDIO_PLL_QDPMC_MAX; div++) { 346 - best_parent_rate = clk_round_rate(pclk->clk, rate * div); 344 + best_parent_rate = clk_round_rate(pclk->clk, req->rate * div); 347 345 tmp_rate = best_parent_rate / div; 348 - tmp_diff = abs(rate - tmp_rate); 346 + tmp_diff = abs(req->rate - tmp_rate); 349 347 350 348 if (best_diff < 0 || best_diff > tmp_diff) { 351 - *parent_rate = best_parent_rate; 349 + req->best_parent_rate = best_parent_rate; 352 350 best_rate = tmp_rate; 353 351 best_diff = tmp_diff; 354 352 tmp_qd = div; ··· 358 356 } 359 357 360 358 pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %d)\n", 361 - __func__, best_rate, *parent_rate, tmp_qd - 1); 359 + __func__, best_rate, req->best_parent_rate, tmp_qd - 1); 362 360 363 - return best_rate; 361 + req->rate = best_rate; 362 + 363 + return 0; 364 364 } 365 365 366 366 static int clk_audio_pll_frac_set_rate(struct clk_hw *hw, unsigned long rate, ··· 440 436 .enable = clk_audio_pll_pad_enable, 441 437 .disable = clk_audio_pll_pad_disable, 442 438 .recalc_rate = clk_audio_pll_pad_recalc_rate, 443 - .round_rate = clk_audio_pll_pad_round_rate, 439 + .determine_rate = clk_audio_pll_pad_determine_rate, 444 440 .set_rate = clk_audio_pll_pad_set_rate, 445 441 }; 446 442 ··· 448 444 .enable = clk_audio_pll_pmc_enable, 449 445 .disable = clk_audio_pll_pmc_disable, 450 446 .recalc_rate = clk_audio_pll_pmc_recalc_rate, 451 - .round_rate = clk_audio_pll_pmc_round_rate, 447 + .determine_rate = clk_audio_pll_pmc_determine_rate, 452 448 .set_rate = clk_audio_pll_pmc_set_rate, 453 449 }; 454 450
+22 -11
drivers/clk/at91/clk-h32mx.c
··· 40 40 return parent_rate; 41 41 } 42 42 43 - static long clk_sama5d4_h32mx_round_rate(struct clk_hw *hw, unsigned long rate, 44 - unsigned long *parent_rate) 43 + static int clk_sama5d4_h32mx_determine_rate(struct clk_hw *hw, 44 + struct clk_rate_request *req) 45 45 { 46 46 unsigned long div; 47 47 48 - if (rate > *parent_rate) 49 - return *parent_rate; 50 - div = *parent_rate / 2; 51 - if (rate < div) 52 - return div; 48 + if (req->rate > req->best_parent_rate) { 49 + req->rate = req->best_parent_rate; 53 50 54 - if (rate - div < *parent_rate - rate) 55 - return div; 51 + return 0; 52 + } 53 + div = req->best_parent_rate / 2; 54 + if (req->rate < div) { 55 + req->rate = div; 56 56 57 - return *parent_rate; 57 + return 0; 58 + } 59 + 60 + if (req->rate - div < req->best_parent_rate - req->rate) { 61 + req->rate = div; 62 + 63 + return 0; 64 + } 65 + 66 + req->rate = req->best_parent_rate; 67 + 68 + return 0; 58 69 } 59 70 60 71 static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate, ··· 88 77 89 78 static const struct clk_ops h32mx_ops = { 90 79 .recalc_rate = clk_sama5d4_h32mx_recalc_rate, 91 - .round_rate = clk_sama5d4_h32mx_round_rate, 80 + .determine_rate = clk_sama5d4_h32mx_determine_rate, 92 81 .set_rate = clk_sama5d4_h32mx_set_rate, 93 82 }; 94 83
+29 -19
drivers/clk/at91/clk-peripheral.c
··· 279 279 long best_diff = LONG_MIN; 280 280 u32 shift; 281 281 282 - if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) 283 - return parent_rate; 282 + if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) { 283 + req->rate = parent_rate; 284 + 285 + return 0; 286 + } 284 287 285 288 /* Fist step: check the available dividers. */ 286 289 for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) { ··· 335 332 return 0; 336 333 } 337 334 338 - static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw, 339 - unsigned long rate, 340 - unsigned long *parent_rate) 335 + static int clk_sam9x5_peripheral_no_parent_determine_rate(struct clk_hw *hw, 336 + struct clk_rate_request *req) 341 337 { 342 338 int shift = 0; 343 339 unsigned long best_rate; 344 340 unsigned long best_diff; 345 - unsigned long cur_rate = *parent_rate; 341 + unsigned long cur_rate = req->best_parent_rate; 346 342 unsigned long cur_diff; 347 343 struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); 348 344 349 - if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) 350 - return *parent_rate; 345 + if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) { 346 + req->rate = req->best_parent_rate; 347 + 348 + return 0; 349 + } 351 350 352 351 if (periph->range.max) { 353 352 for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) { 354 - cur_rate = *parent_rate >> shift; 353 + cur_rate = req->best_parent_rate >> shift; 355 354 if (cur_rate <= periph->range.max) 356 355 break; 357 356 } 358 357 } 359 358 360 - if (rate >= cur_rate) 361 - return cur_rate; 359 + if (req->rate >= cur_rate) { 360 + req->rate = cur_rate; 362 361 363 - best_diff = cur_rate - rate; 362 + return 0; 363 + } 364 + 365 + best_diff = cur_rate - req->rate; 364 366 best_rate = cur_rate; 365 367 for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) { 366 - cur_rate = *parent_rate >> shift; 367 - if (cur_rate < rate) 368 - cur_diff = rate - cur_rate; 368 + cur_rate = req->best_parent_rate >> shift; 369 + if (cur_rate 
< req->rate) 370 + cur_diff = req->rate - cur_rate; 369 371 else 370 - cur_diff = cur_rate - rate; 372 + cur_diff = cur_rate - req->rate; 371 373 372 374 if (cur_diff < best_diff) { 373 375 best_diff = cur_diff; 374 376 best_rate = cur_rate; 375 377 } 376 378 377 - if (!best_diff || cur_rate < rate) 379 + if (!best_diff || cur_rate < req->rate) 378 380 break; 379 381 } 380 382 381 - return best_rate; 383 + req->rate = best_rate; 384 + 385 + return 0; 382 386 } 383 387 384 388 static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw, ··· 437 427 .disable = clk_sam9x5_peripheral_disable, 438 428 .is_enabled = clk_sam9x5_peripheral_is_enabled, 439 429 .recalc_rate = clk_sam9x5_peripheral_recalc_rate, 440 - .round_rate = clk_sam9x5_peripheral_round_rate, 430 + .determine_rate = clk_sam9x5_peripheral_no_parent_determine_rate, 441 431 .set_rate = clk_sam9x5_peripheral_set_rate, 442 432 .save_context = clk_sam9x5_peripheral_save_context, 443 433 .restore_context = clk_sam9x5_peripheral_restore_context,
+7 -5
drivers/clk/at91/clk-pll.c
··· 231 231 return bestrate; 232 232 } 233 233 234 - static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, 235 - unsigned long *parent_rate) 234 + static int clk_pll_determine_rate(struct clk_hw *hw, 235 + struct clk_rate_request *req) 236 236 { 237 237 struct clk_pll *pll = to_clk_pll(hw); 238 238 239 - return clk_pll_get_best_div_mul(pll, rate, *parent_rate, 240 - NULL, NULL, NULL); 239 + req->rate = clk_pll_get_best_div_mul(pll, req->rate, req->best_parent_rate, 240 + NULL, NULL, NULL); 241 + 242 + return 0; 241 243 } 242 244 243 245 static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, ··· 304 302 .unprepare = clk_pll_unprepare, 305 303 .is_prepared = clk_pll_is_prepared, 306 304 .recalc_rate = clk_pll_recalc_rate, 307 - .round_rate = clk_pll_round_rate, 305 + .determine_rate = clk_pll_determine_rate, 308 306 .set_rate = clk_pll_set_rate, 309 307 .save_context = clk_pll_save_context, 310 308 .restore_context = clk_pll_restore_context,
+23 -11
drivers/clk/at91/clk-plldiv.c
··· 33 33 return parent_rate; 34 34 } 35 35 36 - static long clk_plldiv_round_rate(struct clk_hw *hw, unsigned long rate, 37 - unsigned long *parent_rate) 36 + static int clk_plldiv_determine_rate(struct clk_hw *hw, 37 + struct clk_rate_request *req) 38 38 { 39 39 unsigned long div; 40 40 41 - if (rate > *parent_rate) 42 - return *parent_rate; 43 - div = *parent_rate / 2; 44 - if (rate < div) 45 - return div; 41 + if (req->rate > req->best_parent_rate) { 42 + req->rate = req->best_parent_rate; 46 43 47 - if (rate - div < *parent_rate - rate) 48 - return div; 44 + return 0; 45 + } 49 46 50 - return *parent_rate; 47 + div = req->best_parent_rate / 2; 48 + if (req->rate < div) { 49 + req->rate = div; 50 + 51 + return 0; 52 + } 53 + 54 + if (req->rate - div < req->best_parent_rate - req->rate) { 55 + req->rate = div; 56 + 57 + return 0; 58 + } 59 + 60 + req->rate = req->best_parent_rate; 61 + 62 + return 0; 51 63 } 52 64 53 65 static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate, ··· 78 66 79 67 static const struct clk_ops plldiv_ops = { 80 68 .recalc_rate = clk_plldiv_recalc_rate, 81 - .round_rate = clk_plldiv_round_rate, 69 + .determine_rate = clk_plldiv_determine_rate, 82 70 .set_rate = clk_plldiv_set_rate, 83 71 }; 84 72
+18 -11
drivers/clk/at91/clk-sam9x60-pll.c
··· 230 230 return tmprate; 231 231 } 232 232 233 - static long sam9x60_frac_pll_round_rate(struct clk_hw *hw, unsigned long rate, 234 - unsigned long *parent_rate) 233 + static int sam9x60_frac_pll_determine_rate(struct clk_hw *hw, 234 + struct clk_rate_request *req) 235 235 { 236 236 struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 237 237 238 - return sam9x60_frac_pll_compute_mul_frac(core, rate, *parent_rate, false); 238 + req->rate = sam9x60_frac_pll_compute_mul_frac(core, req->rate, 239 + req->best_parent_rate, 240 + false); 241 + 242 + return 0; 239 243 } 240 244 241 245 static int sam9x60_frac_pll_set_rate(struct clk_hw *hw, unsigned long rate, ··· 325 321 .unprepare = sam9x60_frac_pll_unprepare, 326 322 .is_prepared = sam9x60_frac_pll_is_prepared, 327 323 .recalc_rate = sam9x60_frac_pll_recalc_rate, 328 - .round_rate = sam9x60_frac_pll_round_rate, 324 + .determine_rate = sam9x60_frac_pll_determine_rate, 329 325 .set_rate = sam9x60_frac_pll_set_rate, 330 326 .save_context = sam9x60_frac_pll_save_context, 331 327 .restore_context = sam9x60_frac_pll_restore_context, ··· 336 332 .unprepare = sam9x60_frac_pll_unprepare, 337 333 .is_prepared = sam9x60_frac_pll_is_prepared, 338 334 .recalc_rate = sam9x60_frac_pll_recalc_rate, 339 - .round_rate = sam9x60_frac_pll_round_rate, 335 + .determine_rate = sam9x60_frac_pll_determine_rate, 340 336 .set_rate = sam9x60_frac_pll_set_rate_chg, 341 337 .save_context = sam9x60_frac_pll_save_context, 342 338 .restore_context = sam9x60_frac_pll_restore_context, ··· 491 487 return best_rate; 492 488 } 493 489 494 - static long sam9x60_div_pll_round_rate(struct clk_hw *hw, unsigned long rate, 495 - unsigned long *parent_rate) 490 + static int sam9x60_div_pll_determine_rate(struct clk_hw *hw, 491 + struct clk_rate_request *req) 496 492 { 497 493 struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 498 494 499 - return sam9x60_div_pll_compute_div(core, parent_rate, rate); 495 + req->rate = sam9x60_div_pll_compute_div(core, 
&req->best_parent_rate, 496 + req->rate); 497 + 498 + return 0; 500 499 } 501 500 502 501 static int sam9x60_div_pll_set_rate(struct clk_hw *hw, unsigned long rate, ··· 608 601 .unprepare = sam9x60_div_pll_unprepare, 609 602 .is_prepared = sam9x60_div_pll_is_prepared, 610 603 .recalc_rate = sam9x60_div_pll_recalc_rate, 611 - .round_rate = sam9x60_div_pll_round_rate, 604 + .determine_rate = sam9x60_div_pll_determine_rate, 612 605 .set_rate = sam9x60_div_pll_set_rate, 613 606 .save_context = sam9x60_div_pll_save_context, 614 607 .restore_context = sam9x60_div_pll_restore_context, ··· 619 612 .unprepare = sam9x60_div_pll_unprepare, 620 613 .is_prepared = sam9x60_div_pll_is_prepared, 621 614 .recalc_rate = sam9x60_div_pll_recalc_rate, 622 - .round_rate = sam9x60_div_pll_round_rate, 615 + .determine_rate = sam9x60_div_pll_determine_rate, 623 616 .set_rate = sam9x60_div_pll_set_rate_chg, 624 617 .save_context = sam9x60_div_pll_save_context, 625 618 .restore_context = sam9x60_div_pll_restore_context, ··· 630 623 .unprepare = sam9x60_div_pll_unprepare, 631 624 .is_prepared = sam9x60_div_pll_is_prepared, 632 625 .recalc_rate = sam9x60_fixed_div_pll_recalc_rate, 633 - .round_rate = sam9x60_div_pll_round_rate, 626 + .determine_rate = sam9x60_div_pll_determine_rate, 634 627 .save_context = sam9x60_div_pll_save_context, 635 628 .restore_context = sam9x60_div_pll_restore_context, 636 629 };
+11 -9
drivers/clk/at91/clk-usb.c
··· 319 319 return 0; 320 320 } 321 321 322 - static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, 323 - unsigned long *parent_rate) 322 + static int at91rm9200_clk_usb_determine_rate(struct clk_hw *hw, 323 + struct clk_rate_request *req) 324 324 { 325 325 struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw); 326 326 struct clk_hw *parent = clk_hw_get_parent(hw); ··· 336 336 if (!usb->divisors[i]) 337 337 continue; 338 338 339 - tmp_parent_rate = rate * usb->divisors[i]; 339 + tmp_parent_rate = req->rate * usb->divisors[i]; 340 340 tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate); 341 341 tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]); 342 - if (tmprate < rate) 343 - tmpdiff = rate - tmprate; 342 + if (tmprate < req->rate) 343 + tmpdiff = req->rate - tmprate; 344 344 else 345 - tmpdiff = tmprate - rate; 345 + tmpdiff = tmprate - req->rate; 346 346 347 347 if (bestdiff < 0 || bestdiff > tmpdiff) { 348 348 bestrate = tmprate; 349 349 bestdiff = tmpdiff; 350 - *parent_rate = tmp_parent_rate; 350 + req->best_parent_rate = tmp_parent_rate; 351 351 } 352 352 353 353 if (!bestdiff) 354 354 break; 355 355 } 356 356 357 - return bestrate; 357 + req->rate = bestrate; 358 + 359 + return 0; 358 360 } 359 361 360 362 static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate, ··· 386 384 387 385 static const struct clk_ops at91rm9200_usb_ops = { 388 386 .recalc_rate = at91rm9200_clk_usb_recalc_rate, 389 - .round_rate = at91rm9200_clk_usb_round_rate, 387 + .determine_rate = at91rm9200_clk_usb_determine_rate, 390 388 .set_rate = at91rm9200_clk_usb_set_rate, 391 389 }; 392 390
+7 -7
drivers/clk/axs10x/i2s_pll_clock.c
··· 108 108 return ((parent_rate / idiv) * fbdiv) / odiv; 109 109 } 110 110 111 - static long i2s_pll_round_rate(struct clk_hw *hw, unsigned long rate, 112 - unsigned long *prate) 111 + static int i2s_pll_determine_rate(struct clk_hw *hw, 112 + struct clk_rate_request *req) 113 113 { 114 114 struct i2s_pll_clk *clk = to_i2s_pll_clk(hw); 115 - const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(*prate); 115 + const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(req->best_parent_rate); 116 116 int i; 117 117 118 118 if (!pll_cfg) { 119 - dev_err(clk->dev, "invalid parent rate=%ld\n", *prate); 119 + dev_err(clk->dev, "invalid parent rate=%ld\n", req->best_parent_rate); 120 120 return -EINVAL; 121 121 } 122 122 123 123 for (i = 0; pll_cfg[i].rate != 0; i++) 124 - if (pll_cfg[i].rate == rate) 125 - return rate; 124 + if (pll_cfg[i].rate == req->rate) 125 + return 0; 126 126 127 127 return -EINVAL; 128 128 } ··· 156 156 157 157 static const struct clk_ops i2s_pll_ops = { 158 158 .recalc_rate = i2s_pll_recalc_rate, 159 - .round_rate = i2s_pll_round_rate, 159 + .determine_rate = i2s_pll_determine_rate, 160 160 .set_rate = i2s_pll_set_rate, 161 161 }; 162 162
+7 -5
drivers/clk/axs10x/pll_clock.c
··· 149 149 return rate; 150 150 } 151 151 152 - static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate, 153 - unsigned long *prate) 152 + static int axs10x_pll_determine_rate(struct clk_hw *hw, 153 + struct clk_rate_request *req) 154 154 { 155 155 int i; 156 156 long best_rate; ··· 163 163 best_rate = pll_cfg[0].rate; 164 164 165 165 for (i = 1; pll_cfg[i].rate != 0; i++) { 166 - if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate)) 166 + if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate)) 167 167 best_rate = pll_cfg[i].rate; 168 168 } 169 169 170 - return best_rate; 170 + req->rate = best_rate; 171 + 172 + return 0; 171 173 } 172 174 173 175 static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate, ··· 210 208 211 209 static const struct clk_ops axs10x_pll_ops = { 212 210 .recalc_rate = axs10x_pll_recalc_rate, 213 - .round_rate = axs10x_pll_round_rate, 211 + .determine_rate = axs10x_pll_determine_rate, 214 212 .set_rate = axs10x_pll_set_rate, 215 213 }; 216 214
+16 -11
drivers/clk/baikal-t1/ccu-div.c
··· 228 228 CCU_DIV_CLKDIV_MAX(mask)); 229 229 } 230 230 231 - static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate, 232 - unsigned long *parent_rate) 231 + static int ccu_div_var_determine_rate(struct clk_hw *hw, 232 + struct clk_rate_request *req) 233 233 { 234 234 struct ccu_div *div = to_ccu_div(hw); 235 235 unsigned long divider; 236 236 237 - divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask); 237 + divider = ccu_div_var_calc_divider(req->rate, req->best_parent_rate, 238 + div->mask); 238 239 239 - return ccu_div_calc_freq(*parent_rate, divider); 240 + req->rate = ccu_div_calc_freq(req->best_parent_rate, divider); 241 + 242 + return 0; 240 243 } 241 244 242 245 /* ··· 311 308 return ccu_div_calc_freq(parent_rate, div->divider); 312 309 } 313 310 314 - static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate, 315 - unsigned long *parent_rate) 311 + static int ccu_div_fixed_determine_rate(struct clk_hw *hw, 312 + struct clk_rate_request *req) 316 313 { 317 314 struct ccu_div *div = to_ccu_div(hw); 318 315 319 - return ccu_div_calc_freq(*parent_rate, div->divider); 316 + req->rate = ccu_div_calc_freq(req->best_parent_rate, div->divider); 317 + 318 + return 0; 320 319 } 321 320 322 321 static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate, ··· 539 534 .disable = ccu_div_gate_disable, 540 535 .is_enabled = ccu_div_gate_is_enabled, 541 536 .recalc_rate = ccu_div_var_recalc_rate, 542 - .round_rate = ccu_div_var_round_rate, 537 + .determine_rate = ccu_div_var_determine_rate, 543 538 .set_rate = ccu_div_var_set_rate_fast, 544 539 .debug_init = ccu_div_var_debug_init 545 540 }; 546 541 547 542 static const struct clk_ops ccu_div_var_nogate_ops = { 548 543 .recalc_rate = ccu_div_var_recalc_rate, 549 - .round_rate = ccu_div_var_round_rate, 544 + .determine_rate = ccu_div_var_determine_rate, 550 545 .set_rate = ccu_div_var_set_rate_slow, 551 546 .debug_init = ccu_div_var_debug_init 552 547 }; ··· 556 
551 .disable = ccu_div_gate_disable, 557 552 .is_enabled = ccu_div_gate_is_enabled, 558 553 .recalc_rate = ccu_div_fixed_recalc_rate, 559 - .round_rate = ccu_div_fixed_round_rate, 554 + .determine_rate = ccu_div_fixed_determine_rate, 560 555 .set_rate = ccu_div_fixed_set_rate, 561 556 .debug_init = ccu_div_gate_debug_init 562 557 }; ··· 570 565 571 566 static const struct clk_ops ccu_div_fixed_ops = { 572 567 .recalc_rate = ccu_div_fixed_recalc_rate, 573 - .round_rate = ccu_div_fixed_round_rate, 568 + .determine_rate = ccu_div_fixed_determine_rate, 574 569 .set_rate = ccu_div_fixed_set_rate, 575 570 .debug_init = ccu_div_fixed_debug_init 576 571 };
+8 -6
drivers/clk/baikal-t1/ccu-pll.c
··· 228 228 } 229 229 } 230 230 231 - static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate, 232 - unsigned long *parent_rate) 231 + static int ccu_pll_determine_rate(struct clk_hw *hw, 232 + struct clk_rate_request *req) 233 233 { 234 234 unsigned long nr = 1, nf = 1, od = 1; 235 235 236 - ccu_pll_calc_factors(rate, *parent_rate, &nr, &nf, &od); 236 + ccu_pll_calc_factors(req->rate, req->best_parent_rate, &nr, &nf, &od); 237 237 238 - return ccu_pll_calc_freq(*parent_rate, nr, nf, od); 238 + req->rate = ccu_pll_calc_freq(req->best_parent_rate, nr, nf, od); 239 + 240 + return 0; 239 241 } 240 242 241 243 /* ··· 483 481 .disable = ccu_pll_disable, 484 482 .is_enabled = ccu_pll_is_enabled, 485 483 .recalc_rate = ccu_pll_recalc_rate, 486 - .round_rate = ccu_pll_round_rate, 484 + .determine_rate = ccu_pll_determine_rate, 487 485 .set_rate = ccu_pll_set_rate_norst, 488 486 .debug_init = ccu_pll_debug_init 489 487 }; ··· 493 491 .disable = ccu_pll_disable, 494 492 .is_enabled = ccu_pll_is_enabled, 495 493 .recalc_rate = ccu_pll_recalc_rate, 496 - .round_rate = ccu_pll_round_rate, 494 + .determine_rate = ccu_pll_determine_rate, 497 495 .set_rate = ccu_pll_set_rate_reset, 498 496 .debug_init = ccu_pll_debug_init 499 497 };
+15 -10
drivers/clk/bcm/clk-iproc-asiu.c
··· 98 98 return clk->rate; 99 99 } 100 100 101 - static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate, 102 - unsigned long *parent_rate) 101 + static int iproc_asiu_clk_determine_rate(struct clk_hw *hw, 102 + struct clk_rate_request *req) 103 103 { 104 104 unsigned int div; 105 105 106 - if (rate == 0 || *parent_rate == 0) 106 + if (req->rate == 0 || req->best_parent_rate == 0) 107 107 return -EINVAL; 108 108 109 - if (rate == *parent_rate) 110 - return *parent_rate; 109 + if (req->rate == req->best_parent_rate) 110 + return 0; 111 111 112 - div = DIV_ROUND_CLOSEST(*parent_rate, rate); 113 - if (div < 2) 114 - return *parent_rate; 112 + div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate); 113 + if (div < 2) { 114 + req->rate = req->best_parent_rate; 115 115 116 - return *parent_rate / div; 116 + return 0; 117 + } 118 + 119 + req->rate = req->best_parent_rate / div; 120 + 121 + return 0; 117 122 } 118 123 119 124 static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 173 168 .enable = iproc_asiu_clk_enable, 174 169 .disable = iproc_asiu_clk_disable, 175 170 .recalc_rate = iproc_asiu_clk_recalc_rate, 176 - .round_rate = iproc_asiu_clk_round_rate, 171 + .determine_rate = iproc_asiu_clk_determine_rate, 177 172 .set_rate = iproc_asiu_clk_set_rate, 178 173 }; 179 174
+8 -6
drivers/clk/clk-apple-nco.c
··· 212 212 ((u64) div) * incbase + inc1); 213 213 } 214 214 215 - static long applnco_round_rate(struct clk_hw *hw, unsigned long rate, 216 - unsigned long *parent_rate) 215 + static int applnco_determine_rate(struct clk_hw *hw, 216 + struct clk_rate_request *req) 217 217 { 218 - unsigned long lo = *parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1; 219 - unsigned long hi = *parent_rate / COARSE_DIV_OFFSET; 218 + unsigned long lo = req->best_parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1; 219 + unsigned long hi = req->best_parent_rate / COARSE_DIV_OFFSET; 220 220 221 - return clamp(rate, lo, hi); 221 + req->rate = clamp(req->rate, lo, hi); 222 + 223 + return 0; 222 224 } 223 225 224 226 static int applnco_enable(struct clk_hw *hw) ··· 248 246 static const struct clk_ops applnco_ops = { 249 247 .set_rate = applnco_set_rate, 250 248 .recalc_rate = applnco_recalc_rate, 251 - .round_rate = applnco_round_rate, 249 + .determine_rate = applnco_determine_rate, 252 250 .enable = applnco_enable, 253 251 .disable = applnco_disable, 254 252 .is_enabled = applnco_is_enabled,
+13 -8
drivers/clk/clk-bm1880.c
··· 608 608 return rate; 609 609 } 610 610 611 - static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate, 612 - unsigned long *prate) 611 + static int bm1880_clk_div_determine_rate(struct clk_hw *hw, 612 + struct clk_rate_request *req) 613 613 { 614 614 struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw); 615 615 struct bm1880_div_clock *div = &div_hw->div; ··· 621 621 val = readl(reg_addr) >> div->shift; 622 622 val &= clk_div_mask(div->width); 623 623 624 - return divider_ro_round_rate(hw, rate, prate, div->table, 625 - div->width, div->flags, 626 - val); 624 + req->rate = divider_ro_round_rate(hw, req->rate, 625 + &req->best_parent_rate, 626 + div->table, 627 + div->width, div->flags, val); 628 + 629 + return 0; 627 630 } 628 631 629 - return divider_round_rate(hw, rate, prate, div->table, 630 - div->width, div->flags); 632 + req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, 633 + div->table, div->width, div->flags); 634 + 635 + return 0; 631 636 } 632 637 633 638 static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate, ··· 670 665 671 666 static const struct clk_ops bm1880_clk_div_ops = { 672 667 .recalc_rate = bm1880_clk_div_recalc_rate, 673 - .round_rate = bm1880_clk_div_round_rate, 668 + .determine_rate = bm1880_clk_div_determine_rate, 674 669 .set_rate = bm1880_clk_div_set_rate, 675 670 }; 676 671
+9 -7
drivers/clk/clk-cdce706.c
··· 183 183 return 0; 184 184 } 185 185 186 - static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate, 187 - unsigned long *parent_rate) 186 + static int cdce706_pll_determine_rate(struct clk_hw *hw, 187 + struct clk_rate_request *req) 188 188 { 189 189 struct cdce706_hw_data *hwd = to_hw_data(hw); 190 190 unsigned long mul, div; ··· 192 192 193 193 dev_dbg(&hwd->dev_data->client->dev, 194 194 "%s, rate: %lu, parent_rate: %lu\n", 195 - __func__, rate, *parent_rate); 195 + __func__, req->rate, req->best_parent_rate); 196 196 197 - rational_best_approximation(rate, *parent_rate, 197 + rational_best_approximation(req->rate, req->best_parent_rate, 198 198 CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX, 199 199 &mul, &div); 200 200 hwd->mul = mul; ··· 204 204 "%s, pll: %d, mul: %lu, div: %lu\n", 205 205 __func__, hwd->idx, mul, div); 206 206 207 - res = (u64)*parent_rate * hwd->mul; 207 + res = (u64)req->best_parent_rate * hwd->mul; 208 208 do_div(res, hwd->div); 209 - return res; 209 + req->rate = res; 210 + 211 + return 0; 210 212 } 211 213 212 214 static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate, ··· 253 251 254 252 static const struct clk_ops cdce706_pll_ops = { 255 253 .recalc_rate = cdce706_pll_recalc_rate, 256 - .round_rate = cdce706_pll_round_rate, 254 + .determine_rate = cdce706_pll_determine_rate, 257 255 .set_rate = cdce706_pll_set_rate, 258 256 }; 259 257
+29 -21
drivers/clk/clk-cdce925.c
··· 128 128 } 129 129 } 130 130 131 - static long cdce925_pll_round_rate(struct clk_hw *hw, unsigned long rate, 132 - unsigned long *parent_rate) 131 + static int cdce925_pll_determine_rate(struct clk_hw *hw, 132 + struct clk_rate_request *req) 133 133 { 134 134 u16 n, m; 135 135 136 - cdce925_pll_find_rate(rate, *parent_rate, &n, &m); 137 - return (long)cdce925_pll_calculate_rate(*parent_rate, n, m); 136 + cdce925_pll_find_rate(req->rate, req->best_parent_rate, &n, &m); 137 + req->rate = (long)cdce925_pll_calculate_rate(req->best_parent_rate, n, m); 138 + 139 + return 0; 138 140 } 139 141 140 142 static int cdce925_pll_set_rate(struct clk_hw *hw, unsigned long rate, ··· 268 266 .prepare = cdce925_pll_prepare, 269 267 .unprepare = cdce925_pll_unprepare, 270 268 .recalc_rate = cdce925_pll_recalc_rate, 271 - .round_rate = cdce925_pll_round_rate, 269 + .determine_rate = cdce925_pll_determine_rate, 272 270 .set_rate = cdce925_pll_set_rate, 273 271 }; 274 272 ··· 422 420 return rate * pdiv_best; 423 421 } 424 422 425 - static long cdce925_clk_round_rate(struct clk_hw *hw, unsigned long rate, 426 - unsigned long *parent_rate) 423 + static int cdce925_clk_determine_rate(struct clk_hw *hw, 424 + struct clk_rate_request *req) 427 425 { 428 - unsigned long l_parent_rate = *parent_rate; 429 - u16 divider = cdce925_calc_divider(rate, l_parent_rate); 426 + unsigned long l_parent_rate = req->best_parent_rate; 427 + u16 divider = cdce925_calc_divider(req->rate, l_parent_rate); 430 428 431 - if (l_parent_rate / divider != rate) { 432 - l_parent_rate = cdce925_clk_best_parent_rate(hw, rate); 433 - divider = cdce925_calc_divider(rate, l_parent_rate); 434 - *parent_rate = l_parent_rate; 429 + if (l_parent_rate / divider != req->rate) { 430 + l_parent_rate = cdce925_clk_best_parent_rate(hw, req->rate); 431 + divider = cdce925_calc_divider(req->rate, l_parent_rate); 432 + req->best_parent_rate = l_parent_rate; 435 433 } 436 434 437 435 if (divider) 438 - return (long)(l_parent_rate / 
divider); 436 + req->rate = (long)(l_parent_rate / divider); 437 + else 438 + req->rate = 0; 439 + 439 440 return 0; 440 441 } 441 442 ··· 456 451 .prepare = cdce925_clk_prepare, 457 452 .unprepare = cdce925_clk_unprepare, 458 453 .recalc_rate = cdce925_clk_recalc_rate, 459 - .round_rate = cdce925_clk_round_rate, 454 + .determine_rate = cdce925_clk_determine_rate, 460 455 .set_rate = cdce925_clk_set_rate, 461 456 }; 462 457 ··· 478 473 return (u16)divider; 479 474 } 480 475 481 - static long cdce925_clk_y1_round_rate(struct clk_hw *hw, unsigned long rate, 482 - unsigned long *parent_rate) 476 + static int cdce925_clk_y1_determine_rate(struct clk_hw *hw, 477 + struct clk_rate_request *req) 483 478 { 484 - unsigned long l_parent_rate = *parent_rate; 485 - u16 divider = cdce925_y1_calc_divider(rate, l_parent_rate); 479 + unsigned long l_parent_rate = req->best_parent_rate; 480 + u16 divider = cdce925_y1_calc_divider(req->rate, l_parent_rate); 486 481 487 482 if (divider) 488 - return (long)(l_parent_rate / divider); 483 + req->rate = (long)(l_parent_rate / divider); 484 + else 485 + req->rate = 0; 486 + 489 487 return 0; 490 488 } 491 489 ··· 506 498 .prepare = cdce925_clk_prepare, 507 499 .unprepare = cdce925_clk_unprepare, 508 500 .recalc_rate = cdce925_clk_recalc_rate, 509 - .round_rate = cdce925_clk_y1_round_rate, 501 + .determine_rate = cdce925_clk_y1_determine_rate, 510 502 .set_rate = cdce925_clk_y1_set_rate, 511 503 }; 512 504
+9 -5
drivers/clk/clk-cs2000-cp.c
··· 305 305 return cs2000_ratio_to_rate(ratio, parent_rate, priv->lf_ratio); 306 306 } 307 307 308 - static long cs2000_round_rate(struct clk_hw *hw, unsigned long rate, 309 - unsigned long *parent_rate) 308 + static int cs2000_determine_rate(struct clk_hw *hw, 309 + struct clk_rate_request *req) 310 310 { 311 311 struct cs2000_priv *priv = hw_to_priv(hw); 312 312 u32 ratio; 313 313 314 - ratio = cs2000_rate_to_ratio(*parent_rate, rate, priv->lf_ratio); 314 + ratio = cs2000_rate_to_ratio(req->best_parent_rate, req->rate, 315 + priv->lf_ratio); 315 316 316 - return cs2000_ratio_to_rate(ratio, *parent_rate, priv->lf_ratio); 317 + req->rate = cs2000_ratio_to_rate(ratio, req->best_parent_rate, 318 + priv->lf_ratio); 319 + 320 + return 0; 317 321 } 318 322 319 323 static int cs2000_select_ratio_mode(struct cs2000_priv *priv, ··· 434 430 static const struct clk_ops cs2000_ops = { 435 431 .get_parent = cs2000_get_parent, 436 432 .recalc_rate = cs2000_recalc_rate, 437 - .round_rate = cs2000_round_rate, 433 + .determine_rate = cs2000_determine_rate, 438 434 .set_rate = cs2000_set_rate, 439 435 .prepare = cs2000_enable, 440 436 .unprepare = cs2000_disable,
-23
drivers/clk/clk-divider.c
··· 431 431 } 432 432 EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent); 433 433 434 - static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, 435 - unsigned long *prate) 436 - { 437 - struct clk_divider *divider = to_clk_divider(hw); 438 - 439 - /* if read only, just return current value */ 440 - if (divider->flags & CLK_DIVIDER_READ_ONLY) { 441 - u32 val; 442 - 443 - val = clk_div_readl(divider) >> divider->shift; 444 - val &= clk_div_mask(divider->width); 445 - 446 - return divider_ro_round_rate(hw, rate, prate, divider->table, 447 - divider->width, divider->flags, 448 - val); 449 - } 450 - 451 - return divider_round_rate(hw, rate, prate, divider->table, 452 - divider->width, divider->flags); 453 - } 454 - 455 434 static int clk_divider_determine_rate(struct clk_hw *hw, 456 435 struct clk_rate_request *req) 457 436 { ··· 506 527 507 528 const struct clk_ops clk_divider_ops = { 508 529 .recalc_rate = clk_divider_recalc_rate, 509 - .round_rate = clk_divider_round_rate, 510 530 .determine_rate = clk_divider_determine_rate, 511 531 .set_rate = clk_divider_set_rate, 512 532 }; ··· 513 535 514 536 const struct clk_ops clk_divider_ro_ops = { 515 537 .recalc_rate = clk_divider_recalc_rate, 516 - .round_rate = clk_divider_round_rate, 517 538 .determine_rate = clk_divider_determine_rate, 518 539 }; 519 540 EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
+10 -8
drivers/clk/clk-ep93xx.c
··· 389 389 return DIV_ROUND_CLOSEST(parent_rate, clk->div[index]); 390 390 } 391 391 392 - static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate, 393 - unsigned long *parent_rate) 392 + static int ep93xx_div_determine_rate(struct clk_hw *hw, 393 + struct clk_rate_request *req) 394 394 { 395 395 struct ep93xx_clk *clk = ep93xx_clk_from(hw); 396 396 unsigned long best = 0, now; 397 397 unsigned int i; 398 398 399 399 for (i = 0; i < clk->num_div; i++) { 400 - if ((rate * clk->div[i]) == *parent_rate) 401 - return rate; 400 + if (req->rate * clk->div[i] == req->best_parent_rate) 401 + return 0; 402 402 403 - now = DIV_ROUND_CLOSEST(*parent_rate, clk->div[i]); 404 - if (!best || is_best(rate, now, best)) 403 + now = DIV_ROUND_CLOSEST(req->best_parent_rate, clk->div[i]); 404 + if (!best || is_best(req->rate, now, best)) 405 405 best = now; 406 406 } 407 407 408 - return best; 408 + req->rate = best; 409 + 410 + return 0; 409 411 } 410 412 411 413 static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate, ··· 439 437 .disable = ep93xx_clk_disable, 440 438 .is_enabled = ep93xx_clk_is_enabled, 441 439 .recalc_rate = ep93xx_div_recalc_rate, 442 - .round_rate = ep93xx_div_round_rate, 440 + .determine_rate = ep93xx_div_determine_rate, 443 441 .set_rate = ep93xx_div_set_rate, 444 442 }; 445 443
+9 -7
drivers/clk/clk-fixed-factor.c
··· 30 30 return (unsigned long)rate; 31 31 } 32 32 33 - static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate, 34 - unsigned long *prate) 33 + static int clk_factor_determine_rate(struct clk_hw *hw, 34 + struct clk_rate_request *req) 35 35 { 36 36 struct clk_fixed_factor *fix = to_clk_fixed_factor(hw); 37 37 38 38 if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { 39 39 unsigned long best_parent; 40 40 41 - best_parent = (rate / fix->mult) * fix->div; 42 - *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent); 41 + best_parent = (req->rate / fix->mult) * fix->div; 42 + req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent); 43 43 } 44 44 45 - return (*prate / fix->div) * fix->mult; 45 + req->rate = (req->best_parent_rate / fix->div) * fix->mult; 46 + 47 + return 0; 46 48 } 47 49 48 50 static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate, ··· 52 50 { 53 51 /* 54 52 * We must report success but we can do so unconditionally because 55 - * clk_factor_round_rate returns values that ensure this call is a 53 + * clk_factor_determine_rate returns values that ensure this call is a 56 54 * nop. 57 55 */ 58 56 ··· 71 69 } 72 70 73 71 const struct clk_ops clk_fixed_factor_ops = { 74 - .round_rate = clk_factor_round_rate, 72 + .determine_rate = clk_factor_determine_rate, 75 73 .set_rate = clk_factor_set_rate, 76 74 .recalc_rate = clk_factor_recalc_rate, 77 75 .recalc_accuracy = clk_factor_recalc_accuracy,
+16 -9
drivers/clk/clk-fractional-divider.c
··· 151 151 } 152 152 EXPORT_SYMBOL_GPL(clk_fractional_divider_general_approximation); 153 153 154 - static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate, 155 - unsigned long *parent_rate) 154 + static int clk_fd_determine_rate(struct clk_hw *hw, 155 + struct clk_rate_request *req) 156 156 { 157 157 struct clk_fractional_divider *fd = to_clk_fd(hw); 158 158 unsigned long m, n; 159 159 u64 ret; 160 160 161 - if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate)) 162 - return *parent_rate; 161 + if (!req->rate || (!clk_hw_can_set_rate_parent(hw) && req->rate >= req->best_parent_rate)) { 162 + req->rate = req->best_parent_rate; 163 + 164 + return 0; 165 + } 163 166 164 167 if (fd->approximation) 165 - fd->approximation(hw, rate, parent_rate, &m, &n); 168 + fd->approximation(hw, req->rate, &req->best_parent_rate, &m, &n); 166 169 else 167 - clk_fractional_divider_general_approximation(hw, rate, parent_rate, &m, &n); 170 + clk_fractional_divider_general_approximation(hw, req->rate, 171 + &req->best_parent_rate, 172 + &m, &n); 168 173 169 - ret = (u64)*parent_rate * m; 174 + ret = (u64)req->best_parent_rate * m; 170 175 do_div(ret, n); 171 176 172 - return ret; 177 + req->rate = ret; 178 + 179 + return 0; 173 180 } 174 181 175 182 static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate, ··· 257 250 258 251 const struct clk_ops clk_fractional_divider_ops = { 259 252 .recalc_rate = clk_fd_recalc_rate, 260 - .round_rate = clk_fd_round_rate, 253 + .determine_rate = clk_fd_determine_rate, 261 254 .set_rate = clk_fd_set_rate, 262 255 #ifdef CONFIG_DEBUG_FS 263 256 .debug_init = clk_fd_debug_init,
+9 -6
drivers/clk/clk-gemini.c
··· 126 126 return 33000000; 127 127 } 128 128 129 - static long gemini_pci_round_rate(struct clk_hw *hw, unsigned long rate, 130 - unsigned long *prate) 129 + static int gemini_pci_determine_rate(struct clk_hw *hw, 130 + struct clk_rate_request *req) 131 131 { 132 132 /* We support 33 and 66 MHz */ 133 - if (rate < 48000000) 134 - return 33000000; 135 - return 66000000; 133 + if (req->rate < 48000000) 134 + req->rate = 33000000; 135 + else 136 + req->rate = 66000000; 137 + 138 + return 0; 136 139 } 137 140 138 141 static int gemini_pci_set_rate(struct clk_hw *hw, unsigned long rate, ··· 182 179 183 180 static const struct clk_ops gemini_pci_clk_ops = { 184 181 .recalc_rate = gemini_pci_recalc_rate, 185 - .round_rate = gemini_pci_round_rate, 182 + .determine_rate = gemini_pci_determine_rate, 186 183 .set_rate = gemini_pci_set_rate, 187 184 .enable = gemini_pci_enable, 188 185 .disable = gemini_pci_disable,
+15 -11
drivers/clk/clk-highbank.c
··· 130 130 *pdivf = divf; 131 131 } 132 132 133 - static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate, 134 - unsigned long *parent_rate) 133 + static int clk_pll_determine_rate(struct clk_hw *hw, 134 + struct clk_rate_request *req) 135 135 { 136 136 u32 divq, divf; 137 - unsigned long ref_freq = *parent_rate; 137 + unsigned long ref_freq = req->best_parent_rate; 138 138 139 - clk_pll_calc(rate, ref_freq, &divq, &divf); 139 + clk_pll_calc(req->rate, ref_freq, &divq, &divf); 140 140 141 - return (ref_freq * (divf + 1)) / (1 << divq); 141 + req->rate = (ref_freq * (divf + 1)) / (1 << divq); 142 + 143 + return 0; 142 144 } 143 145 144 146 static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate, ··· 187 185 .enable = clk_pll_enable, 188 186 .disable = clk_pll_disable, 189 187 .recalc_rate = clk_pll_recalc_rate, 190 - .round_rate = clk_pll_round_rate, 188 + .determine_rate = clk_pll_determine_rate, 191 189 .set_rate = clk_pll_set_rate, 192 190 }; 193 191 ··· 229 227 return parent_rate / div; 230 228 } 231 229 232 - static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate, 233 - unsigned long *parent_rate) 230 + static int clk_periclk_determine_rate(struct clk_hw *hw, 231 + struct clk_rate_request *req) 234 232 { 235 233 u32 div; 236 234 237 - div = *parent_rate / rate; 235 + div = req->best_parent_rate / req->rate; 238 236 div++; 239 237 div &= ~0x1; 240 238 241 - return *parent_rate / div; 239 + req->rate = req->best_parent_rate / div; 240 + 241 + return 0; 242 242 } 243 243 244 244 static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate, ··· 259 255 260 256 static const struct clk_ops periclk_ops = { 261 257 .recalc_rate = clk_periclk_recalc_rate, 262 - .round_rate = clk_periclk_round_rate, 258 + .determine_rate = clk_periclk_determine_rate, 263 259 .set_rate = clk_periclk_set_rate, 264 260 }; 265 261
+7 -5
drivers/clk/clk-hsdk-pll.c
··· 197 197 return rate; 198 198 } 199 199 200 - static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate, 201 - unsigned long *prate) 200 + static int hsdk_pll_determine_rate(struct clk_hw *hw, 201 + struct clk_rate_request *req) 202 202 { 203 203 int i; 204 204 unsigned long best_rate; ··· 211 211 best_rate = pll_cfg[0].rate; 212 212 213 213 for (i = 1; pll_cfg[i].rate != 0; i++) { 214 - if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate)) 214 + if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate)) 215 215 best_rate = pll_cfg[i].rate; 216 216 } 217 217 218 218 dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate); 219 219 220 - return best_rate; 220 + req->rate = best_rate; 221 + 222 + return 0; 221 223 } 222 224 223 225 static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk, ··· 298 296 299 297 static const struct clk_ops hsdk_pll_ops = { 300 298 .recalc_rate = hsdk_pll_recalc_rate, 301 - .round_rate = hsdk_pll_round_rate, 299 + .determine_rate = hsdk_pll_determine_rate, 302 300 .set_rate = hsdk_pll_set_rate, 303 301 }; 304 302
+31 -22
drivers/clk/clk-lmk04832.c
··· 491 491 return DIV_ROUND_CLOSEST(prate * 2 * pll2_p * pll2_n, pll2_r); 492 492 } 493 493 494 - static long lmk04832_vco_round_rate(struct clk_hw *hw, unsigned long rate, 495 - unsigned long *prate) 494 + static int lmk04832_vco_determine_rate(struct clk_hw *hw, 495 + struct clk_rate_request *req) 496 496 { 497 497 struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco); 498 498 unsigned int n, p, r; 499 499 long vco_rate; 500 500 int ret; 501 501 502 - ret = lmk04832_check_vco_ranges(lmk, rate); 502 + ret = lmk04832_check_vco_ranges(lmk, req->rate); 503 503 if (ret < 0) 504 504 return ret; 505 505 506 - vco_rate = lmk04832_calc_pll2_params(*prate, rate, &n, &p, &r); 506 + vco_rate = lmk04832_calc_pll2_params(req->best_parent_rate, req->rate, 507 + &n, &p, &r); 507 508 if (vco_rate < 0) { 508 509 dev_err(lmk->dev, "PLL2 parameters out of range\n"); 509 - return vco_rate; 510 + req->rate = vco_rate; 511 + 512 + return 0; 510 513 } 511 514 512 - if (rate != vco_rate) 515 + if (req->rate != vco_rate) 513 516 return -EINVAL; 514 517 515 - return vco_rate; 518 + req->rate = vco_rate; 519 + 520 + return 0; 516 521 } 517 522 518 523 static int lmk04832_vco_set_rate(struct clk_hw *hw, unsigned long rate, ··· 584 579 .prepare = lmk04832_vco_prepare, 585 580 .unprepare = lmk04832_vco_unprepare, 586 581 .recalc_rate = lmk04832_vco_recalc_rate, 587 - .round_rate = lmk04832_vco_round_rate, 582 + .determine_rate = lmk04832_vco_determine_rate, 588 583 .set_rate = lmk04832_vco_set_rate, 589 584 }; 590 585 ··· 893 888 return DIV_ROUND_CLOSEST(prate, sysref_div); 894 889 } 895 890 896 - static long lmk04832_sclk_round_rate(struct clk_hw *hw, unsigned long rate, 897 - unsigned long *prate) 891 + static int lmk04832_sclk_determine_rate(struct clk_hw *hw, 892 + struct clk_rate_request *req) 898 893 { 899 894 struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk); 900 895 unsigned long sclk_rate; 901 896 unsigned int sysref_div; 902 897 903 - sysref_div = 
DIV_ROUND_CLOSEST(*prate, rate); 904 - sclk_rate = DIV_ROUND_CLOSEST(*prate, sysref_div); 898 + sysref_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate); 899 + sclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, sysref_div); 905 900 906 901 if (sysref_div < 0x07 || sysref_div > 0x1fff) { 907 902 dev_err(lmk->dev, "SYSREF divider out of range\n"); 908 903 return -EINVAL; 909 904 } 910 905 911 - if (rate != sclk_rate) 906 + if (req->rate != sclk_rate) 912 907 return -EINVAL; 913 908 914 - return sclk_rate; 909 + req->rate = sclk_rate; 910 + 911 + return 0; 915 912 } 916 913 917 914 static int lmk04832_sclk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 952 945 .prepare = lmk04832_sclk_prepare, 953 946 .unprepare = lmk04832_sclk_unprepare, 954 947 .recalc_rate = lmk04832_sclk_recalc_rate, 955 - .round_rate = lmk04832_sclk_round_rate, 948 + .determine_rate = lmk04832_sclk_determine_rate, 956 949 .set_rate = lmk04832_sclk_set_rate, 957 950 }; 958 951 ··· 1076 1069 return rate; 1077 1070 } 1078 1071 1079 - static long lmk04832_dclk_round_rate(struct clk_hw *hw, unsigned long rate, 1080 - unsigned long *prate) 1072 + static int lmk04832_dclk_determine_rate(struct clk_hw *hw, 1073 + struct clk_rate_request *req) 1081 1074 { 1082 1075 struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw); 1083 1076 struct lmk04832 *lmk = dclk->lmk; 1084 1077 unsigned long dclk_rate; 1085 1078 unsigned int dclk_div; 1086 1079 1087 - dclk_div = DIV_ROUND_CLOSEST(*prate, rate); 1088 - dclk_rate = DIV_ROUND_CLOSEST(*prate, dclk_div); 1080 + dclk_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate); 1081 + dclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, dclk_div); 1089 1082 1090 1083 if (dclk_div < 1 || dclk_div > 0x3ff) { 1091 1084 dev_err(lmk->dev, "%s_div out of range\n", clk_hw_get_name(hw)); 1092 1085 return -EINVAL; 1093 1086 } 1094 1087 1095 - if (rate != dclk_rate) 1088 + if (req->rate != dclk_rate) 1096 1089 return -EINVAL; 1097 1090 1098 - return 
dclk_rate; 1091 + req->rate = dclk_rate; 1092 + 1093 + return 0; 1099 1094 } 1100 1095 1101 1096 static int lmk04832_dclk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 1167 1158 .prepare = lmk04832_dclk_prepare, 1168 1159 .unprepare = lmk04832_dclk_unprepare, 1169 1160 .recalc_rate = lmk04832_dclk_recalc_rate, 1170 - .round_rate = lmk04832_dclk_round_rate, 1161 + .determine_rate = lmk04832_dclk_determine_rate, 1171 1162 .set_rate = lmk04832_dclk_set_rate, 1172 1163 }; 1173 1164
+7 -5
drivers/clk/clk-loongson1.c
··· 93 93 d->flags, d->width); 94 94 } 95 95 96 - static long ls1x_divider_round_rate(struct clk_hw *hw, unsigned long rate, 97 - unsigned long *prate) 96 + static int ls1x_divider_determine_rate(struct clk_hw *hw, 97 + struct clk_rate_request *req) 98 98 { 99 99 struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw); 100 100 const struct ls1x_clk_div_data *d = ls1x_clk->data; 101 101 102 - return divider_round_rate(hw, rate, prate, d->table, 103 - d->width, d->flags); 102 + req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, 103 + d->table, d->width, d->flags); 104 + 105 + return 0; 104 106 } 105 107 106 108 static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate, ··· 148 146 149 147 static const struct clk_ops ls1x_clk_divider_ops = { 150 148 .recalc_rate = ls1x_divider_recalc_rate, 151 - .round_rate = ls1x_divider_round_rate, 149 + .determine_rate = ls1x_divider_determine_rate, 152 150 .set_rate = ls1x_divider_set_rate, 153 151 }; 154 152
+17 -10
drivers/clk/clk-max9485.c
··· 159 159 return 0; 160 160 } 161 161 162 - static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate, 163 - unsigned long *parent_rate) 162 + static int max9485_clkout_determine_rate(struct clk_hw *hw, 163 + struct clk_rate_request *req) 164 164 { 165 165 const struct max9485_rate *curr, *prev = NULL; 166 166 167 167 for (curr = max9485_rates; curr->out != 0; curr++) { 168 168 /* Exact matches */ 169 - if (curr->out == rate) 170 - return rate; 169 + if (curr->out == req->rate) 170 + return 0; 171 171 172 172 /* 173 173 * Find the first entry that has a frequency higher than the 174 174 * requested one. 175 175 */ 176 - if (curr->out > rate) { 176 + if (curr->out > req->rate) { 177 177 unsigned int mid; 178 178 179 179 /* 180 180 * If this is the first entry, clamp the value to the 181 181 * lowest possible frequency. 182 182 */ 183 - if (!prev) 184 - return curr->out; 183 + if (!prev) { 184 + req->rate = curr->out; 185 + 186 + return 0; 187 + } 185 188 186 189 /* 187 190 * Otherwise, determine whether the previous entry or ··· 192 189 */ 193 190 mid = prev->out + ((curr->out - prev->out) / 2); 194 191 195 - return (mid > rate) ? prev->out : curr->out; 192 + req->rate = mid > req->rate ? prev->out : curr->out; 193 + 194 + return 0; 196 195 } 197 196 198 197 prev = curr; 199 198 } 200 199 201 200 /* If the last entry was still too high, clamp the value */ 202 - return prev->out; 201 + req->rate = prev->out; 202 + 203 + return 0; 203 204 } 204 205 205 206 struct max9485_clk { ··· 228 221 .parent_index = -1, 229 222 .ops = { 230 223 .set_rate = max9485_clkout_set_rate, 231 - .round_rate = max9485_clkout_round_rate, 224 + .determine_rate = max9485_clkout_determine_rate, 232 225 .recalc_rate = max9485_clkout_recalc_rate, 233 226 }, 234 227 },
+14 -8
drivers/clk/clk-milbeaut.c
··· 386 386 divider->flags, divider->width); 387 387 } 388 388 389 - static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, 390 - unsigned long *prate) 389 + static int m10v_clk_divider_determine_rate(struct clk_hw *hw, 390 + struct clk_rate_request *req) 391 391 { 392 392 struct m10v_clk_divider *divider = to_m10v_div(hw); 393 393 ··· 398 398 val = readl(divider->reg) >> divider->shift; 399 399 val &= clk_div_mask(divider->width); 400 400 401 - return divider_ro_round_rate(hw, rate, prate, divider->table, 402 - divider->width, divider->flags, 403 - val); 401 + req->rate = divider_ro_round_rate(hw, req->rate, 402 + &req->best_parent_rate, 403 + divider->table, 404 + divider->width, 405 + divider->flags, val); 406 + 407 + return 0; 404 408 } 405 409 406 - return divider_round_rate(hw, rate, prate, divider->table, 407 - divider->width, divider->flags); 410 + req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, 411 + divider->table, divider->width, divider->flags); 412 + 413 + return 0; 408 414 } 409 415 410 416 static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, ··· 456 450 457 451 static const struct clk_ops m10v_clk_divider_ops = { 458 452 .recalc_rate = m10v_clk_divider_recalc_rate, 459 - .round_rate = m10v_clk_divider_round_rate, 453 + .determine_rate = m10v_clk_divider_determine_rate, 460 454 .set_rate = m10v_clk_divider_set_rate, 461 455 }; 462 456
+7 -5
drivers/clk/clk-multiplier.c
··· 112 112 return bestmult; 113 113 } 114 114 115 - static long clk_multiplier_round_rate(struct clk_hw *hw, unsigned long rate, 116 - unsigned long *parent_rate) 115 + static int clk_multiplier_determine_rate(struct clk_hw *hw, 116 + struct clk_rate_request *req) 117 117 { 118 118 struct clk_multiplier *mult = to_clk_multiplier(hw); 119 - unsigned long factor = __bestmult(hw, rate, parent_rate, 119 + unsigned long factor = __bestmult(hw, req->rate, &req->best_parent_rate, 120 120 mult->width, mult->flags); 121 121 122 - return *parent_rate * factor; 122 + req->rate = req->best_parent_rate * factor; 123 + 124 + return 0; 123 125 } 124 126 125 127 static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate, ··· 152 150 153 151 const struct clk_ops clk_multiplier_ops = { 154 152 .recalc_rate = clk_multiplier_recalc_rate, 155 - .round_rate = clk_multiplier_round_rate, 153 + .determine_rate = clk_multiplier_determine_rate, 156 154 .set_rate = clk_multiplier_set_rate, 157 155 }; 158 156 EXPORT_SYMBOL_GPL(clk_multiplier_ops);
+16 -19
drivers/clk/clk-scmi.c
··· 54 54 return rate; 55 55 } 56 56 57 - static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate, 58 - unsigned long *parent_rate) 57 + static int scmi_clk_determine_rate(struct clk_hw *hw, 58 + struct clk_rate_request *req) 59 59 { 60 60 u64 fmin, fmax, ftmp; 61 61 struct scmi_clk *clk = to_scmi_clk(hw); ··· 67 67 * running at then. 68 68 */ 69 69 if (clk->info->rate_discrete) 70 - return rate; 70 + return 0; 71 71 72 72 fmin = clk->info->range.min_rate; 73 73 fmax = clk->info->range.max_rate; 74 - if (rate <= fmin) 75 - return fmin; 76 - else if (rate >= fmax) 77 - return fmax; 74 + if (req->rate <= fmin) { 75 + req->rate = fmin; 78 76 79 - ftmp = rate - fmin; 77 + return 0; 78 + } else if (req->rate >= fmax) { 79 + req->rate = fmax; 80 + 81 + return 0; 82 + } 83 + 84 + ftmp = req->rate - fmin; 80 85 ftmp += clk->info->range.step_size - 1; /* to round up */ 81 86 do_div(ftmp, clk->info->range.step_size); 82 87 83 - return ftmp * clk->info->range.step_size + fmin; 88 + req->rate = ftmp * clk->info->range.step_size + fmin; 89 + 90 + return 0; 84 91 } 85 92 86 93 static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 124 117 return 0; 125 118 126 119 return p_idx; 127 - } 128 - 129 - static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 130 - { 131 - /* 132 - * Suppose all the requested rates are supported, and let firmware 133 - * to handle the left work. 134 - */ 135 - return 0; 136 120 } 137 121 138 122 static int scmi_clk_enable(struct clk_hw *hw) ··· 298 300 299 301 /* Rate ops */ 300 302 ops->recalc_rate = scmi_clk_recalc_rate; 301 - ops->round_rate = scmi_clk_round_rate; 302 303 ops->determine_rate = scmi_clk_determine_rate; 303 304 if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED)) 304 305 ops->set_rate = scmi_clk_set_rate;
+10 -8
drivers/clk/clk-scpi.c
··· 32 32 return clk->scpi_ops->clk_get_val(clk->id); 33 33 } 34 34 35 - static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate, 36 - unsigned long *parent_rate) 35 + static int scpi_clk_determine_rate(struct clk_hw *hw, 36 + struct clk_rate_request *req) 37 37 { 38 38 /* 39 39 * We can't figure out what rate it will be, so just return the ··· 41 41 * after the rate is set and we'll know what rate the clock is 42 42 * running at then. 43 43 */ 44 - return rate; 44 + return 0; 45 45 } 46 46 47 47 static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 54 54 55 55 static const struct clk_ops scpi_clk_ops = { 56 56 .recalc_rate = scpi_clk_recalc_rate, 57 - .round_rate = scpi_clk_round_rate, 57 + .determine_rate = scpi_clk_determine_rate, 58 58 .set_rate = scpi_clk_set_rate, 59 59 }; 60 60 ··· 92 92 return opp->freq; 93 93 } 94 94 95 - static long scpi_dvfs_round_rate(struct clk_hw *hw, unsigned long rate, 96 - unsigned long *parent_rate) 95 + static int scpi_dvfs_determine_rate(struct clk_hw *hw, 96 + struct clk_rate_request *req) 97 97 { 98 98 struct scpi_clk *clk = to_scpi_clk(hw); 99 99 100 - return __scpi_dvfs_round_rate(clk, rate); 100 + req->rate = __scpi_dvfs_round_rate(clk, req->rate); 101 + 102 + return 0; 101 103 } 102 104 103 105 static int __scpi_find_dvfs_index(struct scpi_clk *clk, unsigned long rate) ··· 126 124 127 125 static const struct clk_ops scpi_dvfs_ops = { 128 126 .recalc_rate = scpi_dvfs_recalc_rate, 129 - .round_rate = scpi_dvfs_round_rate, 127 + .determine_rate = scpi_dvfs_determine_rate, 130 128 .set_rate = scpi_dvfs_set_rate, 131 129 }; 132 130
+16 -8
drivers/clk/clk-si514.c
··· 227 227 return si514_calc_rate(&settings); 228 228 } 229 229 230 - static long si514_round_rate(struct clk_hw *hw, unsigned long rate, 231 - unsigned long *parent_rate) 230 + static int si514_determine_rate(struct clk_hw *hw, 231 + struct clk_rate_request *req) 232 232 { 233 233 struct clk_si514_muldiv settings; 234 234 int err; 235 235 236 - if (!rate) 236 + if (!req->rate) { 237 + req->rate = 0; 238 + 237 239 return 0; 240 + } 238 241 239 - err = si514_calc_muldiv(&settings, rate); 240 - if (err) 241 - return err; 242 + err = si514_calc_muldiv(&settings, req->rate); 243 + if (err) { 244 + req->rate = err; 242 245 243 - return si514_calc_rate(&settings); 246 + return 0; 247 + } 248 + 249 + req->rate = si514_calc_rate(&settings); 250 + 251 + return 0; 244 252 } 245 253 246 254 /* ··· 297 289 .unprepare = si514_unprepare, 298 290 .is_prepared = si514_is_prepared, 299 291 .recalc_rate = si514_recalc_rate, 300 - .round_rate = si514_round_rate, 292 + .determine_rate = si514_determine_rate, 301 293 .set_rate = si514_set_rate, 302 294 }; 303 295
+8 -6
drivers/clk/clk-si521xx.c
··· 164 164 return (unsigned long)rate; 165 165 } 166 166 167 - static long si521xx_diff_round_rate(struct clk_hw *hw, unsigned long rate, 168 - unsigned long *prate) 167 + static int si521xx_diff_determine_rate(struct clk_hw *hw, 168 + struct clk_rate_request *req) 169 169 { 170 170 unsigned long best_parent; 171 171 172 - best_parent = (rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV; 173 - *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent); 172 + best_parent = (req->rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV; 173 + req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent); 174 174 175 - return (*prate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT; 175 + req->rate = (req->best_parent_rate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT; 176 + 177 + return 0; 176 178 } 177 179 178 180 static int si521xx_diff_set_rate(struct clk_hw *hw, unsigned long rate, ··· 210 208 } 211 209 212 210 static const struct clk_ops si521xx_diff_clk_ops = { 213 - .round_rate = si521xx_diff_round_rate, 211 + .determine_rate = si521xx_diff_determine_rate, 214 212 .set_rate = si521xx_diff_set_rate, 215 213 .recalc_rate = si521xx_diff_recalc_rate, 216 214 .prepare = si521xx_diff_prepare,
+14 -8
drivers/clk/clk-si5341.c
··· 663 663 return f; 664 664 } 665 665 666 - static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate, 667 - unsigned long *parent_rate) 666 + static int si5341_synth_clk_determine_rate(struct clk_hw *hw, 667 + struct clk_rate_request *req) 668 668 { 669 669 struct clk_si5341_synth *synth = to_clk_si5341_synth(hw); 670 670 u64 f; ··· 672 672 /* The synthesizer accuracy is such that anything in range will work */ 673 673 f = synth->data->freq_vco; 674 674 do_div(f, SI5341_SYNTH_N_MAX); 675 - if (rate < f) 676 - return f; 675 + if (req->rate < f) { 676 + req->rate = f; 677 + 678 + return 0; 679 + } 677 680 678 681 f = synth->data->freq_vco; 679 682 do_div(f, SI5341_SYNTH_N_MIN); 680 - if (rate > f) 681 - return f; 683 + if (req->rate > f) { 684 + req->rate = f; 682 685 683 - return rate; 686 + return 0; 687 + } 688 + 689 + return 0; 684 690 } 685 691 686 692 static int si5341_synth_program(struct clk_si5341_synth *synth, ··· 747 741 .prepare = si5341_synth_clk_prepare, 748 742 .unprepare = si5341_synth_clk_unprepare, 749 743 .recalc_rate = si5341_synth_clk_recalc_rate, 750 - .round_rate = si5341_synth_clk_round_rate, 744 + .determine_rate = si5341_synth_clk_determine_rate, 751 745 .set_rate = si5341_synth_clk_set_rate, 752 746 }; 753 747
+5 -5
drivers/clk/clk-si544.c
··· 307 307 return si544_calc_rate(&settings); 308 308 } 309 309 310 - static long si544_round_rate(struct clk_hw *hw, unsigned long rate, 311 - unsigned long *parent_rate) 310 + static int si544_determine_rate(struct clk_hw *hw, 311 + struct clk_rate_request *req) 312 312 { 313 313 struct clk_si544 *data = to_clk_si544(hw); 314 314 315 - if (!is_valid_frequency(data, rate)) 315 + if (!is_valid_frequency(data, req->rate)) 316 316 return -EINVAL; 317 317 318 318 /* The accuracy is less than 1 Hz, so any rate is possible */ 319 - return rate; 319 + return 0; 320 320 } 321 321 322 322 /* Calculates the maximum "small" change, 950 * rate / 1000000 */ ··· 408 408 .unprepare = si544_unprepare, 409 409 .is_prepared = si544_is_prepared, 410 410 .recalc_rate = si544_recalc_rate, 411 - .round_rate = si544_round_rate, 411 + .determine_rate = si544_determine_rate, 412 412 .set_rate = si544_set_rate, 413 413 }; 414 414
+16 -10
drivers/clk/clk-si570.c
··· 246 246 return rate; 247 247 } 248 248 249 - static long si570_round_rate(struct clk_hw *hw, unsigned long rate, 250 - unsigned long *parent_rate) 249 + static int si570_determine_rate(struct clk_hw *hw, 250 + struct clk_rate_request *req) 251 251 { 252 252 int err; 253 253 u64 rfreq; 254 254 unsigned int n1, hs_div; 255 255 struct clk_si570 *data = to_clk_si570(hw); 256 256 257 - if (!rate) 258 - return 0; 257 + if (!req->rate) { 258 + req->rate = 0; 259 259 260 - if (div64_u64(abs(rate - data->frequency) * 10000LL, 260 + return 0; 261 + } 262 + 263 + if (div64_u64(abs(req->rate - data->frequency) * 10000LL, 261 264 data->frequency) < 35) { 262 - rfreq = div64_u64((data->rfreq * rate) + 263 - div64_u64(data->frequency, 2), data->frequency); 265 + rfreq = div64_u64((data->rfreq * req->rate) + 266 + div64_u64(data->frequency, 2), 267 + data->frequency); 264 268 n1 = data->n1; 265 269 hs_div = data->hs_div; 266 270 267 271 } else { 268 - err = si570_calc_divs(rate, data, &rfreq, &n1, &hs_div); 272 + err = si570_calc_divs(req->rate, data, &rfreq, &n1, &hs_div); 269 273 if (err) { 270 274 dev_err(&data->i2c_client->dev, 271 275 "unable to round rate\n"); 276 + req->rate = 0; 277 + 272 278 return 0; 273 279 } 274 280 } 275 281 276 - return rate; 282 + return 0; 277 283 } 278 284 279 285 /** ··· 374 368 375 369 static const struct clk_ops si570_clk_ops = { 376 370 .recalc_rate = si570_recalc_rate, 377 - .round_rate = si570_round_rate, 371 + .determine_rate = si570_determine_rate, 378 372 .set_rate = si570_set_rate, 379 373 }; 380 374
+12 -10
drivers/clk/clk-sp7021.c
··· 412 412 return fbdiv; 413 413 } 414 414 415 - static long sp_pll_round_rate(struct clk_hw *hw, unsigned long rate, 416 - unsigned long *prate) 415 + static int sp_pll_determine_rate(struct clk_hw *hw, 416 + struct clk_rate_request *req) 417 417 { 418 418 struct sp_pll *clk = to_sp_pll(hw); 419 419 long ret; 420 420 421 - if (rate == *prate) { 422 - ret = *prate; /* bypass */ 421 + if (req->rate == req->best_parent_rate) { 422 + ret = req->best_parent_rate; /* bypass */ 423 423 } else if (clk->div_width == DIV_A) { 424 - ret = plla_round_rate(clk, rate); 424 + ret = plla_round_rate(clk, req->rate); 425 425 } else if (clk->div_width == DIV_TV) { 426 - ret = plltv_div(clk, rate); 426 + ret = plltv_div(clk, req->rate); 427 427 if (ret < 0) 428 - ret = *prate; 428 + ret = req->best_parent_rate; 429 429 } else { 430 - ret = sp_pll_calc_div(clk, rate) * clk->brate; 430 + ret = sp_pll_calc_div(clk, req->rate) * clk->brate; 431 431 } 432 432 433 - return ret; 433 + req->rate = ret; 434 + 435 + return 0; 434 436 } 435 437 436 438 static unsigned long sp_pll_recalc_rate(struct clk_hw *hw, ··· 537 535 .enable = sp_pll_enable, 538 536 .disable = sp_pll_disable, 539 537 .is_enabled = sp_pll_is_enabled, 540 - .round_rate = sp_pll_round_rate, 538 + .determine_rate = sp_pll_determine_rate, 541 539 .recalc_rate = sp_pll_recalc_rate, 542 540 .set_rate = sp_pll_set_rate 543 541 };
+6 -4
drivers/clk/clk-sparx5.c
··· 213 213 return conf.freq; 214 214 } 215 215 216 - static long s5_pll_round_rate(struct clk_hw *hw, unsigned long rate, 217 - unsigned long *parent_rate) 216 + static int s5_pll_determine_rate(struct clk_hw *hw, 217 + struct clk_rate_request *req) 218 218 { 219 219 struct s5_pll_conf conf; 220 220 221 - return s5_calc_params(rate, *parent_rate, &conf); 221 + req->rate = s5_calc_params(req->rate, req->best_parent_rate, &conf); 222 + 223 + return 0; 222 224 } 223 225 224 226 static const struct clk_ops s5_pll_ops = { 225 227 .enable = s5_pll_enable, 226 228 .disable = s5_pll_disable, 227 229 .set_rate = s5_pll_set_rate, 228 - .round_rate = s5_pll_round_rate, 230 + .determine_rate = s5_pll_determine_rate, 229 231 .recalc_rate = s5_pll_recalc_rate, 230 232 }; 231 233
+15 -11
drivers/clk/clk-stm32f4.c
··· 443 443 return parent_rate; 444 444 } 445 445 446 - static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate, 447 - unsigned long *prate) 446 + static int clk_apb_mul_determine_rate(struct clk_hw *hw, 447 + struct clk_rate_request *req) 448 448 { 449 449 struct clk_apb_mul *am = to_clk_apb_mul(hw); 450 450 unsigned long mult = 1; ··· 453 453 mult = 2; 454 454 455 455 if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { 456 - unsigned long best_parent = rate / mult; 456 + unsigned long best_parent = req->rate / mult; 457 457 458 - *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent); 458 + req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent); 459 459 } 460 460 461 - return *prate * mult; 461 + req->rate = req->best_parent_rate * mult; 462 + 463 + return 0; 462 464 } 463 465 464 466 static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate, ··· 476 474 } 477 475 478 476 static const struct clk_ops clk_apb_mul_factor_ops = { 479 - .round_rate = clk_apb_mul_round_rate, 477 + .determine_rate = clk_apb_mul_determine_rate, 480 478 .set_rate = clk_apb_mul_set_rate, 481 479 .recalc_rate = clk_apb_mul_recalc_rate, 482 480 }; ··· 672 670 return parent_rate * n; 673 671 } 674 672 675 - static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate, 676 - unsigned long *prate) 673 + static int stm32f4_pll_determine_rate(struct clk_hw *hw, 674 + struct clk_rate_request *req) 677 675 { 678 676 struct clk_gate *gate = to_clk_gate(hw); 679 677 struct stm32f4_pll *pll = to_stm32f4_pll(gate); 680 678 unsigned long n; 681 679 682 - n = rate / *prate; 680 + n = req->rate / req->best_parent_rate; 683 681 684 682 if (n < pll->n_start) 685 683 n = pll->n_start; 686 684 else if (n > 432) 687 685 n = 432; 688 686 689 - return *prate * n; 687 + req->rate = req->best_parent_rate * n; 688 + 689 + return 0; 690 690 } 691 691 692 692 static void stm32f4_pll_set_ssc(struct clk_hw *hw, unsigned long parent_rate, ··· 753 749 
.disable = stm32f4_pll_disable, 754 750 .is_enabled = stm32f4_pll_is_enabled, 755 751 .recalc_rate = stm32f4_pll_recalc, 756 - .round_rate = stm32f4_pll_round_rate, 752 + .determine_rate = stm32f4_pll_determine_rate, 757 753 .set_rate = stm32f4_pll_set_rate, 758 754 }; 759 755
+7 -5
drivers/clk/clk-tps68470.c
··· 146 146 return best_idx; 147 147 } 148 148 149 - static long tps68470_clk_round_rate(struct clk_hw *hw, unsigned long rate, 150 - unsigned long *parent_rate) 149 + static int tps68470_clk_determine_rate(struct clk_hw *hw, 150 + struct clk_rate_request *req) 151 151 { 152 - unsigned int idx = tps68470_clk_cfg_lookup(rate); 152 + unsigned int idx = tps68470_clk_cfg_lookup(req->rate); 153 153 154 - return clk_freqs[idx].freq; 154 + req->rate = clk_freqs[idx].freq; 155 + 156 + return 0; 155 157 } 156 158 157 159 static int tps68470_clk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 188 186 .prepare = tps68470_clk_prepare, 189 187 .unprepare = tps68470_clk_unprepare, 190 188 .recalc_rate = tps68470_clk_recalc_rate, 191 - .round_rate = tps68470_clk_round_rate, 189 + .determine_rate = tps68470_clk_determine_rate, 192 190 .set_rate = tps68470_clk_set_rate, 193 191 }; 194 192
+41 -29
drivers/clk/clk-versaclock3.c
··· 289 289 return rate; 290 290 } 291 291 292 - static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate, 293 - unsigned long *parent_rate) 292 + static int vc3_pfd_determine_rate(struct clk_hw *hw, 293 + struct clk_rate_request *req) 294 294 { 295 295 struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw); 296 296 const struct vc3_pfd_data *pfd = vc3->data; 297 297 unsigned long idiv; 298 298 299 299 /* PLL cannot operate with input clock above 50 MHz. */ 300 - if (rate > 50000000) 300 + if (req->rate > 50000000) 301 301 return -EINVAL; 302 302 303 303 /* CLKIN within range of PLL input, feed directly to PLL. */ 304 - if (*parent_rate <= 50000000) 305 - return *parent_rate; 304 + if (req->best_parent_rate <= 50000000) { 305 + req->rate = req->best_parent_rate; 306 306 307 - idiv = DIV_ROUND_UP(*parent_rate, rate); 307 + return 0; 308 + } 309 + 310 + idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate); 308 311 if (pfd->num == VC3_PFD1 || pfd->num == VC3_PFD3) { 309 312 if (idiv > 63) 310 313 return -EINVAL; ··· 316 313 return -EINVAL; 317 314 } 318 315 319 - return *parent_rate / idiv; 316 + req->rate = req->best_parent_rate / idiv; 317 + 318 + return 0; 320 319 } 321 320 322 321 static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate, ··· 359 354 360 355 static const struct clk_ops vc3_pfd_ops = { 361 356 .recalc_rate = vc3_pfd_recalc_rate, 362 - .round_rate = vc3_pfd_round_rate, 357 + .determine_rate = vc3_pfd_determine_rate, 363 358 .set_rate = vc3_pfd_set_rate, 364 359 }; 365 360 ··· 390 385 return rate; 391 386 } 392 387 393 - static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate, 394 - unsigned long *parent_rate) 388 + static int vc3_pll_determine_rate(struct clk_hw *hw, 389 + struct clk_rate_request *req) 395 390 { 396 391 struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw); 397 392 const struct vc3_pll_data *pll = vc3->data; 398 393 u64 div_frc; 399 394 400 - if (rate < pll->vco.min) 401 - 
rate = pll->vco.min; 402 - if (rate > pll->vco.max) 403 - rate = pll->vco.max; 395 + if (req->rate < pll->vco.min) 396 + req->rate = pll->vco.min; 397 + if (req->rate > pll->vco.max) 398 + req->rate = pll->vco.max; 404 399 405 - vc3->div_int = rate / *parent_rate; 400 + vc3->div_int = req->rate / req->best_parent_rate; 406 401 407 402 if (pll->num == VC3_PLL2) { 408 403 if (vc3->div_int > 0x7ff) 409 - rate = *parent_rate * 0x7ff; 404 + req->rate = req->best_parent_rate * 0x7ff; 410 405 411 406 /* Determine best fractional part, which is 16 bit wide */ 412 - div_frc = rate % *parent_rate; 407 + div_frc = req->rate % req->best_parent_rate; 413 408 div_frc *= BIT(16) - 1; 414 409 415 - vc3->div_frc = min_t(u64, div64_ul(div_frc, *parent_rate), U16_MAX); 416 - rate = (*parent_rate * 417 - (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16); 410 + vc3->div_frc = min_t(u64, 411 + div64_ul(div_frc, req->best_parent_rate), 412 + U16_MAX); 413 + req->rate = (req->best_parent_rate * 414 + (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16); 418 415 } else { 419 - rate = *parent_rate * vc3->div_int; 416 + req->rate = req->best_parent_rate * vc3->div_int; 420 417 } 421 418 422 - return rate; 419 + return 0; 423 420 } 424 421 425 422 static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate, ··· 448 441 449 442 static const struct clk_ops vc3_pll_ops = { 450 443 .recalc_rate = vc3_pll_recalc_rate, 451 - .round_rate = vc3_pll_round_rate, 444 + .determine_rate = vc3_pll_determine_rate, 452 445 .set_rate = vc3_pll_set_rate, 453 446 }; 454 447 ··· 505 498 div_data->flags, div_data->width); 506 499 } 507 500 508 - static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate, 509 - unsigned long *parent_rate) 501 + static int vc3_div_determine_rate(struct clk_hw *hw, 502 + struct clk_rate_request *req) 510 503 { 511 504 struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw); 512 505 const struct vc3_div_data *div_data = vc3->data; ··· 518 
511 bestdiv >>= div_data->shift; 519 512 bestdiv &= VC3_DIV_MASK(div_data->width); 520 513 bestdiv = vc3_get_div(div_data->table, bestdiv, div_data->flags); 521 - return DIV_ROUND_UP(*parent_rate, bestdiv); 514 + req->rate = DIV_ROUND_UP(req->best_parent_rate, bestdiv); 515 + 516 + return 0; 522 517 } 523 518 524 - return divider_round_rate(hw, rate, parent_rate, div_data->table, 525 - div_data->width, div_data->flags); 519 + req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, 520 + div_data->table, 521 + div_data->width, div_data->flags); 522 + 523 + return 0; 526 524 } 527 525 528 526 static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate, ··· 546 534 547 535 static const struct clk_ops vc3_div_ops = { 548 536 .recalc_rate = vc3_div_recalc_rate, 549 - .round_rate = vc3_div_round_rate, 537 + .determine_rate = vc3_div_determine_rate, 550 538 .set_rate = vc3_div_set_rate, 551 539 }; 552 540
+40 -31
drivers/clk/clk-versaclock5.c
··· 304 304 return parent_rate; 305 305 } 306 306 307 - static long vc5_dbl_round_rate(struct clk_hw *hw, unsigned long rate, 308 - unsigned long *parent_rate) 307 + static int vc5_dbl_determine_rate(struct clk_hw *hw, 308 + struct clk_rate_request *req) 309 309 { 310 - if ((*parent_rate == rate) || ((*parent_rate * 2) == rate)) 311 - return rate; 310 + if ((req->best_parent_rate == req->rate) || ((req->best_parent_rate * 2) == req->rate)) 311 + return 0; 312 312 else 313 313 return -EINVAL; 314 314 } ··· 332 332 333 333 static const struct clk_ops vc5_dbl_ops = { 334 334 .recalc_rate = vc5_dbl_recalc_rate, 335 - .round_rate = vc5_dbl_round_rate, 335 + .determine_rate = vc5_dbl_determine_rate, 336 336 .set_rate = vc5_dbl_set_rate, 337 337 }; 338 338 ··· 363 363 return parent_rate / VC5_REF_DIVIDER_REF_DIV(div); 364 364 } 365 365 366 - static long vc5_pfd_round_rate(struct clk_hw *hw, unsigned long rate, 367 - unsigned long *parent_rate) 366 + static int vc5_pfd_determine_rate(struct clk_hw *hw, 367 + struct clk_rate_request *req) 368 368 { 369 369 unsigned long idiv; 370 370 371 371 /* PLL cannot operate with input clock above 50 MHz. */ 372 - if (rate > 50000000) 372 + if (req->rate > 50000000) 373 373 return -EINVAL; 374 374 375 375 /* CLKIN within range of PLL input, feed directly to PLL. 
*/ 376 - if (*parent_rate <= 50000000) 377 - return *parent_rate; 376 + if (req->best_parent_rate <= 50000000) { 377 + req->rate = req->best_parent_rate; 378 378 379 - idiv = DIV_ROUND_UP(*parent_rate, rate); 379 + return 0; 380 + } 381 + 382 + idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate); 380 383 if (idiv > 127) 381 384 return -EINVAL; 382 385 383 - return *parent_rate / idiv; 386 + req->rate = req->best_parent_rate / idiv; 387 + 388 + return 0; 384 389 } 385 390 386 391 static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate, ··· 425 420 426 421 static const struct clk_ops vc5_pfd_ops = { 427 422 .recalc_rate = vc5_pfd_recalc_rate, 428 - .round_rate = vc5_pfd_round_rate, 423 + .determine_rate = vc5_pfd_determine_rate, 429 424 .set_rate = vc5_pfd_set_rate, 430 425 }; 431 426 ··· 449 444 return (parent_rate * div_int) + ((parent_rate * div_frc) >> 24); 450 445 } 451 446 452 - static long vc5_pll_round_rate(struct clk_hw *hw, unsigned long rate, 453 - unsigned long *parent_rate) 447 + static int vc5_pll_determine_rate(struct clk_hw *hw, 448 + struct clk_rate_request *req) 454 449 { 455 450 struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); 456 451 struct vc5_driver_data *vc5 = hwdata->vc5; 457 452 u32 div_int; 458 453 u64 div_frc; 459 454 460 - rate = clamp(rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max); 455 + req->rate = clamp(req->rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max); 461 456 462 457 /* Determine integer part, which is 12 bit wide */ 463 - div_int = rate / *parent_rate; 458 + div_int = req->rate / req->best_parent_rate; 464 459 if (div_int > 0xfff) 465 - rate = *parent_rate * 0xfff; 460 + req->rate = req->best_parent_rate * 0xfff; 466 461 467 462 /* Determine best fractional part, which is 24 bit wide */ 468 - div_frc = rate % *parent_rate; 463 + div_frc = req->rate % req->best_parent_rate; 469 464 div_frc *= BIT(24) - 1; 470 - do_div(div_frc, *parent_rate); 465 + do_div(div_frc, req->best_parent_rate); 471 466 
472 467 hwdata->div_int = div_int; 473 468 hwdata->div_frc = (u32)div_frc; 474 469 475 - return (*parent_rate * div_int) + ((*parent_rate * div_frc) >> 24); 470 + req->rate = (req->best_parent_rate * div_int) + ((req->best_parent_rate * div_frc) >> 24); 471 + 472 + return 0; 476 473 } 477 474 478 475 static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate, ··· 495 488 496 489 static const struct clk_ops vc5_pll_ops = { 497 490 .recalc_rate = vc5_pll_recalc_rate, 498 - .round_rate = vc5_pll_round_rate, 491 + .determine_rate = vc5_pll_determine_rate, 499 492 .set_rate = vc5_pll_set_rate, 500 493 }; 501 494 ··· 527 520 return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc); 528 521 } 529 522 530 - static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate, 531 - unsigned long *parent_rate) 523 + static int vc5_fod_determine_rate(struct clk_hw *hw, 524 + struct clk_rate_request *req) 532 525 { 533 526 struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); 534 527 /* VCO frequency is divided by two before entering FOD */ 535 - u32 f_in = *parent_rate / 2; 528 + u32 f_in = req->best_parent_rate / 2; 536 529 u32 div_int; 537 530 u64 div_frc; 538 531 539 532 /* Determine integer part, which is 12 bit wide */ 540 - div_int = f_in / rate; 533 + div_int = f_in / req->rate; 541 534 /* 542 535 * WARNING: The clock chip does not output signal if the integer part 543 536 * of the divider is 0xfff and fractional part is non-zero. 
··· 545 538 */ 546 539 if (div_int > 0xffe) { 547 540 div_int = 0xffe; 548 - rate = f_in / div_int; 541 + req->rate = f_in / div_int; 549 542 } 550 543 551 544 /* Determine best fractional part, which is 30 bit wide */ 552 - div_frc = f_in % rate; 545 + div_frc = f_in % req->rate; 553 546 div_frc <<= 24; 554 - do_div(div_frc, rate); 547 + do_div(div_frc, req->rate); 555 548 556 549 hwdata->div_int = div_int; 557 550 hwdata->div_frc = (u32)div_frc; 558 551 559 - return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc); 552 + req->rate = div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc); 553 + 554 + return 0; 560 555 } 561 556 562 557 static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate, ··· 598 589 599 590 static const struct clk_ops vc5_fod_ops = { 600 591 .recalc_rate = vc5_fod_recalc_rate, 601 - .round_rate = vc5_fod_round_rate, 592 + .determine_rate = vc5_fod_determine_rate, 602 593 .set_rate = vc5_fod_set_rate, 603 594 }; 604 595
+18 -12
drivers/clk/clk-versaclock7.c
··· 900 900 return fod_rate; 901 901 } 902 902 903 - static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) 903 + static int vc7_fod_determine_rate(struct clk_hw *hw, 904 + struct clk_rate_request *req) 904 905 { 905 906 struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw); 906 907 unsigned long fod_rate; 907 908 908 909 pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n", 909 - __func__, clk_hw_get_name(hw), rate, *parent_rate); 910 + __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate); 910 911 911 - vc7_calc_fod_divider(rate, *parent_rate, 912 + vc7_calc_fod_divider(req->rate, req->best_parent_rate, 912 913 &fod->fod_1st_int, &fod->fod_2nd_int, &fod->fod_frac); 913 - fod_rate = vc7_calc_fod_2nd_stage_rate(*parent_rate, fod->fod_1st_int, 914 + fod_rate = vc7_calc_fod_2nd_stage_rate(req->best_parent_rate, fod->fod_1st_int, 914 915 fod->fod_2nd_int, fod->fod_frac); 915 916 916 917 pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n", ··· 919 918 fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac); 920 919 pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate); 921 920 922 - return fod_rate; 921 + req->rate = fod_rate; 922 + 923 + return 0; 923 924 } 924 925 925 926 static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) ··· 955 952 956 953 static const struct clk_ops vc7_fod_ops = { 957 954 .recalc_rate = vc7_fod_recalc_rate, 958 - .round_rate = vc7_fod_round_rate, 955 + .determine_rate = vc7_fod_determine_rate, 959 956 .set_rate = vc7_fod_set_rate, 960 957 }; 961 958 ··· 981 978 return iod_rate; 982 979 } 983 980 984 - static long vc7_iod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) 981 + static int vc7_iod_determine_rate(struct clk_hw *hw, 982 + struct clk_rate_request *req) 985 983 { 986 984 struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw); 987 985 unsigned 
long iod_rate; 988 986 989 987 pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n", 990 - __func__, clk_hw_get_name(hw), rate, *parent_rate); 988 + __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate); 991 989 992 - vc7_calc_iod_divider(rate, *parent_rate, &iod->iod_int); 993 - iod_rate = div64_u64(*parent_rate, iod->iod_int); 990 + vc7_calc_iod_divider(req->rate, req->best_parent_rate, &iod->iod_int); 991 + iod_rate = div64_u64(req->best_parent_rate, iod->iod_int); 994 992 995 993 pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int); 996 994 pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate); 997 995 998 - return iod_rate; 996 + req->rate = iod_rate; 997 + 998 + return 0; 999 999 } 1000 1000 1001 1001 static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) ··· 1029 1023 1030 1024 static const struct clk_ops vc7_iod_ops = { 1031 1025 .recalc_rate = vc7_iod_recalc_rate, 1032 - .round_rate = vc7_iod_round_rate, 1026 + .determine_rate = vc7_iod_determine_rate, 1033 1027 .set_rate = vc7_iod_set_rate, 1034 1028 }; 1035 1029
+35 -24
drivers/clk/clk-vt8500.c
··· 128 128 return parent_rate / div; 129 129 } 130 130 131 - static long vt8500_dclk_round_rate(struct clk_hw *hw, unsigned long rate, 132 - unsigned long *prate) 131 + static int vt8500_dclk_determine_rate(struct clk_hw *hw, 132 + struct clk_rate_request *req) 133 133 { 134 134 struct clk_device *cdev = to_clk_device(hw); 135 135 u32 divisor; 136 136 137 - if (rate == 0) 137 + if (req->rate == 0) 138 138 return 0; 139 139 140 - divisor = *prate / rate; 140 + divisor = req->best_parent_rate / req->rate; 141 141 142 142 /* If prate / rate would be decimal, incr the divisor */ 143 - if (rate * divisor < *prate) 143 + if (req->rate * divisor < req->best_parent_rate) 144 144 divisor++; 145 145 146 146 /* 147 147 * If this is a request for SDMMC we have to adjust the divisor 148 148 * when >31 to use the fixed predivisor 149 149 */ 150 - if ((cdev->div_mask == 0x3F) && (divisor > 31)) { 150 + if ((cdev->div_mask == 0x3F) && (divisor > 31)) 151 151 divisor = 64 * ((divisor / 64) + 1); 152 - } 153 152 154 - return *prate / divisor; 153 + req->rate = req->best_parent_rate / divisor; 154 + 155 + return 0; 155 156 } 156 157 157 158 static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 203 202 }; 204 203 205 204 static const struct clk_ops vt8500_divisor_clk_ops = { 206 - .round_rate = vt8500_dclk_round_rate, 205 + .determine_rate = vt8500_dclk_determine_rate, 207 206 .set_rate = vt8500_dclk_set_rate, 208 207 .recalc_rate = vt8500_dclk_recalc_rate, 209 208 }; ··· 212 211 .enable = vt8500_dclk_enable, 213 212 .disable = vt8500_dclk_disable, 214 213 .is_enabled = vt8500_dclk_is_enabled, 215 - .round_rate = vt8500_dclk_round_rate, 214 + .determine_rate = vt8500_dclk_determine_rate, 216 215 .set_rate = vt8500_dclk_set_rate, 217 216 .recalc_rate = vt8500_dclk_recalc_rate, 218 217 }; ··· 595 594 return 0; 596 595 } 597 596 598 - static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate, 599 - unsigned long *prate) 597 + static int 
vtwm_pll_determine_rate(struct clk_hw *hw, 598 + struct clk_rate_request *req) 600 599 { 601 600 struct clk_pll *pll = to_clk_pll(hw); 602 601 u32 filter, mul, div1, div2; ··· 605 604 606 605 switch (pll->type) { 607 606 case PLL_TYPE_VT8500: 608 - ret = vt8500_find_pll_bits(rate, *prate, &mul, &div1); 607 + ret = vt8500_find_pll_bits(req->rate, req->best_parent_rate, 608 + &mul, &div1); 609 609 if (!ret) 610 - round_rate = VT8500_BITS_TO_FREQ(*prate, mul, div1); 610 + round_rate = VT8500_BITS_TO_FREQ(req->best_parent_rate, 611 + mul, div1); 611 612 break; 612 613 case PLL_TYPE_WM8650: 613 - ret = wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2); 614 + ret = wm8650_find_pll_bits(req->rate, req->best_parent_rate, 615 + &mul, &div1, &div2); 614 616 if (!ret) 615 - round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2); 617 + round_rate = WM8650_BITS_TO_FREQ(req->best_parent_rate, 618 + mul, div1, div2); 616 619 break; 617 620 case PLL_TYPE_WM8750: 618 - ret = wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2); 621 + ret = wm8750_find_pll_bits(req->rate, req->best_parent_rate, 622 + &filter, &mul, &div1, &div2); 619 623 if (!ret) 620 - round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2); 624 + round_rate = WM8750_BITS_TO_FREQ(req->best_parent_rate, 625 + mul, div1, div2); 621 626 break; 622 627 case PLL_TYPE_WM8850: 623 - ret = wm8850_find_pll_bits(rate, *prate, &mul, &div1, &div2); 628 + ret = wm8850_find_pll_bits(req->rate, req->best_parent_rate, 629 + &mul, &div1, &div2); 624 630 if (!ret) 625 - round_rate = WM8850_BITS_TO_FREQ(*prate, mul, div1, div2); 631 + round_rate = WM8850_BITS_TO_FREQ(req->best_parent_rate, 632 + mul, div1, div2); 626 633 break; 627 634 default: 628 - ret = -EINVAL; 635 + return -EINVAL; 629 636 } 630 637 631 638 if (ret) 632 - return ret; 639 + req->rate = ret; 640 + else 641 + req->rate = round_rate; 633 642 634 - return round_rate; 643 + return 0; 635 644 } 636 645 637 646 static unsigned long 
vtwm_pll_recalc_rate(struct clk_hw *hw, ··· 676 665 } 677 666 678 667 static const struct clk_ops vtwm_pll_ops = { 679 - .round_rate = vtwm_pll_round_rate, 668 + .determine_rate = vtwm_pll_determine_rate, 680 669 .set_rate = vtwm_pll_set_rate, 681 670 .recalc_rate = vtwm_pll_recalc_rate, 682 671 };
+8 -6
drivers/clk/clk-wm831x.c
··· 133 133 return 0; 134 134 } 135 135 136 - static long wm831x_fll_round_rate(struct clk_hw *hw, unsigned long rate, 137 - unsigned long *unused) 136 + static int wm831x_fll_determine_rate(struct clk_hw *hw, 137 + struct clk_rate_request *req) 138 138 { 139 139 int best = 0; 140 140 int i; 141 141 142 142 for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++) 143 - if (abs(wm831x_fll_auto_rates[i] - rate) < 144 - abs(wm831x_fll_auto_rates[best] - rate)) 143 + if (abs(wm831x_fll_auto_rates[i] - req->rate) < 144 + abs(wm831x_fll_auto_rates[best] - req->rate)) 145 145 best = i; 146 146 147 - return wm831x_fll_auto_rates[best]; 147 + req->rate = wm831x_fll_auto_rates[best]; 148 + 149 + return 0; 148 150 } 149 151 150 152 static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate, ··· 216 214 .is_prepared = wm831x_fll_is_prepared, 217 215 .prepare = wm831x_fll_prepare, 218 216 .unprepare = wm831x_fll_unprepare, 219 - .round_rate = wm831x_fll_round_rate, 217 + .determine_rate = wm831x_fll_determine_rate, 220 218 .recalc_rate = wm831x_fll_recalc_rate, 221 219 .set_rate = wm831x_fll_set_rate, 222 220 .get_parent = wm831x_fll_get_parent,
+24 -17
drivers/clk/clk-xgene.c
··· 271 271 return ret; 272 272 } 273 273 274 - static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate, 275 - unsigned long *parent_rate) 274 + static int xgene_clk_pmd_determine_rate(struct clk_hw *hw, 275 + struct clk_rate_request *req) 276 276 { 277 277 struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw); 278 278 u64 ret, scale; 279 279 280 - if (!rate || rate >= *parent_rate) 281 - return *parent_rate; 280 + if (!req->rate || req->rate >= req->best_parent_rate) { 281 + req->rate = req->best_parent_rate; 282 + 283 + return 0; 284 + } 282 285 283 286 /* freq = parent_rate * scaler / denom */ 284 - ret = rate * fd->denom; 285 - scale = DIV_ROUND_UP_ULL(ret, *parent_rate); 287 + ret = req->rate * fd->denom; 288 + scale = DIV_ROUND_UP_ULL(ret, req->best_parent_rate); 286 289 287 - ret = (u64)*parent_rate * scale; 290 + ret = (u64)req->best_parent_rate * scale; 288 291 do_div(ret, fd->denom); 289 292 290 - return ret; 293 + req->rate = ret; 294 + 295 + return 0; 291 296 } 292 297 293 298 static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate, ··· 338 333 339 334 static const struct clk_ops xgene_clk_pmd_ops = { 340 335 .recalc_rate = xgene_clk_pmd_recalc_rate, 341 - .round_rate = xgene_clk_pmd_round_rate, 336 + .determine_rate = xgene_clk_pmd_determine_rate, 342 337 .set_rate = xgene_clk_pmd_set_rate, 343 338 }; 344 339 ··· 598 593 return parent_rate / divider_save; 599 594 } 600 595 601 - static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate, 602 - unsigned long *prate) 596 + static int xgene_clk_determine_rate(struct clk_hw *hw, 597 + struct clk_rate_request *req) 603 598 { 604 599 struct xgene_clk *pclk = to_xgene_clk(hw); 605 - unsigned long parent_rate = *prate; 600 + unsigned long parent_rate = req->best_parent_rate; 606 601 u32 divider; 607 602 608 603 if (pclk->param.divider_reg) { 609 604 /* Let's compute the divider */ 610 - if (rate > parent_rate) 611 - rate = parent_rate; 612 - divider = parent_rate / 
rate; /* Rounded down */ 605 + if (req->rate > parent_rate) 606 + req->rate = parent_rate; 607 + divider = parent_rate / req->rate; /* Rounded down */ 613 608 } else { 614 609 divider = 1; 615 610 } 616 611 617 - return parent_rate / divider; 612 + req->rate = parent_rate / divider; 613 + 614 + return 0; 618 615 } 619 616 620 617 static const struct clk_ops xgene_clk_ops = { ··· 625 618 .is_enabled = xgene_clk_is_enabled, 626 619 .recalc_rate = xgene_clk_recalc_rate, 627 620 .set_rate = xgene_clk_set_rate, 628 - .round_rate = xgene_clk_round_rate, 621 + .determine_rate = xgene_clk_determine_rate, 629 622 }; 630 623 631 624 static struct clk *xgene_register_clk(struct device *dev,
+9 -9
drivers/clk/hisilicon/clk-hi3660-stub.c
··· 34 34 .num_parents = 0, \ 35 35 .flags = CLK_GET_RATE_NOCACHE, \ 36 36 }, \ 37 - }, 37 + } 38 38 39 39 #define to_stub_clk(_hw) container_of(_hw, struct hi3660_stub_clk, hw) 40 40 ··· 67 67 return stub_clk->rate; 68 68 } 69 69 70 - static long hi3660_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate, 71 - unsigned long *prate) 70 + static int hi3660_stub_clk_determine_rate(struct clk_hw *hw, 71 + struct clk_rate_request *req) 72 72 { 73 73 /* 74 74 * LPM3 handles rate rounding so just return whatever 75 75 * rate is requested. 76 76 */ 77 - return rate; 77 + return 0; 78 78 } 79 79 80 80 static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 97 97 98 98 static const struct clk_ops hi3660_stub_clk_ops = { 99 99 .recalc_rate = hi3660_stub_clk_recalc_rate, 100 - .round_rate = hi3660_stub_clk_round_rate, 100 + .determine_rate = hi3660_stub_clk_determine_rate, 101 101 .set_rate = hi3660_stub_clk_set_rate, 102 102 }; 103 103 104 104 static struct hi3660_stub_clk hi3660_stub_clks[HI3660_CLK_STUB_NUM] = { 105 - DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0") 106 - DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1") 107 - DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d") 108 - DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc") 105 + DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0"), 106 + DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1"), 107 + DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d"), 108 + DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc"), 109 109 }; 110 110 111 111 static struct clk_hw *hi3660_stub_clk_hw_get(struct of_phandle_args *clkspec,
+7 -5
drivers/clk/hisilicon/clk-hi6220-stub.c
··· 161 161 return ret; 162 162 } 163 163 164 - static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate, 165 - unsigned long *parent_rate) 164 + static int hi6220_stub_clk_determine_rate(struct clk_hw *hw, 165 + struct clk_rate_request *req) 166 166 { 167 167 struct hi6220_stub_clk *stub_clk = to_stub_clk(hw); 168 - unsigned long new_rate = rate / 1000; /* kHz */ 168 + unsigned long new_rate = req->rate / 1000; /* kHz */ 169 169 170 170 switch (stub_clk->id) { 171 171 case HI6220_STUB_ACPU0: ··· 181 181 break; 182 182 } 183 183 184 - return new_rate; 184 + req->rate = new_rate; 185 + 186 + return 0; 185 187 } 186 188 187 189 static const struct clk_ops hi6220_stub_clk_ops = { 188 190 .recalc_rate = hi6220_stub_clk_recalc_rate, 189 - .round_rate = hi6220_stub_clk_round_rate, 191 + .determine_rate = hi6220_stub_clk_determine_rate, 190 192 .set_rate = hi6220_stub_clk_set_rate, 191 193 }; 192 194
+7 -5
drivers/clk/hisilicon/clkdivider-hi6220.c
··· 55 55 CLK_DIVIDER_ROUND_CLOSEST, dclk->width); 56 56 } 57 57 58 - static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate, 59 - unsigned long *prate) 58 + static int hi6220_clkdiv_determine_rate(struct clk_hw *hw, 59 + struct clk_rate_request *req) 60 60 { 61 61 struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw); 62 62 63 - return divider_round_rate(hw, rate, prate, dclk->table, 64 - dclk->width, CLK_DIVIDER_ROUND_CLOSEST); 63 + req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, dclk->table, 64 + dclk->width, CLK_DIVIDER_ROUND_CLOSEST); 65 + 66 + return 0; 65 67 } 66 68 67 69 static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, ··· 95 93 96 94 static const struct clk_ops hi6220_clkdiv_ops = { 97 95 .recalc_rate = hi6220_clkdiv_recalc_rate, 98 - .round_rate = hi6220_clkdiv_round_rate, 96 + .determine_rate = hi6220_clkdiv_determine_rate, 99 97 .set_rate = hi6220_clkdiv_set_rate, 100 98 }; 101 99
+7 -5
drivers/clk/ingenic/cgu.c
··· 174 174 n * od); 175 175 } 176 176 177 - static long 178 - ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate, 179 - unsigned long *prate) 177 + static int ingenic_pll_determine_rate(struct clk_hw *hw, 178 + struct clk_rate_request *req) 180 179 { 181 180 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); 182 181 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk); 183 182 184 - return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL); 183 + req->rate = ingenic_pll_calc(clk_info, req->rate, req->best_parent_rate, 184 + NULL, NULL, NULL); 185 + 186 + return 0; 185 187 } 186 188 187 189 static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu, ··· 319 317 320 318 static const struct clk_ops ingenic_pll_ops = { 321 319 .recalc_rate = ingenic_pll_recalc_rate, 322 - .round_rate = ingenic_pll_round_rate, 320 + .determine_rate = ingenic_pll_determine_rate, 323 321 .set_rate = ingenic_pll_set_rate, 324 322 325 323 .enable = ingenic_pll_enable,
+12 -12
drivers/clk/ingenic/jz4780-cgu.c
··· 128 128 return parent_rate; 129 129 } 130 130 131 - static long jz4780_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate, 132 - unsigned long *parent_rate) 131 + static int jz4780_otg_phy_determine_rate(struct clk_hw *hw, 132 + struct clk_rate_request *req) 133 133 { 134 - if (req_rate < 15600000) 135 - return 12000000; 134 + if (req->rate < 15600000) 135 + req->rate = 12000000; 136 + else if (req->rate < 21600000) 137 + req->rate = 19200000; 138 + else if (req->rate < 36000000) 139 + req->rate = 24000000; 140 + else 141 + req->rate = 48000000; 136 142 137 - if (req_rate < 21600000) 138 - return 19200000; 139 - 140 - if (req_rate < 36000000) 141 - return 24000000; 142 - 143 - return 48000000; 143 + return 0; 144 144 } 145 145 146 146 static int jz4780_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate, ··· 212 212 213 213 static const struct clk_ops jz4780_otg_phy_ops = { 214 214 .recalc_rate = jz4780_otg_phy_recalc_rate, 215 - .round_rate = jz4780_otg_phy_round_rate, 215 + .determine_rate = jz4780_otg_phy_determine_rate, 216 216 .set_rate = jz4780_otg_phy_set_rate, 217 217 218 218 .enable = jz4780_otg_phy_enable,
+10 -9
drivers/clk/ingenic/x1000-cgu.c
··· 84 84 return parent_rate; 85 85 } 86 86 87 - static long x1000_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate, 88 - unsigned long *parent_rate) 87 + static int x1000_otg_phy_determine_rate(struct clk_hw *hw, 88 + struct clk_rate_request *req) 89 89 { 90 - if (req_rate < 18000000) 91 - return 12000000; 90 + if (req->rate < 18000000) 91 + req->rate = 12000000; 92 + else if (req->rate < 36000000) 93 + req->rate = 24000000; 94 + else 95 + req->rate = 48000000; 92 96 93 - if (req_rate < 36000000) 94 - return 24000000; 95 - 96 - return 48000000; 97 + return 0; 97 98 } 98 99 99 100 static int x1000_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate, ··· 162 161 163 162 static const struct clk_ops x1000_otg_phy_ops = { 164 163 .recalc_rate = x1000_otg_phy_recalc_rate, 165 - .round_rate = x1000_otg_phy_round_rate, 164 + .determine_rate = x1000_otg_phy_determine_rate, 166 165 .set_rate = x1000_otg_phy_set_rate, 167 166 168 167 .enable = x1000_usb_phy_enable,
+8 -5
drivers/clk/mediatek/clk-pll.c
··· 200 200 return __mtk_pll_recalc_rate(pll, parent_rate, pcw, postdiv); 201 201 } 202 202 203 - long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate, 204 - unsigned long *prate) 203 + int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 205 204 { 206 205 struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); 207 206 u32 pcw = 0; 208 207 int postdiv; 209 208 210 - mtk_pll_calc_values(pll, &pcw, &postdiv, rate, *prate); 209 + mtk_pll_calc_values(pll, &pcw, &postdiv, req->rate, 210 + req->best_parent_rate); 211 211 212 - return __mtk_pll_recalc_rate(pll, *prate, pcw, postdiv); 212 + req->rate = __mtk_pll_recalc_rate(pll, req->best_parent_rate, pcw, 213 + postdiv); 214 + 215 + return 0; 213 216 } 214 217 215 218 int mtk_pll_prepare(struct clk_hw *hw) ··· 282 279 .prepare = mtk_pll_prepare, 283 280 .unprepare = mtk_pll_unprepare, 284 281 .recalc_rate = mtk_pll_recalc_rate, 285 - .round_rate = mtk_pll_round_rate, 282 + .determine_rate = mtk_pll_determine_rate, 286 283 .set_rate = mtk_pll_set_rate, 287 284 }; 288 285
+1 -2
drivers/clk/mediatek/clk-pll.h
··· 96 96 u32 freq, u32 fin); 97 97 int mtk_pll_set_rate(struct clk_hw *hw, unsigned long rate, 98 98 unsigned long parent_rate); 99 - long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate, 100 - unsigned long *prate); 99 + int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req); 101 100 102 101 struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll, 103 102 const struct mtk_pll_data *data,
+1 -1
drivers/clk/mediatek/clk-pllfh.c
··· 42 42 .prepare = mtk_pll_prepare, 43 43 .unprepare = mtk_pll_unprepare, 44 44 .recalc_rate = mtk_pll_recalc_rate, 45 - .round_rate = mtk_pll_round_rate, 45 + .determine_rate = mtk_pll_determine_rate, 46 46 .set_rate = mtk_fhctl_set_rate, 47 47 }; 48 48
+27 -17
drivers/clk/microchip/clk-core.c
··· 155 155 return parent_rate / pbclk_read_pbdiv(pb); 156 156 } 157 157 158 - static long pbclk_round_rate(struct clk_hw *hw, unsigned long rate, 159 - unsigned long *parent_rate) 158 + static int pbclk_determine_rate(struct clk_hw *hw, 159 + struct clk_rate_request *req) 160 160 { 161 - return calc_best_divided_rate(rate, *parent_rate, 162 - PB_DIV_MAX, PB_DIV_MIN); 161 + req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate, 162 + PB_DIV_MAX, PB_DIV_MIN); 163 + 164 + return 0; 163 165 } 164 166 165 167 static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 209 207 .disable = pbclk_disable, 210 208 .is_enabled = pbclk_is_enabled, 211 209 .recalc_rate = pbclk_recalc_rate, 212 - .round_rate = pbclk_round_rate, 210 + .determine_rate = pbclk_determine_rate, 213 211 .set_rate = pbclk_set_rate, 214 212 }; 215 213 ··· 374 372 return roclk_calc_rate(parent_rate, rodiv, rotrim); 375 373 } 376 374 377 - static long roclk_round_rate(struct clk_hw *hw, unsigned long rate, 378 - unsigned long *parent_rate) 375 + static int roclk_determine_rate(struct clk_hw *hw, 376 + struct clk_rate_request *req) 379 377 { 380 378 u32 rotrim, rodiv; 381 379 382 380 /* calculate dividers for new rate */ 383 - roclk_calc_div_trim(rate, *parent_rate, &rodiv, &rotrim); 381 + roclk_calc_div_trim(req->rate, req->best_parent_rate, &rodiv, &rotrim); 384 382 385 383 /* caclulate new rate (rounding) based on new rodiv & rotrim */ 386 - return roclk_calc_rate(*parent_rate, rodiv, rotrim); 384 + req->rate = roclk_calc_rate(req->best_parent_rate, rodiv, rotrim); 385 + 386 + return 0; 387 387 } 388 388 389 389 static int roclk_determine_rate(struct clk_hw *hw, ··· 669 665 return rate64; 670 666 } 671 667 672 - static long spll_clk_round_rate(struct clk_hw *hw, unsigned long rate, 673 - unsigned long *parent_rate) 668 + static int spll_clk_determine_rate(struct clk_hw *hw, 669 + struct clk_rate_request *req) 674 670 { 675 671 struct pic32_sys_pll *pll = clkhw_to_spll(hw); 676 
672 677 - return spll_calc_mult_div(pll, rate, *parent_rate, NULL, NULL); 673 + req->rate = spll_calc_mult_div(pll, req->rate, req->best_parent_rate, 674 + NULL, NULL); 675 + 676 + return 0; 678 677 } 679 678 680 679 static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 732 725 /* SPLL clock operation */ 733 726 const struct clk_ops pic32_spll_ops = { 734 727 .recalc_rate = spll_clk_recalc_rate, 735 - .round_rate = spll_clk_round_rate, 728 + .determine_rate = spll_clk_determine_rate, 736 729 .set_rate = spll_clk_set_rate, 737 730 }; 738 731 ··· 787 780 return parent_rate / div; 788 781 } 789 782 790 - static long sclk_round_rate(struct clk_hw *hw, unsigned long rate, 791 - unsigned long *parent_rate) 783 + static int sclk_determine_rate(struct clk_hw *hw, 784 + struct clk_rate_request *req) 792 785 { 793 - return calc_best_divided_rate(rate, *parent_rate, SLEW_SYSDIV, 1); 786 + req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate, 787 + SLEW_SYSDIV, 1); 788 + 789 + return 0; 794 790 } 795 791 796 792 static int sclk_set_rate(struct clk_hw *hw, ··· 919 909 const struct clk_ops pic32_sclk_ops = { 920 910 .get_parent = sclk_get_parent, 921 911 .set_parent = sclk_set_parent, 922 - .round_rate = sclk_round_rate, 912 + .determine_rate = sclk_determine_rate, 923 913 .set_rate = sclk_set_rate, 924 914 .recalc_rate = sclk_get_rate, 925 915 .init = sclk_init,
+10 -8
drivers/clk/mmp/clk-audio.c
··· 164 164 return 0; 165 165 } 166 166 167 - static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate, 168 - unsigned long *parent_rate) 167 + static int audio_pll_determine_rate(struct clk_hw *hw, 168 + struct clk_rate_request *req) 169 169 { 170 170 unsigned int prediv; 171 171 unsigned int postdiv; 172 172 long rounded = 0; 173 173 174 174 for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) { 175 - if (predivs[prediv].parent_rate != *parent_rate) 175 + if (predivs[prediv].parent_rate != req->best_parent_rate) 176 176 continue; 177 177 for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) { 178 178 long freq = predivs[prediv].freq_vco; 179 179 180 180 freq /= postdivs[postdiv].divisor; 181 - if (freq == rate) 182 - return rate; 183 - if (freq < rate) 181 + if (freq == req->rate) 182 + return 0; 183 + if (freq < req->rate) 184 184 continue; 185 185 if (rounded && freq > rounded) 186 186 continue; ··· 188 188 } 189 189 } 190 190 191 - return rounded; 191 + req->rate = rounded; 192 + 193 + return 0; 192 194 } 193 195 194 196 static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate, ··· 230 228 231 229 static const struct clk_ops audio_pll_ops = { 232 230 .recalc_rate = audio_pll_recalc_rate, 233 - .round_rate = audio_pll_round_rate, 231 + .determine_rate = audio_pll_determine_rate, 234 232 .set_rate = audio_pll_set_rate, 235 233 }; 236 234
+14 -13
drivers/clk/mmp/clk-frac.c
··· 21 21 22 22 #define to_clk_factor(hw) container_of(hw, struct mmp_clk_factor, hw) 23 23 24 - static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate, 25 - unsigned long *prate) 24 + static int clk_factor_determine_rate(struct clk_hw *hw, 25 + struct clk_rate_request *req) 26 26 { 27 27 struct mmp_clk_factor *factor = to_clk_factor(hw); 28 28 u64 rate = 0, prev_rate; ··· 33 33 d = &factor->ftbl[i]; 34 34 35 35 prev_rate = rate; 36 - rate = (u64)(*prate) * d->denominator; 36 + rate = (u64)(req->best_parent_rate) * d->denominator; 37 37 do_div(rate, d->numerator * factor->masks->factor); 38 - if (rate > drate) 38 + if (rate > req->rate) 39 39 break; 40 40 } 41 - if ((i == 0) || (i == factor->ftbl_cnt)) { 42 - return rate; 43 - } else { 44 - if ((drate - prev_rate) > (rate - drate)) 45 - return rate; 46 - else 47 - return prev_rate; 48 - } 41 + 42 + if ((i == 0) || (i == factor->ftbl_cnt)) 43 + req->rate = rate; 44 + else if ((req->rate - prev_rate) > (rate - req->rate)) 45 + req->rate = rate; 46 + else 47 + req->rate = prev_rate; 48 + 49 + return 0; 49 50 } 50 51 51 52 static unsigned long clk_factor_recalc_rate(struct clk_hw *hw, ··· 161 160 162 161 static const struct clk_ops clk_factor_ops = { 163 162 .recalc_rate = clk_factor_recalc_rate, 164 - .round_rate = clk_factor_round_rate, 163 + .determine_rate = clk_factor_determine_rate, 165 164 .set_rate = clk_factor_set_rate, 166 165 .init = clk_factor_init, 167 166 };
+10 -8
drivers/clk/mstar/clk-msc313-cpupll.c
··· 140 140 parent_rate); 141 141 } 142 142 143 - static long msc313_cpupll_round_rate(struct clk_hw *hw, unsigned long rate, 144 - unsigned long *parent_rate) 143 + static int msc313_cpupll_determine_rate(struct clk_hw *hw, 144 + struct clk_rate_request *req) 145 145 { 146 - u32 reg = msc313_cpupll_regforfrequecy(rate, *parent_rate); 147 - long rounded = msc313_cpupll_frequencyforreg(reg, *parent_rate); 146 + u32 reg = msc313_cpupll_regforfrequecy(req->rate, req->best_parent_rate); 147 + long rounded = msc313_cpupll_frequencyforreg(reg, req->best_parent_rate); 148 148 149 149 /* 150 150 * This is my poor attempt at making sure the resulting 151 151 * rate doesn't overshoot the requested rate. 152 152 */ 153 - for (; rounded >= rate && reg > 0; reg--) 154 - rounded = msc313_cpupll_frequencyforreg(reg, *parent_rate); 153 + for (; rounded >= req->rate && reg > 0; reg--) 154 + rounded = msc313_cpupll_frequencyforreg(reg, req->best_parent_rate); 155 155 156 - return rounded; 156 + req->rate = rounded; 157 + 158 + return 0; 157 159 } 158 160 159 161 static int msc313_cpupll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) ··· 170 168 171 169 static const struct clk_ops msc313_cpupll_ops = { 172 170 .recalc_rate = msc313_cpupll_recalc_rate, 173 - .round_rate = msc313_cpupll_round_rate, 171 + .determine_rate = msc313_cpupll_determine_rate, 174 172 .set_rate = msc313_cpupll_set_rate, 175 173 }; 176 174
+7 -5
drivers/clk/mvebu/ap-cpu-clk.c
··· 210 210 return 0; 211 211 } 212 212 213 - static long ap_cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate, 214 - unsigned long *parent_rate) 213 + static int ap_cpu_clk_determine_rate(struct clk_hw *hw, 214 + struct clk_rate_request *req) 215 215 { 216 - int divider = *parent_rate / rate; 216 + int divider = req->best_parent_rate / req->rate; 217 217 218 218 divider = min(divider, APN806_MAX_DIVIDER); 219 219 220 - return *parent_rate / divider; 220 + req->rate = req->best_parent_rate / divider; 221 + 222 + return 0; 221 223 } 222 224 223 225 static const struct clk_ops ap_cpu_clk_ops = { 224 226 .recalc_rate = ap_cpu_clk_recalc_rate, 225 - .round_rate = ap_cpu_clk_round_rate, 227 + .determine_rate = ap_cpu_clk_determine_rate, 226 228 .set_rate = ap_cpu_clk_set_rate, 227 229 }; 228 230
+9 -6
drivers/clk/mvebu/armada-37xx-periph.c
··· 454 454 return DIV_ROUND_UP_ULL((u64)parent_rate, div); 455 455 } 456 456 457 - static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate, 458 - unsigned long *parent_rate) 457 + static int clk_pm_cpu_determine_rate(struct clk_hw *hw, 458 + struct clk_rate_request *req) 459 459 { 460 460 struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw); 461 461 struct regmap *base = pm_cpu->nb_pm_base; 462 - unsigned int div = *parent_rate / rate; 462 + unsigned int div = req->best_parent_rate / req->rate; 463 463 unsigned int load_level; 464 464 /* only available when DVFS is enabled */ 465 465 if (!armada_3700_pm_dvfs_is_enabled(base)) ··· 474 474 475 475 val >>= offset; 476 476 val &= ARMADA_37XX_NB_TBG_DIV_MASK; 477 - if (val == div) 477 + if (val == div) { 478 478 /* 479 479 * We found a load level matching the target 480 480 * divider, switch to this load level and 481 481 * return. 482 482 */ 483 - return *parent_rate / div; 483 + req->rate = req->best_parent_rate / div; 484 + 485 + return 0; 486 + } 484 487 } 485 488 486 489 /* We didn't find any valid divider */ ··· 603 600 604 601 static const struct clk_ops clk_pm_cpu_ops = { 605 602 .get_parent = clk_pm_cpu_get_parent, 606 - .round_rate = clk_pm_cpu_round_rate, 603 + .determine_rate = clk_pm_cpu_determine_rate, 607 604 .set_rate = clk_pm_cpu_set_rate, 608 605 .recalc_rate = clk_pm_cpu_recalc_rate, 609 606 };
+10 -8
drivers/clk/mvebu/clk-corediv.c
··· 135 135 return parent_rate / div; 136 136 } 137 137 138 - static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate, 139 - unsigned long *parent_rate) 138 + static int clk_corediv_determine_rate(struct clk_hw *hw, 139 + struct clk_rate_request *req) 140 140 { 141 141 /* Valid ratio are 1:4, 1:5, 1:6 and 1:8 */ 142 142 u32 div; 143 143 144 - div = *parent_rate / rate; 144 + div = req->best_parent_rate / req->rate; 145 145 if (div < 4) 146 146 div = 4; 147 147 else if (div > 6) 148 148 div = 8; 149 149 150 - return *parent_rate / div; 150 + req->rate = req->best_parent_rate / div; 151 + 152 + return 0; 151 153 } 152 154 153 155 static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate, ··· 201 199 .disable = clk_corediv_disable, 202 200 .is_enabled = clk_corediv_is_enabled, 203 201 .recalc_rate = clk_corediv_recalc_rate, 204 - .round_rate = clk_corediv_round_rate, 202 + .determine_rate = clk_corediv_determine_rate, 205 203 .set_rate = clk_corediv_set_rate, 206 204 }, 207 205 .ratio_reload = BIT(8), ··· 217 215 .disable = clk_corediv_disable, 218 216 .is_enabled = clk_corediv_is_enabled, 219 217 .recalc_rate = clk_corediv_recalc_rate, 220 - .round_rate = clk_corediv_round_rate, 218 + .determine_rate = clk_corediv_determine_rate, 221 219 .set_rate = clk_corediv_set_rate, 222 220 }, 223 221 .ratio_reload = BIT(8), ··· 230 228 .ndescs = ARRAY_SIZE(mvebu_corediv_desc), 231 229 .ops = { 232 230 .recalc_rate = clk_corediv_recalc_rate, 233 - .round_rate = clk_corediv_round_rate, 231 + .determine_rate = clk_corediv_determine_rate, 234 232 .set_rate = clk_corediv_set_rate, 235 233 }, 236 234 .ratio_reload = BIT(8), ··· 242 240 .ndescs = ARRAY_SIZE(mv98dx3236_corediv_desc), 243 241 .ops = { 244 242 .recalc_rate = clk_corediv_recalc_rate, 245 - .round_rate = clk_corediv_round_rate, 243 + .determine_rate = clk_corediv_determine_rate, 246 244 .set_rate = clk_corediv_set_rate, 247 245 }, 248 246 .ratio_reload = BIT(10),
+7 -5
drivers/clk/mvebu/clk-cpu.c
··· 56 56 return parent_rate / div; 57 57 } 58 58 59 - static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate, 60 - unsigned long *parent_rate) 59 + static int clk_cpu_determine_rate(struct clk_hw *hw, 60 + struct clk_rate_request *req) 61 61 { 62 62 /* Valid ratio are 1:1, 1:2 and 1:3 */ 63 63 u32 div; 64 64 65 - div = *parent_rate / rate; 65 + div = req->best_parent_rate / req->rate; 66 66 if (div == 0) 67 67 div = 1; 68 68 else if (div > 3) 69 69 div = 3; 70 70 71 - return *parent_rate / div; 71 + req->rate = req->best_parent_rate / div; 72 + 73 + return 0; 72 74 } 73 75 74 76 static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate, ··· 161 159 162 160 static const struct clk_ops cpu_ops = { 163 161 .recalc_rate = clk_cpu_recalc_rate, 164 - .round_rate = clk_cpu_round_rate, 162 + .determine_rate = clk_cpu_determine_rate, 165 163 .set_rate = clk_cpu_set_rate, 166 164 }; 167 165
+8 -8
drivers/clk/mvebu/dove-divider.c
··· 108 108 return rate; 109 109 } 110 110 111 - static long dove_round_rate(struct clk_hw *hw, unsigned long rate, 112 - unsigned long *parent) 111 + static int dove_determine_rate(struct clk_hw *hw, 112 + struct clk_rate_request *req) 113 113 { 114 114 struct dove_clk *dc = to_dove_clk(hw); 115 - unsigned long parent_rate = *parent; 115 + unsigned long parent_rate = req->best_parent_rate; 116 116 int divider; 117 117 118 - divider = dove_calc_divider(dc, rate, parent_rate, false); 118 + divider = dove_calc_divider(dc, req->rate, parent_rate, false); 119 119 if (divider < 0) 120 120 return divider; 121 121 122 - rate = DIV_ROUND_CLOSEST(parent_rate, divider); 122 + req->rate = DIV_ROUND_CLOSEST(parent_rate, divider); 123 123 124 124 pr_debug("%s(): %s divider=%u parent=%lu rate=%lu\n", 125 - __func__, dc->name, divider, parent_rate, rate); 125 + __func__, dc->name, divider, parent_rate, req->rate); 126 126 127 - return rate; 127 + return 0; 128 128 } 129 129 130 130 static int dove_set_clock(struct clk_hw *hw, unsigned long rate, ··· 154 154 155 155 static const struct clk_ops dove_divider_ops = { 156 156 .set_rate = dove_set_clock, 157 - .round_rate = dove_round_rate, 157 + .determine_rate = dove_determine_rate, 158 158 .recalc_rate = dove_recalc_rate, 159 159 }; 160 160
+4 -4
drivers/clk/mxs/clk-div.c
··· 40 40 return div->ops->recalc_rate(&div->divider.hw, parent_rate); 41 41 } 42 42 43 - static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate, 44 - unsigned long *prate) 43 + static int clk_div_determine_rate(struct clk_hw *hw, 44 + struct clk_rate_request *req) 45 45 { 46 46 struct clk_div *div = to_clk_div(hw); 47 47 48 - return div->ops->round_rate(&div->divider.hw, rate, prate); 48 + return div->ops->determine_rate(&div->divider.hw, req); 49 49 } 50 50 51 51 static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate, ··· 63 63 64 64 static const struct clk_ops clk_div_ops = { 65 65 .recalc_rate = clk_div_recalc_rate, 66 - .round_rate = clk_div_round_rate, 66 + .determine_rate = clk_div_determine_rate, 67 67 .set_rate = clk_div_set_rate, 68 68 }; 69 69
+9 -7
drivers/clk/mxs/clk-frac.c
··· 44 44 return tmp_rate >> frac->width; 45 45 } 46 46 47 - static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate, 48 - unsigned long *prate) 47 + static int clk_frac_determine_rate(struct clk_hw *hw, 48 + struct clk_rate_request *req) 49 49 { 50 50 struct clk_frac *frac = to_clk_frac(hw); 51 - unsigned long parent_rate = *prate; 51 + unsigned long parent_rate = req->best_parent_rate; 52 52 u32 div; 53 53 u64 tmp, tmp_rate, result; 54 54 55 - if (rate > parent_rate) 55 + if (req->rate > parent_rate) 56 56 return -EINVAL; 57 57 58 - tmp = rate; 58 + tmp = req->rate; 59 59 tmp <<= frac->width; 60 60 do_div(tmp, parent_rate); 61 61 div = tmp; ··· 67 67 result = tmp_rate >> frac->width; 68 68 if ((result << frac->width) < tmp_rate) 69 69 result += 1; 70 - return result; 70 + req->rate = result; 71 + 72 + return 0; 71 73 } 72 74 73 75 static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate, ··· 105 103 106 104 static const struct clk_ops clk_frac_ops = { 107 105 .recalc_rate = clk_frac_recalc_rate, 108 - .round_rate = clk_frac_round_rate, 106 + .determine_rate = clk_frac_determine_rate, 109 107 .set_rate = clk_frac_set_rate, 110 108 }; 111 109
+9 -7
drivers/clk/mxs/clk-ref.c
··· 57 57 return tmp; 58 58 } 59 59 60 - static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate, 61 - unsigned long *prate) 60 + static int clk_ref_determine_rate(struct clk_hw *hw, 61 + struct clk_rate_request *req) 62 62 { 63 - unsigned long parent_rate = *prate; 63 + unsigned long parent_rate = req->best_parent_rate; 64 64 u64 tmp = parent_rate; 65 65 u8 frac; 66 66 67 - tmp = tmp * 18 + rate / 2; 68 - do_div(tmp, rate); 67 + tmp = tmp * 18 + req->rate / 2; 68 + do_div(tmp, req->rate); 69 69 frac = clamp(tmp, 18, 35); 70 70 71 71 tmp = parent_rate; 72 72 tmp *= 18; 73 73 do_div(tmp, frac); 74 74 75 - return tmp; 75 + req->rate = tmp; 76 + 77 + return 0; 76 78 } 77 79 78 80 static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate, ··· 106 104 .enable = clk_ref_enable, 107 105 .disable = clk_ref_disable, 108 106 .recalc_rate = clk_ref_recalc_rate, 109 - .round_rate = clk_ref_round_rate, 107 + .determine_rate = clk_ref_determine_rate, 110 108 .set_rate = clk_ref_set_rate, 111 109 }; 112 110
+8 -4
drivers/clk/nuvoton/clk-ma35d1-divider.c
··· 39 39 CLK_DIVIDER_ROUND_CLOSEST, dclk->width); 40 40 } 41 41 42 - static long ma35d1_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) 42 + static int ma35d1_clkdiv_determine_rate(struct clk_hw *hw, 43 + struct clk_rate_request *req) 43 44 { 44 45 struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw); 45 46 46 - return divider_round_rate(hw, rate, prate, dclk->table, 47 - dclk->width, CLK_DIVIDER_ROUND_CLOSEST); 47 + req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, 48 + dclk->table, dclk->width, 49 + CLK_DIVIDER_ROUND_CLOSEST); 50 + 51 + return 0; 48 52 } 49 53 50 54 static int ma35d1_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) ··· 75 71 76 72 static const struct clk_ops ma35d1_adc_clkdiv_ops = { 77 73 .recalc_rate = ma35d1_clkdiv_recalc_rate, 78 - .round_rate = ma35d1_clkdiv_round_rate, 74 + .determine_rate = ma35d1_clkdiv_determine_rate, 79 75 .set_rate = ma35d1_clkdiv_set_rate, 80 76 }; 81 77
+18 -10
drivers/clk/nuvoton/clk-ma35d1-pll.c
··· 244 244 return 0; 245 245 } 246 246 247 - static long ma35d1_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, 248 - unsigned long *parent_rate) 247 + static int ma35d1_clk_pll_determine_rate(struct clk_hw *hw, 248 + struct clk_rate_request *req) 249 249 { 250 250 struct ma35d1_clk_pll *pll = to_ma35d1_clk_pll(hw); 251 251 u32 reg_ctl[3] = { 0 }; 252 252 unsigned long pll_freq; 253 253 long ret; 254 254 255 - if (*parent_rate < PLL_FREF_MIN_FREQ || *parent_rate > PLL_FREF_MAX_FREQ) 255 + if (req->best_parent_rate < PLL_FREF_MIN_FREQ || req->best_parent_rate > PLL_FREF_MAX_FREQ) 256 256 return -EINVAL; 257 257 258 - ret = ma35d1_pll_find_closest(pll, rate, *parent_rate, reg_ctl, &pll_freq); 258 + ret = ma35d1_pll_find_closest(pll, req->rate, req->best_parent_rate, 259 + reg_ctl, &pll_freq); 259 260 if (ret < 0) 260 261 return ret; 261 262 262 263 switch (pll->id) { 263 264 case CAPLL: 264 265 reg_ctl[0] = readl_relaxed(pll->ctl0_base); 265 - pll_freq = ma35d1_calc_smic_pll_freq(reg_ctl[0], *parent_rate); 266 - return pll_freq; 266 + pll_freq = ma35d1_calc_smic_pll_freq(reg_ctl[0], req->best_parent_rate); 267 + req->rate = pll_freq; 268 + 269 + return 0; 267 270 case DDRPLL: 268 271 case APLL: 269 272 case EPLL: 270 273 case VPLL: 271 274 reg_ctl[0] = readl_relaxed(pll->ctl0_base); 272 275 reg_ctl[1] = readl_relaxed(pll->ctl1_base); 273 - pll_freq = ma35d1_calc_pll_freq(pll->mode, reg_ctl, *parent_rate); 274 - return pll_freq; 276 + pll_freq = ma35d1_calc_pll_freq(pll->mode, reg_ctl, req->best_parent_rate); 277 + req->rate = pll_freq; 278 + 279 + return 0; 275 280 } 281 + 282 + req->rate = 0; 283 + 276 284 return 0; 277 285 } 278 286 ··· 319 311 .unprepare = ma35d1_clk_pll_unprepare, 320 312 .set_rate = ma35d1_clk_pll_set_rate, 321 313 .recalc_rate = ma35d1_clk_pll_recalc_rate, 322 - .round_rate = ma35d1_clk_pll_round_rate, 314 + .determine_rate = ma35d1_clk_pll_determine_rate, 323 315 }; 324 316 325 317 static const struct clk_ops 
ma35d1_clk_fixed_pll_ops = { 326 318 .recalc_rate = ma35d1_clk_pll_recalc_rate, 327 - .round_rate = ma35d1_clk_pll_round_rate, 319 + .determine_rate = ma35d1_clk_pll_determine_rate, 328 320 }; 329 321 330 322 struct clk_hw *ma35d1_reg_clk_pll(struct device *dev, u32 id, u8 u8mode, const char *name,
+9 -7
drivers/clk/nxp/clk-lpc18xx-cgu.c
··· 370 370 return 0; 371 371 } 372 372 373 - static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned long rate, 374 - unsigned long *prate) 373 + static int lpc18xx_pll0_determine_rate(struct clk_hw *hw, 374 + struct clk_rate_request *req) 375 375 { 376 376 unsigned long m; 377 377 378 - if (*prate < rate) { 378 + if (req->best_parent_rate < req->rate) { 379 379 pr_warn("%s: pll dividers not supported\n", __func__); 380 380 return -EINVAL; 381 381 } 382 382 383 - m = DIV_ROUND_UP_ULL(*prate, rate * 2); 383 + m = DIV_ROUND_UP_ULL(req->best_parent_rate, req->rate * 2); 384 384 if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) { 385 - pr_warn("%s: unable to support rate %lu\n", __func__, rate); 385 + pr_warn("%s: unable to support rate %lu\n", __func__, req->rate); 386 386 return -EINVAL; 387 387 } 388 388 389 - return 2 * *prate * m; 389 + req->rate = 2 * req->best_parent_rate * m; 390 + 391 + return 0; 390 392 } 391 393 392 394 static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate, ··· 445 443 446 444 static const struct clk_ops lpc18xx_pll0_ops = { 447 445 .recalc_rate = lpc18xx_pll0_recalc_rate, 448 - .round_rate = lpc18xx_pll0_round_rate, 446 + .determine_rate = lpc18xx_pll0_determine_rate, 449 447 .set_rate = lpc18xx_pll0_set_rate, 450 448 }; 451 449
+33 -26
drivers/clk/nxp/clk-lpc32xx.c
··· 579 579 return regmap_update_bits(clk_regmap, clk->reg, 0x1FFFF, val); 580 580 } 581 581 582 - static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate, 583 - unsigned long *parent_rate) 582 + static int clk_hclk_pll_determine_rate(struct clk_hw *hw, 583 + struct clk_rate_request *req) 584 584 { 585 585 struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw); 586 - u64 m_i, o = rate, i = *parent_rate, d = (u64)rate << 6; 586 + u64 m_i, o = req->rate, i = req->best_parent_rate, d = (u64)req->rate << 6; 587 587 u64 m = 0, n = 0, p = 0; 588 588 int p_i, n_i; 589 589 590 - pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate); 590 + pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), req->best_parent_rate, req->rate); 591 591 592 - if (rate > 266500000) 592 + if (req->rate > 266500000) 593 593 return -EINVAL; 594 594 595 595 /* Have to check all 20 possibilities to find the minimal M */ ··· 614 614 } 615 615 } 616 616 617 - if (d == (u64)rate << 6) { 617 + if (d == (u64)req->rate << 6) { 618 618 pr_err("%s: %lu: no valid PLL parameters are found\n", 619 - clk_hw_get_name(hw), rate); 619 + clk_hw_get_name(hw), req->rate); 620 620 return -EINVAL; 621 621 } 622 622 ··· 634 634 635 635 if (!d) 636 636 pr_debug("%s: %lu: found exact match: %llu/%llu/%llu\n", 637 - clk_hw_get_name(hw), rate, m, n, p); 637 + clk_hw_get_name(hw), req->rate, m, n, p); 638 638 else 639 639 pr_debug("%s: %lu: found closest: %llu/%llu/%llu - %llu\n", 640 - clk_hw_get_name(hw), rate, m, n, p, o); 640 + clk_hw_get_name(hw), req->rate, m, n, p, o); 641 641 642 - return o; 642 + req->rate = o; 643 + 644 + return 0; 643 645 } 644 646 645 - static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate, 646 - unsigned long *parent_rate) 647 + static int clk_usb_pll_determine_rate(struct clk_hw *hw, 648 + struct clk_rate_request *req) 647 649 { 648 650 struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw); 649 651 struct clk_hw *usb_div_hw, *osc_hw; 650 652 u64 d_i, 
n_i, m, o; 651 653 652 - pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate); 654 + pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), req->best_parent_rate, 655 + req->rate); 653 656 654 657 /* 655 658 * The only supported USB clock is 48MHz, with PLL internal constraints ··· 660 657 * and post-divider must be 4, this slightly simplifies calculation of 661 658 * USB divider, USB PLL N and M parameters. 662 659 */ 663 - if (rate != 48000000) 660 + if (req->rate != 48000000) 664 661 return -EINVAL; 665 662 666 663 /* USB divider clock */ ··· 688 685 clk->m_div = m; 689 686 clk->p_div = 2; 690 687 clk->mode = PLL_NON_INTEGER; 691 - *parent_rate = div64_u64(o, d_i); 688 + req->best_parent_rate = div64_u64(o, d_i); 692 689 693 - return rate; 690 + return 0; 694 691 } 695 692 } 696 693 697 694 return -EINVAL; 698 695 } 699 696 700 - #define LPC32XX_DEFINE_PLL_OPS(_name, _rc, _sr, _rr) \ 697 + #define LPC32XX_DEFINE_PLL_OPS(_name, _rc, _sr, _dr) \ 701 698 static const struct clk_ops clk_ ##_name ## _ops = { \ 702 699 .enable = clk_pll_enable, \ 703 700 .disable = clk_pll_disable, \ 704 701 .is_enabled = clk_pll_is_enabled, \ 705 702 .recalc_rate = _rc, \ 706 703 .set_rate = _sr, \ 707 - .round_rate = _rr, \ 704 + .determine_rate = _dr, \ 708 705 } 709 706 710 707 LPC32XX_DEFINE_PLL_OPS(pll_397x, clk_pll_397x_recalc_rate, NULL, NULL); 711 708 LPC32XX_DEFINE_PLL_OPS(hclk_pll, clk_pll_recalc_rate, 712 - clk_pll_set_rate, clk_hclk_pll_round_rate); 709 + clk_pll_set_rate, clk_hclk_pll_determine_rate); 713 710 LPC32XX_DEFINE_PLL_OPS(usb_pll, clk_pll_recalc_rate, 714 - clk_pll_set_rate, clk_usb_pll_round_rate); 711 + clk_pll_set_rate, clk_usb_pll_determine_rate); 715 712 716 713 static int clk_ddram_is_enabled(struct clk_hw *hw) 717 714 { ··· 958 955 divider->flags, divider->width); 959 956 } 960 957 961 - static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, 962 - unsigned long *prate) 958 + static int clk_divider_determine_rate(struct clk_hw 
*hw, 959 + struct clk_rate_request *req) 963 960 { 964 961 struct lpc32xx_clk_div *divider = to_lpc32xx_div(hw); 965 962 unsigned int bestdiv; ··· 971 968 bestdiv &= div_mask(divider->width); 972 969 bestdiv = _get_div(divider->table, bestdiv, divider->flags, 973 970 divider->width); 974 - return DIV_ROUND_UP(*prate, bestdiv); 971 + req->rate = DIV_ROUND_UP(req->best_parent_rate, bestdiv); 972 + 973 + return 0; 975 974 } 976 975 977 - return divider_round_rate(hw, rate, prate, divider->table, 978 - divider->width, divider->flags); 976 + req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, 977 + divider->table, divider->width, divider->flags); 978 + 979 + return 0; 979 980 } 980 981 981 982 static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, ··· 998 991 999 992 static const struct clk_ops lpc32xx_clk_divider_ops = { 1000 993 .recalc_rate = clk_divider_recalc_rate, 1001 - .round_rate = clk_divider_round_rate, 994 + .determine_rate = clk_divider_determine_rate, 1002 995 .set_rate = clk_divider_set_rate, 1003 996 }; 1004 997
+12 -8
drivers/clk/pistachio/clk-pll.c
··· 139 139 return NULL; 140 140 } 141 141 142 - static long pll_round_rate(struct clk_hw *hw, unsigned long rate, 143 - unsigned long *parent_rate) 142 + static int pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 144 143 { 145 144 struct pistachio_clk_pll *pll = to_pistachio_pll(hw); 146 145 unsigned int i; 147 146 148 147 for (i = 0; i < pll->nr_rates; i++) { 149 - if (i > 0 && pll->rates[i].fref == *parent_rate && 150 - pll->rates[i].fout <= rate) 151 - return pll->rates[i - 1].fout; 148 + if (i > 0 && pll->rates[i].fref == req->best_parent_rate && 149 + pll->rates[i].fout <= req->rate) { 150 + req->rate = pll->rates[i - 1].fout; 151 + 152 + return 0; 153 + } 152 154 } 153 155 154 - return pll->rates[0].fout; 156 + req->rate = pll->rates[0].fout; 157 + 158 + return 0; 155 159 } 156 160 157 161 static int pll_gf40lp_frac_enable(struct clk_hw *hw) ··· 304 300 .disable = pll_gf40lp_frac_disable, 305 301 .is_enabled = pll_gf40lp_frac_is_enabled, 306 302 .recalc_rate = pll_gf40lp_frac_recalc_rate, 307 - .round_rate = pll_round_rate, 303 + .determine_rate = pll_determine_rate, 308 304 .set_rate = pll_gf40lp_frac_set_rate, 309 305 }; 310 306 ··· 436 432 .disable = pll_gf40lp_laint_disable, 437 433 .is_enabled = pll_gf40lp_laint_is_enabled, 438 434 .recalc_rate = pll_gf40lp_laint_recalc_rate, 439 - .round_rate = pll_round_rate, 435 + .determine_rate = pll_determine_rate, 440 436 .set_rate = pll_gf40lp_laint_set_rate, 441 437 }; 442 438
+17 -10
drivers/clk/qcom/clk-regmap-divider.c
··· 15 15 return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr); 16 16 } 17 17 18 - static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate, 19 - unsigned long *prate) 18 + static int div_ro_determine_rate(struct clk_hw *hw, 19 + struct clk_rate_request *req) 20 20 { 21 21 struct clk_regmap_div *divider = to_clk_regmap_div(hw); 22 22 struct clk_regmap *clkr = &divider->clkr; ··· 26 26 val >>= divider->shift; 27 27 val &= BIT(divider->width) - 1; 28 28 29 - return divider_ro_round_rate(hw, rate, prate, NULL, divider->width, 30 - CLK_DIVIDER_ROUND_CLOSEST, val); 29 + req->rate = divider_ro_round_rate(hw, req->rate, 30 + &req->best_parent_rate, NULL, 31 + divider->width, 32 + CLK_DIVIDER_ROUND_CLOSEST, val); 33 + 34 + return 0; 31 35 } 32 36 33 - static long div_round_rate(struct clk_hw *hw, unsigned long rate, 34 - unsigned long *prate) 37 + static int div_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 35 38 { 36 39 struct clk_regmap_div *divider = to_clk_regmap_div(hw); 37 40 38 - return divider_round_rate(hw, rate, prate, NULL, divider->width, 39 - CLK_DIVIDER_ROUND_CLOSEST); 41 + req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, 42 + NULL, 43 + divider->width, 44 + CLK_DIVIDER_ROUND_CLOSEST); 45 + 46 + return 0; 40 47 } 41 48 42 49 static int div_set_rate(struct clk_hw *hw, unsigned long rate, ··· 77 70 } 78 71 79 72 const struct clk_ops clk_regmap_div_ops = { 80 - .round_rate = div_round_rate, 73 + .determine_rate = div_determine_rate, 81 74 .set_rate = div_set_rate, 82 75 .recalc_rate = div_recalc_rate, 83 76 }; 84 77 EXPORT_SYMBOL_GPL(clk_regmap_div_ops); 85 78 86 79 const struct clk_ops clk_regmap_div_ro_ops = { 87 - .round_rate = div_round_ro_rate, 80 + .determine_rate = div_ro_determine_rate, 88 81 .recalc_rate = div_recalc_rate, 89 82 }; 90 83 EXPORT_SYMBOL_GPL(clk_regmap_div_ro_ops);
+7 -6
drivers/clk/rockchip/clk-ddr.c
··· 55 55 return res.a0; 56 56 } 57 57 58 - static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw, 59 - unsigned long rate, 60 - unsigned long *prate) 58 + static int rockchip_ddrclk_sip_determine_rate(struct clk_hw *hw, 59 + struct clk_rate_request *req) 61 60 { 62 61 struct arm_smccc_res res; 63 62 64 - arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, rate, 0, 63 + arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, req->rate, 0, 65 64 ROCKCHIP_SIP_CONFIG_DRAM_ROUND_RATE, 66 65 0, 0, 0, 0, &res); 67 66 68 - return res.a0; 67 + req->rate = res.a0; 68 + 69 + return 0; 69 70 } 70 71 71 72 static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw) ··· 84 83 static const struct clk_ops rockchip_ddrclk_sip_ops = { 85 84 .recalc_rate = rockchip_ddrclk_sip_recalc_rate, 86 85 .set_rate = rockchip_ddrclk_sip_set_rate, 87 - .round_rate = rockchip_ddrclk_sip_round_rate, 86 + .determine_rate = rockchip_ddrclk_sip_determine_rate, 88 87 .get_parent = rockchip_ddrclk_get_parent, 89 88 }; 90 89
+7 -5
drivers/clk/rockchip/clk-half-divider.c
··· 92 92 return bestdiv; 93 93 } 94 94 95 - static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate, 96 - unsigned long *prate) 95 + static int clk_half_divider_determine_rate(struct clk_hw *hw, 96 + struct clk_rate_request *req) 97 97 { 98 98 struct clk_divider *divider = to_clk_divider(hw); 99 99 int div; 100 100 101 - div = clk_half_divider_bestdiv(hw, rate, prate, 101 + div = clk_half_divider_bestdiv(hw, req->rate, &req->best_parent_rate, 102 102 divider->width, 103 103 divider->flags); 104 104 105 - return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3); 105 + req->rate = DIV_ROUND_UP_ULL(((u64)req->best_parent_rate * 2), div * 2 + 3); 106 + 107 + return 0; 106 108 } 107 109 108 110 static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate, ··· 143 141 144 142 static const struct clk_ops clk_half_divider_ops = { 145 143 .recalc_rate = clk_half_divider_recalc_rate, 146 - .round_rate = clk_half_divider_round_rate, 144 + .determine_rate = clk_half_divider_determine_rate, 147 145 .set_rate = clk_half_divider_set_rate, 148 146 }; 149 147
+14 -9
drivers/clk/rockchip/clk-pll.c
··· 61 61 return NULL; 62 62 } 63 63 64 - static long rockchip_pll_round_rate(struct clk_hw *hw, 65 - unsigned long drate, unsigned long *prate) 64 + static int rockchip_pll_determine_rate(struct clk_hw *hw, 65 + struct clk_rate_request *req) 66 66 { 67 67 struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); 68 68 const struct rockchip_pll_rate_table *rate_table = pll->rate_table; ··· 70 70 71 71 /* Assuming rate_table is in descending order */ 72 72 for (i = 0; i < pll->rate_count; i++) { 73 - if (drate >= rate_table[i].rate) 74 - return rate_table[i].rate; 73 + if (req->rate >= rate_table[i].rate) { 74 + req->rate = rate_table[i].rate; 75 + 76 + return 0; 77 + } 75 78 } 76 79 77 80 /* return minimum supported value */ 78 - return rate_table[i - 1].rate; 81 + req->rate = rate_table[i - 1].rate; 82 + 83 + return 0; 79 84 } 80 85 81 86 /* ··· 357 352 358 353 static const struct clk_ops rockchip_rk3036_pll_clk_ops = { 359 354 .recalc_rate = rockchip_rk3036_pll_recalc_rate, 360 - .round_rate = rockchip_pll_round_rate, 355 + .determine_rate = rockchip_pll_determine_rate, 361 356 .set_rate = rockchip_rk3036_pll_set_rate, 362 357 .enable = rockchip_rk3036_pll_enable, 363 358 .disable = rockchip_rk3036_pll_disable, ··· 576 571 577 572 static const struct clk_ops rockchip_rk3066_pll_clk_ops = { 578 573 .recalc_rate = rockchip_rk3066_pll_recalc_rate, 579 - .round_rate = rockchip_pll_round_rate, 574 + .determine_rate = rockchip_pll_determine_rate, 580 575 .set_rate = rockchip_rk3066_pll_set_rate, 581 576 .enable = rockchip_rk3066_pll_enable, 582 577 .disable = rockchip_rk3066_pll_disable, ··· 841 836 842 837 static const struct clk_ops rockchip_rk3399_pll_clk_ops = { 843 838 .recalc_rate = rockchip_rk3399_pll_recalc_rate, 844 - .round_rate = rockchip_pll_round_rate, 839 + .determine_rate = rockchip_pll_determine_rate, 845 840 .set_rate = rockchip_rk3399_pll_set_rate, 846 841 .enable = rockchip_rk3399_pll_enable, 847 842 .disable = rockchip_rk3399_pll_disable, ··· 1041 
1036 1042 1037 static const struct clk_ops rockchip_rk3588_pll_clk_ops = { 1043 1038 .recalc_rate = rockchip_rk3588_pll_recalc_rate, 1044 - .round_rate = rockchip_pll_round_rate, 1039 + .determine_rate = rockchip_pll_determine_rate, 1045 1040 .set_rate = rockchip_rk3588_pll_set_rate, 1046 1041 .enable = rockchip_rk3588_pll_enable, 1047 1042 .disable = rockchip_rk3588_pll_disable,
+1 -1
drivers/clk/sifive/fu540-prci.h
··· 49 49 50 50 static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = { 51 51 .set_rate = sifive_prci_wrpll_set_rate, 52 - .round_rate = sifive_prci_wrpll_round_rate, 52 + .determine_rate = sifive_prci_wrpll_determine_rate, 53 53 .recalc_rate = sifive_prci_wrpll_recalc_rate, 54 54 .enable = sifive_prci_clock_enable, 55 55 .disable = sifive_prci_clock_disable,
+1 -1
drivers/clk/sifive/fu740-prci.h
··· 55 55 56 56 static const struct clk_ops sifive_fu740_prci_wrpll_clk_ops = { 57 57 .set_rate = sifive_prci_wrpll_set_rate, 58 - .round_rate = sifive_prci_wrpll_round_rate, 58 + .determine_rate = sifive_prci_wrpll_determine_rate, 59 59 .recalc_rate = sifive_prci_wrpll_recalc_rate, 60 60 .enable = sifive_prci_clock_enable, 61 61 .disable = sifive_prci_clock_disable,
+6 -5
drivers/clk/sifive/sifive-prci.c
··· 183 183 return wrpll_calc_output_rate(&pwd->c, parent_rate); 184 184 } 185 185 186 - long sifive_prci_wrpll_round_rate(struct clk_hw *hw, 187 - unsigned long rate, 188 - unsigned long *parent_rate) 186 + int sifive_prci_wrpll_determine_rate(struct clk_hw *hw, 187 + struct clk_rate_request *req) 189 188 { 190 189 struct __prci_clock *pc = clk_hw_to_prci_clock(hw); 191 190 struct __prci_wrpll_data *pwd = pc->pwd; ··· 192 193 193 194 memcpy(&c, &pwd->c, sizeof(c)); 194 195 195 - wrpll_configure_for_rate(&c, rate, *parent_rate); 196 + wrpll_configure_for_rate(&c, req->rate, req->best_parent_rate); 196 197 197 - return wrpll_calc_output_rate(&c, *parent_rate); 198 + req->rate = wrpll_calc_output_rate(&c, req->best_parent_rate); 199 + 200 + return 0; 198 201 } 199 202 200 203 int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
+2 -2
drivers/clk/sifive/sifive-prci.h
··· 291 291 void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd); 292 292 293 293 /* Linux clock framework integration */ 294 - long sifive_prci_wrpll_round_rate(struct clk_hw *hw, unsigned long rate, 295 - unsigned long *parent_rate); 294 + int sifive_prci_wrpll_determine_rate(struct clk_hw *hw, 295 + struct clk_rate_request *req); 296 296 int sifive_prci_wrpll_set_rate(struct clk_hw *hw, unsigned long rate, 297 297 unsigned long parent_rate); 298 298 int sifive_clk_is_enabled(struct clk_hw *hw);
+6 -4
drivers/clk/sophgo/clk-cv18xx-ip.c
··· 45 45 return parent_rate; 46 46 } 47 47 48 - static long gate_round_rate(struct clk_hw *hw, unsigned long rate, 49 - unsigned long *parent_rate) 48 + static int gate_determine_rate(struct clk_hw *hw, 49 + struct clk_rate_request *req) 50 50 { 51 - return *parent_rate; 51 + req->rate = req->best_parent_rate; 52 + 53 + return 0; 52 54 } 53 55 54 56 static int gate_set_rate(struct clk_hw *hw, unsigned long rate, ··· 65 63 .is_enabled = gate_is_enabled, 66 64 67 65 .recalc_rate = gate_recalc_rate, 68 - .round_rate = gate_round_rate, 66 + .determine_rate = gate_determine_rate, 69 67 .set_rate = gate_set_rate, 70 68 }; 71 69
+9 -8
drivers/clk/sophgo/clk-sg2042-clkgen.c
··· 176 176 return ret_rate; 177 177 } 178 178 179 - static long sg2042_clk_divider_round_rate(struct clk_hw *hw, 180 - unsigned long rate, 181 - unsigned long *prate) 179 + static int sg2042_clk_divider_determine_rate(struct clk_hw *hw, 180 + struct clk_rate_request *req) 182 181 { 183 182 struct sg2042_divider_clock *divider = to_sg2042_clk_divider(hw); 184 183 unsigned long ret_rate; ··· 191 192 bestdiv = readl(divider->reg) >> divider->shift; 192 193 bestdiv &= clk_div_mask(divider->width); 193 194 } 194 - ret_rate = DIV_ROUND_UP_ULL((u64)*prate, bestdiv); 195 + ret_rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, bestdiv); 195 196 } else { 196 - ret_rate = divider_round_rate(hw, rate, prate, NULL, 197 + ret_rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, NULL, 197 198 divider->width, divider->div_flags); 198 199 } 199 200 200 201 pr_debug("--> %s: divider_round_rate: val = %ld\n", 201 202 clk_hw_get_name(hw), ret_rate); 202 - return ret_rate; 203 + req->rate = ret_rate; 204 + 205 + return 0; 203 206 } 204 207 205 208 static int sg2042_clk_divider_set_rate(struct clk_hw *hw, ··· 259 258 260 259 static const struct clk_ops sg2042_clk_divider_ops = { 261 260 .recalc_rate = sg2042_clk_divider_recalc_rate, 262 - .round_rate = sg2042_clk_divider_round_rate, 261 + .determine_rate = sg2042_clk_divider_determine_rate, 263 262 .set_rate = sg2042_clk_divider_set_rate, 264 263 }; 265 264 266 265 static const struct clk_ops sg2042_clk_divider_ro_ops = { 267 266 .recalc_rate = sg2042_clk_divider_recalc_rate, 268 - .round_rate = sg2042_clk_divider_round_rate, 267 + .determine_rate = sg2042_clk_divider_determine_rate, 269 268 }; 270 269 271 270 /*
+10 -18
drivers/clk/sophgo/clk-sg2042-pll.c
··· 346 346 return rate; 347 347 } 348 348 349 - static long sg2042_clk_pll_round_rate(struct clk_hw *hw, 350 - unsigned long req_rate, 351 - unsigned long *prate) 349 + static int sg2042_clk_pll_determine_rate(struct clk_hw *hw, 350 + struct clk_rate_request *req) 352 351 { 353 352 struct sg2042_pll_ctrl pctrl_table; 354 353 unsigned int value; 355 354 long proper_rate; 356 355 int ret; 357 356 358 - ret = sg2042_get_pll_ctl_setting(&pctrl_table, req_rate, *prate); 357 + ret = sg2042_get_pll_ctl_setting(&pctrl_table, 358 + min(req->rate, req->max_rate), 359 + req->best_parent_rate); 359 360 if (ret) { 360 361 proper_rate = 0; 361 362 goto out; 362 363 } 363 364 364 365 value = sg2042_pll_ctrl_encode(&pctrl_table); 365 - proper_rate = (long)sg2042_pll_recalc_rate(value, *prate); 366 + proper_rate = (long)sg2042_pll_recalc_rate(value, req->best_parent_rate); 366 367 367 368 out: 368 - pr_debug("--> %s: pll_round_rate: val = %ld\n", 369 - clk_hw_get_name(hw), proper_rate); 370 - return proper_rate; 371 - } 372 - 373 - static int sg2042_clk_pll_determine_rate(struct clk_hw *hw, 374 - struct clk_rate_request *req) 375 - { 376 - req->rate = sg2042_clk_pll_round_rate(hw, min(req->rate, req->max_rate), 377 - &req->best_parent_rate); 378 369 pr_debug("--> %s: pll_determine_rate: val = %ld\n", 379 - clk_hw_get_name(hw), req->rate); 370 + clk_hw_get_name(hw), proper_rate); 371 + req->rate = proper_rate; 372 + 380 373 return 0; 381 374 } 382 375 ··· 410 417 411 418 static const struct clk_ops sg2042_clk_pll_ops = { 412 419 .recalc_rate = sg2042_clk_pll_recalc_rate, 413 - .round_rate = sg2042_clk_pll_round_rate, 414 420 .determine_rate = sg2042_clk_pll_determine_rate, 415 421 .set_rate = sg2042_clk_pll_set_rate, 416 422 }; 417 423 418 424 static const struct clk_ops sg2042_clk_pll_ro_ops = { 419 425 .recalc_rate = sg2042_clk_pll_recalc_rate, 420 - .round_rate = sg2042_clk_pll_round_rate, 426 + .determine_rate = sg2042_clk_pll_determine_rate, 421 427 }; 422 428 423 429 /*
+7 -5
drivers/clk/spear/clk-aux-synth.c
··· 49 49 (rtbl[index].yscale * eq)) * 10000; 50 50 } 51 51 52 - static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate, 53 - unsigned long *prate) 52 + static int clk_aux_determine_rate(struct clk_hw *hw, 53 + struct clk_rate_request *req) 54 54 { 55 55 struct clk_aux *aux = to_clk_aux(hw); 56 56 int unused; 57 57 58 - return clk_round_rate_index(hw, drate, *prate, aux_calc_rate, 59 - aux->rtbl_cnt, &unused); 58 + req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate, 59 + aux_calc_rate, aux->rtbl_cnt, &unused); 60 + 61 + return 0; 60 62 } 61 63 62 64 static unsigned long clk_aux_recalc_rate(struct clk_hw *hw, ··· 129 127 130 128 static const struct clk_ops clk_aux_ops = { 131 129 .recalc_rate = clk_aux_recalc_rate, 132 - .round_rate = clk_aux_round_rate, 130 + .determine_rate = clk_aux_determine_rate, 133 131 .set_rate = clk_aux_set_rate, 134 132 }; 135 133
+7 -5
drivers/clk/spear/clk-frac-synth.c
··· 52 52 return prate; 53 53 } 54 54 55 - static long clk_frac_round_rate(struct clk_hw *hw, unsigned long drate, 56 - unsigned long *prate) 55 + static int clk_frac_determine_rate(struct clk_hw *hw, 56 + struct clk_rate_request *req) 57 57 { 58 58 struct clk_frac *frac = to_clk_frac(hw); 59 59 int unused; 60 60 61 - return clk_round_rate_index(hw, drate, *prate, frac_calc_rate, 62 - frac->rtbl_cnt, &unused); 61 + req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate, 62 + frac_calc_rate, frac->rtbl_cnt, &unused); 63 + 64 + return 0; 63 65 } 64 66 65 67 static unsigned long clk_frac_recalc_rate(struct clk_hw *hw, ··· 117 115 118 116 static const struct clk_ops clk_frac_ops = { 119 117 .recalc_rate = clk_frac_recalc_rate, 120 - .round_rate = clk_frac_round_rate, 118 + .determine_rate = clk_frac_determine_rate, 121 119 .set_rate = clk_frac_set_rate, 122 120 }; 123 121
+7 -5
drivers/clk/spear/clk-gpt-synth.c
··· 39 39 return prate; 40 40 } 41 41 42 - static long clk_gpt_round_rate(struct clk_hw *hw, unsigned long drate, 43 - unsigned long *prate) 42 + static int clk_gpt_determine_rate(struct clk_hw *hw, 43 + struct clk_rate_request *req) 44 44 { 45 45 struct clk_gpt *gpt = to_clk_gpt(hw); 46 46 int unused; 47 47 48 - return clk_round_rate_index(hw, drate, *prate, gpt_calc_rate, 49 - gpt->rtbl_cnt, &unused); 48 + req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate, 49 + gpt_calc_rate, gpt->rtbl_cnt, &unused); 50 + 51 + return 0; 50 52 } 51 53 52 54 static unsigned long clk_gpt_recalc_rate(struct clk_hw *hw, ··· 106 104 107 105 static const struct clk_ops clk_gpt_ops = { 108 106 .recalc_rate = clk_gpt_recalc_rate, 109 - .round_rate = clk_gpt_round_rate, 107 + .determine_rate = clk_gpt_determine_rate, 110 108 .set_rate = clk_gpt_set_rate, 111 109 }; 112 110
+14 -9
drivers/clk/spear/clk-vco-pll.c
··· 110 110 return rate; 111 111 } 112 112 113 - static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate, 114 - unsigned long *prate) 113 + static int clk_pll_determine_rate(struct clk_hw *hw, 114 + struct clk_rate_request *req) 115 115 { 116 116 int unused; 117 117 118 - return clk_pll_round_rate_index(hw, drate, prate, &unused); 118 + req->rate = clk_pll_round_rate_index(hw, req->rate, 119 + &req->best_parent_rate, &unused); 120 + 121 + return 0; 119 122 } 120 123 121 124 static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long ··· 167 164 168 165 static const struct clk_ops clk_pll_ops = { 169 166 .recalc_rate = clk_pll_recalc_rate, 170 - .round_rate = clk_pll_round_rate, 167 + .determine_rate = clk_pll_determine_rate, 171 168 .set_rate = clk_pll_set_rate, 172 169 }; 173 170 ··· 179 176 return pll_calc_rate(vco->rtbl, prate, index, NULL); 180 177 } 181 178 182 - static long clk_vco_round_rate(struct clk_hw *hw, unsigned long drate, 183 - unsigned long *prate) 179 + static int clk_vco_determine_rate(struct clk_hw *hw, 180 + struct clk_rate_request *req) 184 181 { 185 182 struct clk_vco *vco = to_clk_vco(hw); 186 183 int unused; 187 184 188 - return clk_round_rate_index(hw, drate, *prate, vco_calc_rate, 189 - vco->rtbl_cnt, &unused); 185 + req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate, 186 + vco_calc_rate, vco->rtbl_cnt, &unused); 187 + 188 + return 0; 190 189 } 191 190 192 191 static unsigned long clk_vco_recalc_rate(struct clk_hw *hw, ··· 270 265 271 266 static const struct clk_ops clk_vco_ops = { 272 267 .recalc_rate = clk_vco_recalc_rate, 273 - .round_rate = clk_vco_round_rate, 268 + .determine_rate = clk_vco_determine_rate, 274 269 .set_rate = clk_vco_set_rate, 275 270 }; 276 271
+8 -5
drivers/clk/sprd/div.c
··· 9 9 10 10 #include "div.h" 11 11 12 - static long sprd_div_round_rate(struct clk_hw *hw, unsigned long rate, 13 - unsigned long *parent_rate) 12 + static int sprd_div_determine_rate(struct clk_hw *hw, 13 + struct clk_rate_request *req) 14 14 { 15 15 struct sprd_div *cd = hw_to_sprd_div(hw); 16 16 17 - return divider_round_rate(&cd->common.hw, rate, parent_rate, NULL, 18 - cd->div.width, 0); 17 + req->rate = divider_round_rate(&cd->common.hw, req->rate, 18 + &req->best_parent_rate, 19 + NULL, cd->div.width, 0); 20 + 21 + return 0; 19 22 } 20 23 21 24 unsigned long sprd_div_helper_recalc_rate(struct sprd_clk_common *common, ··· 78 75 79 76 const struct clk_ops sprd_div_ops = { 80 77 .recalc_rate = sprd_div_recalc_rate, 81 - .round_rate = sprd_div_round_rate, 78 + .determine_rate = sprd_div_determine_rate, 82 79 .set_rate = sprd_div_set_rate, 83 80 }; 84 81 EXPORT_SYMBOL_GPL(sprd_div_ops);
+4 -4
drivers/clk/sprd/pll.c
··· 254 254 return 0; 255 255 } 256 256 257 - static long sprd_pll_round_rate(struct clk_hw *hw, unsigned long rate, 258 - unsigned long *prate) 257 + static int sprd_pll_determine_rate(struct clk_hw *hw, 258 + struct clk_rate_request *req) 259 259 { 260 - return rate; 260 + return 0; 261 261 } 262 262 263 263 const struct clk_ops sprd_pll_ops = { 264 264 .prepare = sprd_pll_clk_prepare, 265 265 .recalc_rate = sprd_pll_recalc_rate, 266 - .round_rate = sprd_pll_round_rate, 266 + .determine_rate = sprd_pll_determine_rate, 267 267 .set_rate = sprd_pll_set_rate, 268 268 }; 269 269 EXPORT_SYMBOL_GPL(sprd_pll_ops);
+17 -16
drivers/clk/st/clkgen-fsyn.c
··· 375 375 return 0; 376 376 } 377 377 378 - static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw, 379 - unsigned long rate, 380 - unsigned long *prate) 378 + static int quadfs_pll_fs660c32_determine_rate(struct clk_hw *hw, 379 + struct clk_rate_request *req) 381 380 { 382 381 struct stm_fs params; 383 382 384 - if (clk_fs660c32_vco_get_params(*prate, rate, &params)) 385 - return rate; 383 + if (clk_fs660c32_vco_get_params(req->best_parent_rate, req->rate, &params)) 384 + return 0; 386 385 387 - clk_fs660c32_vco_get_rate(*prate, &params, &rate); 386 + clk_fs660c32_vco_get_rate(req->best_parent_rate, &params, &req->rate); 388 387 389 388 pr_debug("%s: %s new rate %ld [ndiv=%u]\n", 390 389 __func__, clk_hw_get_name(hw), 391 - rate, (unsigned int)params.ndiv); 390 + req->rate, (unsigned int)params.ndiv); 392 391 393 - return rate; 392 + return 0; 394 393 } 395 394 396 395 static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate, ··· 435 436 .disable = quadfs_pll_disable, 436 437 .is_enabled = quadfs_pll_is_enabled, 437 438 .recalc_rate = quadfs_pll_fs660c32_recalc_rate, 438 - .round_rate = quadfs_pll_fs660c32_round_rate, 439 + .determine_rate = quadfs_pll_fs660c32_determine_rate, 439 440 .set_rate = quadfs_pll_fs660c32_set_rate, 440 441 }; 441 442 ··· 813 814 return rate; 814 815 } 815 816 816 - static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate, 817 - unsigned long *prate) 817 + static int quadfs_determine_rate(struct clk_hw *hw, 818 + struct clk_rate_request *req) 818 819 { 819 820 struct stm_fs params; 820 821 821 - rate = quadfs_find_best_rate(hw, rate, *prate, &params); 822 + req->rate = quadfs_find_best_rate(hw, req->rate, 823 + req->best_parent_rate, &params); 822 824 823 825 pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", 824 826 __func__, clk_hw_get_name(hw), 825 - rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv, 826 - (unsigned int)params.pe, (unsigned int)params.nsdiv); 827 + 
req->rate, (unsigned int)params.sdiv, 828 + (unsigned int)params.mdiv, 829 + (unsigned int)params.pe, (unsigned int)params.nsdiv); 827 830 828 - return rate; 831 + return 0; 829 832 } 830 833 831 834 ··· 874 873 .enable = quadfs_fsynth_enable, 875 874 .disable = quadfs_fsynth_disable, 876 875 .is_enabled = quadfs_fsynth_is_enabled, 877 - .round_rate = quadfs_round_rate, 876 + .determine_rate = quadfs_determine_rate, 878 877 .set_rate = quadfs_set_rate, 879 878 .recalc_rate = quadfs_recalc_rate, 880 879 };
+22 -16
drivers/clk/st/clkgen-pll.c
··· 395 395 return rate; 396 396 } 397 397 398 - static long round_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate, 399 - unsigned long *prate) 398 + static int stm_pll3200c32_determine_rate(struct clk_hw *hw, 399 + struct clk_rate_request *req) 400 400 { 401 401 struct stm_pll params; 402 402 403 - if (!clk_pll3200c32_get_params(*prate, rate, &params)) 404 - clk_pll3200c32_get_rate(*prate, &params, &rate); 403 + if (!clk_pll3200c32_get_params(req->best_parent_rate, req->rate, &params)) 404 + clk_pll3200c32_get_rate(req->best_parent_rate, &params, 405 + &req->rate); 405 406 else { 406 407 pr_debug("%s: %s rate %ld Invalid\n", __func__, 407 - __clk_get_name(hw->clk), rate); 408 + __clk_get_name(hw->clk), req->rate); 409 + req->rate = 0; 410 + 408 411 return 0; 409 412 } 410 413 411 414 pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n", 412 415 __func__, __clk_get_name(hw->clk), 413 - rate, (unsigned int)params.ndiv, 416 + req->rate, (unsigned int)params.ndiv, 414 417 (unsigned int)params.idf); 415 418 416 - return rate; 419 + return 0; 417 420 } 418 421 419 422 static int set_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate, ··· 552 549 return rate; 553 550 } 554 551 555 - static long round_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate, 556 - unsigned long *prate) 552 + static int stm_pll4600c28_determine_rate(struct clk_hw *hw, 553 + struct clk_rate_request *req) 557 554 { 558 555 struct stm_pll params; 559 556 560 - if (!clk_pll4600c28_get_params(*prate, rate, &params)) { 561 - clk_pll4600c28_get_rate(*prate, &params, &rate); 557 + if (!clk_pll4600c28_get_params(req->best_parent_rate, req->rate, &params)) { 558 + clk_pll4600c28_get_rate(req->best_parent_rate, &params, 559 + &req->rate); 562 560 } else { 563 561 pr_debug("%s: %s rate %ld Invalid\n", __func__, 564 - __clk_get_name(hw->clk), rate); 562 + __clk_get_name(hw->clk), req->rate); 563 + req->rate = 0; 564 + 565 565 return 0; 566 566 } 567 567 568 568 pr_debug("%s: %s new 
rate %ld [ndiv=%u] [idf=%u]\n", 569 569 __func__, __clk_get_name(hw->clk), 570 - rate, (unsigned int)params.ndiv, 570 + req->rate, (unsigned int)params.ndiv, 571 571 (unsigned int)params.idf); 572 572 573 - return rate; 573 + return 0; 574 574 } 575 575 576 576 static int set_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate, ··· 634 628 .disable = clkgen_pll_disable, 635 629 .is_enabled = clkgen_pll_is_enabled, 636 630 .recalc_rate = recalc_stm_pll3200c32, 637 - .round_rate = round_rate_stm_pll3200c32, 631 + .determine_rate = stm_pll3200c32_determine_rate, 638 632 .set_rate = set_rate_stm_pll3200c32, 639 633 }; 640 634 ··· 643 637 .disable = clkgen_pll_disable, 644 638 .is_enabled = clkgen_pll_is_enabled, 645 639 .recalc_rate = recalc_stm_pll4600c28, 646 - .round_rate = round_rate_stm_pll4600c28, 640 + .determine_rate = stm_pll4600c28_determine_rate, 647 641 .set_rate = set_rate_stm_pll4600c28, 648 642 }; 649 643
+18 -10
drivers/clk/stm32/clk-stm32-core.c
··· 351 351 return ret; 352 352 } 353 353 354 - static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate, 355 - unsigned long *prate) 354 + static int clk_stm32_divider_determine_rate(struct clk_hw *hw, 355 + struct clk_rate_request *req) 356 356 { 357 357 struct clk_stm32_div *div = to_clk_stm32_divider(hw); 358 358 const struct stm32_div_cfg *divider; 359 359 360 360 if (div->div_id == NO_STM32_DIV) 361 - return rate; 361 + return 0; 362 362 363 363 divider = &div->clock_data->dividers[div->div_id]; 364 364 ··· 369 369 val = readl(div->base + divider->offset) >> divider->shift; 370 370 val &= clk_div_mask(divider->width); 371 371 372 - return divider_ro_round_rate(hw, rate, prate, divider->table, 373 - divider->width, divider->flags, 374 - val); 372 + req->rate = divider_ro_round_rate(hw, req->rate, 373 + &req->best_parent_rate, 374 + divider->table, 375 + divider->width, 376 + divider->flags, val); 377 + 378 + return 0; 375 379 } 376 380 377 - return divider_round_rate_parent(hw, clk_hw_get_parent(hw), 378 - rate, prate, divider->table, 379 - divider->width, divider->flags); 381 + req->rate = divider_round_rate_parent(hw, clk_hw_get_parent(hw), 382 + req->rate, 383 + &req->best_parent_rate, 384 + divider->table, 385 + divider->width, divider->flags); 386 + 387 + return 0; 380 388 } 381 389 382 390 static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw, ··· 400 392 401 393 const struct clk_ops clk_stm32_divider_ops = { 402 394 .recalc_rate = clk_stm32_divider_recalc_rate, 403 - .round_rate = clk_stm32_divider_round_rate, 395 + .determine_rate = clk_stm32_divider_determine_rate, 404 396 .set_rate = clk_stm32_divider_set_rate, 405 397 }; 406 398
+8 -5
drivers/clk/stm32/clk-stm32mp1.c
··· 970 970 return mult; 971 971 } 972 972 973 - static long timer_ker_round_rate(struct clk_hw *hw, unsigned long rate, 974 - unsigned long *parent_rate) 973 + static int timer_ker_determine_rate(struct clk_hw *hw, 974 + struct clk_rate_request *req) 975 975 { 976 - unsigned long factor = __bestmult(hw, rate, *parent_rate); 976 + unsigned long factor = __bestmult(hw, req->rate, 977 + req->best_parent_rate); 977 978 978 - return *parent_rate * factor; 979 + req->rate = req->best_parent_rate * factor; 980 + 981 + return 0; 979 982 } 980 983 981 984 static int timer_ker_set_rate(struct clk_hw *hw, unsigned long rate, ··· 1029 1026 1030 1027 static const struct clk_ops timer_ker_ops = { 1031 1028 .recalc_rate = timer_ker_recalc_rate, 1032 - .round_rate = timer_ker_round_rate, 1029 + .determine_rate = timer_ker_determine_rate, 1033 1030 .set_rate = timer_ker_set_rate, 1034 1031 1035 1032 };
+5 -5
drivers/clk/tegra/clk-audio-sync.c
··· 17 17 return sync->rate; 18 18 } 19 19 20 - static long clk_sync_source_round_rate(struct clk_hw *hw, unsigned long rate, 21 - unsigned long *prate) 20 + static int clk_sync_source_determine_rate(struct clk_hw *hw, 21 + struct clk_rate_request *req) 22 22 { 23 23 struct tegra_clk_sync_source *sync = to_clk_sync_source(hw); 24 24 25 - if (rate > sync->max_rate) 25 + if (req->rate > sync->max_rate) 26 26 return -EINVAL; 27 27 else 28 - return rate; 28 + return 0; 29 29 } 30 30 31 31 static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate, ··· 38 38 } 39 39 40 40 const struct clk_ops tegra_clk_sync_source_ops = { 41 - .round_rate = clk_sync_source_round_rate, 41 + .determine_rate = clk_sync_source_determine_rate, 42 42 .set_rate = clk_sync_source_set_rate, 43 43 .recalc_rate = clk_sync_source_recalc_rate, 44 44 };
+18 -10
drivers/clk/tegra/clk-divider.c
··· 58 58 return rate; 59 59 } 60 60 61 - static long clk_frac_div_round_rate(struct clk_hw *hw, unsigned long rate, 62 - unsigned long *prate) 61 + static int clk_frac_div_determine_rate(struct clk_hw *hw, 62 + struct clk_rate_request *req) 63 63 { 64 64 struct tegra_clk_frac_div *divider = to_clk_frac_div(hw); 65 65 int div, mul; 66 - unsigned long output_rate = *prate; 66 + unsigned long output_rate = req->best_parent_rate; 67 67 68 - if (!rate) 69 - return output_rate; 68 + if (!req->rate) { 69 + req->rate = output_rate; 70 70 71 - div = get_div(divider, rate, output_rate); 72 - if (div < 0) 73 - return *prate; 71 + return 0; 72 + } 73 + 74 + div = get_div(divider, req->rate, output_rate); 75 + if (div < 0) { 76 + req->rate = req->best_parent_rate; 77 + 78 + return 0; 79 + } 74 80 75 81 mul = get_mul(divider); 76 82 77 - return DIV_ROUND_UP(output_rate * mul, div + mul); 83 + req->rate = DIV_ROUND_UP(output_rate * mul, div + mul); 84 + 85 + return 0; 78 86 } 79 87 80 88 static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate, ··· 135 127 const struct clk_ops tegra_clk_frac_div_ops = { 136 128 .recalc_rate = clk_frac_div_recalc_rate, 137 129 .set_rate = clk_frac_div_set_rate, 138 - .round_rate = clk_frac_div_round_rate, 130 + .determine_rate = clk_frac_div_determine_rate, 139 131 .restore_context = clk_divider_restore_context, 140 132 }; 141 133
+1 -7
drivers/clk/tegra/clk-periph.c
··· 51 51 struct tegra_clk_periph *periph = to_clk_periph(hw); 52 52 const struct clk_ops *div_ops = periph->div_ops; 53 53 struct clk_hw *div_hw = &periph->divider.hw; 54 - long rate; 55 54 56 55 __clk_hw_set_clk(div_hw, hw); 57 56 58 - rate = div_ops->round_rate(div_hw, req->rate, &req->best_parent_rate); 59 - if (rate < 0) 60 - return rate; 61 - 62 - req->rate = (unsigned long)rate; 63 - return 0; 57 + return div_ops->determine_rate(div_hw, req); 64 58 } 65 59 66 60 static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
+31 -21
drivers/clk/tegra/clk-pll.c
··· 840 840 return ret; 841 841 } 842 842 843 - static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, 844 - unsigned long *prate) 843 + static int clk_pll_determine_rate(struct clk_hw *hw, 844 + struct clk_rate_request *req) 845 845 { 846 846 struct tegra_clk_pll *pll = to_clk_pll(hw); 847 847 struct tegra_clk_pll_freq_table cfg; ··· 849 849 if (pll->params->flags & TEGRA_PLL_FIXED) { 850 850 /* PLLM/MB are used for memory; we do not change rate */ 851 851 if (pll->params->flags & (TEGRA_PLLM | TEGRA_PLLMB)) 852 - return clk_hw_get_rate(hw); 853 - return pll->params->fixed_rate; 852 + req->rate = clk_hw_get_rate(hw); 853 + else 854 + req->rate = pll->params->fixed_rate; 855 + 856 + return 0; 854 857 } 855 858 856 - if (_get_table_rate(hw, &cfg, rate, *prate) && 857 - pll->params->calc_rate(hw, &cfg, rate, *prate)) 859 + if (_get_table_rate(hw, &cfg, req->rate, req->best_parent_rate) && 860 + pll->params->calc_rate(hw, &cfg, req->rate, req->best_parent_rate)) 858 861 return -EINVAL; 859 862 860 - return cfg.output_rate; 863 + req->rate = cfg.output_rate; 864 + 865 + return 0; 861 866 } 862 867 863 868 static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, ··· 1062 1057 .enable = clk_pll_enable, 1063 1058 .disable = clk_pll_disable, 1064 1059 .recalc_rate = clk_pll_recalc_rate, 1065 - .round_rate = clk_pll_round_rate, 1060 + .determine_rate = clk_pll_determine_rate, 1066 1061 .set_rate = clk_pll_set_rate, 1067 1062 .restore_context = tegra_clk_pll_restore_context, 1068 1063 }; ··· 1200 1195 .enable = clk_pllu_enable, 1201 1196 .disable = clk_pll_disable, 1202 1197 .recalc_rate = clk_pll_recalc_rate, 1203 - .round_rate = clk_pll_round_rate, 1198 + .determine_rate = clk_pll_determine_rate, 1204 1199 .set_rate = clk_pll_set_rate, 1205 1200 }; 1206 1201 ··· 1358 1353 return ret; 1359 1354 } 1360 1355 1361 - static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate, 1362 - unsigned long *prate) 1356 + static int 
clk_pll_ramp_determine_rate(struct clk_hw *hw, 1357 + struct clk_rate_request *req) 1363 1358 { 1364 1359 struct tegra_clk_pll *pll = to_clk_pll(hw); 1365 1360 struct tegra_clk_pll_freq_table cfg; 1366 1361 int ret, p_div; 1367 - u64 output_rate = *prate; 1362 + u64 output_rate = req->best_parent_rate; 1368 1363 1369 - ret = _pll_ramp_calc_pll(hw, &cfg, rate, *prate); 1364 + ret = _pll_ramp_calc_pll(hw, &cfg, req->rate, req->best_parent_rate); 1370 1365 if (ret < 0) 1371 1366 return ret; 1372 1367 ··· 1380 1375 output_rate *= cfg.n; 1381 1376 do_div(output_rate, cfg.m * p_div); 1382 1377 1383 - return output_rate; 1378 + req->rate = output_rate; 1379 + 1380 + return 0; 1384 1381 } 1385 1382 1386 1383 static void _pllcx_strobe(struct tegra_clk_pll *pll) ··· 1605 1598 return rate; 1606 1599 } 1607 1600 1608 - static long clk_pllre_round_rate(struct clk_hw *hw, unsigned long rate, 1609 - unsigned long *prate) 1601 + static int clk_pllre_determine_rate(struct clk_hw *hw, 1602 + struct clk_rate_request *req) 1610 1603 { 1611 1604 struct tegra_clk_pll *pll = to_clk_pll(hw); 1612 1605 1613 - return _pllre_calc_rate(pll, NULL, rate, *prate); 1606 + req->rate = _pllre_calc_rate(pll, NULL, req->rate, 1607 + req->best_parent_rate); 1608 + 1609 + return 0; 1614 1610 } 1615 1611 1616 1612 static int clk_plle_tegra114_enable(struct clk_hw *hw) ··· 2013 2003 .enable = clk_pll_enable, 2014 2004 .disable = clk_pll_disable, 2015 2005 .recalc_rate = clk_pll_recalc_rate, 2016 - .round_rate = clk_pll_ramp_round_rate, 2006 + .determine_rate = clk_pll_ramp_determine_rate, 2017 2007 .set_rate = clk_pllxc_set_rate, 2018 2008 }; 2019 2009 ··· 2022 2012 .enable = clk_pllc_enable, 2023 2013 .disable = clk_pllc_disable, 2024 2014 .recalc_rate = clk_pll_recalc_rate, 2025 - .round_rate = clk_pll_ramp_round_rate, 2015 + .determine_rate = clk_pll_ramp_determine_rate, 2026 2016 .set_rate = clk_pllc_set_rate, 2027 2017 }; 2028 2018 ··· 2031 2021 .enable = clk_pll_enable, 2032 2022 .disable = 
clk_pll_disable, 2033 2023 .recalc_rate = clk_pllre_recalc_rate, 2034 - .round_rate = clk_pllre_round_rate, 2024 + .determine_rate = clk_pllre_determine_rate, 2035 2025 .set_rate = clk_pllre_set_rate, 2036 2026 }; 2037 2027 ··· 2331 2321 .enable = clk_pll_enable, 2332 2322 .disable = clk_pll_disable, 2333 2323 .recalc_rate = clk_pll_recalc_rate, 2334 - .round_rate = clk_pll_ramp_round_rate, 2324 + .determine_rate = clk_pll_ramp_determine_rate, 2335 2325 .set_rate = clk_pllxc_set_rate, 2336 2326 .restore_context = tegra_clk_pll_restore_context, 2337 2327 };
+1 -8
drivers/clk/tegra/clk-super.c
··· 147 147 { 148 148 struct tegra_clk_super_mux *super = to_clk_super_mux(hw); 149 149 struct clk_hw *div_hw = &super->frac_div.hw; 150 - unsigned long rate; 151 150 152 151 __clk_hw_set_clk(div_hw, hw); 153 152 154 - rate = super->div_ops->round_rate(div_hw, req->rate, 155 - &req->best_parent_rate); 156 - if (rate < 0) 157 - return rate; 158 - 159 - req->rate = rate; 160 - return 0; 153 + return super->div_ops->determine_rate(div_hw, req); 161 154 } 162 155 163 156 static unsigned long clk_super_recalc_rate(struct clk_hw *hw,
+17 -9
drivers/clk/tegra/clk-tegra210-emc.c
··· 86 86 return DIV_ROUND_UP(parent_rate * 2, div); 87 87 } 88 88 89 - static long tegra210_clk_emc_round_rate(struct clk_hw *hw, unsigned long rate, 90 - unsigned long *prate) 89 + static int tegra210_clk_emc_determine_rate(struct clk_hw *hw, 90 + struct clk_rate_request *req) 91 91 { 92 92 struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw); 93 93 struct tegra210_clk_emc_provider *provider = emc->provider; 94 94 unsigned int i; 95 95 96 - if (!provider || !provider->configs || provider->num_configs == 0) 97 - return clk_hw_get_rate(hw); 96 + if (!provider || !provider->configs || provider->num_configs == 0) { 97 + req->rate = clk_hw_get_rate(hw); 98 98 99 - for (i = 0; i < provider->num_configs; i++) { 100 - if (provider->configs[i].rate >= rate) 101 - return provider->configs[i].rate; 99 + return 0; 102 100 } 103 101 104 - return provider->configs[i - 1].rate; 102 + for (i = 0; i < provider->num_configs; i++) { 103 + if (provider->configs[i].rate >= req->rate) { 104 + req->rate = provider->configs[i].rate; 105 + 106 + return 0; 107 + } 108 + } 109 + 110 + req->rate = provider->configs[i - 1].rate; 111 + 112 + return 0; 105 113 } 106 114 107 115 static struct clk *tegra210_clk_emc_find_parent(struct tegra210_clk_emc *emc, ··· 267 259 static const struct clk_ops tegra210_clk_emc_ops = { 268 260 .get_parent = tegra210_clk_emc_get_parent, 269 261 .recalc_rate = tegra210_clk_emc_recalc_rate, 270 - .round_rate = tegra210_clk_emc_round_rate, 262 + .determine_rate = tegra210_clk_emc_determine_rate, 271 263 .set_rate = tegra210_clk_emc_set_rate, 272 264 }; 273 265
+7 -5
drivers/clk/ti/clk-dra7-atl.c
··· 120 120 return parent_rate / cdesc->divider; 121 121 } 122 122 123 - static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate, 124 - unsigned long *parent_rate) 123 + static int atl_clk_determine_rate(struct clk_hw *hw, 124 + struct clk_rate_request *req) 125 125 { 126 126 unsigned divider; 127 127 128 - divider = (*parent_rate + rate / 2) / rate; 128 + divider = (req->best_parent_rate + req->rate / 2) / req->rate; 129 129 if (divider > DRA7_ATL_DIVIDER_MASK + 1) 130 130 divider = DRA7_ATL_DIVIDER_MASK + 1; 131 131 132 - return *parent_rate / divider; 132 + req->rate = req->best_parent_rate / divider; 133 + 134 + return 0; 133 135 } 134 136 135 137 static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate, ··· 158 156 .disable = atl_clk_disable, 159 157 .is_enabled = atl_clk_is_enabled, 160 158 .recalc_rate = atl_clk_recalc_rate, 161 - .round_rate = atl_clk_round_rate, 159 + .determine_rate = atl_clk_determine_rate, 162 160 .set_rate = atl_clk_set_rate, 163 161 }; 164 162
+18 -18
drivers/clk/ti/clkt_dpll.c
··· 268 268 /* DPLL rate rounding code */ 269 269 270 270 /** 271 - * omap2_dpll_round_rate - round a target rate for an OMAP DPLL 271 + * omap2_dpll_determine_rate - round a target rate for an OMAP DPLL 272 272 * @hw: struct clk_hw containing the struct clk * for a DPLL 273 - * @target_rate: desired DPLL clock rate 274 - * @parent_rate: parent's DPLL clock rate 273 + * @req: rate request 275 274 * 276 275 * Given a DPLL and a desired target rate, round the target rate to a 277 276 * possible, programmable rate for this DPLL. Attempts to select the 278 277 * minimum possible n. Stores the computed (m, n) in the DPLL's 279 278 * dpll_data structure so set_rate() will not need to call this 280 - * (expensive) function again. Returns ~0 if the target rate cannot 281 - * be rounded, or the rounded rate upon success. 279 + * (expensive) function again. Returns -EINVAL if the target rate 280 + * cannot be rounded, or the rounded rate upon success. 282 281 */ 283 - long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, 284 - unsigned long *parent_rate) 282 + int omap2_dpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 285 283 { 286 284 struct clk_hw_omap *clk = to_clk_hw_omap(hw); 287 285 int m, n, r, scaled_max_m; ··· 293 295 const char *clk_name; 294 296 295 297 if (!clk || !clk->dpll_data) 296 - return ~0; 298 + return -EINVAL; 297 299 298 300 dd = clk->dpll_data; 299 301 300 - if (dd->max_rate && target_rate > dd->max_rate) 301 - target_rate = dd->max_rate; 302 + if (dd->max_rate && req->rate > dd->max_rate) 303 + req->rate = dd->max_rate; 302 304 303 305 ref_rate = clk_hw_get_rate(dd->clk_ref); 304 306 clk_name = clk_hw_get_name(hw); 305 307 pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n", 306 - clk_name, target_rate); 308 + clk_name, req->rate); 307 309 308 - scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR); 310 + scaled_rt_rp = req->rate / (ref_rate / DPLL_SCALE_FACTOR); 309 311 scaled_max_m = 
dd->max_multiplier * DPLL_SCALE_FACTOR; 310 312 311 313 dd->last_rounded_rate = 0; ··· 330 332 if (m > scaled_max_m) 331 333 break; 332 334 333 - r = _dpll_test_mult(&m, n, &new_rate, target_rate, 335 + r = _dpll_test_mult(&m, n, &new_rate, req->rate, 334 336 ref_rate); 335 337 336 338 /* m can't be set low enough for this n - try with a larger n */ ··· 338 340 continue; 339 341 340 342 /* skip rates above our target rate */ 341 - delta = target_rate - new_rate; 343 + delta = req->rate - new_rate; 342 344 if (delta < 0) 343 345 continue; 344 346 ··· 357 359 358 360 if (prev_min_delta == LONG_MAX) { 359 361 pr_debug("clock: %s: cannot round to rate %lu\n", 360 - clk_name, target_rate); 361 - return ~0; 362 + clk_name, req->rate); 363 + return -EINVAL; 362 364 } 363 365 364 366 dd->last_rounded_m = min_delta_m; 365 367 dd->last_rounded_n = min_delta_n; 366 - dd->last_rounded_rate = target_rate - prev_min_delta; 368 + dd->last_rounded_rate = req->rate - prev_min_delta; 367 369 368 - return dd->last_rounded_rate; 370 + req->rate = dd->last_rounded_rate; 371 + 372 + return 0; 369 373 }
+1 -5
drivers/clk/ti/clock.h
··· 273 273 u8 index); 274 274 int omap3_noncore_dpll_determine_rate(struct clk_hw *hw, 275 275 struct clk_rate_request *req); 276 - long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, 277 - unsigned long *parent_rate); 276 + int omap2_dpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req); 278 277 unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, 279 278 unsigned long parent_rate); 280 279 ··· 295 296 296 297 unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, 297 298 unsigned long parent_rate); 298 - long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, 299 - unsigned long target_rate, 300 - unsigned long *parent_rate); 301 299 int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, 302 300 struct clk_rate_request *req); 303 301 int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
+3 -3
drivers/clk/ti/composite.c
··· 26 26 return ti_clk_divider_ops.recalc_rate(hw, parent_rate); 27 27 } 28 28 29 - static long ti_composite_round_rate(struct clk_hw *hw, unsigned long rate, 30 - unsigned long *prate) 29 + static int ti_composite_determine_rate(struct clk_hw *hw, 30 + struct clk_rate_request *req) 31 31 { 32 32 return -EINVAL; 33 33 } ··· 40 40 41 41 static const struct clk_ops ti_composite_divider_ops = { 42 42 .recalc_rate = &ti_composite_recalc_rate, 43 - .round_rate = &ti_composite_round_rate, 43 + .determine_rate = &ti_composite_determine_rate, 44 44 .set_rate = &ti_composite_set_rate, 45 45 }; 46 46
+7 -5
drivers/clk/ti/divider.c
··· 223 223 return bestdiv; 224 224 } 225 225 226 - static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, 227 - unsigned long *prate) 226 + static int ti_clk_divider_determine_rate(struct clk_hw *hw, 227 + struct clk_rate_request *req) 228 228 { 229 229 int div; 230 - div = ti_clk_divider_bestdiv(hw, rate, prate); 230 + div = ti_clk_divider_bestdiv(hw, req->rate, &req->best_parent_rate); 231 231 232 - return DIV_ROUND_UP(*prate, div); 232 + req->rate = DIV_ROUND_UP(req->best_parent_rate, div); 233 + 234 + return 0; 233 235 } 234 236 235 237 static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, ··· 301 299 302 300 const struct clk_ops ti_clk_divider_ops = { 303 301 .recalc_rate = ti_clk_divider_recalc_rate, 304 - .round_rate = ti_clk_divider_round_rate, 302 + .determine_rate = ti_clk_divider_determine_rate, 305 303 .set_rate = ti_clk_divider_set_rate, 306 304 .save_context = clk_divider_save_context, 307 305 .restore_context = clk_divider_restore_context,
+2 -8
drivers/clk/ti/dpll.c
··· 25 25 .enable = &omap3_noncore_dpll_enable, 26 26 .disable = &omap3_noncore_dpll_disable, 27 27 .recalc_rate = &omap4_dpll_regm4xen_recalc, 28 - .round_rate = &omap4_dpll_regm4xen_round_rate, 29 28 .set_rate = &omap3_noncore_dpll_set_rate, 30 29 .set_parent = &omap3_noncore_dpll_set_parent, 31 30 .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent, ··· 47 48 .enable = &omap3_noncore_dpll_enable, 48 49 .disable = &omap3_noncore_dpll_disable, 49 50 .recalc_rate = &omap3_dpll_recalc, 50 - .round_rate = &omap2_dpll_round_rate, 51 51 .set_rate = &omap3_noncore_dpll_set_rate, 52 52 .set_parent = &omap3_noncore_dpll_set_parent, 53 53 .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent, ··· 59 61 static const struct clk_ops dpll_no_gate_ck_ops = { 60 62 .recalc_rate = &omap3_dpll_recalc, 61 63 .get_parent = &omap2_init_dpll_parent, 62 - .round_rate = &omap2_dpll_round_rate, 63 64 .set_rate = &omap3_noncore_dpll_set_rate, 64 65 .set_parent = &omap3_noncore_dpll_set_parent, 65 66 .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent, ··· 77 80 static const struct clk_ops omap2_dpll_core_ck_ops = { 78 81 .get_parent = &omap2_init_dpll_parent, 79 82 .recalc_rate = &omap2_dpllcore_recalc, 80 - .round_rate = &omap2_dpll_round_rate, 83 + .determine_rate = &omap2_dpll_determine_rate, 81 84 .set_rate = &omap2_reprogram_dpllcore, 82 85 }; 83 86 #else ··· 88 91 static const struct clk_ops omap3_dpll_core_ck_ops = { 89 92 .get_parent = &omap2_init_dpll_parent, 90 93 .recalc_rate = &omap3_dpll_recalc, 91 - .round_rate = &omap2_dpll_round_rate, 94 + .determine_rate = &omap2_dpll_determine_rate, 92 95 }; 93 96 94 97 static const struct clk_ops omap3_dpll_ck_ops = { ··· 100 103 .set_parent = &omap3_noncore_dpll_set_parent, 101 104 .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent, 102 105 .determine_rate = &omap3_noncore_dpll_determine_rate, 103 - .round_rate = &omap2_dpll_round_rate, 104 106 }; 105 107 106 108 static const struct clk_ops 
omap3_dpll5_ck_ops = { ··· 111 115 .set_parent = &omap3_noncore_dpll_set_parent, 112 116 .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent, 113 117 .determine_rate = &omap3_noncore_dpll_determine_rate, 114 - .round_rate = &omap2_dpll_round_rate, 115 118 }; 116 119 117 120 static const struct clk_ops omap3_dpll_per_ck_ops = { ··· 122 127 .set_parent = &omap3_noncore_dpll_set_parent, 123 128 .set_rate_and_parent = &omap3_dpll4_set_rate_and_parent, 124 129 .determine_rate = &omap3_noncore_dpll_determine_rate, 125 - .round_rate = &omap2_dpll_round_rate, 126 130 }; 127 131 #endif 128 132
+5 -2
drivers/clk/ti/dpll3xxx.c
··· 587 587 { 588 588 struct clk_hw_omap *clk = to_clk_hw_omap(hw); 589 589 struct dpll_data *dd; 590 + int ret; 590 591 591 592 if (!req->rate) 592 593 return -EINVAL; ··· 600 599 (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { 601 600 req->best_parent_hw = dd->clk_bypass; 602 601 } else { 603 - req->rate = omap2_dpll_round_rate(hw, req->rate, 604 - &req->best_parent_rate); 602 + ret = omap2_dpll_determine_rate(hw, req); 603 + if (ret != 0) 604 + return ret; 605 + 605 606 req->best_parent_hw = dd->clk_ref; 606 607 } 607 608
+31 -58
drivers/clk/ti/dpll44xx.c
··· 134 134 } 135 135 136 136 /** 137 - * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit 138 - * @hw: struct hw_clk containing the struct clk * of the DPLL to round a rate for 139 - * @target_rate: the desired rate of the DPLL 140 - * @parent_rate: clock rate of the DPLL parent 141 - * 142 - * Compute the rate that would be programmed into the DPLL hardware 143 - * for @clk if set_rate() were to be provided with the rate 144 - * @target_rate. Takes the REGM4XEN bit into consideration, which is 145 - * needed for the OMAP4 ABE DPLL. Returns the rounded rate (before 146 - * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or 147 - * ~0 if an error occurred in omap2_dpll_round_rate(). 148 - */ 149 - long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, 150 - unsigned long target_rate, 151 - unsigned long *parent_rate) 152 - { 153 - struct clk_hw_omap *clk = to_clk_hw_omap(hw); 154 - struct dpll_data *dd; 155 - long r; 156 - 157 - if (!clk || !clk->dpll_data) 158 - return -EINVAL; 159 - 160 - dd = clk->dpll_data; 161 - 162 - dd->last_rounded_m4xen = 0; 163 - 164 - /* 165 - * First try to compute the DPLL configuration for 166 - * target rate without using the 4X multiplier. 167 - */ 168 - r = omap2_dpll_round_rate(hw, target_rate, NULL); 169 - if (r != ~0) 170 - goto out; 171 - 172 - /* 173 - * If we did not find a valid DPLL configuration, try again, but 174 - * this time see if using the 4X multiplier can help. Enabling the 175 - * 4X multiplier is equivalent to dividing the target rate by 4. 
176 - */ 177 - r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT, 178 - NULL); 179 - if (r == ~0) 180 - return r; 181 - 182 - dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT; 183 - dd->last_rounded_m4xen = 1; 184 - 185 - out: 186 - omap4_dpll_lpmode_recalc(dd); 187 - 188 - return dd->last_rounded_rate; 189 - } 190 - 191 - /** 192 137 * omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL 193 138 * @hw: pointer to the clock to determine rate for 194 139 * @req: target rate request 195 140 * 196 141 * Determines which DPLL mode to use for reaching a desired rate. 197 142 * Checks whether the DPLL shall be in bypass or locked mode, and if 198 - * locked, calculates the M,N values for the DPLL via round-rate. 143 + * locked, calculates the M,N values for the DPLL. 199 144 * Returns 0 on success and a negative error value otherwise. 200 145 */ 201 146 int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, ··· 160 215 (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { 161 216 req->best_parent_hw = dd->clk_bypass; 162 217 } else { 163 - req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate, 164 - &req->best_parent_rate); 218 + struct clk_rate_request tmp_req; 219 + long r; 220 + 221 + clk_hw_init_rate_request(hw, &tmp_req, req->rate); 222 + dd->last_rounded_m4xen = 0; 223 + 224 + /* 225 + * First try to compute the DPLL configuration for 226 + * target rate without using the 4X multiplier. 227 + */ 228 + 229 + r = omap2_dpll_determine_rate(hw, &tmp_req); 230 + if (r < 0) { 231 + /* 232 + * If we did not find a valid DPLL configuration, try again, but 233 + * this time see if using the 4X multiplier can help. Enabling the 234 + * 4X multiplier is equivalent to dividing the target rate by 4. 
235 + */ 236 + tmp_req.rate /= OMAP4430_REGM4XEN_MULT; 237 + r = omap2_dpll_determine_rate(hw, &tmp_req); 238 + if (r < 0) 239 + return r; 240 + 241 + dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT; 242 + dd->last_rounded_m4xen = 1; 243 + } 244 + 245 + omap4_dpll_lpmode_recalc(dd); 246 + 247 + req->rate = dd->last_rounded_rate; 165 248 req->best_parent_hw = dd->clk_ref; 166 249 } 167 250
+27 -21
drivers/clk/ti/fapll.c
··· 214 214 return 0; 215 215 } 216 216 217 - static long ti_fapll_round_rate(struct clk_hw *hw, unsigned long rate, 218 - unsigned long *parent_rate) 217 + static int ti_fapll_determine_rate(struct clk_hw *hw, 218 + struct clk_rate_request *req) 219 219 { 220 220 u32 pre_div_p, mult_n; 221 221 int error; 222 222 223 - if (!rate) 223 + if (!req->rate) 224 224 return -EINVAL; 225 225 226 - error = ti_fapll_set_div_mult(rate, *parent_rate, 226 + error = ti_fapll_set_div_mult(req->rate, req->best_parent_rate, 227 227 &pre_div_p, &mult_n); 228 - if (error) 229 - return error; 228 + if (error) { 229 + req->rate = error; 230 230 231 - rate = *parent_rate / pre_div_p; 232 - rate *= mult_n; 231 + return 0; 232 + } 233 233 234 - return rate; 234 + req->rate = req->best_parent_rate / pre_div_p; 235 + req->rate *= mult_n; 236 + 237 + return 0; 235 238 } 236 239 237 240 static int ti_fapll_set_rate(struct clk_hw *hw, unsigned long rate, ··· 271 268 .is_enabled = ti_fapll_is_enabled, 272 269 .recalc_rate = ti_fapll_recalc_rate, 273 270 .get_parent = ti_fapll_get_parent, 274 - .round_rate = ti_fapll_round_rate, 271 + .determine_rate = ti_fapll_determine_rate, 275 272 .set_rate = ti_fapll_set_rate, 276 273 }; 277 274 ··· 402 399 return post_div_m; 403 400 } 404 401 405 - static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate, 406 - unsigned long *parent_rate) 402 + static int ti_fapll_synth_determine_rate(struct clk_hw *hw, 403 + struct clk_rate_request *req) 407 404 { 408 405 struct fapll_synth *synth = to_synth(hw); 409 406 struct fapll_data *fd = synth->fd; 410 407 unsigned long r; 411 408 412 - if (ti_fapll_clock_is_bypass(fd) || !synth->div || !rate) 409 + if (ti_fapll_clock_is_bypass(fd) || !synth->div || !req->rate) 413 410 return -EINVAL; 414 411 415 412 /* Only post divider m available with no fractional divider? 
*/ ··· 417 414 unsigned long frac_rate; 418 415 u32 synth_post_div_m; 419 416 420 - frac_rate = ti_fapll_synth_get_frac_rate(hw, *parent_rate); 421 - synth_post_div_m = DIV_ROUND_UP(frac_rate, rate); 417 + frac_rate = ti_fapll_synth_get_frac_rate(hw, 418 + req->best_parent_rate); 419 + synth_post_div_m = DIV_ROUND_UP(frac_rate, req->rate); 422 420 r = DIV_ROUND_UP(frac_rate, synth_post_div_m); 423 421 goto out; 424 422 } 425 423 426 - r = *parent_rate * SYNTH_PHASE_K; 427 - if (rate > r) 424 + r = req->best_parent_rate * SYNTH_PHASE_K; 425 + if (req->rate > r) 428 426 goto out; 429 427 430 428 r = DIV_ROUND_UP_ULL(r, SYNTH_MAX_INT_DIV * SYNTH_MAX_DIV_M); 431 - if (rate < r) 429 + if (req->rate < r) 432 430 goto out; 433 431 434 - r = rate; 432 + r = req->rate; 435 433 out: 436 - return r; 434 + req->rate = r; 435 + 436 + return 0; 437 437 } 438 438 439 439 static int ti_fapll_synth_set_rate(struct clk_hw *hw, unsigned long rate, ··· 483 477 .disable = ti_fapll_synth_disable, 484 478 .is_enabled = ti_fapll_synth_is_enabled, 485 479 .recalc_rate = ti_fapll_synth_recalc_rate, 486 - .round_rate = ti_fapll_synth_round_rate, 480 + .determine_rate = ti_fapll_synth_determine_rate, 487 481 .set_rate = ti_fapll_synth_set_rate, 488 482 }; 489 483
+8 -6
drivers/clk/ux500/clk-prcmu.c
··· 53 53 return prcmu_clock_rate(clk->cg_sel); 54 54 } 55 55 56 - static long clk_prcmu_round_rate(struct clk_hw *hw, unsigned long rate, 57 - unsigned long *parent_rate) 56 + static int clk_prcmu_determine_rate(struct clk_hw *hw, 57 + struct clk_rate_request *req) 58 58 { 59 59 struct clk_prcmu *clk = to_clk_prcmu(hw); 60 - return prcmu_round_clock_rate(clk->cg_sel, rate); 60 + req->rate = prcmu_round_clock_rate(clk->cg_sel, req->rate); 61 + 62 + return 0; 61 63 } 62 64 63 65 static int clk_prcmu_set_rate(struct clk_hw *hw, unsigned long rate, ··· 159 157 .prepare = clk_prcmu_prepare, 160 158 .unprepare = clk_prcmu_unprepare, 161 159 .recalc_rate = clk_prcmu_recalc_rate, 162 - .round_rate = clk_prcmu_round_rate, 160 + .determine_rate = clk_prcmu_determine_rate, 163 161 .set_rate = clk_prcmu_set_rate, 164 162 }; 165 163 ··· 171 169 172 170 static const struct clk_ops clk_prcmu_scalable_rate_ops = { 173 171 .recalc_rate = clk_prcmu_recalc_rate, 174 - .round_rate = clk_prcmu_round_rate, 172 + .determine_rate = clk_prcmu_determine_rate, 175 173 .set_rate = clk_prcmu_set_rate, 176 174 }; 177 175 ··· 189 187 .prepare = clk_prcmu_opp_volt_prepare, 190 188 .unprepare = clk_prcmu_opp_volt_unprepare, 191 189 .recalc_rate = clk_prcmu_recalc_rate, 192 - .round_rate = clk_prcmu_round_rate, 190 + .determine_rate = clk_prcmu_determine_rate, 193 191 .set_rate = clk_prcmu_set_rate, 194 192 }; 195 193
+45 -27
drivers/clk/versatile/clk-icst.c
··· 234 234 return icst->rate; 235 235 } 236 236 237 - static long icst_round_rate(struct clk_hw *hw, unsigned long rate, 238 - unsigned long *prate) 237 + static int icst_determine_rate(struct clk_hw *hw, 238 + struct clk_rate_request *req) 239 239 { 240 240 struct clk_icst *icst = to_icst(hw); 241 241 struct icst_vco vco; 242 242 243 243 if (icst->ctype == ICST_INTEGRATOR_AP_CM || 244 244 icst->ctype == ICST_INTEGRATOR_CP_CM_CORE) { 245 - if (rate <= 12000000) 246 - return 12000000; 247 - if (rate >= 160000000) 248 - return 160000000; 249 - /* Slam to closest megahertz */ 250 - return DIV_ROUND_CLOSEST(rate, 1000000) * 1000000; 245 + if (req->rate <= 12000000) 246 + req->rate = 12000000; 247 + else if (req->rate >= 160000000) 248 + req->rate = 160000000; 249 + else { 250 + /* Slam to closest megahertz */ 251 + req->rate = DIV_ROUND_CLOSEST(req->rate, 1000000) * 1000000; 252 + } 253 + 254 + return 0; 251 255 } 252 256 253 257 if (icst->ctype == ICST_INTEGRATOR_CP_CM_MEM) { 254 - if (rate <= 6000000) 255 - return 6000000; 256 - if (rate >= 66000000) 257 - return 66000000; 258 - /* Slam to closest 0.5 megahertz */ 259 - return DIV_ROUND_CLOSEST(rate, 500000) * 500000; 258 + if (req->rate <= 6000000) 259 + req->rate = 6000000; 260 + else if (req->rate >= 66000000) 261 + req->rate = 66000000; 262 + else { 263 + /* Slam to closest 0.5 megahertz */ 264 + req->rate = DIV_ROUND_CLOSEST(req->rate, 500000) * 500000; 265 + } 266 + 267 + return 0; 260 268 } 261 269 262 270 if (icst->ctype == ICST_INTEGRATOR_AP_SYS) { 263 271 /* Divides between 3 and 50 MHz in steps of 0.25 MHz */ 264 - if (rate <= 3000000) 265 - return 3000000; 266 - if (rate >= 50000000) 267 - return 5000000; 268 - /* Slam to closest 0.25 MHz */ 269 - return DIV_ROUND_CLOSEST(rate, 250000) * 250000; 272 + if (req->rate <= 3000000) 273 + req->rate = 3000000; 274 + else if (req->rate >= 50000000) 275 + req->rate = 5000000; 276 + else { 277 + /* Slam to closest 0.25 MHz */ 278 + req->rate = 
DIV_ROUND_CLOSEST(req->rate, 250000) * 250000; 279 + } 280 + 281 + return 0; 270 282 } 271 283 272 284 if (icst->ctype == ICST_INTEGRATOR_AP_PCI) { ··· 286 274 * If we're below or less than halfway from 25 to 33 MHz 287 275 * select 25 MHz 288 276 */ 289 - if (rate <= 25000000 || rate < 29000000) 290 - return 25000000; 291 - /* Else just return the default frequency */ 292 - return 33000000; 277 + if (req->rate <= 25000000 || req->rate < 29000000) 278 + req->rate = 25000000; 279 + else { 280 + /* Else just return the default frequency */ 281 + req->rate = 33000000; 282 + } 283 + 284 + return 0; 293 285 } 294 286 295 - vco = icst_hz_to_vco(icst->params, rate); 296 - return icst_hz(icst->params, vco); 287 + vco = icst_hz_to_vco(icst->params, req->rate); 288 + req->rate = icst_hz(icst->params, vco); 289 + 290 + return 0; 297 291 } 298 292 299 293 static int icst_set_rate(struct clk_hw *hw, unsigned long rate, ··· 347 329 348 330 static const struct clk_ops icst_ops = { 349 331 .recalc_rate = icst_recalc_rate, 350 - .round_rate = icst_round_rate, 332 + .determine_rate = icst_determine_rate, 351 333 .set_rate = icst_set_rate, 352 334 }; 353 335
+8 -8
drivers/clk/versatile/clk-vexpress-osc.c
··· 33 33 return rate; 34 34 } 35 35 36 - static long vexpress_osc_round_rate(struct clk_hw *hw, unsigned long rate, 37 - unsigned long *parent_rate) 36 + static int vexpress_osc_determine_rate(struct clk_hw *hw, 37 + struct clk_rate_request *req) 38 38 { 39 39 struct vexpress_osc *osc = to_vexpress_osc(hw); 40 40 41 - if (osc->rate_min && rate < osc->rate_min) 42 - rate = osc->rate_min; 41 + if (osc->rate_min && req->rate < osc->rate_min) 42 + req->rate = osc->rate_min; 43 43 44 - if (osc->rate_max && rate > osc->rate_max) 45 - rate = osc->rate_max; 44 + if (osc->rate_max && req->rate > osc->rate_max) 45 + req->rate = osc->rate_max; 46 46 47 - return rate; 47 + return 0; 48 48 } 49 49 50 50 static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate, ··· 57 57 58 58 static const struct clk_ops vexpress_osc_ops = { 59 59 .recalc_rate = vexpress_osc_recalc_rate, 60 - .round_rate = vexpress_osc_round_rate, 60 + .determine_rate = vexpress_osc_determine_rate, 61 61 .set_rate = vexpress_osc_set_rate, 62 62 }; 63 63
+11 -6
drivers/clk/visconti/pll.c
··· 100 100 return rate_table[0].rate; 101 101 } 102 102 103 - static long visconti_pll_round_rate(struct clk_hw *hw, 104 - unsigned long rate, unsigned long *prate) 103 + static int visconti_pll_determine_rate(struct clk_hw *hw, 104 + struct clk_rate_request *req) 105 105 { 106 106 struct visconti_pll *pll = to_visconti_pll(hw); 107 107 const struct visconti_pll_rate_table *rate_table = pll->rate_table; ··· 109 109 110 110 /* Assuming rate_table is in descending order */ 111 111 for (i = 0; i < pll->rate_count; i++) 112 - if (rate >= rate_table[i].rate) 113 - return rate_table[i].rate; 112 + if (req->rate >= rate_table[i].rate) { 113 + req->rate = rate_table[i].rate; 114 + 115 + return 0; 116 + } 114 117 115 118 /* return minimum supported value */ 116 - return rate_table[i - 1].rate; 119 + req->rate = rate_table[i - 1].rate; 120 + 121 + return 0; 117 122 } 118 123 119 124 static unsigned long visconti_pll_recalc_rate(struct clk_hw *hw, ··· 237 232 .enable = visconti_pll_enable, 238 233 .disable = visconti_pll_disable, 239 234 .is_enabled = visconti_pll_is_enabled, 240 - .round_rate = visconti_pll_round_rate, 235 + .determine_rate = visconti_pll_determine_rate, 241 236 .recalc_rate = visconti_pll_recalc_rate, 242 237 .set_rate = visconti_pll_set_rate, 243 238 };
+20 -15
drivers/clk/x86/clk-cgu.c
··· 132 132 divider->flags, divider->width); 133 133 } 134 134 135 - static long 136 - lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, 137 - unsigned long *prate) 135 + static int lgm_clk_divider_determine_rate(struct clk_hw *hw, 136 + struct clk_rate_request *req) 138 137 { 139 138 struct lgm_clk_divider *divider = to_lgm_clk_divider(hw); 140 139 141 - return divider_round_rate(hw, rate, prate, divider->table, 142 - divider->width, divider->flags); 140 + req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, divider->table, 141 + divider->width, divider->flags); 142 + 143 + return 0; 143 144 } 144 145 145 146 static int ··· 183 182 184 183 static const struct clk_ops lgm_clk_divider_ops = { 185 184 .recalc_rate = lgm_clk_divider_recalc_rate, 186 - .round_rate = lgm_clk_divider_round_rate, 185 + .determine_rate = lgm_clk_divider_determine_rate, 187 186 .set_rate = lgm_clk_divider_set_rate, 188 187 .enable = lgm_clk_divider_enable, 189 188 .disable = lgm_clk_divider_disable, ··· 488 487 return 0; 489 488 } 490 489 491 - static long 492 - lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, 493 - unsigned long *prate) 490 + static int lgm_clk_ddiv_determine_rate(struct clk_hw *hw, 491 + struct clk_rate_request *req) 494 492 { 495 493 struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); 496 494 u32 div, ddiv1, ddiv2; 497 495 u64 rate64; 498 496 499 - div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate); 497 + div = DIV_ROUND_CLOSEST_ULL((u64)req->best_parent_rate, req->rate); 500 498 501 499 /* if predivide bit is enabled, modify div by factor of 2.5 */ 502 500 if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { ··· 503 503 div = DIV_ROUND_CLOSEST_ULL((u64)div, 5); 504 504 } 505 505 506 - if (div <= 0) 507 - return *prate; 506 + if (div <= 0) { 507 + req->rate = req->best_parent_rate; 508 + 509 + return 0; 510 + } 508 511 509 512 if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) 510 513 if (lgm_clk_get_ddiv_val(div + 1, 
&ddiv1, &ddiv2) != 0) 511 514 return -EINVAL; 512 515 513 - rate64 = *prate; 516 + rate64 = req->best_parent_rate; 514 517 do_div(rate64, ddiv1); 515 518 do_div(rate64, ddiv2); 516 519 ··· 523 520 rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5); 524 521 } 525 522 526 - return rate64; 523 + req->rate = rate64; 524 + 525 + return 0; 527 526 } 528 527 529 528 static const struct clk_ops lgm_clk_ddiv_ops = { ··· 533 528 .enable = lgm_clk_ddiv_enable, 534 529 .disable = lgm_clk_ddiv_disable, 535 530 .set_rate = lgm_clk_ddiv_set_rate, 536 - .round_rate = lgm_clk_ddiv_round_rate, 531 + .determine_rate = lgm_clk_ddiv_determine_rate, 537 532 }; 538 533 539 534 int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
+30 -25
drivers/clk/xilinx/clk-xlnx-clock-wizard.c
··· 322 322 return err; 323 323 } 324 324 325 - static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate, 326 - unsigned long *prate) 325 + static int clk_wzrd_determine_rate(struct clk_hw *hw, 326 + struct clk_rate_request *req) 327 327 { 328 328 u8 div; 329 329 ··· 331 331 * since we don't change parent rate we just round rate to closest 332 332 * achievable 333 333 */ 334 - div = DIV_ROUND_CLOSEST(*prate, rate); 334 + div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate); 335 335 336 - return *prate / div; 336 + req->rate = req->best_parent_rate / div; 337 + 338 + return 0; 337 339 } 338 340 339 341 static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate, ··· 644 642 divider->flags, divider->width); 645 643 } 646 644 647 - static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate, 648 - unsigned long *prate) 645 + static int clk_wzrd_determine_rate_all(struct clk_hw *hw, 646 + struct clk_rate_request *req) 649 647 { 650 648 struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw); 651 649 u32 m, d, o; 652 650 int err; 653 651 654 - err = clk_wzrd_get_divisors(hw, rate, *prate); 652 + err = clk_wzrd_get_divisors(hw, req->rate, req->best_parent_rate); 655 653 if (err) 656 654 return err; 657 655 ··· 659 657 d = divider->d; 660 658 o = divider->o; 661 659 662 - rate = div_u64(*prate * (m * 1000 + divider->m_frac), d * (o * 1000 + divider->o_frac)); 663 - return rate; 660 + req->rate = div_u64(req->best_parent_rate * (m * 1000 + divider->m_frac), 661 + d * (o * 1000 + divider->o_frac)); 662 + return 0; 664 663 } 665 664 666 - static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate, 667 - unsigned long *prate) 665 + static int clk_wzrd_ver_determine_rate_all(struct clk_hw *hw, 666 + struct clk_rate_request *req) 668 667 { 669 668 struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw); 670 669 unsigned long int_freq; 671 670 u32 m, d, o, div, f; 672 671 int err; 673 672 674 - err = 
clk_wzrd_get_divisors_ver(hw, rate, *prate); 673 + err = clk_wzrd_get_divisors_ver(hw, req->rate, req->best_parent_rate); 675 674 if (err) 676 675 return err; 677 676 ··· 681 678 o = divider->o; 682 679 683 680 div = d * o; 684 - int_freq = divider_recalc_rate(hw, *prate * m, div, divider->table, 681 + int_freq = divider_recalc_rate(hw, req->best_parent_rate * m, div, 682 + divider->table, 685 683 divider->flags, divider->width); 686 684 687 - if (rate > int_freq) { 688 - f = DIV_ROUND_CLOSEST_ULL(rate * WZRD_FRAC_POINTS, int_freq); 689 - rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS); 685 + if (req->rate > int_freq) { 686 + f = DIV_ROUND_CLOSEST_ULL(req->rate * WZRD_FRAC_POINTS, 687 + int_freq); 688 + req->rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS); 690 689 } 691 - return rate; 690 + return 0; 692 691 } 693 692 694 693 static const struct clk_ops clk_wzrd_ver_divider_ops = { 695 - .round_rate = clk_wzrd_round_rate, 694 + .determine_rate = clk_wzrd_determine_rate, 696 695 .set_rate = clk_wzrd_ver_dynamic_reconfig, 697 696 .recalc_rate = clk_wzrd_recalc_rate_ver, 698 697 }; 699 698 700 699 static const struct clk_ops clk_wzrd_ver_div_all_ops = { 701 - .round_rate = clk_wzrd_ver_round_rate_all, 700 + .determine_rate = clk_wzrd_ver_determine_rate_all, 702 701 .set_rate = clk_wzrd_dynamic_all_ver, 703 702 .recalc_rate = clk_wzrd_recalc_rate_all_ver, 704 703 }; 705 704 706 705 static const struct clk_ops clk_wzrd_clk_divider_ops = { 707 - .round_rate = clk_wzrd_round_rate, 706 + .determine_rate = clk_wzrd_determine_rate, 708 707 .set_rate = clk_wzrd_dynamic_reconfig, 709 708 .recalc_rate = clk_wzrd_recalc_rate, 710 709 }; 711 710 712 711 static const struct clk_ops clk_wzrd_clk_div_all_ops = { 713 - .round_rate = clk_wzrd_round_rate_all, 712 + .determine_rate = clk_wzrd_determine_rate_all, 714 713 .set_rate = clk_wzrd_dynamic_all, 715 714 .recalc_rate = clk_wzrd_recalc_rate_all, 716 715 }; ··· 774 769 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL); 775 770 } 
776 771 777 - static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate, 778 - unsigned long *prate) 772 + static int clk_wzrd_determine_rate_f(struct clk_hw *hw, 773 + struct clk_rate_request *req) 779 774 { 780 - return rate; 775 + return 0; 781 776 } 782 777 783 778 static const struct clk_ops clk_wzrd_clk_divider_ops_f = { 784 - .round_rate = clk_wzrd_round_rate_f, 779 + .determine_rate = clk_wzrd_determine_rate_f, 785 780 .set_rate = clk_wzrd_dynamic_reconfig_f, 786 781 .recalc_rate = clk_wzrd_recalc_ratef, 787 782 };
+9 -6
drivers/clk/xilinx/xlnx_vcu.c
··· 311 311 return 0; 312 312 } 313 313 314 - static long xvcu_pll_round_rate(struct clk_hw *hw, 315 - unsigned long rate, unsigned long *parent_rate) 314 + static int xvcu_pll_determine_rate(struct clk_hw *hw, 315 + struct clk_rate_request *req) 316 316 { 317 317 struct vcu_pll *pll = to_vcu_pll(hw); 318 318 unsigned int feedback_div; 319 319 320 - rate = clamp_t(unsigned long, rate, pll->fvco_min, pll->fvco_max); 320 + req->rate = clamp_t(unsigned long, req->rate, pll->fvco_min, 321 + pll->fvco_max); 321 322 322 - feedback_div = DIV_ROUND_CLOSEST_ULL(rate, *parent_rate); 323 + feedback_div = DIV_ROUND_CLOSEST_ULL(req->rate, req->best_parent_rate); 323 324 feedback_div = clamp_t(unsigned int, feedback_div, 25, 125); 324 325 325 - return *parent_rate * feedback_div; 326 + req->rate = req->best_parent_rate * feedback_div; 327 + 328 + return 0; 326 329 } 327 330 328 331 static unsigned long xvcu_pll_recalc_rate(struct clk_hw *hw, ··· 397 394 static const struct clk_ops vcu_pll_ops = { 398 395 .enable = xvcu_pll_enable, 399 396 .disable = xvcu_pll_disable, 400 - .round_rate = xvcu_pll_round_rate, 397 + .determine_rate = xvcu_pll_determine_rate, 401 398 .recalc_rate = xvcu_pll_recalc_rate, 402 399 .set_rate = xvcu_pll_set_rate, 403 400 };
+7 -5
drivers/clk/zynq/pll.c
··· 48 48 * @prate: Clock frequency of parent clock 49 49 * Return: frequency closest to @rate the hardware can generate. 50 50 */ 51 - static long zynq_pll_round_rate(struct clk_hw *hw, unsigned long rate, 52 - unsigned long *prate) 51 + static int zynq_pll_determine_rate(struct clk_hw *hw, 52 + struct clk_rate_request *req) 53 53 { 54 54 u32 fbdiv; 55 55 56 - fbdiv = DIV_ROUND_CLOSEST(rate, *prate); 56 + fbdiv = DIV_ROUND_CLOSEST(req->rate, req->best_parent_rate); 57 57 if (fbdiv < PLL_FBDIV_MIN) 58 58 fbdiv = PLL_FBDIV_MIN; 59 59 else if (fbdiv > PLL_FBDIV_MAX) 60 60 fbdiv = PLL_FBDIV_MAX; 61 61 62 - return *prate * fbdiv; 62 + req->rate = req->best_parent_rate * fbdiv; 63 + 64 + return 0; 63 65 } 64 66 65 67 /** ··· 169 167 .enable = zynq_pll_enable, 170 168 .disable = zynq_pll_disable, 171 169 .is_enabled = zynq_pll_is_enabled, 172 - .round_rate = zynq_pll_round_rate, 170 + .determine_rate = zynq_pll_determine_rate, 173 171 .recalc_rate = zynq_pll_recalc_rate 174 172 }; 175 173
+13 -10
drivers/clk/zynqmp/divider.c
··· 118 118 * 119 119 * Return: 0 on success else error+reason 120 120 */ 121 - static long zynqmp_clk_divider_round_rate(struct clk_hw *hw, 122 - unsigned long rate, 123 - unsigned long *prate) 121 + static int zynqmp_clk_divider_determine_rate(struct clk_hw *hw, 122 + struct clk_rate_request *req) 124 123 { 125 124 struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw); 126 125 const char *clk_name = clk_hw_get_name(hw); ··· 144 145 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) 145 146 bestdiv = 1 << bestdiv; 146 147 147 - return DIV_ROUND_UP_ULL((u64)*prate, bestdiv); 148 + req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, bestdiv); 149 + 150 + return 0; 148 151 } 149 152 150 153 width = fls(divider->max_div); 151 154 152 - rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags); 155 + req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, 156 + NULL, width, divider->flags); 153 157 154 - if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate)) 155 - *prate = rate; 158 + if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && 159 + (req->rate % req->best_parent_rate)) 160 + req->best_parent_rate = req->rate; 156 161 157 - return rate; 162 + return 0; 158 163 } 159 164 160 165 /** ··· 202 199 203 200 static const struct clk_ops zynqmp_clk_divider_ops = { 204 201 .recalc_rate = zynqmp_clk_divider_recalc_rate, 205 - .round_rate = zynqmp_clk_divider_round_rate, 202 + .determine_rate = zynqmp_clk_divider_determine_rate, 206 203 .set_rate = zynqmp_clk_divider_set_rate, 207 204 }; 208 205 209 206 static const struct clk_ops zynqmp_clk_divider_ro_ops = { 210 207 .recalc_rate = zynqmp_clk_divider_recalc_rate, 211 - .round_rate = zynqmp_clk_divider_round_rate, 208 + .determine_rate = zynqmp_clk_divider_determine_rate, 212 209 }; 213 210 214 211 /**
+12 -12
drivers/clk/zynqmp/pll.c
··· 98 98 * 99 99 * Return: Frequency closest to @rate the hardware can generate 100 100 */ 101 - static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate, 102 - unsigned long *prate) 101 + static int zynqmp_pll_determine_rate(struct clk_hw *hw, 102 + struct clk_rate_request *req) 103 103 { 104 104 u32 fbdiv; 105 105 u32 mult, div; 106 106 107 107 /* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */ 108 - if (rate > PS_PLL_VCO_MAX) { 109 - div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX); 110 - rate = rate / div; 108 + if (req->rate > PS_PLL_VCO_MAX) { 109 + div = DIV_ROUND_UP(req->rate, PS_PLL_VCO_MAX); 110 + req->rate = req->rate / div; 111 111 } 112 - if (rate < PS_PLL_VCO_MIN) { 113 - mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate); 114 - rate = rate * mult; 112 + if (req->rate < PS_PLL_VCO_MIN) { 113 + mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, req->rate); 114 + req->rate = req->rate * mult; 115 115 } 116 116 117 - fbdiv = DIV_ROUND_CLOSEST(rate, *prate); 117 + fbdiv = DIV_ROUND_CLOSEST(req->rate, req->best_parent_rate); 118 118 if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) { 119 119 fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX); 120 - rate = *prate * fbdiv; 120 + req->rate = req->best_parent_rate * fbdiv; 121 121 } 122 122 123 - return rate; 123 + return 0; 124 124 } 125 125 126 126 /** ··· 294 294 .enable = zynqmp_pll_enable, 295 295 .disable = zynqmp_pll_disable, 296 296 .is_enabled = zynqmp_pll_is_enabled, 297 - .round_rate = zynqmp_pll_round_rate, 297 + .determine_rate = zynqmp_pll_determine_rate, 298 298 .recalc_rate = zynqmp_pll_recalc_rate, 299 299 .set_rate = zynqmp_pll_set_rate, 300 300 };
+4 -4
include/linux/clk/ti.h
··· 34 34 * @clk_ref: struct clk_hw pointer to the clock's reference clock input 35 35 * @control_reg: register containing the DPLL mode bitfield 36 36 * @enable_mask: mask of the DPLL mode bitfield in @control_reg 37 - * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate() 38 - * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate() 37 + * @last_rounded_rate: cache of the last rate result of omap2_dpll_determine_rate() 38 + * @last_rounded_m: cache of the last M result of omap2_dpll_determine_rate() 39 39 * @last_rounded_m4xen: cache of the last M4X result of 40 - * omap4_dpll_regm4xen_round_rate() 40 + * omap4_dpll_regm4xen_determine_rate() 41 41 * @last_rounded_lpmode: cache of the last lpmode result of 42 42 * omap4_dpll_lpmode_recalc() 43 43 * @max_multiplier: maximum valid non-bypass multiplier value (actual) 44 - * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate() 44 + * @last_rounded_n: cache of the last N result of omap2_dpll_determine_rate() 45 45 * @min_divider: minimum valid non-bypass divider value (actual) 46 46 * @max_divider: maximum valid non-bypass divider value (actual) 47 47 * @max_rate: maximum clock rate for the DPLL