Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

clk: change clk_ops' ->determine_rate() prototype

Clock rates are stored in an unsigned long field, but ->determine_rate()
(which returns a rounded rate from a requested one) returns a long
value (errors are reported using negative error codes), which can lead
to a long overflow if the clock rate exceeds 2 GHz.

Change ->determine_rate() prototype to return 0 or an error code, and pass
a pointer to a clk_rate_request structure containing the expected target
rate and the rate constraints imposed by clk users.

The clk_rate_request structure might be extended in the future to contain
other kinds of constraints, such as the rounding policy, the maximum clock
inaccuracy or other things that are not yet supported by the CCF
(power consumption constraints?).

Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
CC: Jonathan Corbet <corbet@lwn.net>
CC: Tony Lindgren <tony@atomide.com>
CC: Ralf Baechle <ralf@linux-mips.org>
CC: "Emilio López" <emilio@elopez.com.ar>
CC: Maxime Ripard <maxime.ripard@free-electrons.com>
Acked-by: Tero Kristo <t-kristo@ti.com>
CC: Peter De Schrijver <pdeschrijver@nvidia.com>
CC: Prashant Gaikwad <pgaikwad@nvidia.com>
CC: Stephen Warren <swarren@wwwdotorg.org>
CC: Thierry Reding <thierry.reding@gmail.com>
CC: Alexandre Courbot <gnurou@gmail.com>
CC: linux-doc@vger.kernel.org
CC: linux-kernel@vger.kernel.org
CC: linux-arm-kernel@lists.infradead.org
CC: linux-omap@vger.kernel.org
CC: linux-mips@linux-mips.org
CC: linux-tegra@vger.kernel.org
[sboyd@codeaurora.org: Fix parent dereference problem in
__clk_determine_rate()]
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Tested-by: Romain Perier <romain.perier@gmail.com>
Signed-off-by: Heiko Stuebner <heiko@sntech.de>
[sboyd@codeaurora.org: Folded in fix from Heiko for fixed-rate
clocks without parents or a rate determining op]
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>

authored by

Boris Brezillon and committed by
Stephen Boyd
0817b62c d770e558

+393 -402
+2 -6
Documentation/clk.txt
··· 71 71 long (*round_rate)(struct clk_hw *hw, 72 72 unsigned long rate, 73 73 unsigned long *parent_rate); 74 - long (*determine_rate)(struct clk_hw *hw, 75 - unsigned long rate, 76 - unsigned long min_rate, 77 - unsigned long max_rate, 78 - unsigned long *best_parent_rate, 79 - struct clk_hw **best_parent_clk); 74 + int (*determine_rate)(struct clk_hw *hw, 75 + struct clk_rate_request *req); 80 76 int (*set_parent)(struct clk_hw *hw, u8 index); 81 77 u8 (*get_parent)(struct clk_hw *hw); 82 78 int (*set_rate)(struct clk_hw *hw,
+12 -17
arch/arm/mach-omap2/dpll3xxx.c
··· 462 462 /** 463 463 * omap3_noncore_dpll_determine_rate - determine rate for a DPLL 464 464 * @hw: pointer to the clock to determine rate for 465 - * @rate: target rate for the DPLL 466 - * @best_parent_rate: pointer for returning best parent rate 467 - * @best_parent_clk: pointer for returning best parent clock 465 + * @req: target rate request 468 466 * 469 467 * Determines which DPLL mode to use for reaching a desired target rate. 470 468 * Checks whether the DPLL shall be in bypass or locked mode, and if 471 469 * locked, calculates the M,N values for the DPLL via round-rate. 472 - * Returns a positive clock rate with success, negative error value 473 - * in failure. 470 + * Returns a 0 on success, negative error value in failure. 474 471 */ 475 - long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate, 476 - unsigned long min_rate, 477 - unsigned long max_rate, 478 - unsigned long *best_parent_rate, 479 - struct clk_hw **best_parent_clk) 472 + int omap3_noncore_dpll_determine_rate(struct clk_hw *hw, 473 + struct clk_rate_request *req) 480 474 { 481 475 struct clk_hw_omap *clk = to_clk_hw_omap(hw); 482 476 struct dpll_data *dd; 483 477 484 - if (!hw || !rate) 478 + if (!req->rate) 485 479 return -EINVAL; 486 480 487 481 dd = clk->dpll_data; 488 482 if (!dd) 489 483 return -EINVAL; 490 484 491 - if (__clk_get_rate(dd->clk_bypass) == rate && 485 + if (__clk_get_rate(dd->clk_bypass) == req->rate && 492 486 (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { 493 - *best_parent_clk = __clk_get_hw(dd->clk_bypass); 487 + req->best_parent_hw = __clk_get_hw(dd->clk_bypass); 494 488 } else { 495 - rate = omap2_dpll_round_rate(hw, rate, best_parent_rate); 496 - *best_parent_clk = __clk_get_hw(dd->clk_ref); 489 + req->rate = omap2_dpll_round_rate(hw, req->rate, 490 + &req->best_parent_rate); 491 + req->best_parent_hw = __clk_get_hw(dd->clk_ref); 497 492 } 498 493 499 - *best_parent_rate = rate; 494 + req->best_parent_rate = req->rate; 500 495 501 - 
return rate; 496 + return 0; 502 497 } 503 498 504 499 /**
+12 -18
arch/arm/mach-omap2/dpll44xx.c
··· 191 191 /** 192 192 * omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL 193 193 * @hw: pointer to the clock to determine rate for 194 - * @rate: target rate for the DPLL 195 - * @best_parent_rate: pointer for returning best parent rate 196 - * @best_parent_clk: pointer for returning best parent clock 194 + * @req: target rate request 197 195 * 198 196 * Determines which DPLL mode to use for reaching a desired rate. 199 197 * Checks whether the DPLL shall be in bypass or locked mode, and if 200 198 * locked, calculates the M,N values for the DPLL via round-rate. 201 - * Returns a positive clock rate with success, negative error value 202 - * in failure. 199 + * Returns 0 on success and a negative error value otherwise. 203 200 */ 204 - long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate, 205 - unsigned long min_rate, 206 - unsigned long max_rate, 207 - unsigned long *best_parent_rate, 208 - struct clk_hw **best_parent_clk) 201 + int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, 202 + struct clk_rate_request *req) 209 203 { 210 204 struct clk_hw_omap *clk = to_clk_hw_omap(hw); 211 205 struct dpll_data *dd; 212 206 213 - if (!hw || !rate) 207 + if (!req->rate) 214 208 return -EINVAL; 215 209 216 210 dd = clk->dpll_data; 217 211 if (!dd) 218 212 return -EINVAL; 219 213 220 - if (__clk_get_rate(dd->clk_bypass) == rate && 214 + if (__clk_get_rate(dd->clk_bypass) == req->rate && 221 215 (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { 222 - *best_parent_clk = __clk_get_hw(dd->clk_bypass); 216 + req->best_parent_hw = __clk_get_hw(dd->clk_bypass); 223 217 } else { 224 - rate = omap4_dpll_regm4xen_round_rate(hw, rate, 225 - best_parent_rate); 226 - *best_parent_clk = __clk_get_hw(dd->clk_ref); 218 + req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate, 219 + &req->best_parent_rate); 220 + req->best_parent_hw = __clk_get_hw(dd->clk_ref); 227 221 } 228 222 229 - *best_parent_rate = rate; 223 + req->best_parent_rate = 
req->rate; 230 224 231 - return rate; 225 + return 0; 232 226 }
+25 -36
arch/mips/alchemy/common/clock.c
··· 389 389 return div1; 390 390 } 391 391 392 - static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, 393 - unsigned long *best_parent_rate, 394 - struct clk_hw **best_parent_clk, 395 - int scale, int maxdiv) 392 + static int alchemy_clk_fgcs_detr(struct clk_hw *hw, 393 + struct clk_rate_request *req, 394 + int scale, int maxdiv) 396 395 { 397 396 struct clk *pc, *bpc, *free; 398 397 long tdv, tpr, pr, nr, br, bpr, diff, lastdiff; ··· 421 422 } 422 423 423 424 pr = clk_get_rate(pc); 424 - if (pr < rate) 425 + if (pr < req->rate) 425 426 continue; 426 427 427 428 /* what can hardware actually provide */ 428 - tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL); 429 + tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL); 429 430 nr = pr / tdv; 430 - diff = rate - nr; 431 - if (nr > rate) 431 + diff = req->rate - nr; 432 + if (nr > req->rate) 432 433 continue; 433 434 434 435 if (diff < lastdiff) { ··· 447 448 */ 448 449 if (lastdiff && free) { 449 450 for (j = (maxdiv == 4) ? 
1 : scale; j <= maxdiv; j += scale) { 450 - tpr = rate * j; 451 + tpr = req->rate * j; 451 452 if (tpr < 0) 452 453 break; 453 454 pr = clk_round_rate(free, tpr); 454 455 455 - tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL); 456 + tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, 457 + NULL); 456 458 nr = pr / tdv; 457 - diff = rate - nr; 458 - if (nr > rate) 459 + diff = req->rate - nr; 460 + if (nr > req->rate) 459 461 continue; 460 462 if (diff < lastdiff) { 461 463 lastdiff = diff; ··· 469 469 } 470 470 } 471 471 472 - *best_parent_rate = bpr; 473 - *best_parent_clk = __clk_get_hw(bpc); 474 - return br; 472 + req->best_parent_rate = bpr; 473 + req->best_parent_hw = __clk_get_hw(bpc); 474 + req->rate = br; 475 + return 0; 475 476 } 476 477 477 478 static int alchemy_clk_fgv1_en(struct clk_hw *hw) ··· 563 562 return parent_rate / v; 564 563 } 565 564 566 - static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate, 567 - unsigned long min_rate, 568 - unsigned long max_rate, 569 - unsigned long *best_parent_rate, 570 - struct clk_hw **best_parent_clk) 565 + static int alchemy_clk_fgv1_detr(struct clk_hw *hw, 566 + struct clk_rate_request *req) 571 567 { 572 - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, 573 - best_parent_clk, 2, 512); 568 + return alchemy_clk_fgcs_detr(hw, req, 2, 512); 574 569 } 575 570 576 571 /* Au1000, Au1100, Au15x0, Au12x0 */ ··· 693 696 return t; 694 697 } 695 698 696 - static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, 697 - unsigned long min_rate, 698 - unsigned long max_rate, 699 - unsigned long *best_parent_rate, 700 - struct clk_hw **best_parent_clk) 699 + static int alchemy_clk_fgv2_detr(struct clk_hw *hw, 700 + struct clk_rate_request *req) 701 701 { 702 702 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); 703 703 int scale, maxdiv; ··· 707 713 maxdiv = 512; 708 714 } 709 715 710 - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, 711 - best_parent_clk, scale, maxdiv); 716 
+ return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv); 712 717 } 713 718 714 719 /* Au1300 larger input mux, no separate disable bit, flexible divider */ ··· 910 917 return 0; 911 918 } 912 919 913 - static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate, 914 - unsigned long min_rate, 915 - unsigned long max_rate, 916 - unsigned long *best_parent_rate, 917 - struct clk_hw **best_parent_clk) 920 + static int alchemy_clk_csrc_detr(struct clk_hw *hw, 921 + struct clk_rate_request *req) 918 922 { 919 923 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); 920 924 int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */ 921 925 922 - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, 923 - best_parent_clk, scale, 4); 926 + return alchemy_clk_fgcs_detr(hw, req, scale, 4); 924 927 } 925 928 926 929 static struct clk_ops alchemy_clkops_csrc = {
+13 -12
drivers/clk/at91/clk-programmable.c
··· 54 54 return parent_rate >> pres; 55 55 } 56 56 57 - static long clk_programmable_determine_rate(struct clk_hw *hw, 58 - unsigned long rate, 59 - unsigned long min_rate, 60 - unsigned long max_rate, 61 - unsigned long *best_parent_rate, 62 - struct clk_hw **best_parent_hw) 57 + static int clk_programmable_determine_rate(struct clk_hw *hw, 58 + struct clk_rate_request *req) 63 59 { 64 60 struct clk *parent = NULL; 65 61 long best_rate = -EINVAL; ··· 72 76 parent_rate = __clk_get_rate(parent); 73 77 for (shift = 0; shift < PROG_PRES_MASK; shift++) { 74 78 tmp_rate = parent_rate >> shift; 75 - if (tmp_rate <= rate) 79 + if (tmp_rate <= req->rate) 76 80 break; 77 81 } 78 82 79 - if (tmp_rate > rate) 83 + if (tmp_rate > req->rate) 80 84 continue; 81 85 82 - if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) { 86 + if (best_rate < 0 || 87 + (req->rate - tmp_rate) < (req->rate - best_rate)) { 83 88 best_rate = tmp_rate; 84 - *best_parent_rate = parent_rate; 85 - *best_parent_hw = __clk_get_hw(parent); 89 + req->best_parent_rate = parent_rate; 90 + req->best_parent_hw = __clk_get_hw(parent); 86 91 } 87 92 88 93 if (!best_rate) 89 94 break; 90 95 } 91 96 92 - return best_rate; 97 + if (best_rate < 0) 98 + return best_rate; 99 + 100 + req->rate = best_rate; 101 + return 0; 93 102 } 94 103 95 104 static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
+14 -14
drivers/clk/at91/clk-usb.c
··· 56 56 return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1)); 57 57 } 58 58 59 - static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, 60 - unsigned long rate, 61 - unsigned long min_rate, 62 - unsigned long max_rate, 63 - unsigned long *best_parent_rate, 64 - struct clk_hw **best_parent_hw) 59 + static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, 60 + struct clk_rate_request *req) 65 61 { 66 62 struct clk *parent = NULL; 67 63 long best_rate = -EINVAL; ··· 76 80 for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) { 77 81 unsigned long tmp_parent_rate; 78 82 79 - tmp_parent_rate = rate * div; 83 + tmp_parent_rate = req->rate * div; 80 84 tmp_parent_rate = __clk_round_rate(parent, 81 85 tmp_parent_rate); 82 86 tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div); 83 - if (tmp_rate < rate) 84 - tmp_diff = rate - tmp_rate; 87 + if (tmp_rate < req->rate) 88 + tmp_diff = req->rate - tmp_rate; 85 89 else 86 - tmp_diff = tmp_rate - rate; 90 + tmp_diff = tmp_rate - req->rate; 87 91 88 92 if (best_diff < 0 || best_diff > tmp_diff) { 89 93 best_rate = tmp_rate; 90 94 best_diff = tmp_diff; 91 - *best_parent_rate = tmp_parent_rate; 92 - *best_parent_hw = __clk_get_hw(parent); 95 + req->best_parent_rate = tmp_parent_rate; 96 + req->best_parent_hw = __clk_get_hw(parent); 93 97 } 94 98 95 - if (!best_diff || tmp_rate < rate) 99 + if (!best_diff || tmp_rate < req->rate) 96 100 break; 97 101 } 98 102 ··· 100 104 break; 101 105 } 102 106 103 - return best_rate; 107 + if (best_rate < 0) 108 + return best_rate; 109 + 110 + req->rate = best_rate; 111 + return 0; 104 112 } 105 113 106 114 static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
+21 -13
drivers/clk/bcm/clk-kona.c
··· 1017 1017 rate ? rate : 1, *parent_rate, NULL); 1018 1018 } 1019 1019 1020 - static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate, 1021 - unsigned long min_rate, 1022 - unsigned long max_rate, 1023 - unsigned long *best_parent_rate, struct clk_hw **best_parent) 1020 + static int kona_peri_clk_determine_rate(struct clk_hw *hw, 1021 + struct clk_rate_request *req) 1024 1022 { 1025 1023 struct kona_clk *bcm_clk = to_kona_clk(hw); 1026 1024 struct clk *clk = hw->clk; ··· 1027 1029 unsigned long best_delta; 1028 1030 unsigned long best_rate; 1029 1031 u32 parent_count; 1032 + long rate; 1030 1033 u32 which; 1031 1034 1032 1035 /* ··· 1036 1037 */ 1037 1038 WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT); 1038 1039 parent_count = (u32)bcm_clk->init_data.num_parents; 1039 - if (parent_count < 2) 1040 - return kona_peri_clk_round_rate(hw, rate, best_parent_rate); 1040 + if (parent_count < 2) { 1041 + rate = kona_peri_clk_round_rate(hw, req->rate, 1042 + &req->best_parent_rate); 1043 + if (rate < 0) 1044 + return rate; 1045 + 1046 + req->rate = rate; 1047 + return 0; 1048 + } 1041 1049 1042 1050 /* Unless we can do better, stick with current parent */ 1043 1051 current_parent = clk_get_parent(clk); 1044 1052 parent_rate = __clk_get_rate(current_parent); 1045 - best_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate); 1046 - best_delta = abs(best_rate - rate); 1053 + best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate); 1054 + best_delta = abs(best_rate - req->rate); 1047 1055 1048 1056 /* Check whether any other parent clock can produce a better result */ 1049 1057 for (which = 0; which < parent_count; which++) { ··· 1064 1058 1065 1059 /* We don't support CLK_SET_RATE_PARENT */ 1066 1060 parent_rate = __clk_get_rate(parent); 1067 - other_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate); 1068 - delta = abs(other_rate - rate); 1061 + other_rate = kona_peri_clk_round_rate(hw, req->rate, 1062 + 
&parent_rate); 1063 + delta = abs(other_rate - req->rate); 1069 1064 if (delta < best_delta) { 1070 1065 best_delta = delta; 1071 1066 best_rate = other_rate; 1072 - *best_parent = __clk_get_hw(parent); 1073 - *best_parent_rate = parent_rate; 1067 + req->best_parent_hw = __clk_get_hw(parent); 1068 + req->best_parent_rate = parent_rate; 1074 1069 } 1075 1070 } 1076 1071 1077 - return best_rate; 1072 + req->rate = best_rate; 1073 + return 0; 1078 1074 } 1079 1075 1080 1076 static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
+24 -24
drivers/clk/clk-composite.c
··· 55 55 return rate_ops->recalc_rate(rate_hw, parent_rate); 56 56 } 57 57 58 - static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate, 59 - unsigned long min_rate, 60 - unsigned long max_rate, 61 - unsigned long *best_parent_rate, 62 - struct clk_hw **best_parent_p) 58 + static int clk_composite_determine_rate(struct clk_hw *hw, 59 + struct clk_rate_request *req) 63 60 { 64 61 struct clk_composite *composite = to_clk_composite(hw); 65 62 const struct clk_ops *rate_ops = composite->rate_ops; ··· 68 71 long tmp_rate, best_rate = 0; 69 72 unsigned long rate_diff; 70 73 unsigned long best_rate_diff = ULONG_MAX; 74 + long rate; 71 75 int i; 72 76 73 77 if (rate_hw && rate_ops && rate_ops->determine_rate) { 74 78 __clk_hw_set_clk(rate_hw, hw); 75 - return rate_ops->determine_rate(rate_hw, rate, min_rate, 76 - max_rate, 77 - best_parent_rate, 78 - best_parent_p); 79 + return rate_ops->determine_rate(rate_hw, req); 79 80 } else if (rate_hw && rate_ops && rate_ops->round_rate && 80 81 mux_hw && mux_ops && mux_ops->set_parent) { 81 - *best_parent_p = NULL; 82 + req->best_parent_hw = NULL; 82 83 83 84 if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) { 84 85 parent = clk_get_parent(mux_hw->clk); 85 - *best_parent_p = __clk_get_hw(parent); 86 - *best_parent_rate = __clk_get_rate(parent); 86 + req->best_parent_hw = __clk_get_hw(parent); 87 + req->best_parent_rate = __clk_get_rate(parent); 87 88 88 - return rate_ops->round_rate(rate_hw, rate, 89 - best_parent_rate); 89 + rate = rate_ops->round_rate(rate_hw, req->rate, 90 + &req->best_parent_rate); 91 + if (rate < 0) 92 + return rate; 93 + 94 + req->rate = rate; 95 + return 0; 90 96 } 91 97 92 98 for (i = 0; i < __clk_get_num_parents(mux_hw->clk); i++) { ··· 99 99 100 100 parent_rate = __clk_get_rate(parent); 101 101 102 - tmp_rate = rate_ops->round_rate(rate_hw, rate, 102 + tmp_rate = rate_ops->round_rate(rate_hw, req->rate, 103 103 &parent_rate); 104 104 if (tmp_rate < 0) 105 105 continue; 106 
106 107 - rate_diff = abs(rate - tmp_rate); 107 + rate_diff = abs(req->rate - tmp_rate); 108 108 109 - if (!rate_diff || !*best_parent_p 109 + if (!rate_diff || !req->best_parent_hw 110 110 || best_rate_diff > rate_diff) { 111 - *best_parent_p = __clk_get_hw(parent); 112 - *best_parent_rate = parent_rate; 111 + req->best_parent_hw = __clk_get_hw(parent); 112 + req->best_parent_rate = parent_rate; 113 113 best_rate_diff = rate_diff; 114 114 best_rate = tmp_rate; 115 115 } 116 116 117 117 if (!rate_diff) 118 - return rate; 118 + return 0; 119 119 } 120 120 121 - return best_rate; 121 + req->rate = best_rate; 122 + return 0; 122 123 } else if (mux_hw && mux_ops && mux_ops->determine_rate) { 123 124 __clk_hw_set_clk(mux_hw, hw); 124 - return mux_ops->determine_rate(mux_hw, rate, min_rate, 125 - max_rate, best_parent_rate, 126 - best_parent_p); 125 + return mux_ops->determine_rate(mux_hw, req); 127 126 } else { 128 127 pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n"); 128 + req->rate = 0; 129 129 return 0; 130 130 } 131 131 }
+96 -80
drivers/clk/clk.c
··· 436 436 return now <= rate && now > best; 437 437 } 438 438 439 - static long 440 - clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate, 441 - unsigned long min_rate, 442 - unsigned long max_rate, 443 - unsigned long *best_parent_rate, 444 - struct clk_hw **best_parent_p, 439 + static int 440 + clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req, 445 441 unsigned long flags) 446 442 { 447 443 struct clk_core *core = hw->core, *parent, *best_parent = NULL; 448 - int i, num_parents; 449 - unsigned long parent_rate, best = 0; 444 + int i, num_parents, ret; 445 + unsigned long best = 0; 446 + struct clk_rate_request parent_req = *req; 450 447 451 448 /* if NO_REPARENT flag set, pass through to current parent */ 452 449 if (core->flags & CLK_SET_RATE_NO_REPARENT) { 453 450 parent = core->parent; 454 - if (core->flags & CLK_SET_RATE_PARENT) 455 - best = __clk_determine_rate(parent ? parent->hw : NULL, 456 - rate, min_rate, max_rate); 457 - else if (parent) 451 + if (core->flags & CLK_SET_RATE_PARENT) { 452 + ret = __clk_determine_rate(parent ? 
parent->hw : NULL, 453 + &parent_req); 454 + if (ret) 455 + return ret; 456 + 457 + best = parent_req.rate; 458 + } else if (parent) { 458 459 best = clk_core_get_rate_nolock(parent); 459 - else 460 + } else { 460 461 best = clk_core_get_rate_nolock(core); 462 + } 463 + 461 464 goto out; 462 465 } 463 466 ··· 470 467 parent = clk_core_get_parent_by_index(core, i); 471 468 if (!parent) 472 469 continue; 473 - if (core->flags & CLK_SET_RATE_PARENT) 474 - parent_rate = __clk_determine_rate(parent->hw, rate, 475 - min_rate, 476 - max_rate); 477 - else 478 - parent_rate = clk_core_get_rate_nolock(parent); 479 - if (mux_is_better_rate(rate, parent_rate, best, flags)) { 470 + 471 + if (core->flags & CLK_SET_RATE_PARENT) { 472 + parent_req = *req; 473 + ret = __clk_determine_rate(parent->hw, &parent_req); 474 + if (ret) 475 + continue; 476 + } else { 477 + parent_req.rate = clk_core_get_rate_nolock(parent); 478 + } 479 + 480 + if (mux_is_better_rate(req->rate, parent_req.rate, 481 + best, flags)) { 480 482 best_parent = parent; 481 - best = parent_rate; 483 + best = parent_req.rate; 482 484 } 483 485 } 484 486 485 487 out: 486 488 if (best_parent) 487 - *best_parent_p = best_parent->hw; 488 - *best_parent_rate = best; 489 + req->best_parent_hw = best_parent->hw; 490 + req->best_parent_rate = best; 491 + req->rate = best; 489 492 490 - return best; 493 + return 0; 491 494 } 492 495 493 496 struct clk *__clk_lookup(const char *name) ··· 524 515 * directly as a determine_rate callback (e.g. for a mux), or from a more 525 516 * complex clock that may combine a mux with other operations. 
526 517 */ 527 - long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, 528 - unsigned long min_rate, 529 - unsigned long max_rate, 530 - unsigned long *best_parent_rate, 531 - struct clk_hw **best_parent_p) 518 + int __clk_mux_determine_rate(struct clk_hw *hw, 519 + struct clk_rate_request *req) 532 520 { 533 - return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate, 534 - best_parent_rate, 535 - best_parent_p, 0); 521 + return clk_mux_determine_rate_flags(hw, req, 0); 536 522 } 537 523 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); 538 524 539 - long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate, 540 - unsigned long min_rate, 541 - unsigned long max_rate, 542 - unsigned long *best_parent_rate, 543 - struct clk_hw **best_parent_p) 525 + int __clk_mux_determine_rate_closest(struct clk_hw *hw, 526 + struct clk_rate_request *req) 544 527 { 545 - return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate, 546 - best_parent_rate, 547 - best_parent_p, 548 - CLK_MUX_ROUND_CLOSEST); 528 + return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST); 549 529 } 550 530 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); 551 531 ··· 757 759 } 758 760 EXPORT_SYMBOL_GPL(clk_enable); 759 761 760 - static unsigned long clk_core_round_rate_nolock(struct clk_core *core, 761 - unsigned long rate, 762 - unsigned long min_rate, 763 - unsigned long max_rate) 762 + static int clk_core_round_rate_nolock(struct clk_core *core, 763 + struct clk_rate_request *req) 764 764 { 765 - unsigned long parent_rate = 0; 766 765 struct clk_core *parent; 767 - struct clk_hw *parent_hw; 766 + long rate; 768 767 769 768 lockdep_assert_held(&prepare_lock); 770 769 ··· 769 774 return 0; 770 775 771 776 parent = core->parent; 772 - if (parent) 773 - parent_rate = parent->rate; 777 + if (parent) { 778 + req->best_parent_hw = parent->hw; 779 + req->best_parent_rate = parent->rate; 780 + } else { 781 + req->best_parent_hw = NULL; 782 + 
req->best_parent_rate = 0; 783 + } 774 784 775 785 if (core->ops->determine_rate) { 776 - parent_hw = parent ? parent->hw : NULL; 777 - return core->ops->determine_rate(core->hw, rate, 778 - min_rate, max_rate, 779 - &parent_rate, &parent_hw); 780 - } else if (core->ops->round_rate) 781 - return core->ops->round_rate(core->hw, rate, &parent_rate); 782 - else if (core->flags & CLK_SET_RATE_PARENT) 783 - return clk_core_round_rate_nolock(core->parent, rate, min_rate, 784 - max_rate); 785 - else 786 - return core->rate; 786 + return core->ops->determine_rate(core->hw, req); 787 + } else if (core->ops->round_rate) { 788 + rate = core->ops->round_rate(core->hw, req->rate, 789 + &req->best_parent_rate); 790 + if (rate < 0) 791 + return rate; 792 + 793 + req->rate = rate; 794 + } else if (core->flags & CLK_SET_RATE_PARENT) { 795 + return clk_core_round_rate_nolock(parent, req); 796 + } else { 797 + req->rate = core->rate; 798 + } 799 + 800 + return 0; 787 801 } 788 802 789 803 /** ··· 804 800 * 805 801 * Useful for clk_ops such as .set_rate and .determine_rate. 
806 802 */ 807 - unsigned long __clk_determine_rate(struct clk_hw *hw, 808 - unsigned long rate, 809 - unsigned long min_rate, 810 - unsigned long max_rate) 803 + int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 811 804 { 812 - if (!hw) 805 + if (!hw) { 806 + req->rate = 0; 813 807 return 0; 808 + } 814 809 815 - return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate); 810 + return clk_core_round_rate_nolock(hw->core, req); 816 811 } 817 812 EXPORT_SYMBOL_GPL(__clk_determine_rate); 818 813 ··· 824 821 */ 825 822 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) 826 823 { 827 - unsigned long min_rate; 828 - unsigned long max_rate; 824 + struct clk_rate_request req; 825 + int ret; 829 826 830 827 if (!clk) 831 828 return 0; 832 829 833 - clk_core_get_boundaries(clk->core, &min_rate, &max_rate); 830 + clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); 831 + req.rate = rate; 834 832 835 - return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate); 833 + ret = clk_core_round_rate_nolock(clk->core, &req); 834 + if (ret) 835 + return 0; 836 + 837 + return req.rate; 836 838 } 837 839 EXPORT_SYMBOL_GPL(__clk_round_rate); 838 840 ··· 1257 1249 { 1258 1250 struct clk_core *top = core; 1259 1251 struct clk_core *old_parent, *parent; 1260 - struct clk_hw *parent_hw; 1261 1252 unsigned long best_parent_rate = 0; 1262 1253 unsigned long new_rate; 1263 1254 unsigned long min_rate; ··· 1277 1270 1278 1271 /* find the closest rate and parent clk/rate */ 1279 1272 if (core->ops->determine_rate) { 1280 - parent_hw = parent ? 
parent->hw : NULL; 1281 - ret = core->ops->determine_rate(core->hw, rate, 1282 - min_rate, 1283 - max_rate, 1284 - &best_parent_rate, 1285 - &parent_hw); 1273 + struct clk_rate_request req; 1274 + 1275 + req.rate = rate; 1276 + req.min_rate = min_rate; 1277 + req.max_rate = max_rate; 1278 + if (parent) { 1279 + req.best_parent_hw = parent->hw; 1280 + req.best_parent_rate = parent->rate; 1281 + } else { 1282 + req.best_parent_hw = NULL; 1283 + req.best_parent_rate = 0; 1284 + } 1285 + 1286 + ret = core->ops->determine_rate(core->hw, &req); 1286 1287 if (ret < 0) 1287 1288 return NULL; 1288 1289 1289 - new_rate = ret; 1290 - parent = parent_hw ? parent_hw->core : NULL; 1290 + best_parent_rate = req.best_parent_rate; 1291 + new_rate = req.rate; 1292 + parent = req.best_parent_hw ? req.best_parent_hw->core : NULL; 1291 1293 } else if (core->ops->round_rate) { 1292 1294 ret = core->ops->round_rate(core->hw, rate, 1293 - &best_parent_rate); 1295 + &best_parent_rate); 1294 1296 if (ret < 0) 1295 1297 return NULL; 1296 1298
+17 -22
drivers/clk/hisilicon/clk-hi3620.c
··· 294 294 } 295 295 } 296 296 297 - static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate, 298 - unsigned long min_rate, 299 - unsigned long max_rate, 300 - unsigned long *best_parent_rate, 301 - struct clk_hw **best_parent_p) 297 + static int mmc_clk_determine_rate(struct clk_hw *hw, 298 + struct clk_rate_request *req) 302 299 { 303 300 struct clk_mmc *mclk = to_mmc(hw); 304 - unsigned long best = 0; 305 301 306 - if ((rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) { 307 - rate = 13000000; 308 - best = 26000000; 309 - } else if (rate <= 26000000) { 310 - rate = 25000000; 311 - best = 180000000; 312 - } else if (rate <= 52000000) { 313 - rate = 50000000; 314 - best = 360000000; 315 - } else if (rate <= 100000000) { 316 - rate = 100000000; 317 - best = 720000000; 302 + if ((req->rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) { 303 + req->rate = 13000000; 304 + req->best_parent_rate = 26000000; 305 + } else if (req->rate <= 26000000) { 306 + req->rate = 25000000; 307 + req->best_parent_rate = 180000000; 308 + } else if (req->rate <= 52000000) { 309 + req->rate = 50000000; 310 + req->best_parent_rate = 360000000; 311 + } else if (req->rate <= 100000000) { 312 + req->rate = 100000000; 313 + req->best_parent_rate = 720000000; 318 314 } else { 319 315 /* max is 180M */ 320 - rate = 180000000; 321 - best = 1440000000; 316 + req->rate = 180000000; 317 + req->best_parent_rate = 1440000000; 322 318 } 323 - *best_parent_rate = best; 324 - return rate; 319 + return 0; 325 320 } 326 321 327 322 static u32 mmc_clk_delay(u32 val, u32 para, u32 off, u32 len)
+9 -11
drivers/clk/mmp/clk-mix.c
··· 201 201 return ret; 202 202 } 203 203 204 - static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate, 205 - unsigned long min_rate, 206 - unsigned long max_rate, 207 - unsigned long *best_parent_rate, 208 - struct clk_hw **best_parent_clk) 204 + static int mmp_clk_mix_determine_rate(struct clk_hw *hw, 205 + struct clk_rate_request *req) 209 206 { 210 207 struct mmp_clk_mix *mix = to_clk_mix(hw); 211 208 struct mmp_clk_mix_clk_table *item; ··· 218 221 parent = NULL; 219 222 mix_rate_best = 0; 220 223 parent_rate_best = 0; 221 - gap_best = rate; 224 + gap_best = req->rate; 222 225 parent_best = NULL; 223 226 224 227 if (mix->table) { ··· 230 233 item->parent_index); 231 234 parent_rate = __clk_get_rate(parent); 232 235 mix_rate = parent_rate / item->divisor; 233 - gap = abs(mix_rate - rate); 236 + gap = abs(mix_rate - req->rate); 234 237 if (parent_best == NULL || gap < gap_best) { 235 238 parent_best = parent; 236 239 parent_rate_best = parent_rate; ··· 248 251 for (j = 0; j < div_val_max; j++) { 249 252 div = _get_div(mix, j); 250 253 mix_rate = parent_rate / div; 251 - gap = abs(mix_rate - rate); 254 + gap = abs(mix_rate - req->rate); 252 255 if (parent_best == NULL || gap < gap_best) { 253 256 parent_best = parent; 254 257 parent_rate_best = parent_rate; ··· 262 265 } 263 266 264 267 found: 265 - *best_parent_rate = parent_rate_best; 266 - *best_parent_clk = __clk_get_hw(parent_best); 268 + req->best_parent_rate = parent_rate_best; 269 + req->best_parent_hw = __clk_get_hw(parent_best); 270 + req->rate = mix_rate_best; 267 271 268 - return mix_rate_best; 272 + return 0; 269 273 } 270 274 271 275 static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
+12 -8
drivers/clk/qcom/clk-pll.c
··· 135 135 return NULL; 136 136 } 137 137 138 - static long 139 - clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate, 140 - unsigned long min_rate, unsigned long max_rate, 141 - unsigned long *p_rate, struct clk_hw **p) 138 + static int 139 + clk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 142 140 { 141 + struct clk *parent = __clk_get_parent(hw->clk); 143 142 struct clk_pll *pll = to_clk_pll(hw); 144 143 const struct pll_freq_tbl *f; 145 144 146 - f = find_freq(pll->freq_tbl, rate); 147 - if (!f) 148 - return clk_pll_recalc_rate(hw, *p_rate); 145 + req->best_parent_hw = __clk_get_hw(parent); 146 + req->best_parent_rate = __clk_get_rate(parent); 149 147 150 - return f->freq; 148 + f = find_freq(pll->freq_tbl, req->rate); 149 + if (!f) 150 + req->rate = clk_pll_recalc_rate(hw, req->best_parent_rate); 151 + else 152 + req->rate = f->freq; 153 + 154 + return 0; 151 155 } 152 156 153 157 static int
+20 -24
drivers/clk/qcom/clk-rcg.c
··· 404 404 return calc_rate(parent_rate, m, n, mode, pre_div); 405 405 } 406 406 407 - static long _freq_tbl_determine_rate(struct clk_hw *hw, 408 - const struct freq_tbl *f, unsigned long rate, 409 - unsigned long min_rate, unsigned long max_rate, 410 - unsigned long *p_rate, struct clk_hw **p_hw, 407 + static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, 408 + struct clk_rate_request *req, 411 409 const struct parent_map *parent_map) 412 410 { 413 - unsigned long clk_flags; 411 + unsigned long clk_flags, rate = req->rate; 414 412 struct clk *p; 415 413 int index; 416 414 ··· 433 435 } else { 434 436 rate = __clk_get_rate(p); 435 437 } 436 - *p_hw = __clk_get_hw(p); 437 - *p_rate = rate; 438 + req->best_parent_hw = __clk_get_hw(p); 439 + req->best_parent_rate = rate; 440 + req->rate = f->freq; 438 441 439 - return f->freq; 442 + return 0; 440 443 } 441 444 442 - static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate, 443 - unsigned long min_rate, unsigned long max_rate, 444 - unsigned long *p_rate, struct clk_hw **p) 445 + static int clk_rcg_determine_rate(struct clk_hw *hw, 446 + struct clk_rate_request *req) 445 447 { 446 448 struct clk_rcg *rcg = to_clk_rcg(hw); 447 449 448 - return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate, 449 - max_rate, p_rate, p, rcg->s.parent_map); 450 + return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, 451 + rcg->s.parent_map); 450 452 } 451 453 452 - static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate, 453 - unsigned long min_rate, unsigned long max_rate, 454 - unsigned long *p_rate, struct clk_hw **p) 454 + static int clk_dyn_rcg_determine_rate(struct clk_hw *hw, 455 + struct clk_rate_request *req) 455 456 { 456 457 struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); 457 458 u32 reg; ··· 461 464 bank = reg_to_bank(rcg, reg); 462 465 s = &rcg->s[bank]; 463 466 464 - return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate, 465 - max_rate, 
p_rate, p, s->parent_map); 467 + return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, s->parent_map); 466 468 } 467 469 468 - static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate, 469 - unsigned long min_rate, unsigned long max_rate, 470 - unsigned long *p_rate, struct clk_hw **p_hw) 470 + static int clk_rcg_bypass_determine_rate(struct clk_hw *hw, 471 + struct clk_rate_request *req) 471 472 { 472 473 struct clk_rcg *rcg = to_clk_rcg(hw); 473 474 const struct freq_tbl *f = rcg->freq_tbl; ··· 473 478 int index = qcom_find_src_index(hw, rcg->s.parent_map, f->src); 474 479 475 480 p = clk_get_parent_by_index(hw->clk, index); 476 - *p_hw = __clk_get_hw(p); 477 - *p_rate = __clk_round_rate(p, rate); 481 + req->best_parent_hw = __clk_get_hw(p); 482 + req->best_parent_rate = __clk_round_rate(p, req->rate); 483 + req->rate = req->best_parent_rate; 478 484 479 - return *p_rate; 485 + return 0; 480 486 } 481 487 482 488 static int __clk_rcg_set_rate(struct clk_rcg *rcg, const struct freq_tbl *f)
+39 -39
drivers/clk/qcom/clk-rcg2.c
··· 176 176 return calc_rate(parent_rate, m, n, mode, hid_div); 177 177 } 178 178 179 - static long _freq_tbl_determine_rate(struct clk_hw *hw, 180 - const struct freq_tbl *f, unsigned long rate, 181 - unsigned long *p_rate, struct clk_hw **p_hw) 179 + static int _freq_tbl_determine_rate(struct clk_hw *hw, 180 + const struct freq_tbl *f, struct clk_rate_request *req) 182 181 { 183 - unsigned long clk_flags; 182 + unsigned long clk_flags, rate = req->rate; 184 183 struct clk *p; 185 184 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 186 185 int index; ··· 209 210 } else { 210 211 rate = __clk_get_rate(p); 211 212 } 212 - *p_hw = __clk_get_hw(p); 213 - *p_rate = rate; 213 + req->best_parent_hw = __clk_get_hw(p); 214 + req->best_parent_rate = rate; 215 + req->rate = f->freq; 214 216 215 - return f->freq; 217 + return 0; 216 218 } 217 219 218 - static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate, 219 - unsigned long min_rate, unsigned long max_rate, 220 - unsigned long *p_rate, struct clk_hw **p) 220 + static int clk_rcg2_determine_rate(struct clk_hw *hw, 221 + struct clk_rate_request *req) 221 222 { 222 223 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 223 224 224 - return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p); 225 + return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req); 225 226 } 226 227 227 228 static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) ··· 373 374 return clk_edp_pixel_set_rate(hw, rate, parent_rate); 374 375 } 375 376 376 - static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, 377 - unsigned long min_rate, 378 - unsigned long max_rate, 379 - unsigned long *p_rate, struct clk_hw **p) 377 + static int clk_edp_pixel_determine_rate(struct clk_hw *hw, 378 + struct clk_rate_request *req) 380 379 { 381 380 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 382 381 const struct freq_tbl *f = rcg->freq_tbl; 383 382 const struct frac_entry *frac; 384 383 int delta = 100000; 385 - s64 src_rate 
= *p_rate; 386 384 s64 request; 387 385 u32 mask = BIT(rcg->hid_width) - 1; 388 386 u32 hid_div; 389 387 int index = qcom_find_src_index(hw, rcg->parent_map, f->src); 388 + struct clk *p = clk_get_parent_by_index(hw->clk, index); 390 389 391 390 /* Force the correct parent */ 392 - *p = __clk_get_hw(clk_get_parent_by_index(hw->clk, index)); 391 + req->best_parent_hw = __clk_get_hw(p); 392 + req->best_parent_rate = __clk_get_rate(p); 393 393 394 - if (src_rate == 810000000) 394 + if (req->best_parent_rate == 810000000) 395 395 frac = frac_table_810m; 396 396 else 397 397 frac = frac_table_675m; 398 398 399 399 for (; frac->num; frac++) { 400 - request = rate; 400 + request = req->rate; 401 401 request *= frac->den; 402 402 request = div_s64(request, frac->num); 403 - if ((src_rate < (request - delta)) || 404 - (src_rate > (request + delta))) 403 + if ((req->best_parent_rate < (request - delta)) || 404 + (req->best_parent_rate > (request + delta))) 405 405 continue; 406 406 407 407 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, ··· 408 410 hid_div >>= CFG_SRC_DIV_SHIFT; 409 411 hid_div &= mask; 410 412 411 - return calc_rate(src_rate, frac->num, frac->den, !!frac->den, 412 - hid_div); 413 + req->rate = calc_rate(req->best_parent_rate, 414 + frac->num, frac->den, 415 + !!frac->den, hid_div); 416 + return 0; 413 417 } 414 418 415 419 return -EINVAL; ··· 428 428 }; 429 429 EXPORT_SYMBOL_GPL(clk_edp_pixel_ops); 430 430 431 - static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate, 432 - unsigned long min_rate, unsigned long max_rate, 433 - unsigned long *p_rate, struct clk_hw **p_hw) 431 + static int clk_byte_determine_rate(struct clk_hw *hw, 432 + struct clk_rate_request *req) 434 433 { 435 434 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 436 435 const struct freq_tbl *f = rcg->freq_tbl; ··· 438 439 u32 mask = BIT(rcg->hid_width) - 1; 439 440 struct clk *p; 440 441 441 - if (rate == 0) 442 + if (req->rate == 0) 442 443 return -EINVAL; 443 444 444 
445 p = clk_get_parent_by_index(hw->clk, index); 445 - *p_hw = __clk_get_hw(p); 446 - *p_rate = parent_rate = __clk_round_rate(p, rate); 446 + req->best_parent_hw = __clk_get_hw(p); 447 + req->best_parent_rate = parent_rate = __clk_round_rate(p, req->rate); 447 448 448 - div = DIV_ROUND_UP((2 * parent_rate), rate) - 1; 449 + div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1; 449 450 div = min_t(u32, div, mask); 450 451 451 - return calc_rate(parent_rate, 0, 0, 0, div); 452 + req->rate = calc_rate(parent_rate, 0, 0, 0, div); 453 + 454 + return 0; 452 455 } 453 456 454 457 static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate, ··· 495 494 { } 496 495 }; 497 496 498 - static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, 499 - unsigned long min_rate, 500 - unsigned long max_rate, 501 - unsigned long *p_rate, struct clk_hw **p) 497 + static int clk_pixel_determine_rate(struct clk_hw *hw, 498 + struct clk_rate_request *req) 502 499 { 503 500 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 504 501 unsigned long request, src_rate; ··· 506 507 int index = qcom_find_src_index(hw, rcg->parent_map, f->src); 507 508 struct clk *parent = clk_get_parent_by_index(hw->clk, index); 508 509 509 - *p = __clk_get_hw(parent); 510 + req->best_parent_hw = __clk_get_hw(parent); 510 511 511 512 for (; frac->num; frac++) { 512 - request = (rate * frac->den) / frac->num; 513 + request = (req->rate * frac->den) / frac->num; 513 514 514 515 src_rate = __clk_round_rate(parent, request); 515 516 if ((src_rate < (request - delta)) || 516 517 (src_rate > (request + delta))) 517 518 continue; 518 519 519 - *p_rate = src_rate; 520 - return (src_rate * frac->num) / frac->den; 520 + req->best_parent_rate = src_rate; 521 + req->rate = (src_rate * frac->num) / frac->den; 522 + return 0; 521 523 } 522 524 523 525 return -EINVAL;
+10 -11
drivers/clk/sunxi/clk-factors.c
··· 79 79 return rate; 80 80 } 81 81 82 - static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate, 83 - unsigned long min_rate, 84 - unsigned long max_rate, 85 - unsigned long *best_parent_rate, 86 - struct clk_hw **best_parent_p) 82 + static int clk_factors_determine_rate(struct clk_hw *hw, 83 + struct clk_rate_request *req) 87 84 { 88 85 struct clk *clk = hw->clk, *parent, *best_parent = NULL; 89 86 int i, num_parents; ··· 93 96 if (!parent) 94 97 continue; 95 98 if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT) 96 - parent_rate = __clk_round_rate(parent, rate); 99 + parent_rate = __clk_round_rate(parent, req->rate); 97 100 else 98 101 parent_rate = __clk_get_rate(parent); 99 102 100 - child_rate = clk_factors_round_rate(hw, rate, &parent_rate); 103 + child_rate = clk_factors_round_rate(hw, req->rate, 104 + &parent_rate); 101 105 102 - if (child_rate <= rate && child_rate > best_child_rate) { 106 + if (child_rate <= req->rate && child_rate > best_child_rate) { 103 107 best_parent = parent; 104 108 best = parent_rate; 105 109 best_child_rate = child_rate; ··· 108 110 } 109 111 110 112 if (best_parent) 111 - *best_parent_p = __clk_get_hw(best_parent); 112 - *best_parent_rate = best; 113 + req->best_parent_hw = __clk_get_hw(best_parent); 114 + req->best_parent_rate = best; 115 + req->rate = best_child_rate; 113 116 114 - return best_child_rate; 117 + return 0; 115 118 } 116 119 117 120 static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
+10 -11
drivers/clk/sunxi/clk-sun6i-ar100.c
··· 44 44 return (parent_rate >> shift) / (div + 1); 45 45 } 46 46 47 - static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate, 48 - unsigned long min_rate, 49 - unsigned long max_rate, 50 - unsigned long *best_parent_rate, 51 - struct clk_hw **best_parent_clk) 47 + static int ar100_determine_rate(struct clk_hw *hw, 48 + struct clk_rate_request *req) 52 49 { 53 50 int nparents = __clk_get_num_parents(hw->clk); 54 51 long best_rate = -EINVAL; 55 52 int i; 56 53 57 - *best_parent_clk = NULL; 54 + req->best_parent_hw = NULL; 58 55 59 56 for (i = 0; i < nparents; i++) { 60 57 unsigned long parent_rate; ··· 62 65 63 66 parent = clk_get_parent_by_index(hw->clk, i); 64 67 parent_rate = __clk_get_rate(parent); 65 - div = DIV_ROUND_UP(parent_rate, rate); 68 + div = DIV_ROUND_UP(parent_rate, req->rate); 66 69 67 70 /* 68 71 * The AR100 clk contains 2 divisors: ··· 98 101 continue; 99 102 100 103 tmp_rate = (parent_rate >> shift) / div; 101 - if (!*best_parent_clk || tmp_rate > best_rate) { 102 - *best_parent_clk = __clk_get_hw(parent); 103 - *best_parent_rate = parent_rate; 104 + if (!req->best_parent_hw || tmp_rate > best_rate) { 105 + req->best_parent_hw = __clk_get_hw(parent); 106 + req->best_parent_rate = parent_rate; 104 107 best_rate = tmp_rate; 105 108 } 106 109 } 107 110 108 - return best_rate; 111 + req->rate = best_rate; 112 + 113 + return 0; 109 114 } 110 115 111 116 static int ar100_set_parent(struct clk_hw *hw, u8 index)
+9 -11
drivers/clk/sunxi/clk-sunxi.c
··· 118 118 return (parent_rate / calcm) >> calcp; 119 119 } 120 120 121 - static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate, 122 - unsigned long min_rate, 123 - unsigned long max_rate, 124 - unsigned long *best_parent_rate, 125 - struct clk_hw **best_parent_clk) 121 + static int sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, 122 + struct clk_rate_request *req) 126 123 { 127 124 struct clk *clk = hw->clk, *parent, *best_parent = NULL; 128 125 int i, num_parents; ··· 132 135 if (!parent) 133 136 continue; 134 137 if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT) 135 - parent_rate = __clk_round_rate(parent, rate); 138 + parent_rate = __clk_round_rate(parent, req->rate); 136 139 else 137 140 parent_rate = __clk_get_rate(parent); 138 141 139 - child_rate = sun6i_ahb1_clk_round(rate, NULL, NULL, i, 142 + child_rate = sun6i_ahb1_clk_round(req->rate, NULL, NULL, i, 140 143 parent_rate); 141 144 142 - if (child_rate <= rate && child_rate > best_child_rate) { 145 + if (child_rate <= req->rate && child_rate > best_child_rate) { 143 146 best_parent = parent; 144 147 best = parent_rate; 145 148 best_child_rate = child_rate; ··· 147 150 } 148 151 149 152 if (best_parent) 150 - *best_parent_clk = __clk_get_hw(best_parent); 151 - *best_parent_rate = best; 153 + req->best_parent_hw = __clk_get_hw(best_parent); 154 + req->best_parent_rate = best; 155 + req->rate = best_child_rate; 152 156 153 - return best_child_rate; 157 + return 0; 154 158 } 155 159 156 160 static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+15 -13
drivers/clk/tegra/clk-emc.c
··· 116 116 * safer since things have EMC rate floors. Also don't touch parent_rate 117 117 * since we don't want the CCF to play with our parent clocks. 118 118 */ 119 - static long emc_determine_rate(struct clk_hw *hw, unsigned long rate, 120 - unsigned long min_rate, 121 - unsigned long max_rate, 122 - unsigned long *best_parent_rate, 123 - struct clk_hw **best_parent_hw) 119 + static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 124 120 { 125 121 struct tegra_clk_emc *tegra; 126 122 u8 ram_code = tegra_read_ram_code(); ··· 131 135 132 136 timing = tegra->timings + i; 133 137 134 - if (timing->rate > max_rate) { 138 + if (timing->rate > req->max_rate) { 135 139 i = min(i, 1); 136 - return tegra->timings[i - 1].rate; 140 + req->rate = tegra->timings[i - 1].rate; 141 + return 0; 137 142 } 138 143 139 - if (timing->rate < min_rate) 144 + if (timing->rate < req->min_rate) 140 145 continue; 141 146 142 - if (timing->rate >= rate) 143 - return timing->rate; 147 + if (timing->rate >= req->rate) { 148 + req->rate = timing->rate; 149 + return 0; 150 + } 144 151 } 145 152 146 - if (timing) 147 - return timing->rate; 153 + if (timing) { 154 + req->rate = timing->rate; 155 + return 0; 156 + } 148 157 149 - return __clk_get_rate(hw->clk); 158 + req->rate = __clk_get_rate(hw->clk); 159 + return 0; 150 160 } 151 161 152 162 static u8 emc_get_parent(struct clk_hw *hw)
+29 -20
include/linux/clk-provider.h
··· 38 38 struct dentry; 39 39 40 40 /** 41 + * struct clk_rate_request - Structure encoding the clk constraints that 42 + * a clock user might require. 43 + * 44 + * @rate: Requested clock rate. This field will be adjusted by 45 + * clock drivers according to hardware capabilities. 46 + * @min_rate: Minimum rate imposed by clk users. 47 + * @max_rate: Maximum rate a imposed by clk users. 48 + * @best_parent_rate: The best parent rate a parent can provide to fulfill the 49 + * requested constraints. 50 + * @best_parent_hw: The most appropriate parent clock that fulfills the 51 + * requested constraints. 52 + * 53 + */ 54 + struct clk_rate_request { 55 + unsigned long rate; 56 + unsigned long min_rate; 57 + unsigned long max_rate; 58 + unsigned long best_parent_rate; 59 + struct clk_hw *best_parent_hw; 60 + }; 61 + 62 + /** 41 63 * struct clk_ops - Callback operations for hardware clocks; these are to 42 64 * be provided by the clock implementation, and will be called by drivers 43 65 * through the clk_* api. 
··· 198 176 unsigned long parent_rate); 199 177 long (*round_rate)(struct clk_hw *hw, unsigned long rate, 200 178 unsigned long *parent_rate); 201 - long (*determine_rate)(struct clk_hw *hw, 202 - unsigned long rate, 203 - unsigned long min_rate, 204 - unsigned long max_rate, 205 - unsigned long *best_parent_rate, 206 - struct clk_hw **best_parent_hw); 179 + int (*determine_rate)(struct clk_hw *hw, 180 + struct clk_rate_request *req); 207 181 int (*set_parent)(struct clk_hw *hw, u8 index); 208 182 u8 (*get_parent)(struct clk_hw *hw); 209 183 int (*set_rate)(struct clk_hw *hw, unsigned long rate, ··· 596 578 bool __clk_is_prepared(struct clk *clk); 597 579 bool __clk_is_enabled(struct clk *clk); 598 580 struct clk *__clk_lookup(const char *name); 599 - long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, 600 - unsigned long min_rate, 601 - unsigned long max_rate, 602 - unsigned long *best_parent_rate, 603 - struct clk_hw **best_parent_p); 604 - unsigned long __clk_determine_rate(struct clk_hw *core, 605 - unsigned long rate, 606 - unsigned long min_rate, 607 - unsigned long max_rate); 608 - long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate, 609 - unsigned long min_rate, 610 - unsigned long max_rate, 611 - unsigned long *best_parent_rate, 612 - struct clk_hw **best_parent_p); 581 + int __clk_mux_determine_rate(struct clk_hw *hw, 582 + struct clk_rate_request *req); 583 + int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req); 584 + int __clk_mux_determine_rate_closest(struct clk_hw *hw, 585 + struct clk_rate_request *req); 613 586 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent); 614 587 615 588 static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
+4 -12
include/linux/clk/ti.h
··· 269 269 unsigned long rate, 270 270 unsigned long parent_rate, 271 271 u8 index); 272 - long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, 273 - unsigned long rate, 274 - unsigned long min_rate, 275 - unsigned long max_rate, 276 - unsigned long *best_parent_rate, 277 - struct clk_hw **best_parent_clk); 272 + int omap3_noncore_dpll_determine_rate(struct clk_hw *hw, 273 + struct clk_rate_request *req); 278 274 unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, 279 275 unsigned long parent_rate); 280 276 long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, 281 277 unsigned long target_rate, 282 278 unsigned long *parent_rate); 283 - long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, 284 - unsigned long rate, 285 - unsigned long min_rate, 286 - unsigned long max_rate, 287 - unsigned long *best_parent_rate, 288 - struct clk_hw **best_parent_clk); 279 + int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, 280 + struct clk_rate_request *req); 289 281 u8 omap2_init_dpll_parent(struct clk_hw *hw); 290 282 unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate); 291 283 long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,