Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'clk-renesas', 'clk-kunit', 'clk-regmap' and 'clk-frac-divider' into clk-next

- Make clk kunit tests work with lockdep
- Fix clk gate kunit test for big-endian
- Convert more than a handful of clk drivers to use regmap maple tree
- Consider the CLK_FRAC_DIVIDER_ZERO_BASED flag in the fractional divider clk
  implementation

* clk-renesas: (23 commits)
clk: renesas: r9a08g045: Add clock and reset support for SDHI1 and SDHI2
clk: renesas: rzg2l: Use %x format specifier to print CLK_ON_R()
clk: renesas: Add minimal boot support for RZ/G3S SoC
clk: renesas: rzg2l: Add divider clock for RZ/G3S
clk: renesas: rzg2l: Refactor SD mux driver
clk: renesas: rzg2l: Remove CPG_SDHI_DSEL from generic header
clk: renesas: rzg2l: Add struct clk_hw_data
clk: renesas: rzg2l: Add support for RZ/G3S PLL
clk: renesas: rzg2l: Remove critical area
clk: renesas: rzg2l: Fix computation formula
clk: renesas: rzg2l: Trust value returned by hardware
clk: renesas: rzg2l: Lock around writes to mux register
clk: renesas: rzg2l: Wait for status bit of SD mux before continuing
clk: renesas: rcar-gen3: Extend SDnH divider table
dt-bindings: clock: renesas,rzg2l-cpg: Document RZ/G3S SoC
clk: renesas: r8a7795: Constify r8a7795_*_clks
clk: renesas: r9a06g032: Name anonymous structs
clk: renesas: r9a06g032: Fix kerneldoc warning
clk: renesas: rzg2l: Use u32 for flag and mux_flags
clk: renesas: rzg2l: Use FIELD_GET() for PLL register fields
...

* clk-kunit:
clk: Fix clk gate kunit test on big-endian CPUs
clk: Parameterize clk_leaf_mux_set_rate_parent
clk: Drive clk_leaf_mux_set_rate_parent test from clk_ops

* clk-regmap:
clk: versaclock7: Convert to use maple tree register cache
clk: versaclock5: Convert to use maple tree register cache
clk: versaclock3: Convert to use maple tree register cache
clk: versaclock3: Remove redundant _is_writeable()
clk: si570: Convert to use maple tree register cache
clk: si544: Convert to use maple tree register cache
clk: si5351: Convert to use maple tree register cache
clk: si5341: Convert to use maple tree register cache
clk: si514: Convert to use maple tree register cache
clk: cdce925: Convert to use maple tree register cache

* clk-frac-divider:
clk: fractional-divider: tests: Add test suite for edge cases
clk: fractional-divider: Improve approximation when zero based and export

+1313 -183
+1
Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
··· 27 27 - renesas,r9a07g043-cpg # RZ/G2UL{Type-1,Type-2} and RZ/Five 28 28 - renesas,r9a07g044-cpg # RZ/G2{L,LC} 29 29 - renesas,r9a07g054-cpg # RZ/V2L 30 + - renesas,r9a08g045-cpg # RZ/G3S 30 31 - renesas,r9a09g011-cpg # RZ/V2M 31 32 32 33 reg:
+1
drivers/clk/.kunitconfig
··· 2 2 CONFIG_COMMON_CLK=y 3 3 CONFIG_CLK_KUNIT_TEST=y 4 4 CONFIG_CLK_GATE_KUNIT_TEST=y 5 + CONFIG_CLK_FD_KUNIT_TEST=y 5 6 CONFIG_UML_PCI_OVER_VIRTIO=n
+7
drivers/clk/Kconfig
··· 526 526 help 527 527 Kunit test for the basic clk gate type. 528 528 529 + config CLK_FD_KUNIT_TEST 530 + tristate "Basic fractional divider type Kunit test" if !KUNIT_ALL_TESTS 531 + depends on KUNIT 532 + default KUNIT_ALL_TESTS 533 + help 534 + Kunit test for the clk-fractional-divider type. 535 + 529 536 endif
+1
drivers/clk/Makefile
··· 12 12 obj-$(CONFIG_COMMON_CLK) += clk-mux.o 13 13 obj-$(CONFIG_COMMON_CLK) += clk-composite.o 14 14 obj-$(CONFIG_COMMON_CLK) += clk-fractional-divider.o 15 + obj-$(CONFIG_CLK_FD_KUNIT_TEST) += clk-fractional-divider_test.o 15 16 obj-$(CONFIG_COMMON_CLK) += clk-gpio.o 16 17 ifeq ($(CONFIG_OF), y) 17 18 obj-$(CONFIG_COMMON_CLK) += clk-conf.o
+1 -1
drivers/clk/clk-cdce925.c
··· 647 647 .name = "configuration0", 648 648 .reg_bits = 8, 649 649 .val_bits = 8, 650 - .cache_type = REGCACHE_RBTREE, 650 + .cache_type = REGCACHE_MAPLE, 651 651 }; 652 652 653 653 dev_dbg(&client->dev, "%s\n", __func__);
+20 -7
drivers/clk/clk-fractional-divider.c
··· 123 123 unsigned long *m, unsigned long *n) 124 124 { 125 125 struct clk_fractional_divider *fd = to_clk_fd(hw); 126 + unsigned long max_m, max_n; 126 127 127 128 /* 128 129 * Get rate closer to *parent_rate to guarantee there is no overflow ··· 139 138 rate <<= scale - fd->nwidth; 140 139 } 141 140 142 - rational_best_approximation(rate, *parent_rate, 143 - GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0), 144 - m, n); 141 + if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) { 142 + max_m = 1 << fd->mwidth; 143 + max_n = 1 << fd->nwidth; 144 + } else { 145 + max_m = GENMASK(fd->mwidth - 1, 0); 146 + max_n = GENMASK(fd->nwidth - 1, 0); 147 + } 148 + 149 + rational_best_approximation(rate, *parent_rate, max_m, max_n, m, n); 145 150 } 151 + EXPORT_SYMBOL_GPL(clk_fractional_divider_general_approximation); 146 152 147 153 static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate, 148 154 unsigned long *parent_rate) ··· 177 169 { 178 170 struct clk_fractional_divider *fd = to_clk_fd(hw); 179 171 unsigned long flags = 0; 180 - unsigned long m, n; 172 + unsigned long m, n, max_m, max_n; 181 173 u32 mmask, nmask; 182 174 u32 val; 183 175 184 - rational_best_approximation(rate, parent_rate, 185 - GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0), 186 - &m, &n); 176 + if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) { 177 + max_m = 1 << fd->mwidth; 178 + max_n = 1 << fd->nwidth; 179 + } else { 180 + max_m = GENMASK(fd->mwidth - 1, 0); 181 + max_n = GENMASK(fd->nwidth - 1, 0); 182 + } 183 + rational_best_approximation(rate, parent_rate, max_m, max_n, &m, &n); 187 184 188 185 if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) { 189 186 m--;
+147
drivers/clk/clk-fractional-divider_test.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Kunit test for clock fractional divider 4 + */ 5 + #include <linux/clk-provider.h> 6 + #include <kunit/test.h> 7 + 8 + #include "clk-fractional-divider.h" 9 + 10 + /* 11 + * Test the maximum denominator case for fd clock without flags. 12 + * 13 + * Expect the highest possible denominator to be used in order to get as close as possible to the 14 + * requested rate. 15 + */ 16 + static void clk_fd_test_approximation_max_denominator(struct kunit *test) 17 + { 18 + struct clk_fractional_divider *fd; 19 + unsigned long rate, parent_rate, parent_rate_before, m, n, max_n; 20 + 21 + fd = kunit_kzalloc(test, sizeof(*fd), GFP_KERNEL); 22 + KUNIT_ASSERT_NOT_NULL(test, fd); 23 + 24 + fd->mwidth = 3; 25 + fd->nwidth = 3; 26 + max_n = 7; 27 + 28 + rate = 240000000; 29 + parent_rate = (max_n + 1) * rate; /* so that it exceeds the maximum divisor */ 30 + parent_rate_before = parent_rate; 31 + 32 + clk_fractional_divider_general_approximation(&fd->hw, rate, &parent_rate, &m, &n); 33 + KUNIT_ASSERT_EQ(test, parent_rate, parent_rate_before); 34 + 35 + KUNIT_EXPECT_EQ(test, m, 1); 36 + KUNIT_EXPECT_EQ(test, n, max_n); 37 + } 38 + 39 + /* 40 + * Test the maximum numerator case for fd clock without flags. 41 + * 42 + * Expect the highest possible numerator to be used in order to get as close as possible to the 43 + * requested rate. 
44 + */ 45 + static void clk_fd_test_approximation_max_numerator(struct kunit *test) 46 + { 47 + struct clk_fractional_divider *fd; 48 + unsigned long rate, parent_rate, parent_rate_before, m, n, max_m; 49 + 50 + fd = kunit_kzalloc(test, sizeof(*fd), GFP_KERNEL); 51 + KUNIT_ASSERT_NOT_NULL(test, fd); 52 + 53 + fd->mwidth = 3; 54 + max_m = 7; 55 + fd->nwidth = 3; 56 + 57 + rate = 240000000; 58 + parent_rate = rate / (max_m + 1); /* so that it exceeds the maximum numerator */ 59 + parent_rate_before = parent_rate; 60 + 61 + clk_fractional_divider_general_approximation(&fd->hw, rate, &parent_rate, &m, &n); 62 + KUNIT_ASSERT_EQ(test, parent_rate, parent_rate_before); 63 + 64 + KUNIT_EXPECT_EQ(test, m, max_m); 65 + KUNIT_EXPECT_EQ(test, n, 1); 66 + } 67 + 68 + /* 69 + * Test the maximum denominator case for zero based fd clock. 70 + * 71 + * Expect the highest possible denominator to be used in order to get as close as possible to the 72 + * requested rate. 73 + */ 74 + static void clk_fd_test_approximation_max_denominator_zero_based(struct kunit *test) 75 + { 76 + struct clk_fractional_divider *fd; 77 + unsigned long rate, parent_rate, parent_rate_before, m, n, max_n; 78 + 79 + fd = kunit_kzalloc(test, sizeof(*fd), GFP_KERNEL); 80 + KUNIT_ASSERT_NOT_NULL(test, fd); 81 + 82 + fd->flags = CLK_FRAC_DIVIDER_ZERO_BASED; 83 + fd->mwidth = 3; 84 + fd->nwidth = 3; 85 + max_n = 8; 86 + 87 + rate = 240000000; 88 + parent_rate = (max_n + 1) * rate; /* so that it exceeds the maximum divisor */ 89 + parent_rate_before = parent_rate; 90 + 91 + clk_fractional_divider_general_approximation(&fd->hw, rate, &parent_rate, &m, &n); 92 + KUNIT_ASSERT_EQ(test, parent_rate, parent_rate_before); 93 + 94 + KUNIT_EXPECT_EQ(test, m, 1); 95 + KUNIT_EXPECT_EQ(test, n, max_n); 96 + } 97 + 98 + /* 99 + * Test the maximum numerator case for zero based fd clock. 100 + * 101 + * Expect the highest possible numerator to be used in order to get as close as possible to the 102 + * requested rate. 
103 + */ 104 + static void clk_fd_test_approximation_max_numerator_zero_based(struct kunit *test) 105 + { 106 + struct clk_fractional_divider *fd; 107 + unsigned long rate, parent_rate, parent_rate_before, m, n, max_m; 108 + 109 + fd = kunit_kzalloc(test, sizeof(*fd), GFP_KERNEL); 110 + KUNIT_ASSERT_NOT_NULL(test, fd); 111 + 112 + fd->flags = CLK_FRAC_DIVIDER_ZERO_BASED; 113 + fd->mwidth = 3; 114 + max_m = 8; 115 + fd->nwidth = 3; 116 + 117 + rate = 240000000; 118 + parent_rate = rate / (max_m + 1); /* so that it exceeds the maximum numerator */ 119 + parent_rate_before = parent_rate; 120 + 121 + clk_fractional_divider_general_approximation(&fd->hw, rate, &parent_rate, &m, &n); 122 + KUNIT_ASSERT_EQ(test, parent_rate, parent_rate_before); 123 + 124 + KUNIT_EXPECT_EQ(test, m, max_m); 125 + KUNIT_EXPECT_EQ(test, n, 1); 126 + } 127 + 128 + static struct kunit_case clk_fd_approximation_test_cases[] = { 129 + KUNIT_CASE(clk_fd_test_approximation_max_denominator), 130 + KUNIT_CASE(clk_fd_test_approximation_max_numerator), 131 + KUNIT_CASE(clk_fd_test_approximation_max_denominator_zero_based), 132 + KUNIT_CASE(clk_fd_test_approximation_max_numerator_zero_based), 133 + {} 134 + }; 135 + 136 + /* 137 + * Test suite for clk_fractional_divider_general_approximation(). 138 + */ 139 + static struct kunit_suite clk_fd_approximation_suite = { 140 + .name = "clk-fd-approximation", 141 + .test_cases = clk_fd_approximation_test_cases, 142 + }; 143 + 144 + kunit_test_suites( 145 + &clk_fd_approximation_suite 146 + ); 147 + MODULE_LICENSE("GPL");
+15 -15
drivers/clk/clk-gate_test.c
··· 131 131 void __iomem *fake_mem; 132 132 struct clk_hw *hw; 133 133 struct clk_hw *parent; 134 - u32 fake_reg; /* Keep at end, KASAN can detect out of bounds */ 134 + __le32 fake_reg; /* Keep at end, KASAN can detect out of bounds */ 135 135 }; 136 136 137 137 static struct clk_gate_test_context *clk_gate_test_alloc_ctx(struct kunit *test) ··· 166 166 167 167 KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0); 168 168 169 - KUNIT_EXPECT_EQ(test, enable_val, ctx->fake_reg); 169 + KUNIT_EXPECT_EQ(test, enable_val, le32_to_cpu(ctx->fake_reg)); 170 170 KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(hw)); 171 171 KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(hw)); 172 172 KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(parent)); ··· 183 183 u32 disable_val = 0; 184 184 185 185 KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0); 186 - KUNIT_ASSERT_EQ(test, enable_val, ctx->fake_reg); 186 + KUNIT_ASSERT_EQ(test, enable_val, le32_to_cpu(ctx->fake_reg)); 187 187 188 188 clk_disable_unprepare(clk); 189 - KUNIT_EXPECT_EQ(test, disable_val, ctx->fake_reg); 189 + KUNIT_EXPECT_EQ(test, disable_val, le32_to_cpu(ctx->fake_reg)); 190 190 KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(hw)); 191 191 KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(hw)); 192 192 KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(parent)); ··· 246 246 247 247 KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0); 248 248 249 - KUNIT_EXPECT_EQ(test, enable_val, ctx->fake_reg); 249 + KUNIT_EXPECT_EQ(test, enable_val, le32_to_cpu(ctx->fake_reg)); 250 250 KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(hw)); 251 251 KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(hw)); 252 252 KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(parent)); ··· 263 263 u32 disable_val = BIT(15); 264 264 265 265 KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0); 266 - KUNIT_ASSERT_EQ(test, enable_val, ctx->fake_reg); 266 + KUNIT_ASSERT_EQ(test, enable_val, le32_to_cpu(ctx->fake_reg)); 267 267 268 268 clk_disable_unprepare(clk); 269 - KUNIT_EXPECT_EQ(test, disable_val, 
ctx->fake_reg); 269 + KUNIT_EXPECT_EQ(test, disable_val, le32_to_cpu(ctx->fake_reg)); 270 270 KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(hw)); 271 271 KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(hw)); 272 272 KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(parent)); ··· 290 290 2000000); 291 291 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); 292 292 293 - ctx->fake_reg = BIT(15); /* Default to off */ 293 + ctx->fake_reg = cpu_to_le32(BIT(15)); /* Default to off */ 294 294 hw = clk_hw_register_gate_parent_hw(NULL, "test_gate", parent, 0, 295 295 ctx->fake_mem, 15, 296 296 CLK_GATE_SET_TO_DISABLE, NULL); ··· 319 319 320 320 KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0); 321 321 322 - KUNIT_EXPECT_EQ(test, enable_val, ctx->fake_reg); 322 + KUNIT_EXPECT_EQ(test, enable_val, le32_to_cpu(ctx->fake_reg)); 323 323 KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(hw)); 324 324 KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(hw)); 325 325 KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(parent)); ··· 336 336 u32 disable_val = BIT(9 + 16); 337 337 338 338 KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0); 339 - KUNIT_ASSERT_EQ(test, enable_val, ctx->fake_reg); 339 + KUNIT_ASSERT_EQ(test, enable_val, le32_to_cpu(ctx->fake_reg)); 340 340 341 341 clk_disable_unprepare(clk); 342 - KUNIT_EXPECT_EQ(test, disable_val, ctx->fake_reg); 342 + KUNIT_EXPECT_EQ(test, disable_val, le32_to_cpu(ctx->fake_reg)); 343 343 KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(hw)); 344 344 KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(hw)); 345 345 KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(parent)); ··· 387 387 struct clk_gate_test_context *ctx; 388 388 389 389 ctx = clk_gate_test_alloc_ctx(test); 390 - ctx->fake_reg = BIT(7); 390 + ctx->fake_reg = cpu_to_le32(BIT(7)); 391 391 hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 7, 392 392 0, NULL); 393 393 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw); ··· 402 402 struct clk_gate_test_context *ctx; 403 403 404 404 ctx = clk_gate_test_alloc_ctx(test); 405 - ctx->fake_reg 
= BIT(4); 405 + ctx->fake_reg = cpu_to_le32(BIT(4)); 406 406 hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 7, 407 407 0, NULL); 408 408 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw); ··· 417 417 struct clk_gate_test_context *ctx; 418 418 419 419 ctx = clk_gate_test_alloc_ctx(test); 420 - ctx->fake_reg = BIT(31); 420 + ctx->fake_reg = cpu_to_le32(BIT(31)); 421 421 hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 2, 422 422 CLK_GATE_SET_TO_DISABLE, NULL); 423 423 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw); ··· 432 432 struct clk_gate_test_context *ctx; 433 433 434 434 ctx = clk_gate_test_alloc_ctx(test); 435 - ctx->fake_reg = BIT(29); 435 + ctx->fake_reg = cpu_to_le32(BIT(29)); 436 436 hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 29, 437 437 CLK_GATE_SET_TO_DISABLE, NULL); 438 438 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+1 -1
drivers/clk/clk-si514.c
··· 321 321 static const struct regmap_config si514_regmap_config = { 322 322 .reg_bits = 8, 323 323 .val_bits = 8, 324 - .cache_type = REGCACHE_RBTREE, 324 + .cache_type = REGCACHE_MAPLE, 325 325 .max_register = SI514_REG_CONTROL, 326 326 .writeable_reg = si514_regmap_is_writeable, 327 327 .volatile_reg = si514_regmap_is_volatile,
+1 -1
drivers/clk/clk-si5341.c
··· 1260 1260 static const struct regmap_config si5341_regmap_config = { 1261 1261 .reg_bits = 8, 1262 1262 .val_bits = 8, 1263 - .cache_type = REGCACHE_RBTREE, 1263 + .cache_type = REGCACHE_MAPLE, 1264 1264 .ranges = si5341_regmap_ranges, 1265 1265 .num_ranges = ARRAY_SIZE(si5341_regmap_ranges), 1266 1266 .max_register = SI5341_REGISTER_MAX,
+1 -1
drivers/clk/clk-si5351.c
··· 206 206 static const struct regmap_config si5351_regmap_config = { 207 207 .reg_bits = 8, 208 208 .val_bits = 8, 209 - .cache_type = REGCACHE_RBTREE, 209 + .cache_type = REGCACHE_MAPLE, 210 210 .max_register = 187, 211 211 .writeable_reg = si5351_regmap_is_writeable, 212 212 .volatile_reg = si5351_regmap_is_volatile,
+1 -1
drivers/clk/clk-si544.c
··· 446 446 static const struct regmap_config si544_regmap_config = { 447 447 .reg_bits = 8, 448 448 .val_bits = 8, 449 - .cache_type = REGCACHE_RBTREE, 449 + .cache_type = REGCACHE_MAPLE, 450 450 .max_register = SI544_REG_PAGE_SELECT, 451 451 .volatile_reg = si544_regmap_is_volatile, 452 452 };
+1 -1
drivers/clk/clk-si570.c
··· 392 392 static const struct regmap_config si570_regmap_config = { 393 393 .reg_bits = 8, 394 394 .val_bits = 8, 395 - .cache_type = REGCACHE_RBTREE, 395 + .cache_type = REGCACHE_MAPLE, 396 396 .max_register = 137, 397 397 .writeable_reg = si570_regmap_is_writeable, 398 398 .volatile_reg = si570_regmap_is_volatile,
+1 -7
drivers/clk/clk-versaclock3.c
··· 585 585 .get_parent = vc3_clk_mux_get_parent, 586 586 }; 587 587 588 - static bool vc3_regmap_is_writeable(struct device *dev, unsigned int reg) 589 - { 590 - return true; 591 - } 592 - 593 588 static const struct regmap_config vc3_regmap_config = { 594 589 .reg_bits = 8, 595 590 .val_bits = 8, 596 - .cache_type = REGCACHE_RBTREE, 591 + .cache_type = REGCACHE_MAPLE, 597 592 .max_register = 0x24, 598 - .writeable_reg = vc3_regmap_is_writeable, 599 593 }; 600 594 601 595 static struct vc3_hw_data clk_div[5];
+1 -1
drivers/clk/clk-versaclock5.c
··· 217 217 static const struct regmap_config vc5_regmap_config = { 218 218 .reg_bits = 8, 219 219 .val_bits = 8, 220 - .cache_type = REGCACHE_RBTREE, 220 + .cache_type = REGCACHE_MAPLE, 221 221 .max_register = 0x76, 222 222 .writeable_reg = vc5_regmap_is_writeable, 223 223 };
+1 -1
drivers/clk/clk-versaclock7.c
··· 1275 1275 .ranges = vc7_range_cfg, 1276 1276 .num_ranges = ARRAY_SIZE(vc7_range_cfg), 1277 1277 .volatile_reg = vc7_volatile_reg, 1278 - .cache_type = REGCACHE_RBTREE, 1278 + .cache_type = REGCACHE_MAPLE, 1279 1279 .can_multi_write = true, 1280 1280 .reg_format_endian = REGMAP_ENDIAN_LITTLE, 1281 1281 .val_format_endian = REGMAP_ENDIAN_LITTLE,
+113 -17
drivers/clk/clk_test.c
··· 10 10 11 11 #include <kunit/test.h> 12 12 13 + static const struct clk_ops empty_clk_ops = { }; 14 + 13 15 #define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000) 14 16 #define DUMMY_CLOCK_RATE_1 (142 * 1000 * 1000) 15 17 #define DUMMY_CLOCK_RATE_2 (242 * 1000 * 1000) ··· 2157 2155 struct clk_leaf_mux_ctx { 2158 2156 struct clk_multiple_parent_ctx mux_ctx; 2159 2157 struct clk_hw hw; 2158 + struct clk_hw parent; 2159 + struct clk_rate_request *req; 2160 + int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req); 2161 + }; 2162 + 2163 + static int clk_leaf_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 2164 + { 2165 + struct clk_leaf_mux_ctx *ctx = container_of(hw, struct clk_leaf_mux_ctx, hw); 2166 + int ret; 2167 + struct clk_rate_request *parent_req = ctx->req; 2168 + 2169 + clk_hw_forward_rate_request(hw, req, req->best_parent_hw, parent_req, req->rate); 2170 + ret = ctx->determine_rate_func(req->best_parent_hw, parent_req); 2171 + if (ret) 2172 + return ret; 2173 + 2174 + req->rate = parent_req->rate; 2175 + 2176 + return 0; 2177 + } 2178 + 2179 + static const struct clk_ops clk_leaf_mux_set_rate_parent_ops = { 2180 + .determine_rate = clk_leaf_mux_determine_rate, 2181 + .set_parent = clk_dummy_single_set_parent, 2182 + .get_parent = clk_dummy_single_get_parent, 2160 2183 }; 2161 2184 2162 2185 static int ··· 2220 2193 if (ret) 2221 2194 return ret; 2222 2195 2223 - ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->mux_ctx.hw, 2224 - &clk_dummy_single_parent_ops, 2196 + ctx->parent.init = CLK_HW_INIT_HW("test-parent", &ctx->mux_ctx.hw, 2197 + &empty_clk_ops, CLK_SET_RATE_PARENT); 2198 + ret = clk_hw_register(NULL, &ctx->parent); 2199 + if (ret) 2200 + return ret; 2201 + 2202 + ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->parent, 2203 + &clk_leaf_mux_set_rate_parent_ops, 2225 2204 CLK_SET_RATE_PARENT); 2226 2205 ret = clk_hw_register(NULL, &ctx->hw); 2227 2206 if (ret) ··· 2241 2208 struct clk_leaf_mux_ctx *ctx = 
test->priv; 2242 2209 2243 2210 clk_hw_unregister(&ctx->hw); 2211 + clk_hw_unregister(&ctx->parent); 2244 2212 clk_hw_unregister(&ctx->mux_ctx.hw); 2245 2213 clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw); 2246 2214 clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw); 2247 2215 } 2248 2216 2217 + struct clk_leaf_mux_set_rate_parent_determine_rate_test_case { 2218 + const char *desc; 2219 + int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req); 2220 + }; 2221 + 2222 + static void 2223 + clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc( 2224 + const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *t, char *desc) 2225 + { 2226 + strcpy(desc, t->desc); 2227 + } 2228 + 2229 + static const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case 2230 + clk_leaf_mux_set_rate_parent_determine_rate_test_cases[] = { 2231 + { 2232 + /* 2233 + * Test that __clk_determine_rate() on the parent that can't 2234 + * change rate doesn't return a clk_rate_request structure with 2235 + * the best_parent_hw pointer pointing to the parent. 2236 + */ 2237 + .desc = "clk_leaf_mux_set_rate_parent__clk_determine_rate_proper_parent", 2238 + .determine_rate_func = __clk_determine_rate, 2239 + }, 2240 + { 2241 + /* 2242 + * Test that __clk_mux_determine_rate() on the parent that 2243 + * can't change rate doesn't return a clk_rate_request 2244 + * structure with the best_parent_hw pointer pointing to 2245 + * the parent. 2246 + */ 2247 + .desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_proper_parent", 2248 + .determine_rate_func = __clk_mux_determine_rate, 2249 + }, 2250 + { 2251 + /* 2252 + * Test that __clk_mux_determine_rate_closest() on the parent 2253 + * that can't change rate doesn't return a clk_rate_request 2254 + * structure with the best_parent_hw pointer pointing to 2255 + * the parent. 
2256 + */ 2257 + .desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_closest_proper_parent", 2258 + .determine_rate_func = __clk_mux_determine_rate_closest, 2259 + }, 2260 + { 2261 + /* 2262 + * Test that clk_hw_determine_rate_no_reparent() on the parent 2263 + * that can't change rate doesn't return a clk_rate_request 2264 + * structure with the best_parent_hw pointer pointing to 2265 + * the parent. 2266 + */ 2267 + .desc = "clk_leaf_mux_set_rate_parent_clk_hw_determine_rate_no_reparent_proper_parent", 2268 + .determine_rate_func = clk_hw_determine_rate_no_reparent, 2269 + }, 2270 + }; 2271 + 2272 + KUNIT_ARRAY_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test, 2273 + clk_leaf_mux_set_rate_parent_determine_rate_test_cases, 2274 + clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc) 2275 + 2249 2276 /* 2250 - * Test that, for a clock that will forward any rate request to its 2251 - * parent, the rate request structure returned by __clk_determine_rate 2252 - * is sane and will be what we expect. 2277 + * Test that when a clk that can't change rate itself calls a function like 2278 + * __clk_determine_rate() on its parent it doesn't get back a clk_rate_request 2279 + * structure that has the best_parent_hw pointer point to the clk_hw passed 2280 + * into the determine rate function. See commit 262ca38f4b6e ("clk: Stop 2281 + * forwarding clk_rate_requests to the parent") for more background. 
2253 2282 */ 2254 - static void clk_leaf_mux_set_rate_parent_determine_rate(struct kunit *test) 2283 + static void clk_leaf_mux_set_rate_parent_determine_rate_test(struct kunit *test) 2255 2284 { 2256 2285 struct clk_leaf_mux_ctx *ctx = test->priv; 2257 2286 struct clk_hw *hw = &ctx->hw; 2258 2287 struct clk *clk = clk_hw_get_clk(hw, NULL); 2259 2288 struct clk_rate_request req; 2260 2289 unsigned long rate; 2261 - int ret; 2290 + const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *test_param; 2262 2291 2292 + test_param = test->param_value; 2293 + ctx->determine_rate_func = test_param->determine_rate_func; 2294 + 2295 + ctx->req = &req; 2263 2296 rate = clk_get_rate(clk); 2264 2297 KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1); 2265 - 2266 - clk_hw_init_rate_request(hw, &req, DUMMY_CLOCK_RATE_2); 2267 - 2268 - ret = __clk_determine_rate(hw, &req); 2269 - KUNIT_ASSERT_EQ(test, ret, 0); 2298 + KUNIT_ASSERT_EQ(test, DUMMY_CLOCK_RATE_2, clk_round_rate(clk, DUMMY_CLOCK_RATE_2)); 2270 2299 2271 2300 KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2); 2272 2301 KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2); ··· 2338 2243 } 2339 2244 2340 2245 static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = { 2341 - KUNIT_CASE(clk_leaf_mux_set_rate_parent_determine_rate), 2246 + KUNIT_CASE_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test, 2247 + clk_leaf_mux_set_rate_parent_determine_rate_test_gen_params), 2342 2248 {} 2343 2249 }; 2344 2250 2345 2251 /* 2346 - * Test suite for a clock whose parent is a mux with multiple parents. 2347 - * The leaf clock has CLK_SET_RATE_PARENT, and will forward rate 2348 - * requests to the mux, which will then select which parent is the best 2349 - * fit for a given rate. 2252 + * Test suite for a clock whose parent is a pass-through clk whose parent is a 2253 + * mux with multiple parents. 
The leaf and pass-through clocks have the 2254 + * CLK_SET_RATE_PARENT flag, and will forward rate requests to the mux, which 2255 + * will then select which parent is the best fit for a given rate. 2350 2256 * 2351 2257 * These tests exercise the behaviour of muxes, and the proper selection 2352 2258 * of parents.
+6 -1
drivers/clk/renesas/Kconfig
··· 37 37 select CLK_R9A07G043 if ARCH_R9A07G043 38 38 select CLK_R9A07G044 if ARCH_R9A07G044 39 39 select CLK_R9A07G054 if ARCH_R9A07G054 40 + select CLK_R9A08G045 if ARCH_R9A08G045 40 41 select CLK_R9A09G011 if ARCH_R9A09G011 41 42 select CLK_SH73A0 if ARCH_SH73A0 42 43 ··· 180 179 bool "RZ/V2L clock support" if COMPILE_TEST 181 180 select CLK_RZG2L 182 181 182 + config CLK_R9A08G045 183 + bool "RZ/G3S clock support" if COMPILE_TEST 184 + select CLK_RZG2L 185 + 183 186 config CLK_R9A09G011 184 187 bool "RZ/V2M clock support" if COMPILE_TEST 185 188 select CLK_RZG2L ··· 220 215 This is a driver for R-Car USB2 clock selector 221 216 222 217 config CLK_RZG2L 223 - bool "Renesas RZ/{G2L,G2UL,V2L} family clock support" if COMPILE_TEST 218 + bool "Renesas RZ/{G2L,G2UL,G3S,V2L} family clock support" if COMPILE_TEST 224 219 select RESET_CONTROLLER 225 220 226 221 # Generic
+1
drivers/clk/renesas/Makefile
··· 34 34 obj-$(CONFIG_CLK_R9A07G043) += r9a07g043-cpg.o 35 35 obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o 36 36 obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o 37 + obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o 37 38 obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o 38 39 obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o 39 40
+2 -2
drivers/clk/renesas/r8a7795-cpg-mssr.c
··· 51 51 MOD_CLK_BASE 52 52 }; 53 53 54 - static struct cpg_core_clk r8a7795_core_clks[] __initdata = { 54 + static const struct cpg_core_clk r8a7795_core_clks[] __initconst = { 55 55 /* External Clock Inputs */ 56 56 DEF_INPUT("extal", CLK_EXTAL), 57 57 DEF_INPUT("extalr", CLK_EXTALR), ··· 128 128 DEF_BASE("r", R8A7795_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT), 129 129 }; 130 130 131 - static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { 131 + static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = { 132 132 DEF_MOD("3dge", 112, R8A7795_CLK_ZG), 133 133 DEF_MOD("fdp1-1", 118, R8A7795_CLK_S0D1), 134 134 DEF_MOD("fdp1-0", 119, R8A7795_CLK_S0D1),
+36 -33
drivers/clk/renesas/r9a06g032-clocks.c
··· 102 102 * @source: the ID+1 of the parent clock element. 103 103 * Root clock uses ID of ~0 (PARENT_ID); 104 104 * @gate: clock enable/disable 105 - * @div_min: smallest permitted clock divider 106 - * @div_max: largest permitted clock divider 107 - * @reg: clock divider register offset, in 32-bit words 108 - * @div_table: optional list of fixed clock divider values; 105 + * @div: substructure for clock divider 106 + * @div.min: smallest permitted clock divider 107 + * @div.max: largest permitted clock divider 108 + * @div.reg: clock divider register offset, in 32-bit words 109 + * @div.table: optional list of fixed clock divider values; 109 110 * must be in ascending order, zero for unused 110 - * @div: divisor for fixed-factor clock 111 - * @mul: multiplier for fixed-factor clock 112 - * @group: UART group, 0=UART0/1/2, 1=UART3/4/5/6/7 113 - * @sel: select either g1/r1 or g2/r2 as clock source 114 - * @g1: 1st source gate (clock enable/disable) 115 - * @r1: 1st source reset (module reset) 116 - * @g2: 2nd source gate (clock enable/disable) 117 - * @r2: 2nd source reset (module reset) 111 + * @ffc: substructure for fixed-factor clocks 112 + * @ffc.div: divisor for fixed-factor clock 113 + * @ffc.mul: multiplier for fixed-factor clock 114 + * @dual: substructure for dual clock gates 115 + * @dual.group: UART group, 0=UART0/1/2, 1=UART3/4/5/6/7 116 + * @dual.sel: select either g1/r1 or g2/r2 as clock source 117 + * @dual.g1: 1st source gate (clock enable/disable) 118 + * @dual.r1: 1st source reset (module reset) 119 + * @dual.g2: 2nd source gate (clock enable/disable) 120 + * @dual.r2: 2nd source reset (module reset) 118 121 * 119 122 * Describes a single element in the clock tree hierarchy. 
120 123 * As there are quite a large number of clock elements, this ··· 134 131 struct r9a06g032_gate gate; 135 132 /* type = K_DIV */ 136 133 struct { 137 - unsigned int div_min:10, div_max:10, reg:10; 138 - u16 div_table[4]; 139 - }; 134 + unsigned int min:10, max:10, reg:10; 135 + u16 table[4]; 136 + } div; 140 137 /* type = K_FFC */ 141 138 struct { 142 139 u16 div, mul; 143 - }; 140 + } ffc; 144 141 /* type = K_DUALGATE */ 145 142 struct { 146 143 uint16_t group:1; ··· 181 178 .type = K_FFC, \ 182 179 .index = R9A06G032_##_idx, \ 183 180 .name = _n, \ 184 - .div = _div, \ 185 - .mul = _mul \ 181 + .ffc.div = _div, \ 182 + .ffc.mul = _mul \ 186 183 } 187 184 #define D_FFC(_idx, _n, _src, _div) { \ 188 185 .type = K_FFC, \ 189 186 .index = R9A06G032_##_idx, \ 190 187 .source = 1 + R9A06G032_##_src, \ 191 188 .name = _n, \ 192 - .div = _div, \ 193 - .mul = 1 \ 189 + .ffc.div = _div, \ 190 + .ffc.mul = 1 \ 194 191 } 195 192 #define D_DIV(_idx, _n, _src, _reg, _min, _max, ...) { \ 196 193 .type = K_DIV, \ 197 194 .index = R9A06G032_##_idx, \ 198 195 .source = 1 + R9A06G032_##_src, \ 199 196 .name = _n, \ 200 - .reg = _reg, \ 201 - .div_min = _min, \ 202 - .div_max = _max, \ 203 - .div_table = { __VA_ARGS__ } \ 197 + .div.reg = _reg, \ 198 + .div.min = _min, \ 199 + .div.max = _max, \ 200 + .div.table = { __VA_ARGS__ } \ 204 201 } 205 202 #define D_UGATE(_idx, _n, _src, _g, _g1, _r1, _g2, _r2) { \ 206 203 .type = K_DUALGATE, \ ··· 1066 1063 1067 1064 div->clocks = clocks; 1068 1065 div->index = desc->index; 1069 - div->reg = desc->reg; 1066 + div->reg = desc->div.reg; 1070 1067 div->hw.init = &init; 1071 - div->min = desc->div_min; 1072 - div->max = desc->div_max; 1068 + div->min = desc->div.min; 1069 + div->max = desc->div.max; 1073 1070 /* populate (optional) divider table fixed values */ 1074 1071 for (i = 0; i < ARRAY_SIZE(div->table) && 1075 - i < ARRAY_SIZE(desc->div_table) && desc->div_table[i]; i++) { 1076 - div->table[div->table_size++] = 
desc->div_table[i]; 1072 + i < ARRAY_SIZE(desc->div.table) && desc->div.table[i]; i++) { 1073 + div->table[div->table_size++] = desc->div.table[i]; 1077 1074 } 1078 1075 1079 1076 clk = clk_register(NULL, &div->hw); ··· 1272 1269 1273 1270 static void __init r9a06g032_init_h2mode(struct r9a06g032_priv *clocks) 1274 1271 { 1275 - struct device_node *usbf_np = NULL; 1272 + struct device_node *usbf_np; 1276 1273 u32 usb; 1277 1274 1278 - while ((usbf_np = of_find_compatible_node(usbf_np, NULL, 1279 - "renesas,rzn1-usbf"))) { 1275 + for_each_compatible_node(usbf_np, NULL, "renesas,rzn1-usbf") { 1280 1276 if (of_device_is_available(usbf_np)) 1281 1277 break; 1282 1278 } ··· 1335 1333 case K_FFC: 1336 1334 clk = clk_register_fixed_factor(NULL, d->name, 1337 1335 parent_name, 0, 1338 - d->mul, d->div); 1336 + d->ffc.mul, 1337 + d->ffc.div); 1339 1338 break; 1340 1339 case K_GATE: 1341 1340 clk = r9a06g032_register_gate(clocks, parent_name, d);
+17 -2
drivers/clk/renesas/r9a07g043-cpg.c
··· 14 14 15 15 #include "rzg2l-cpg.h" 16 16 17 + /* Specific registers. */ 18 + #define CPG_PL2SDHI_DSEL (0x218) 19 + 20 + /* Clock select configuration. */ 21 + #define SEL_SDHI0 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 0, 2) 22 + #define SEL_SDHI1 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 4, 2) 23 + 24 + /* Clock status configuration. */ 25 + #define SEL_SDHI0_STS SEL_PLL_PACK(CPG_CLKSTATUS, 28, 1) 26 + #define SEL_SDHI1_STS SEL_PLL_PACK(CPG_CLKSTATUS, 29, 1) 27 + 17 28 enum clk_ids { 18 29 /* Core Clock Outputs exported to DT */ 19 30 LAST_DT_CORE_CLK = R9A07G043_CLK_P0_DIV2, ··· 89 78 static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" }; 90 79 static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" }; 91 80 81 + static const u32 mtable_sdhi[] = { 1, 2, 3 }; 82 + 92 83 static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = { 93 84 /* External Clock Inputs */ 94 85 DEF_INPUT("extal", CLK_EXTAL), ··· 136 123 DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2), 137 124 DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2), 138 125 DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4), 139 - DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, sel_shdi), 140 - DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, sel_shdi), 126 + DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_shdi, 127 + mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), 128 + DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, SEL_SDHI0_STS, sel_shdi, 129 + mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), 141 130 DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G043_CLK_SD0, 1, 4), 142 131 DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G043_CLK_SD1, 1, 4), 143 132 };
+17 -2
drivers/clk/renesas/r9a07g044-cpg.c
··· 15 15 16 16 #include "rzg2l-cpg.h" 17 17 18 + /* Specific registers. */ 19 + #define CPG_PL2SDHI_DSEL (0x218) 20 + 21 + /* Clock select configuration. */ 22 + #define SEL_SDHI0 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 0, 2) 23 + #define SEL_SDHI1 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 4, 2) 24 + 25 + /* Clock status configuration. */ 26 + #define SEL_SDHI0_STS SEL_PLL_PACK(CPG_CLKSTATUS, 28, 1) 27 + #define SEL_SDHI1_STS SEL_PLL_PACK(CPG_CLKSTATUS, 29, 1) 28 + 18 29 enum clk_ids { 19 30 /* Core Clock Outputs exported to DT */ 20 31 LAST_DT_CORE_CLK = R9A07G054_CLK_DRP_A, ··· 109 98 static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" }; 110 99 static const char * const sel_gpu2[] = { ".pll6", ".pll3_div2_2" }; 111 100 101 + static const u32 mtable_sdhi[] = { 1, 2, 3 }; 102 + 112 103 static const struct { 113 104 struct cpg_core_clk common[56]; 114 105 #ifdef CONFIG_CLK_R9A07G054 ··· 176 163 DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2, sel_pll6_2), 177 164 DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2), 178 165 DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4), 179 - DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, sel_shdi), 180 - DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, sel_shdi), 166 + DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_shdi, 167 + mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), 168 + DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, SEL_SDHI0_STS, sel_shdi, 169 + mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), 181 170 DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4), 182 171 DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4), 183 172 DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8),
+248
drivers/clk/renesas/r9a08g045-cpg.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * RZ/G3S CPG driver 4 + * 5 + * Copyright (C) 2023 Renesas Electronics Corp. 6 + */ 7 + 8 + #include <linux/clk-provider.h> 9 + #include <linux/device.h> 10 + #include <linux/init.h> 11 + #include <linux/kernel.h> 12 + 13 + #include <dt-bindings/clock/r9a08g045-cpg.h> 14 + 15 + #include "rzg2l-cpg.h" 16 + 17 + /* RZ/G3S Specific registers. */ 18 + #define G3S_CPG_PL2_DDIV (0x204) 19 + #define G3S_CPG_SDHI_DDIV (0x218) 20 + #define G3S_CPG_PLL_DSEL (0x240) 21 + #define G3S_CPG_SDHI_DSEL (0x244) 22 + #define G3S_CLKDIVSTATUS (0x280) 23 + #define G3S_CLKSELSTATUS (0x284) 24 + 25 + /* RZ/G3S Specific division configuration. */ 26 + #define G3S_DIVPL2B DDIV_PACK(G3S_CPG_PL2_DDIV, 4, 3) 27 + #define G3S_DIV_SDHI0 DDIV_PACK(G3S_CPG_SDHI_DDIV, 0, 1) 28 + #define G3S_DIV_SDHI1 DDIV_PACK(G3S_CPG_SDHI_DDIV, 4, 1) 29 + #define G3S_DIV_SDHI2 DDIV_PACK(G3S_CPG_SDHI_DDIV, 8, 1) 30 + 31 + /* RZ/G3S Clock status configuration. */ 32 + #define G3S_DIVPL1A_STS DDIV_PACK(G3S_CLKDIVSTATUS, 0, 1) 33 + #define G3S_DIVPL2B_STS DDIV_PACK(G3S_CLKDIVSTATUS, 5, 1) 34 + #define G3S_DIVPL3A_STS DDIV_PACK(G3S_CLKDIVSTATUS, 8, 1) 35 + #define G3S_DIVPL3B_STS DDIV_PACK(G3S_CLKDIVSTATUS, 9, 1) 36 + #define G3S_DIVPL3C_STS DDIV_PACK(G3S_CLKDIVSTATUS, 10, 1) 37 + #define G3S_DIV_SDHI0_STS DDIV_PACK(G3S_CLKDIVSTATUS, 24, 1) 38 + #define G3S_DIV_SDHI1_STS DDIV_PACK(G3S_CLKDIVSTATUS, 25, 1) 39 + #define G3S_DIV_SDHI2_STS DDIV_PACK(G3S_CLKDIVSTATUS, 26, 1) 40 + 41 + #define G3S_SEL_PLL4_STS SEL_PLL_PACK(G3S_CLKSELSTATUS, 6, 1) 42 + #define G3S_SEL_SDHI0_STS SEL_PLL_PACK(G3S_CLKSELSTATUS, 16, 1) 43 + #define G3S_SEL_SDHI1_STS SEL_PLL_PACK(G3S_CLKSELSTATUS, 17, 1) 44 + #define G3S_SEL_SDHI2_STS SEL_PLL_PACK(G3S_CLKSELSTATUS, 18, 1) 45 + 46 + /* RZ/G3S Specific clocks select. 
*/ 47 + #define G3S_SEL_PLL4 SEL_PLL_PACK(G3S_CPG_PLL_DSEL, 6, 1) 48 + #define G3S_SEL_SDHI0 SEL_PLL_PACK(G3S_CPG_SDHI_DSEL, 0, 2) 49 + #define G3S_SEL_SDHI1 SEL_PLL_PACK(G3S_CPG_SDHI_DSEL, 4, 2) 50 + #define G3S_SEL_SDHI2 SEL_PLL_PACK(G3S_CPG_SDHI_DSEL, 8, 2) 51 + 52 + /* PLL 1/4/6 configuration registers macro. */ 53 + #define G3S_PLL146_CONF(clk1, clk2) ((clk1) << 22 | (clk2) << 12) 54 + 55 + #define DEF_G3S_MUX(_name, _id, _conf, _parent_names, _mux_flags, _clk_flags) \ 56 + DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = (_conf), \ 57 + .parent_names = (_parent_names), \ 58 + .num_parents = ARRAY_SIZE((_parent_names)), \ 59 + .mux_flags = CLK_MUX_HIWORD_MASK | (_mux_flags), \ 60 + .flag = (_clk_flags)) 61 + 62 + enum clk_ids { 63 + /* Core Clock Outputs exported to DT */ 64 + LAST_DT_CORE_CLK = R9A08G045_SWD, 65 + 66 + /* External Input Clocks */ 67 + CLK_EXTAL, 68 + 69 + /* Internal Core Clocks */ 70 + CLK_OSC_DIV1000, 71 + CLK_PLL1, 72 + CLK_PLL2, 73 + CLK_PLL2_DIV2, 74 + CLK_PLL2_DIV2_8, 75 + CLK_PLL2_DIV6, 76 + CLK_PLL3, 77 + CLK_PLL3_DIV2, 78 + CLK_PLL3_DIV2_4, 79 + CLK_PLL3_DIV2_8, 80 + CLK_PLL3_DIV6, 81 + CLK_PLL4, 82 + CLK_PLL6, 83 + CLK_PLL6_DIV2, 84 + CLK_SEL_SDHI0, 85 + CLK_SEL_SDHI1, 86 + CLK_SEL_SDHI2, 87 + CLK_SEL_PLL4, 88 + CLK_P1_DIV2, 89 + CLK_P3_DIV2, 90 + CLK_SD0_DIV4, 91 + CLK_SD1_DIV4, 92 + CLK_SD2_DIV4, 93 + 94 + /* Module Clocks */ 95 + MOD_CLK_BASE, 96 + }; 97 + 98 + /* Divider tables */ 99 + static const struct clk_div_table dtable_1_2[] = { 100 + { 0, 1 }, 101 + { 1, 2 }, 102 + { 0, 0 }, 103 + }; 104 + 105 + static const struct clk_div_table dtable_1_8[] = { 106 + { 0, 1 }, 107 + { 1, 2 }, 108 + { 2, 4 }, 109 + { 3, 8 }, 110 + { 0, 0 }, 111 + }; 112 + 113 + static const struct clk_div_table dtable_1_32[] = { 114 + { 0, 1 }, 115 + { 1, 2 }, 116 + { 2, 4 }, 117 + { 3, 8 }, 118 + { 4, 32 }, 119 + { 0, 0 }, 120 + }; 121 + 122 + /* Mux clock names tables. 
*/ 123 + static const char * const sel_sdhi[] = { ".pll2_div2", ".pll6", ".pll2_div6" }; 124 + static const char * const sel_pll4[] = { ".osc_div1000", ".pll4" }; 125 + 126 + /* Mux clock indices tables. */ 127 + static const u32 mtable_sd[] = { 0, 2, 3 }; 128 + static const u32 mtable_pll4[] = { 0, 1 }; 129 + 130 + static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = { 131 + /* External Clock Inputs */ 132 + DEF_INPUT("extal", CLK_EXTAL), 133 + 134 + /* Internal Core Clocks */ 135 + DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000), 136 + DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8)), 137 + DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3), 138 + DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3), 139 + DEF_FIXED(".pll4", CLK_PLL4, CLK_EXTAL, 100, 3), 140 + DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6), 141 + DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 1, 2), 142 + DEF_FIXED(".pll2_div2_8", CLK_PLL2_DIV2_8, CLK_PLL2_DIV2, 1, 8), 143 + DEF_FIXED(".pll2_div6", CLK_PLL2_DIV6, CLK_PLL2, 1, 6), 144 + DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2), 145 + DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4), 146 + DEF_FIXED(".pll3_div2_8", CLK_PLL3_DIV2_8, CLK_PLL3_DIV2, 1, 8), 147 + DEF_FIXED(".pll3_div6", CLK_PLL3_DIV6, CLK_PLL3, 1, 6), 148 + DEF_FIXED(".pll6_div2", CLK_PLL6_DIV2, CLK_PLL6, 1, 2), 149 + DEF_SD_MUX(".sel_sd0", CLK_SEL_SDHI0, G3S_SEL_SDHI0, G3S_SEL_SDHI0_STS, sel_sdhi, 150 + mtable_sd, 0, NULL), 151 + DEF_SD_MUX(".sel_sd1", CLK_SEL_SDHI1, G3S_SEL_SDHI1, G3S_SEL_SDHI1_STS, sel_sdhi, 152 + mtable_sd, 0, NULL), 153 + DEF_SD_MUX(".sel_sd2", CLK_SEL_SDHI2, G3S_SEL_SDHI2, G3S_SEL_SDHI2_STS, sel_sdhi, 154 + mtable_sd, 0, NULL), 155 + DEF_SD_MUX(".sel_pll4", CLK_SEL_PLL4, G3S_SEL_PLL4, G3S_SEL_PLL4_STS, sel_pll4, 156 + mtable_pll4, CLK_SET_PARENT_GATE, NULL), 157 + 158 + /* Core output clk */ 159 + DEF_G3S_DIV("I", R9A08G045_CLK_I, CLK_PLL1, DIVPL1A, G3S_DIVPL1A_STS, dtable_1_8, 160 + 0, 0, 
0, NULL), 161 + DEF_G3S_DIV("P0", R9A08G045_CLK_P0, CLK_PLL2_DIV2_8, G3S_DIVPL2B, G3S_DIVPL2B_STS, 162 + dtable_1_32, 0, 0, 0, NULL), 163 + DEF_G3S_DIV("SD0", R9A08G045_CLK_SD0, CLK_SEL_SDHI0, G3S_DIV_SDHI0, G3S_DIV_SDHI0_STS, 164 + dtable_1_2, 800000000UL, 500000000UL, CLK_SET_RATE_PARENT, 165 + rzg3s_cpg_div_clk_notifier), 166 + DEF_G3S_DIV("SD1", R9A08G045_CLK_SD1, CLK_SEL_SDHI1, G3S_DIV_SDHI1, G3S_DIV_SDHI1_STS, 167 + dtable_1_2, 800000000UL, 500000000UL, CLK_SET_RATE_PARENT, 168 + rzg3s_cpg_div_clk_notifier), 169 + DEF_G3S_DIV("SD2", R9A08G045_CLK_SD2, CLK_SEL_SDHI2, G3S_DIV_SDHI2, G3S_DIV_SDHI2_STS, 170 + dtable_1_2, 800000000UL, 500000000UL, CLK_SET_RATE_PARENT, 171 + rzg3s_cpg_div_clk_notifier), 172 + DEF_FIXED(".sd0_div4", CLK_SD0_DIV4, R9A08G045_CLK_SD0, 1, 4), 173 + DEF_FIXED(".sd1_div4", CLK_SD1_DIV4, R9A08G045_CLK_SD1, 1, 4), 174 + DEF_FIXED(".sd2_div4", CLK_SD2_DIV4, R9A08G045_CLK_SD2, 1, 4), 175 + DEF_FIXED("M0", R9A08G045_CLK_M0, CLK_PLL3_DIV2_4, 1, 1), 176 + DEF_G3S_DIV("P1", R9A08G045_CLK_P1, CLK_PLL3_DIV2_4, DIVPL3A, G3S_DIVPL3A_STS, 177 + dtable_1_32, 0, 0, 0, NULL), 178 + DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A08G045_CLK_P1, 1, 2), 179 + DEF_G3S_DIV("P2", R9A08G045_CLK_P2, CLK_PLL3_DIV2_8, DIVPL3B, G3S_DIVPL3B_STS, 180 + dtable_1_32, 0, 0, 0, NULL), 181 + DEF_G3S_DIV("P3", R9A08G045_CLK_P3, CLK_PLL3_DIV2_4, DIVPL3C, G3S_DIVPL3C_STS, 182 + dtable_1_32, 0, 0, 0, NULL), 183 + DEF_FIXED("P3_DIV2", CLK_P3_DIV2, R9A08G045_CLK_P3, 1, 2), 184 + DEF_FIXED("S0", R9A08G045_CLK_S0, CLK_SEL_PLL4, 1, 2), 185 + DEF_FIXED("OSC", R9A08G045_OSCCLK, CLK_EXTAL, 1, 1), 186 + DEF_FIXED("OSC2", R9A08G045_OSCCLK2, CLK_EXTAL, 1, 3), 187 + }; 188 + 189 + static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = { 190 + DEF_MOD("gic_gicclk", R9A08G045_GIC600_GICCLK, R9A08G045_CLK_P1, 0x514, 0), 191 + DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1), 192 + DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0), 193 + DEF_MOD("sdhi0_imclk", 
R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0), 194 + DEF_MOD("sdhi0_imclk2", R9A08G045_SDHI0_IMCLK2, CLK_SD0_DIV4, 0x554, 1), 195 + DEF_MOD("sdhi0_clk_hs", R9A08G045_SDHI0_CLK_HS, R9A08G045_CLK_SD0, 0x554, 2), 196 + DEF_MOD("sdhi0_aclk", R9A08G045_SDHI0_ACLK, R9A08G045_CLK_P1, 0x554, 3), 197 + DEF_MOD("sdhi1_imclk", R9A08G045_SDHI1_IMCLK, CLK_SD1_DIV4, 0x554, 4), 198 + DEF_MOD("sdhi1_imclk2", R9A08G045_SDHI1_IMCLK2, CLK_SD1_DIV4, 0x554, 5), 199 + DEF_MOD("sdhi1_clk_hs", R9A08G045_SDHI1_CLK_HS, R9A08G045_CLK_SD1, 0x554, 6), 200 + DEF_MOD("sdhi1_aclk", R9A08G045_SDHI1_ACLK, R9A08G045_CLK_P1, 0x554, 7), 201 + DEF_MOD("sdhi2_imclk", R9A08G045_SDHI2_IMCLK, CLK_SD2_DIV4, 0x554, 8), 202 + DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9), 203 + DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10), 204 + DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11), 205 + DEF_MOD("scif0_clk_pck", R9A08G045_SCIF0_CLK_PCK, R9A08G045_CLK_P0, 0x584, 0), 206 + DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0), 207 + }; 208 + 209 + static const struct rzg2l_reset r9a08g045_resets[] = { 210 + DEF_RST(R9A08G045_GIC600_GICRESET_N, 0x814, 0), 211 + DEF_RST(R9A08G045_GIC600_DBG_GICRESET_N, 0x814, 1), 212 + DEF_RST(R9A08G045_SDHI0_IXRST, 0x854, 0), 213 + DEF_RST(R9A08G045_SDHI1_IXRST, 0x854, 1), 214 + DEF_RST(R9A08G045_SDHI2_IXRST, 0x854, 2), 215 + DEF_RST(R9A08G045_SCIF0_RST_SYSTEM_N, 0x884, 0), 216 + DEF_RST(R9A08G045_GPIO_RSTN, 0x898, 0), 217 + DEF_RST(R9A08G045_GPIO_PORT_RESETN, 0x898, 1), 218 + DEF_RST(R9A08G045_GPIO_SPARE_RESETN, 0x898, 2), 219 + }; 220 + 221 + static const unsigned int r9a08g045_crit_mod_clks[] __initconst = { 222 + MOD_CLK_BASE + R9A08G045_GIC600_GICCLK, 223 + MOD_CLK_BASE + R9A08G045_IA55_CLK, 224 + MOD_CLK_BASE + R9A08G045_DMAC_ACLK, 225 + }; 226 + 227 + const struct rzg2l_cpg_info r9a08g045_cpg_info = { 228 + /* Core Clocks */ 229 + .core_clks = r9a08g045_core_clks, 230 + 
.num_core_clks = ARRAY_SIZE(r9a08g045_core_clks), 231 + .last_dt_core_clk = LAST_DT_CORE_CLK, 232 + .num_total_core_clks = MOD_CLK_BASE, 233 + 234 + /* Critical Module Clocks */ 235 + .crit_mod_clks = r9a08g045_crit_mod_clks, 236 + .num_crit_mod_clks = ARRAY_SIZE(r9a08g045_crit_mod_clks), 237 + 238 + /* Module Clocks */ 239 + .mod_clks = r9a08g045_mod_clks, 240 + .num_mod_clks = ARRAY_SIZE(r9a08g045_mod_clks), 241 + .num_hw_mod_clks = R9A08G045_VBAT_BCLK + 1, 242 + 243 + /* Resets */ 244 + .resets = r9a08g045_resets, 245 + .num_resets = R9A08G045_VBAT_BRESETN + 1, /* Last reset ID + 1 */ 246 + 247 + .has_clk_mon_regs = true, 248 + };
+14 -1
drivers/clk/renesas/rcar-cpg-lib.c
··· 70 70 #define STPnHCK BIT(9 - SDnSRCFC_SHIFT) 71 71 72 72 static const struct clk_div_table cpg_sdh_div_table[] = { 73 + /* 74 + * These values are recommended by the datasheet. Because they come 75 + * first, Linux will only use these. 76 + */ 73 77 { 0, 1 }, { 1, 2 }, { STPnHCK | 2, 4 }, { STPnHCK | 3, 8 }, 74 - { STPnHCK | 4, 16 }, { 0, 0 }, 78 + { STPnHCK | 4, 16 }, 79 + /* 80 + * These values are not recommended because STPnHCK is wrong. But they 81 + * have been seen because of broken firmware. So, we support reading 82 + * them but Linux will sanitize them when initializing through 83 + * recalc_rate. 84 + */ 85 + { STPnHCK | 0, 1 }, { STPnHCK | 1, 2 }, { 2, 4 }, { 3, 8 }, { 4, 16 }, 86 + /* Sentinel */ 87 + { 0, 0 } 75 88 }; 76 89 77 90 struct clk * __init cpg_sdh_clk_register(const char *name,
+388 -77
drivers/clk/renesas/rzg2l-cpg.c
··· 11 11 * Copyright (C) 2015 Renesas Electronics Corp. 12 12 */ 13 13 14 + #include <linux/bitfield.h> 14 15 #include <linux/clk.h> 15 16 #include <linux/clk-provider.h> 16 17 #include <linux/clk/renesas.h> ··· 39 38 #define WARN_DEBUG(x) do { } while (0) 40 39 #endif 41 40 42 - #define DIV_RSMASK(v, s, m) ((v >> s) & m) 43 41 #define GET_SHIFT(val) ((val >> 12) & 0xff) 44 42 #define GET_WIDTH(val) ((val >> 8) & 0xf) 45 43 46 - #define KDIV(val) DIV_RSMASK(val, 16, 0xffff) 47 - #define MDIV(val) DIV_RSMASK(val, 6, 0x3ff) 48 - #define PDIV(val) DIV_RSMASK(val, 0, 0x3f) 49 - #define SDIV(val) DIV_RSMASK(val, 0, 0x7) 44 + #define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), val)) 45 + #define MDIV(val) FIELD_GET(GENMASK(15, 6), val) 46 + #define PDIV(val) FIELD_GET(GENMASK(5, 0), val) 47 + #define SDIV(val) FIELD_GET(GENMASK(2, 0), val) 48 + 49 + #define RZG3S_DIV_P GENMASK(28, 26) 50 + #define RZG3S_DIV_M GENMASK(25, 22) 51 + #define RZG3S_DIV_NI GENMASK(21, 13) 52 + #define RZG3S_DIV_NF GENMASK(12, 1) 50 53 51 54 #define CLK_ON_R(reg) (reg) 52 55 #define CLK_MON_R(reg) (0x180 + (reg)) ··· 61 56 #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff) 62 57 #define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff) 63 58 59 + #define CPG_WEN_BIT BIT(16) 60 + 64 61 #define MAX_VCLK_FREQ (148500000) 65 62 66 - struct sd_hw_data { 63 + /** 64 + * struct clk_hw_data - clock hardware data 65 + * @hw: clock hw 66 + * @conf: clock configuration (register offset, shift, width) 67 + * @sconf: clock status configuration (register offset, shift, width) 68 + * @priv: CPG private data structure 69 + */ 70 + struct clk_hw_data { 67 71 struct clk_hw hw; 68 72 u32 conf; 73 + u32 sconf; 69 74 struct rzg2l_cpg_priv *priv; 70 75 }; 71 76 72 - #define to_sd_hw_data(_hw) container_of(_hw, struct sd_hw_data, hw) 77 + #define to_clk_hw_data(_hw) container_of(_hw, struct clk_hw_data, hw) 78 + 79 + /** 80 + * struct sd_mux_hw_data - SD MUX clock hardware data 81 + * @hw_data: clock hw data 82 + * 
@mtable: clock mux table 83 + */ 84 + struct sd_mux_hw_data { 85 + struct clk_hw_data hw_data; 86 + const u32 *mtable; 87 + }; 88 + 89 + #define to_sd_mux_hw_data(_hw) container_of(_hw, struct sd_mux_hw_data, hw_data) 90 + 91 + /** 92 + * struct div_hw_data - divider clock hardware data 93 + * @hw_data: clock hw data 94 + * @dtable: pointer to divider table 95 + * @invalid_rate: invalid rate for divider 96 + * @max_rate: maximum rate for divider 97 + * @width: divider width 98 + */ 99 + struct div_hw_data { 100 + struct clk_hw_data hw_data; 101 + const struct clk_div_table *dtable; 102 + unsigned long invalid_rate; 103 + unsigned long max_rate; 104 + u32 width; 105 + }; 106 + 107 + #define to_div_hw_data(_hw) container_of(_hw, struct div_hw_data, hw_data) 73 108 74 109 struct rzg2l_pll5_param { 75 110 u32 pl5_fracin; ··· 164 119 static void rzg2l_cpg_del_clk_provider(void *data) 165 120 { 166 121 of_clk_del_provider(data); 122 + } 123 + 124 + /* Must be called in atomic context. */ 125 + static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf) 126 + { 127 + u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf); 128 + u32 off = GET_REG_OFFSET(conf); 129 + u32 val; 130 + 131 + return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200); 132 + } 133 + 134 + int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event, 135 + void *data) 136 + { 137 + struct clk_notifier_data *cnd = data; 138 + struct clk_hw *hw = __clk_get_hw(cnd->clk); 139 + struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); 140 + struct rzg2l_cpg_priv *priv = clk_hw_data->priv; 141 + u32 off = GET_REG_OFFSET(clk_hw_data->conf); 142 + u32 shift = GET_SHIFT(clk_hw_data->conf); 143 + const u32 clk_src_266 = 3; 144 + unsigned long flags; 145 + int ret; 146 + 147 + if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266)) 148 + return NOTIFY_DONE; 149 + 150 + spin_lock_irqsave(&priv->rmw_lock, flags); 151 + 152 + /* 153 + * As per the 
HW manual, we should not directly switch from 533 MHz to 154 + * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz) 155 + * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first, 156 + * and then switch to the target setting (2’b01 (533 MHz) or 2’b10 157 + * (400 MHz)). 158 + * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock 159 + * switching register is prohibited. 160 + * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and 161 + * the index to value mapping is done by adding 1 to the index. 162 + */ 163 + 164 + writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off); 165 + 166 + /* Wait for the update done. */ 167 + ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf); 168 + 169 + spin_unlock_irqrestore(&priv->rmw_lock, flags); 170 + 171 + if (ret) 172 + dev_err(priv->dev, "failed to switch to safe clk source\n"); 173 + 174 + return notifier_from_errno(ret); 175 + } 176 + 177 + int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event, 178 + void *data) 179 + { 180 + struct clk_notifier_data *cnd = data; 181 + struct clk_hw *hw = __clk_get_hw(cnd->clk); 182 + struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); 183 + struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data); 184 + struct rzg2l_cpg_priv *priv = clk_hw_data->priv; 185 + u32 off = GET_REG_OFFSET(clk_hw_data->conf); 186 + u32 shift = GET_SHIFT(clk_hw_data->conf); 187 + unsigned long flags; 188 + int ret = 0; 189 + u32 val; 190 + 191 + if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate || 192 + div_hw_data->invalid_rate % cnd->new_rate) 193 + return NOTIFY_DONE; 194 + 195 + spin_lock_irqsave(&priv->rmw_lock, flags); 196 + 197 + val = readl(priv->base + off); 198 + val >>= shift; 199 + val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0); 200 + 201 + /* 202 + * There are different constraints for the user of this notifiers as follows: 203 + * 1/ SD div cannot be 1 (val == 0) if parent 
rate is 800MHz 204 + * 2/ OCTA / SPI div cannot be 1 (val == 0) if parent rate is 400MHz 205 + * As SD can have only one parent having 800MHz and OCTA div can have 206 + * only one parent having 400MHz we took into account the parent rate 207 + * at the beginning of function (by checking invalid_rate % new_rate). 208 + * Now it is time to check the hardware divider and update it accordingly. 209 + */ 210 + if (!val) { 211 + writel((CPG_WEN_BIT | 1) << shift, priv->base + off); 212 + /* Wait for the update done. */ 213 + ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf); 214 + } 215 + 216 + spin_unlock_irqrestore(&priv->rmw_lock, flags); 217 + 218 + if (ret) 219 + dev_err(priv->dev, "Failed to downgrade the div\n"); 220 + 221 + return notifier_from_errno(ret); 222 + } 223 + 224 + static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core, 225 + struct rzg2l_cpg_priv *priv) 226 + { 227 + struct notifier_block *nb; 228 + 229 + if (!core->notifier) 230 + return 0; 231 + 232 + nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL); 233 + if (!nb) 234 + return -ENOMEM; 235 + 236 + nb->notifier_call = core->notifier; 237 + 238 + return clk_notifier_register(hw->clk, nb); 239 + } 240 + 241 + static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw, 242 + unsigned long parent_rate) 243 + { 244 + struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); 245 + struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data); 246 + struct rzg2l_cpg_priv *priv = clk_hw_data->priv; 247 + u32 val; 248 + 249 + val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf)); 250 + val >>= GET_SHIFT(clk_hw_data->conf); 251 + val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0); 252 + 253 + return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable, 254 + CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width); 255 + } 256 + 257 + static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 258 + { 259 + struct 
clk_hw_data *clk_hw_data = to_clk_hw_data(hw); 260 + struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data); 261 + 262 + if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate) 263 + req->rate = div_hw_data->max_rate; 264 + 265 + return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width, 266 + CLK_DIVIDER_ROUND_CLOSEST); 267 + } 268 + 269 + static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate, 270 + unsigned long parent_rate) 271 + { 272 + struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); 273 + struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data); 274 + struct rzg2l_cpg_priv *priv = clk_hw_data->priv; 275 + u32 off = GET_REG_OFFSET(clk_hw_data->conf); 276 + u32 shift = GET_SHIFT(clk_hw_data->conf); 277 + unsigned long flags; 278 + u32 val; 279 + int ret; 280 + 281 + val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width, 282 + CLK_DIVIDER_ROUND_CLOSEST); 283 + 284 + spin_lock_irqsave(&priv->rmw_lock, flags); 285 + writel((CPG_WEN_BIT | val) << shift, priv->base + off); 286 + /* Wait for the update done. 
*/ 287 + ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf); 288 + spin_unlock_irqrestore(&priv->rmw_lock, flags); 289 + 290 + return ret; 291 + } 292 + 293 + static const struct clk_ops rzg3s_div_clk_ops = { 294 + .recalc_rate = rzg3s_div_clk_recalc_rate, 295 + .determine_rate = rzg3s_div_clk_determine_rate, 296 + .set_rate = rzg3s_div_clk_set_rate, 297 + }; 298 + 299 + static struct clk * __init 300 + rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks, 301 + void __iomem *base, struct rzg2l_cpg_priv *priv) 302 + { 303 + struct div_hw_data *div_hw_data; 304 + struct clk_init_data init = {}; 305 + const struct clk_div_table *clkt; 306 + struct clk_hw *clk_hw; 307 + const struct clk *parent; 308 + const char *parent_name; 309 + u32 max = 0; 310 + int ret; 311 + 312 + parent = clks[core->parent & 0xffff]; 313 + if (IS_ERR(parent)) 314 + return ERR_CAST(parent); 315 + 316 + parent_name = __clk_get_name(parent); 317 + 318 + div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL); 319 + if (!div_hw_data) 320 + return ERR_PTR(-ENOMEM); 321 + 322 + init.name = core->name; 323 + init.flags = core->flag; 324 + init.ops = &rzg3s_div_clk_ops; 325 + init.parent_names = &parent_name; 326 + init.num_parents = 1; 327 + 328 + /* Get the maximum divider to retrieve div width. 
*/ 329 + for (clkt = core->dtable; clkt->div; clkt++) { 330 + if (max < clkt->div) 331 + max = clkt->div; 332 + } 333 + 334 + div_hw_data->hw_data.priv = priv; 335 + div_hw_data->hw_data.conf = core->conf; 336 + div_hw_data->hw_data.sconf = core->sconf; 337 + div_hw_data->dtable = core->dtable; 338 + div_hw_data->invalid_rate = core->invalid_rate; 339 + div_hw_data->max_rate = core->max_rate; 340 + div_hw_data->width = fls(max) - 1; 341 + 342 + clk_hw = &div_hw_data->hw_data.hw; 343 + clk_hw->init = &init; 344 + 345 + ret = devm_clk_hw_register(priv->dev, clk_hw); 346 + if (ret) 347 + return ERR_PTR(ret); 348 + 349 + ret = rzg2l_register_notifier(clk_hw, core, priv); 350 + if (ret) { 351 + dev_err(priv->dev, "Failed to register notifier for %s\n", 352 + core->name); 353 + return ERR_PTR(ret); 354 + } 355 + 356 + return clk_hw->clk; 167 357 } 168 358 169 359 static struct clk * __init ··· 463 183 464 184 static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index) 465 185 { 466 - struct sd_hw_data *hwdata = to_sd_hw_data(hw); 467 - struct rzg2l_cpg_priv *priv = hwdata->priv; 468 - u32 off = GET_REG_OFFSET(hwdata->conf); 469 - u32 shift = GET_SHIFT(hwdata->conf); 470 - const u32 clk_src_266 = 2; 471 - u32 bitmask; 186 + struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); 187 + struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data); 188 + struct rzg2l_cpg_priv *priv = clk_hw_data->priv; 189 + u32 off = GET_REG_OFFSET(clk_hw_data->conf); 190 + u32 shift = GET_SHIFT(clk_hw_data->conf); 191 + unsigned long flags; 192 + u32 val; 193 + int ret; 472 194 473 - /* 474 - * As per the HW manual, we should not directly switch from 533 MHz to 475 - * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz) 476 - * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first, 477 - * and then switch to the target setting (2’b01 (533 MHz) or 2’b10 478 - * (400 MHz)). 
479 - * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock 480 - * switching register is prohibited. 481 - * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and 482 - * the index to value mapping is done by adding 1 to the index. 483 - */ 484 - bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16; 485 - if (index != clk_src_266) { 486 - u32 msk, val; 487 - int ret; 195 + val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index); 488 196 489 - writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off); 197 + spin_lock_irqsave(&priv->rmw_lock, flags); 490 198 491 - msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS; 199 + writel((CPG_WEN_BIT | val) << shift, priv->base + off); 492 200 493 - ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val, 494 - !(val & msk), 100, 495 - CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US); 496 - if (ret) { 497 - dev_err(priv->dev, "failed to switch clk source\n"); 498 - return ret; 499 - } 500 - } 201 + /* Wait for the update done. 
*/ 202 + ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf); 501 203 502 - writel(bitmask | ((index + 1) << shift), priv->base + off); 204 + spin_unlock_irqrestore(&priv->rmw_lock, flags); 503 205 504 - return 0; 206 + if (ret) 207 + dev_err(priv->dev, "Failed to switch parent\n"); 208 + 209 + return ret; 505 210 } 506 211 507 212 static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw) 508 213 { 509 - struct sd_hw_data *hwdata = to_sd_hw_data(hw); 510 - struct rzg2l_cpg_priv *priv = hwdata->priv; 511 - u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf)); 214 + struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); 215 + struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data); 216 + struct rzg2l_cpg_priv *priv = clk_hw_data->priv; 217 + u32 val; 512 218 513 - val >>= GET_SHIFT(hwdata->conf); 514 - val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0); 515 - if (val) { 516 - val--; 517 - } else { 518 - /* Prohibited clk source, change it to 533 MHz(reset value) */ 519 - rzg2l_cpg_sd_clk_mux_set_parent(hw, 0); 520 - } 219 + val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf)); 220 + val >>= GET_SHIFT(clk_hw_data->conf); 221 + val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0); 521 222 522 - return val; 223 + return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val); 523 224 } 524 225 525 226 static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = { ··· 514 253 void __iomem *base, 515 254 struct rzg2l_cpg_priv *priv) 516 255 { 517 - struct sd_hw_data *clk_hw_data; 256 + struct sd_mux_hw_data *sd_mux_hw_data; 518 257 struct clk_init_data init; 519 258 struct clk_hw *clk_hw; 520 259 int ret; 521 260 522 - clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL); 523 - if (!clk_hw_data) 261 + sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL); 262 + if (!sd_mux_hw_data) 524 263 return ERR_PTR(-ENOMEM); 525 264 526 - clk_hw_data->priv = priv; 527 - 
clk_hw_data->conf = core->conf; 265 + sd_mux_hw_data->hw_data.priv = priv; 266 + sd_mux_hw_data->hw_data.conf = core->conf; 267 + sd_mux_hw_data->hw_data.sconf = core->sconf; 268 + sd_mux_hw_data->mtable = core->mtable; 528 269 529 - init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0"; 270 + init.name = core->name; 530 271 init.ops = &rzg2l_cpg_sd_clk_mux_ops; 531 - init.flags = 0; 272 + init.flags = core->flag; 532 273 init.num_parents = core->num_parents; 533 274 init.parent_names = core->parent_names; 534 275 535 - clk_hw = &clk_hw_data->hw; 276 + clk_hw = &sd_mux_hw_data->hw_data.hw; 536 277 clk_hw->init = &init; 537 278 538 279 ret = devm_clk_hw_register(priv->dev, clk_hw); 539 280 if (ret) 540 281 return ERR_PTR(ret); 282 + 283 + ret = rzg2l_register_notifier(clk_hw, core, priv); 284 + if (ret) { 285 + dev_err(priv->dev, "Failed to register notifier for %s\n", 286 + core->name); 287 + return ERR_PTR(ret); 288 + } 541 289 542 290 return clk_hw->clk; 543 291 } ··· 965 695 struct pll_clk *pll_clk = to_pll(hw); 966 696 struct rzg2l_cpg_priv *priv = pll_clk->priv; 967 697 unsigned int val1, val2; 968 - unsigned int mult = 1; 969 - unsigned int div = 1; 698 + u64 rate; 970 699 971 700 if (pll_clk->type != CLK_TYPE_SAM_PLL) 972 701 return parent_rate; 973 702 974 703 val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf)); 975 704 val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf)); 976 - mult = MDIV(val1) + KDIV(val1) / 65536; 977 - div = PDIV(val1) << SDIV(val2); 978 705 979 - return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div); 706 + rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1), 707 + 16 + SDIV(val2)); 708 + 709 + return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1)); 980 710 } 981 711 982 712 static const struct clk_ops rzg2l_cpg_pll_ops = { 983 713 .recalc_rate = rzg2l_cpg_pll_clk_recalc_rate, 984 714 }; 985 715 716 + static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw, 717 + unsigned long parent_rate) 718 + 
{ 719 + struct pll_clk *pll_clk = to_pll(hw); 720 + struct rzg2l_cpg_priv *priv = pll_clk->priv; 721 + u32 nir, nfr, mr, pr, val; 722 + u64 rate; 723 + 724 + if (pll_clk->type != CLK_TYPE_G3S_PLL) 725 + return parent_rate; 726 + 727 + val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf)); 728 + 729 + pr = 1 << FIELD_GET(RZG3S_DIV_P, val); 730 + /* Hardware interprets values higher than 8 as p = 16. */ 731 + if (pr > 8) 732 + pr = 16; 733 + 734 + mr = FIELD_GET(RZG3S_DIV_M, val) + 1; 735 + nir = FIELD_GET(RZG3S_DIV_NI, val) + 1; 736 + nfr = FIELD_GET(RZG3S_DIV_NF, val); 737 + 738 + rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12); 739 + 740 + return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr)); 741 + } 742 + 743 + static const struct clk_ops rzg3s_cpg_pll_ops = { 744 + .recalc_rate = rzg3s_cpg_pll_clk_recalc_rate, 745 + }; 746 + 986 747 static struct clk * __init 987 748 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core, 988 749 struct clk **clks, 989 750 void __iomem *base, 990 - struct rzg2l_cpg_priv *priv) 751 + struct rzg2l_cpg_priv *priv, 752 + const struct clk_ops *ops) 991 753 { 992 754 struct device *dev = priv->dev; 993 755 const struct clk *parent; ··· 1037 735 1038 736 parent_name = __clk_get_name(parent); 1039 737 init.name = core->name; 1040 - init.ops = &rzg2l_cpg_pll_ops; 738 + init.ops = ops; 1041 739 init.flags = 0; 1042 740 init.parent_names = &parent_name; 1043 741 init.num_parents = 1; ··· 1132 830 core->mult, div); 1133 831 break; 1134 832 case CLK_TYPE_SAM_PLL: 1135 - clk = rzg2l_cpg_pll_clk_register(core, priv->clks, 1136 - priv->base, priv); 833 + clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv, 834 + &rzg2l_cpg_pll_ops); 835 + break; 836 + case CLK_TYPE_G3S_PLL: 837 + clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv, 838 + &rzg3s_cpg_pll_ops); 1137 839 break; 1138 840 case CLK_TYPE_SIPLL5: 1139 841 clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv); ··· 1145 839 case 
CLK_TYPE_DIV: 1146 840 clk = rzg2l_cpg_div_clk_register(core, priv->clks, 1147 841 priv->base, priv); 842 + break; 843 + case CLK_TYPE_G3S_DIV: 844 + clk = rzg3s_cpg_div_clk_register(core, priv->clks, priv->base, priv); 1148 845 break; 1149 846 case CLK_TYPE_MUX: 1150 847 clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv); ··· 1204 895 struct rzg2l_cpg_priv *priv = clock->priv; 1205 896 unsigned int reg = clock->off; 1206 897 struct device *dev = priv->dev; 1207 - unsigned long flags; 1208 898 u32 bitmask = BIT(clock->bit); 1209 899 u32 value; 1210 900 int error; ··· 1213 905 return 0; 1214 906 } 1215 907 1216 - dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk, 908 + dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk, 1217 909 enable ? "ON" : "OFF"); 1218 - spin_lock_irqsave(&priv->rmw_lock, flags); 1219 910 911 + value = bitmask << 16; 1220 912 if (enable) 1221 - value = (bitmask << 16) | bitmask; 1222 - else 1223 - value = bitmask << 16; 1224 - writel(value, priv->base + CLK_ON_R(reg)); 913 + value |= bitmask; 1225 914 1226 - spin_unlock_irqrestore(&priv->rmw_lock, flags); 915 + writel(value, priv->base + CLK_ON_R(reg)); 1227 916 1228 917 if (!enable) 1229 918 return 0; ··· 1704 1399 { 1705 1400 .compatible = "renesas,r9a07g054-cpg", 1706 1401 .data = &r9a07g054_cpg_info, 1402 + }, 1403 + #endif 1404 + #ifdef CONFIG_CLK_R9A08G045 1405 + { 1406 + .compatible = "renesas,r9a08g045-cpg", 1407 + .data = &r9a08g045_cpg_info, 1707 1408 }, 1708 1409 #endif 1709 1410 #ifdef CONFIG_CLK_R9A09G011
+28 -11
drivers/clk/renesas/rzg2l-cpg.h
··· 9 9 #ifndef __RENESAS_RZG2L_CPG_H__ 10 10 #define __RENESAS_RZG2L_CPG_H__ 11 11 12 + #include <linux/notifier.h> 13 + 12 14 #define CPG_SIPLL5_STBY (0x140) 13 15 #define CPG_SIPLL5_CLK1 (0x144) 14 16 #define CPG_SIPLL5_CLK3 (0x14C) ··· 21 19 #define CPG_PL2_DDIV (0x204) 22 20 #define CPG_PL3A_DDIV (0x208) 23 21 #define CPG_PL6_DDIV (0x210) 24 - #define CPG_PL2SDHI_DSEL (0x218) 25 22 #define CPG_CLKSTATUS (0x280) 26 23 #define CPG_PL3_SSEL (0x408) 27 24 #define CPG_PL6_SSEL (0x414) ··· 43 42 44 43 #define CPG_CLKSTATUS_SELSDHI0_STS BIT(28) 45 44 #define CPG_CLKSTATUS_SELSDHI1_STS BIT(29) 46 - 47 - #define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 20000 48 45 49 46 /* n = 0/1/2 for PLL1/4/6 */ 50 47 #define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n)) ··· 68 69 #define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1) 69 70 #define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1) 70 71 71 - #define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2) 72 - #define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2) 73 - 74 72 #define EXTAL_FREQ_IN_MEGA_HZ (24) 75 73 76 74 /** ··· 86 90 unsigned int mult; 87 91 unsigned int type; 88 92 unsigned int conf; 93 + unsigned int sconf; 89 94 const struct clk_div_table *dtable; 95 + const u32 *mtable; 96 + const unsigned long invalid_rate; 97 + const unsigned long max_rate; 90 98 const char * const *parent_names; 91 - int flag; 92 - int mux_flags; 99 + notifier_fn_t notifier; 100 + u32 flag; 101 + u32 mux_flags; 93 102 int num_parents; 94 103 }; 95 104 ··· 103 102 CLK_TYPE_IN, /* External Clock Input */ 104 103 CLK_TYPE_FF, /* Fixed Factor Clock */ 105 104 CLK_TYPE_SAM_PLL, 105 + CLK_TYPE_G3S_PLL, 106 106 107 107 /* Clock with divider */ 108 108 CLK_TYPE_DIV, 109 + CLK_TYPE_G3S_DIV, 109 110 110 111 /* Clock with clock source selector */ 111 112 CLK_TYPE_MUX, ··· 132 129 DEF_TYPE(_name, _id, _type, .parent = _parent) 133 130 #define DEF_SAMPLL(_name, _id, _parent, _conf) \ 134 131 DEF_TYPE(_name, _id, CLK_TYPE_SAM_PLL, .parent = _parent, .conf = _conf) 132 + 
#define DEF_G3S_PLL(_name, _id, _parent, _conf) \ 133 + DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf) 135 134 #define DEF_INPUT(_name, _id) \ 136 135 DEF_TYPE(_name, _id, CLK_TYPE_IN) 137 136 #define DEF_FIXED(_name, _id, _parent, _mult, _div) \ ··· 146 141 DEF_TYPE(_name, _id, CLK_TYPE_DIV, .conf = _conf, \ 147 142 .parent = _parent, .dtable = _dtable, \ 148 143 .flag = CLK_DIVIDER_READ_ONLY) 144 + #define DEF_G3S_DIV(_name, _id, _parent, _conf, _sconf, _dtable, _invalid_rate, \ 145 + _max_rate, _clk_flags, _notif) \ 146 + DEF_TYPE(_name, _id, CLK_TYPE_G3S_DIV, .conf = _conf, .sconf = _sconf, \ 147 + .parent = _parent, .dtable = _dtable, \ 148 + .invalid_rate = _invalid_rate, \ 149 + .max_rate = _max_rate, .flag = (_clk_flags), \ 150 + .notifier = _notif) 149 151 #define DEF_MUX(_name, _id, _conf, _parent_names) \ 150 152 DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = _conf, \ 151 153 .parent_names = _parent_names, \ ··· 163 151 .parent_names = _parent_names, \ 164 152 .num_parents = ARRAY_SIZE(_parent_names), \ 165 153 .mux_flags = CLK_MUX_READ_ONLY) 166 - #define DEF_SD_MUX(_name, _id, _conf, _parent_names) \ 167 - DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, \ 154 + #define DEF_SD_MUX(_name, _id, _conf, _sconf, _parent_names, _mtable, _clk_flags, _notifier) \ 155 + DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, .sconf = _sconf, \ 168 156 .parent_names = _parent_names, \ 169 - .num_parents = ARRAY_SIZE(_parent_names)) 157 + .num_parents = ARRAY_SIZE(_parent_names), \ 158 + .mtable = _mtable, .flag = _clk_flags, .notifier = _notifier) 170 159 #define DEF_PLL5_FOUTPOSTDIV(_name, _id, _parent) \ 171 160 DEF_TYPE(_name, _id, CLK_TYPE_SIPLL5, .parent = _parent) 172 161 #define DEF_PLL5_4_MUX(_name, _id, _conf, _parent_names) \ ··· 284 271 extern const struct rzg2l_cpg_info r9a07g043_cpg_info; 285 272 extern const struct rzg2l_cpg_info r9a07g044_cpg_info; 286 273 extern const struct rzg2l_cpg_info r9a07g054_cpg_info; 274 + extern 
const struct rzg2l_cpg_info r9a08g045_cpg_info; 287 275 extern const struct rzg2l_cpg_info r9a09g011_cpg_info; 276 + 277 + int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event, void *data); 278 + int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event, void *data); 288 279 289 280 #endif
+242
include/dt-bindings/clock/r9a08g045-cpg.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + * 3 + * Copyright (C) 2023 Renesas Electronics Corp. 4 + */ 5 + #ifndef __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ 6 + #define __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ 7 + 8 + #include <dt-bindings/clock/renesas-cpg-mssr.h> 9 + 10 + /* R9A08G045 CPG Core Clocks */ 11 + #define R9A08G045_CLK_I 0 12 + #define R9A08G045_CLK_I2 1 13 + #define R9A08G045_CLK_I3 2 14 + #define R9A08G045_CLK_S0 3 15 + #define R9A08G045_CLK_SPI0 4 16 + #define R9A08G045_CLK_SPI1 5 17 + #define R9A08G045_CLK_SD0 6 18 + #define R9A08G045_CLK_SD1 7 19 + #define R9A08G045_CLK_SD2 8 20 + #define R9A08G045_CLK_M0 9 21 + #define R9A08G045_CLK_HP 10 22 + #define R9A08G045_CLK_TSU 11 23 + #define R9A08G045_CLK_ZT 12 24 + #define R9A08G045_CLK_P0 13 25 + #define R9A08G045_CLK_P1 14 26 + #define R9A08G045_CLK_P2 15 27 + #define R9A08G045_CLK_P3 16 28 + #define R9A08G045_CLK_P4 17 29 + #define R9A08G045_CLK_P5 18 30 + #define R9A08G045_CLK_AT 19 31 + #define R9A08G045_CLK_OC0 20 32 + #define R9A08G045_CLK_OC1 21 33 + #define R9A08G045_OSCCLK 22 34 + #define R9A08G045_OSCCLK2 23 35 + #define R9A08G045_SWD 24 36 + 37 + /* R9A08G045 Module Clocks */ 38 + #define R9A08G045_OCTA_ACLK 0 39 + #define R9A08G045_OCTA_MCLK 1 40 + #define R9A08G045_CA55_SCLK 2 41 + #define R9A08G045_CA55_PCLK 3 42 + #define R9A08G045_CA55_ATCLK 4 43 + #define R9A08G045_CA55_GICCLK 5 44 + #define R9A08G045_CA55_PERICLK 6 45 + #define R9A08G045_CA55_ACLK 7 46 + #define R9A08G045_CA55_TSCLK 8 47 + #define R9A08G045_SRAM_ACPU_ACLK0 9 48 + #define R9A08G045_SRAM_ACPU_ACLK1 10 49 + #define R9A08G045_SRAM_ACPU_ACLK2 11 50 + #define R9A08G045_GIC600_GICCLK 12 51 + #define R9A08G045_IA55_CLK 13 52 + #define R9A08G045_IA55_PCLK 14 53 + #define R9A08G045_MHU_PCLK 15 54 + #define R9A08G045_SYC_CNT_CLK 16 55 + #define R9A08G045_DMAC_ACLK 17 56 + #define R9A08G045_DMAC_PCLK 18 57 + #define R9A08G045_OSTM0_PCLK 19 58 + #define R9A08G045_OSTM1_PCLK 20 59 + #define 
R9A08G045_OSTM2_PCLK 21 60 + #define R9A08G045_OSTM3_PCLK 22 61 + #define R9A08G045_OSTM4_PCLK 23 62 + #define R9A08G045_OSTM5_PCLK 24 63 + #define R9A08G045_OSTM6_PCLK 25 64 + #define R9A08G045_OSTM7_PCLK 26 65 + #define R9A08G045_MTU_X_MCK_MTU3 27 66 + #define R9A08G045_POE3_CLKM_POE 28 67 + #define R9A08G045_GPT_PCLK 29 68 + #define R9A08G045_POEG_A_CLKP 30 69 + #define R9A08G045_POEG_B_CLKP 31 70 + #define R9A08G045_POEG_C_CLKP 32 71 + #define R9A08G045_POEG_D_CLKP 33 72 + #define R9A08G045_WDT0_PCLK 34 73 + #define R9A08G045_WDT0_CLK 35 74 + #define R9A08G045_WDT1_PCLK 36 75 + #define R9A08G045_WDT1_CLK 37 76 + #define R9A08G045_WDT2_PCLK 38 77 + #define R9A08G045_WDT2_CLK 39 78 + #define R9A08G045_SPI_HCLK 40 79 + #define R9A08G045_SPI_ACLK 41 80 + #define R9A08G045_SPI_CLK 42 81 + #define R9A08G045_SPI_CLKX2 43 82 + #define R9A08G045_SDHI0_IMCLK 44 83 + #define R9A08G045_SDHI0_IMCLK2 45 84 + #define R9A08G045_SDHI0_CLK_HS 46 85 + #define R9A08G045_SDHI0_ACLK 47 86 + #define R9A08G045_SDHI1_IMCLK 48 87 + #define R9A08G045_SDHI1_IMCLK2 49 88 + #define R9A08G045_SDHI1_CLK_HS 50 89 + #define R9A08G045_SDHI1_ACLK 51 90 + #define R9A08G045_SDHI2_IMCLK 52 91 + #define R9A08G045_SDHI2_IMCLK2 53 92 + #define R9A08G045_SDHI2_CLK_HS 54 93 + #define R9A08G045_SDHI2_ACLK 55 94 + #define R9A08G045_SSI0_PCLK2 56 95 + #define R9A08G045_SSI0_PCLK_SFR 57 96 + #define R9A08G045_SSI1_PCLK2 58 97 + #define R9A08G045_SSI1_PCLK_SFR 59 98 + #define R9A08G045_SSI2_PCLK2 60 99 + #define R9A08G045_SSI2_PCLK_SFR 61 100 + #define R9A08G045_SSI3_PCLK2 62 101 + #define R9A08G045_SSI3_PCLK_SFR 63 102 + #define R9A08G045_SRC_CLKP 64 103 + #define R9A08G045_USB_U2H0_HCLK 65 104 + #define R9A08G045_USB_U2H1_HCLK 66 105 + #define R9A08G045_USB_U2P_EXR_CPUCLK 67 106 + #define R9A08G045_USB_PCLK 68 107 + #define R9A08G045_ETH0_CLK_AXI 69 108 + #define R9A08G045_ETH0_CLK_CHI 70 109 + #define R9A08G045_ETH0_REFCLK 71 110 + #define R9A08G045_ETH1_CLK_AXI 72 111 + #define R9A08G045_ETH1_CLK_CHI 73 
112 + #define R9A08G045_ETH1_REFCLK 74 113 + #define R9A08G045_I2C0_PCLK 75 114 + #define R9A08G045_I2C1_PCLK 76 115 + #define R9A08G045_I2C2_PCLK 77 116 + #define R9A08G045_I2C3_PCLK 78 117 + #define R9A08G045_SCIF0_CLK_PCK 79 118 + #define R9A08G045_SCIF1_CLK_PCK 80 119 + #define R9A08G045_SCIF2_CLK_PCK 81 120 + #define R9A08G045_SCIF3_CLK_PCK 82 121 + #define R9A08G045_SCIF4_CLK_PCK 83 122 + #define R9A08G045_SCIF5_CLK_PCK 84 123 + #define R9A08G045_SCI0_CLKP 85 124 + #define R9A08G045_SCI1_CLKP 86 125 + #define R9A08G045_IRDA_CLKP 87 126 + #define R9A08G045_RSPI0_CLKB 88 127 + #define R9A08G045_RSPI1_CLKB 89 128 + #define R9A08G045_RSPI2_CLKB 90 129 + #define R9A08G045_RSPI3_CLKB 91 130 + #define R9A08G045_RSPI4_CLKB 92 131 + #define R9A08G045_CANFD_PCLK 93 132 + #define R9A08G045_CANFD_CLK_RAM 94 133 + #define R9A08G045_GPIO_HCLK 95 134 + #define R9A08G045_ADC_ADCLK 96 135 + #define R9A08G045_ADC_PCLK 97 136 + #define R9A08G045_TSU_PCLK 98 137 + #define R9A08G045_PDM_PCLK 99 138 + #define R9A08G045_PDM_CCLK 100 139 + #define R9A08G045_PCI_ACLK 101 140 + #define R9A08G045_PCI_CLKL1PM 102 141 + #define R9A08G045_SPDIF_PCLK 103 142 + #define R9A08G045_I3C_PCLK 104 143 + #define R9A08G045_I3C_TCLK 105 144 + #define R9A08G045_VBAT_BCLK 106 145 + 146 + /* R9A08G045 Resets */ 147 + #define R9A08G045_CA55_RST_1_0 0 148 + #define R9A08G045_CA55_RST_3_0 1 149 + #define R9A08G045_CA55_RST_4 2 150 + #define R9A08G045_CA55_RST_5 3 151 + #define R9A08G045_CA55_RST_6 4 152 + #define R9A08G045_CA55_RST_7 5 153 + #define R9A08G045_CA55_RST_8 6 154 + #define R9A08G045_CA55_RST_9 7 155 + #define R9A08G045_CA55_RST_10 8 156 + #define R9A08G045_CA55_RST_11 9 157 + #define R9A08G045_CA55_RST_12 10 158 + #define R9A08G045_SRAM_ACPU_ARESETN0 11 159 + #define R9A08G045_SRAM_ACPU_ARESETN1 12 160 + #define R9A08G045_SRAM_ACPU_ARESETN2 13 161 + #define R9A08G045_GIC600_GICRESET_N 14 162 + #define R9A08G045_GIC600_DBG_GICRESET_N 15 163 + #define R9A08G045_IA55_RESETN 16 164 + #define 
R9A08G045_MHU_RESETN 17 165 + #define R9A08G045_DMAC_ARESETN 18 166 + #define R9A08G045_DMAC_RST_ASYNC 19 167 + #define R9A08G045_SYC_RESETN 20 168 + #define R9A08G045_OSTM0_PRESETZ 21 169 + #define R9A08G045_OSTM1_PRESETZ 22 170 + #define R9A08G045_OSTM2_PRESETZ 23 171 + #define R9A08G045_OSTM3_PRESETZ 24 172 + #define R9A08G045_OSTM4_PRESETZ 25 173 + #define R9A08G045_OSTM5_PRESETZ 26 174 + #define R9A08G045_OSTM6_PRESETZ 27 175 + #define R9A08G045_OSTM7_PRESETZ 28 176 + #define R9A08G045_MTU_X_PRESET_MTU3 29 177 + #define R9A08G045_POE3_RST_M_REG 30 178 + #define R9A08G045_GPT_RST_C 31 179 + #define R9A08G045_POEG_A_RST 32 180 + #define R9A08G045_POEG_B_RST 33 181 + #define R9A08G045_POEG_C_RST 34 182 + #define R9A08G045_POEG_D_RST 35 183 + #define R9A08G045_WDT0_PRESETN 36 184 + #define R9A08G045_WDT1_PRESETN 37 185 + #define R9A08G045_WDT2_PRESETN 38 186 + #define R9A08G045_SPI_HRESETN 39 187 + #define R9A08G045_SPI_ARESETN 40 188 + #define R9A08G045_SDHI0_IXRST 41 189 + #define R9A08G045_SDHI1_IXRST 42 190 + #define R9A08G045_SDHI2_IXRST 43 191 + #define R9A08G045_SSI0_RST_M2_REG 44 192 + #define R9A08G045_SSI1_RST_M2_REG 45 193 + #define R9A08G045_SSI2_RST_M2_REG 46 194 + #define R9A08G045_SSI3_RST_M2_REG 47 195 + #define R9A08G045_SRC_RST 48 196 + #define R9A08G045_USB_U2H0_HRESETN 49 197 + #define R9A08G045_USB_U2H1_HRESETN 50 198 + #define R9A08G045_USB_U2P_EXL_SYSRST 51 199 + #define R9A08G045_USB_PRESETN 52 200 + #define R9A08G045_ETH0_RST_HW_N 53 201 + #define R9A08G045_ETH1_RST_HW_N 54 202 + #define R9A08G045_I2C0_MRST 55 203 + #define R9A08G045_I2C1_MRST 56 204 + #define R9A08G045_I2C2_MRST 57 205 + #define R9A08G045_I2C3_MRST 58 206 + #define R9A08G045_SCIF0_RST_SYSTEM_N 59 207 + #define R9A08G045_SCIF1_RST_SYSTEM_N 60 208 + #define R9A08G045_SCIF2_RST_SYSTEM_N 61 209 + #define R9A08G045_SCIF3_RST_SYSTEM_N 62 210 + #define R9A08G045_SCIF4_RST_SYSTEM_N 63 211 + #define R9A08G045_SCIF5_RST_SYSTEM_N 64 212 + #define R9A08G045_SCI0_RST 65 213 + #define 
R9A08G045_SCI1_RST 66 214 + #define R9A08G045_IRDA_RST 67 215 + #define R9A08G045_RSPI0_RST 68 216 + #define R9A08G045_RSPI1_RST 69 217 + #define R9A08G045_RSPI2_RST 70 218 + #define R9A08G045_RSPI3_RST 71 219 + #define R9A08G045_RSPI4_RST 72 220 + #define R9A08G045_CANFD_RSTP_N 73 221 + #define R9A08G045_CANFD_RSTC_N 74 222 + #define R9A08G045_GPIO_RSTN 75 223 + #define R9A08G045_GPIO_PORT_RESETN 76 224 + #define R9A08G045_GPIO_SPARE_RESETN 77 225 + #define R9A08G045_ADC_PRESETN 78 226 + #define R9A08G045_ADC_ADRST_N 79 227 + #define R9A08G045_TSU_PRESETN 80 228 + #define R9A08G045_OCTA_ARESETN 81 229 + #define R9A08G045_PDM0_PRESETNT 82 230 + #define R9A08G045_PCI_ARESETN 83 231 + #define R9A08G045_PCI_RST_B 84 232 + #define R9A08G045_PCI_RST_GP_B 85 233 + #define R9A08G045_PCI_RST_PS_B 86 234 + #define R9A08G045_PCI_RST_RSM_B 87 235 + #define R9A08G045_PCI_RST_CFG_B 88 236 + #define R9A08G045_PCI_RST_LOAD_B 89 237 + #define R9A08G045_SPDIF_RST 90 238 + #define R9A08G045_I3C_TRESETN 91 239 + #define R9A08G045_I3C_PRESETN 92 240 + #define R9A08G045_VBAT_BRESETN 93 241 + 242 + #endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */