Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'clk-leak', 'clk-rockchip', 'clk-renesas' and 'clk-at91' into clk-next

- Clock power management for new SAMA7G5 SoC
- Updates to the master clock driver and sam9x60-pll driver to enable use
of the cpufreq-dt driver and to avoid overclocking the CPU and MCK0 domains
while changing the frequency via DVFS
- Power management refinement using save_context()/restore_context()
callbacks in each clock driver so that they are used only for Backup mode

* clk-leak:
clk: mvebu: ap-cpu-clk: Fix a memory leak in error handling paths

* clk-rockchip:
clk: rockchip: use module_platform_driver_probe
clk: rockchip: rk3399: expose PCLK_COREDBG_{B,L}
clk: rockchip: rk3399: make CPU clocks critical

* clk-renesas:
clk: renesas: r8a779[56]x: Add MLP clocks
clk: renesas: r9a07g044: Add SDHI clock and reset entries
clk: renesas: rzg2l: Add SDHI clk mux support
clk: renesas: r8a779a0: Add RPC support
clk: renesas: cpg-lib: Move RPC clock registration to the library
clk: renesas: r9a07g044: Add clock and reset entries for SPI Multi I/O Bus Controller
clk: renesas: r8a779a0: Add Z0 and Z1 clock support
clk: renesas: r9a07g044: Add GbEthernet clock/reset
clk: renesas: rzg2l: Add support to handle coupled clocks
clk: renesas: r9a07g044: Add ethernet clock sources
clk: renesas: rzg2l: Add support to handle MUX clocks
clk: renesas: r8a779a0: Add TPU clock
clk: renesas: rzg2l: Fix clk status function
clk: renesas: r9a07g044: Mark IA55_CLK and DMAC_ACLK critical

* clk-at91:
clk: at91: sama7g5: set low limit for mck0 at 32KHz
clk: at91: sama7g5: remove prescaler part of master clock
clk: at91: clk-master: add notifier for divider
clk: at91: clk-sam9x60-pll: add notifier for div part of PLL
clk: at91: clk-master: fix prescaler logic
clk: at91: clk-master: mask mckr against layout->mask
clk: at91: clk-master: check if div or pres is zero
clk: at91: sam9x60-pll: use DIV_ROUND_CLOSEST_ULL
clk: at91: pmc: add sama7g5 to the list of available pmcs
clk: at91: clk-master: improve readability by using local variables
clk: at91: clk-master: add register definition for sama7g5's master clock
clk: at91: sama7g5: add securam's peripheral clock
clk: at91: pmc: execute suspend/resume only for backup mode
clk: at91: re-factor clocks suspend/resume
clk: at91: check pmc node status before registering syscore ops

+1553 -422
+1 -1
drivers/clk/at91/at91rm9200.c
··· 152 152 "masterck_pres", 153 153 &at91rm9200_master_layout, 154 154 &rm9200_mck_characteristics, 155 - &rm9200_mck_lock, CLK_SET_RATE_GATE); 155 + &rm9200_mck_lock, CLK_SET_RATE_GATE, 0); 156 156 if (IS_ERR(hw)) 157 157 goto err_free; 158 158
+1 -1
drivers/clk/at91/at91sam9260.c
··· 429 429 &at91rm9200_master_layout, 430 430 data->mck_characteristics, 431 431 &at91sam9260_mck_lock, 432 - CLK_SET_RATE_GATE); 432 + CLK_SET_RATE_GATE, 0); 433 433 if (IS_ERR(hw)) 434 434 goto err_free; 435 435
+1 -1
drivers/clk/at91/at91sam9g45.c
··· 164 164 &at91rm9200_master_layout, 165 165 &mck_characteristics, 166 166 &at91sam9g45_mck_lock, 167 - CLK_SET_RATE_GATE); 167 + CLK_SET_RATE_GATE, 0); 168 168 if (IS_ERR(hw)) 169 169 goto err_free; 170 170
+1 -1
drivers/clk/at91/at91sam9n12.c
··· 191 191 &at91sam9x5_master_layout, 192 192 &mck_characteristics, 193 193 &at91sam9n12_mck_lock, 194 - CLK_SET_RATE_GATE); 194 + CLK_SET_RATE_GATE, 0); 195 195 if (IS_ERR(hw)) 196 196 goto err_free; 197 197
+1 -1
drivers/clk/at91/at91sam9rl.c
··· 132 132 "masterck_pres", 133 133 &at91rm9200_master_layout, 134 134 &sam9rl_mck_characteristics, 135 - &sam9rl_mck_lock, CLK_SET_RATE_GATE); 135 + &sam9rl_mck_lock, CLK_SET_RATE_GATE, 0); 136 136 if (IS_ERR(hw)) 137 137 goto err_free; 138 138
+1 -1
drivers/clk/at91/at91sam9x5.c
··· 210 210 "masterck_pres", 211 211 &at91sam9x5_master_layout, 212 212 &mck_characteristics, &mck_lock, 213 - CLK_SET_RATE_GATE); 213 + CLK_SET_RATE_GATE, 0); 214 214 if (IS_ERR(hw)) 215 215 goto err_free; 216 216
+37 -9
drivers/clk/at91/clk-generated.c
··· 27 27 u32 id; 28 28 u32 gckdiv; 29 29 const struct clk_pcr_layout *layout; 30 + struct at91_clk_pms pms; 30 31 u8 parent_id; 31 32 int chg_pid; 32 33 }; ··· 35 34 #define to_clk_generated(hw) \ 36 35 container_of(hw, struct clk_generated, hw) 37 36 38 - static int clk_generated_enable(struct clk_hw *hw) 37 + static int clk_generated_set(struct clk_generated *gck, int status) 39 38 { 40 - struct clk_generated *gck = to_clk_generated(hw); 41 39 unsigned long flags; 42 - 43 - pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n", 44 - __func__, gck->gckdiv, gck->parent_id); 40 + unsigned int enable = status ? AT91_PMC_PCR_GCKEN : 0; 45 41 46 42 spin_lock_irqsave(gck->lock, flags); 47 43 regmap_write(gck->regmap, gck->layout->offset, 48 44 (gck->id & gck->layout->pid_mask)); 49 45 regmap_update_bits(gck->regmap, gck->layout->offset, 50 46 AT91_PMC_PCR_GCKDIV_MASK | gck->layout->gckcss_mask | 51 - gck->layout->cmd | AT91_PMC_PCR_GCKEN, 47 + gck->layout->cmd | enable, 52 48 field_prep(gck->layout->gckcss_mask, gck->parent_id) | 53 49 gck->layout->cmd | 54 50 FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gck->gckdiv) | 55 - AT91_PMC_PCR_GCKEN); 51 + enable); 56 52 spin_unlock_irqrestore(gck->lock, flags); 53 + 54 + return 0; 55 + } 56 + 57 + static int clk_generated_enable(struct clk_hw *hw) 58 + { 59 + struct clk_generated *gck = to_clk_generated(hw); 60 + 61 + pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n", 62 + __func__, gck->gckdiv, gck->parent_id); 63 + 64 + clk_generated_set(gck, 1); 65 + 57 66 return 0; 58 67 } 59 68 ··· 256 245 return 0; 257 246 } 258 247 248 + static int clk_generated_save_context(struct clk_hw *hw) 249 + { 250 + struct clk_generated *gck = to_clk_generated(hw); 251 + 252 + gck->pms.status = clk_generated_is_enabled(&gck->hw); 253 + 254 + return 0; 255 + } 256 + 257 + static void clk_generated_restore_context(struct clk_hw *hw) 258 + { 259 + struct clk_generated *gck = to_clk_generated(hw); 260 + 261 + if (gck->pms.status) 262 + 
clk_generated_set(gck, gck->pms.status); 263 + } 264 + 259 265 static const struct clk_ops generated_ops = { 260 266 .enable = clk_generated_enable, 261 267 .disable = clk_generated_disable, ··· 282 254 .get_parent = clk_generated_get_parent, 283 255 .set_parent = clk_generated_set_parent, 284 256 .set_rate = clk_generated_set_rate, 257 + .save_context = clk_generated_save_context, 258 + .restore_context = clk_generated_restore_context, 285 259 }; 286 260 287 261 /** ··· 350 320 if (ret) { 351 321 kfree(gck); 352 322 hw = ERR_PTR(ret); 353 - } else { 354 - pmc_register_id(id); 355 323 } 356 324 357 325 return hw;
+66
drivers/clk/at91/clk-main.c
··· 28 28 struct clk_main_osc { 29 29 struct clk_hw hw; 30 30 struct regmap *regmap; 31 + struct at91_clk_pms pms; 31 32 }; 32 33 33 34 #define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw) ··· 38 37 struct regmap *regmap; 39 38 unsigned long frequency; 40 39 unsigned long accuracy; 40 + struct at91_clk_pms pms; 41 41 }; 42 42 43 43 #define to_clk_main_rc_osc(hw) container_of(hw, struct clk_main_rc_osc, hw) ··· 53 51 struct clk_sam9x5_main { 54 52 struct clk_hw hw; 55 53 struct regmap *regmap; 54 + struct at91_clk_pms pms; 56 55 u8 parent; 57 56 }; 58 57 ··· 123 120 return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp); 124 121 } 125 122 123 + static int clk_main_osc_save_context(struct clk_hw *hw) 124 + { 125 + struct clk_main_osc *osc = to_clk_main_osc(hw); 126 + 127 + osc->pms.status = clk_main_osc_is_prepared(hw); 128 + 129 + return 0; 130 + } 131 + 132 + static void clk_main_osc_restore_context(struct clk_hw *hw) 133 + { 134 + struct clk_main_osc *osc = to_clk_main_osc(hw); 135 + 136 + if (osc->pms.status) 137 + clk_main_osc_prepare(hw); 138 + } 139 + 126 140 static const struct clk_ops main_osc_ops = { 127 141 .prepare = clk_main_osc_prepare, 128 142 .unprepare = clk_main_osc_unprepare, 129 143 .is_prepared = clk_main_osc_is_prepared, 144 + .save_context = clk_main_osc_save_context, 145 + .restore_context = clk_main_osc_restore_context, 130 146 }; 131 147 132 148 struct clk_hw * __init ··· 262 240 return osc->accuracy; 263 241 } 264 242 243 + static int clk_main_rc_osc_save_context(struct clk_hw *hw) 244 + { 245 + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); 246 + 247 + osc->pms.status = clk_main_rc_osc_is_prepared(hw); 248 + 249 + return 0; 250 + } 251 + 252 + static void clk_main_rc_osc_restore_context(struct clk_hw *hw) 253 + { 254 + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); 255 + 256 + if (osc->pms.status) 257 + clk_main_rc_osc_prepare(hw); 258 + } 259 + 265 260 static const struct clk_ops main_rc_osc_ops = { 
266 261 .prepare = clk_main_rc_osc_prepare, 267 262 .unprepare = clk_main_rc_osc_unprepare, 268 263 .is_prepared = clk_main_rc_osc_is_prepared, 269 264 .recalc_rate = clk_main_rc_osc_recalc_rate, 270 265 .recalc_accuracy = clk_main_rc_osc_recalc_accuracy, 266 + .save_context = clk_main_rc_osc_save_context, 267 + .restore_context = clk_main_rc_osc_restore_context, 271 268 }; 272 269 273 270 struct clk_hw * __init ··· 506 465 return clk_main_parent_select(status); 507 466 } 508 467 468 + static int clk_sam9x5_main_save_context(struct clk_hw *hw) 469 + { 470 + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); 471 + 472 + clkmain->pms.status = clk_main_rc_osc_is_prepared(&clkmain->hw); 473 + clkmain->pms.parent = clk_sam9x5_main_get_parent(&clkmain->hw); 474 + 475 + return 0; 476 + } 477 + 478 + static void clk_sam9x5_main_restore_context(struct clk_hw *hw) 479 + { 480 + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); 481 + int ret; 482 + 483 + ret = clk_sam9x5_main_set_parent(hw, clkmain->pms.parent); 484 + if (ret) 485 + return; 486 + 487 + if (clkmain->pms.status) 488 + clk_sam9x5_main_prepare(hw); 489 + } 490 + 509 491 static const struct clk_ops sam9x5_main_ops = { 510 492 .prepare = clk_sam9x5_main_prepare, 511 493 .is_prepared = clk_sam9x5_main_is_prepared, 512 494 .recalc_rate = clk_sam9x5_main_recalc_rate, 513 495 .set_parent = clk_sam9x5_main_set_parent, 514 496 .get_parent = clk_sam9x5_main_get_parent, 497 + .save_context = clk_sam9x5_main_save_context, 498 + .restore_context = clk_sam9x5_main_restore_context, 515 499 }; 516 500 517 501 struct clk_hw * __init
+377 -104
drivers/clk/at91/clk-master.c
··· 5 5 6 6 #include <linux/clk-provider.h> 7 7 #include <linux/clkdev.h> 8 + #include <linux/clk.h> 8 9 #include <linux/clk/at91_pmc.h> 9 10 #include <linux/of.h> 10 11 #include <linux/mfd/syscon.h> ··· 18 17 #define MASTER_DIV_SHIFT 8 19 18 #define MASTER_DIV_MASK 0x7 20 19 21 - #define PMC_MCR 0x30 22 - #define PMC_MCR_ID_MSK GENMASK(3, 0) 23 - #define PMC_MCR_CMD BIT(7) 24 - #define PMC_MCR_DIV GENMASK(10, 8) 25 - #define PMC_MCR_CSS GENMASK(20, 16) 26 20 #define PMC_MCR_CSS_SHIFT (16) 27 - #define PMC_MCR_EN BIT(28) 28 - 29 - #define PMC_MCR_ID(x) ((x) & PMC_MCR_ID_MSK) 30 21 31 22 #define MASTER_MAX_ID 4 32 23 ··· 30 37 spinlock_t *lock; 31 38 const struct clk_master_layout *layout; 32 39 const struct clk_master_characteristics *characteristics; 40 + struct at91_clk_pms pms; 33 41 u32 *mux_table; 34 42 u32 mckr; 35 43 int chg_pid; 36 44 u8 id; 37 45 u8 parent; 38 46 u8 div; 47 + u32 safe_div; 39 48 }; 49 + 50 + /* MCK div reference to be used by notifier. */ 51 + static struct clk_master *master_div; 40 52 41 53 static inline bool clk_master_ready(struct clk_master *master) 42 54 { ··· 110 112 return rate; 111 113 } 112 114 113 - static const struct clk_ops master_div_ops = { 114 - .prepare = clk_master_prepare, 115 - .is_prepared = clk_master_is_prepared, 116 - .recalc_rate = clk_master_div_recalc_rate, 117 - }; 118 - 119 - static int clk_master_div_set_rate(struct clk_hw *hw, unsigned long rate, 120 - unsigned long parent_rate) 115 + static int clk_master_div_save_context(struct clk_hw *hw) 121 116 { 122 117 struct clk_master *master = to_clk_master(hw); 123 - const struct clk_master_characteristics *characteristics = 124 - master->characteristics; 118 + struct clk_hw *parent_hw = clk_hw_get_parent(hw); 125 119 unsigned long flags; 126 - int div, i; 127 - 128 - div = DIV_ROUND_CLOSEST(parent_rate, rate); 129 - if (div > ARRAY_SIZE(characteristics->divisors)) 130 - return -EINVAL; 131 - 132 - for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) { 133 
- if (!characteristics->divisors[i]) 134 - break; 135 - 136 - if (div == characteristics->divisors[i]) { 137 - div = i; 138 - break; 139 - } 140 - } 141 - 142 - if (i == ARRAY_SIZE(characteristics->divisors)) 143 - return -EINVAL; 120 + unsigned int mckr, div; 144 121 145 122 spin_lock_irqsave(master->lock, flags); 146 - regmap_update_bits(master->regmap, master->layout->offset, 147 - (MASTER_DIV_MASK << MASTER_DIV_SHIFT), 148 - (div << MASTER_DIV_SHIFT)); 149 - while (!clk_master_ready(master)) 150 - cpu_relax(); 123 + regmap_read(master->regmap, master->layout->offset, &mckr); 151 124 spin_unlock_irqrestore(master->lock, flags); 125 + 126 + mckr &= master->layout->mask; 127 + div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; 128 + div = master->characteristics->divisors[div]; 129 + 130 + master->pms.parent_rate = clk_hw_get_rate(parent_hw); 131 + master->pms.rate = DIV_ROUND_CLOSEST(master->pms.parent_rate, div); 152 132 153 133 return 0; 154 134 } 155 135 156 - static int clk_master_div_determine_rate(struct clk_hw *hw, 157 - struct clk_rate_request *req) 136 + static void clk_master_div_restore_context(struct clk_hw *hw) 158 137 { 159 138 struct clk_master *master = to_clk_master(hw); 139 + unsigned long flags; 140 + unsigned int mckr; 141 + u8 div; 142 + 143 + spin_lock_irqsave(master->lock, flags); 144 + regmap_read(master->regmap, master->layout->offset, &mckr); 145 + spin_unlock_irqrestore(master->lock, flags); 146 + 147 + mckr &= master->layout->mask; 148 + div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; 149 + div = master->characteristics->divisors[div]; 150 + 151 + if (div != DIV_ROUND_CLOSEST(master->pms.parent_rate, master->pms.rate)) 152 + pr_warn("MCKR DIV not configured properly by firmware!\n"); 153 + } 154 + 155 + static const struct clk_ops master_div_ops = { 156 + .prepare = clk_master_prepare, 157 + .is_prepared = clk_master_is_prepared, 158 + .recalc_rate = clk_master_div_recalc_rate, 159 + .save_context = clk_master_div_save_context, 
160 + .restore_context = clk_master_div_restore_context, 161 + }; 162 + 163 + /* This function must be called with lock acquired. */ 164 + static int clk_master_div_set(struct clk_master *master, 165 + unsigned long parent_rate, int div) 166 + { 160 167 const struct clk_master_characteristics *characteristics = 161 168 master->characteristics; 162 - struct clk_hw *parent; 163 - unsigned long parent_rate, tmp_rate, best_rate = 0; 164 - int i, best_diff = INT_MIN, tmp_diff; 165 - 166 - parent = clk_hw_get_parent(hw); 167 - if (!parent) 168 - return -EINVAL; 169 - 170 - parent_rate = clk_hw_get_rate(parent); 171 - if (!parent_rate) 172 - return -EINVAL; 169 + unsigned long rate = parent_rate; 170 + unsigned int max_div = 0, div_index = 0, max_div_index = 0; 171 + unsigned int i, mckr, tmp; 172 + int ret; 173 173 174 174 for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) { 175 175 if (!characteristics->divisors[i]) 176 176 break; 177 177 178 - tmp_rate = DIV_ROUND_CLOSEST_ULL(parent_rate, 179 - characteristics->divisors[i]); 180 - tmp_diff = abs(tmp_rate - req->rate); 178 + if (div == characteristics->divisors[i]) 179 + div_index = i; 181 180 182 - if (!best_rate || best_diff > tmp_diff) { 183 - best_diff = tmp_diff; 184 - best_rate = tmp_rate; 181 + if (max_div < characteristics->divisors[i]) { 182 + max_div = characteristics->divisors[i]; 183 + max_div_index = i; 185 184 } 186 - 187 - if (!best_diff) 188 - break; 189 185 } 190 186 191 - req->best_parent_rate = best_rate; 192 - req->best_parent_hw = parent; 193 - req->rate = best_rate; 187 + if (div > max_div) 188 + div_index = max_div_index; 189 + 190 + ret = regmap_read(master->regmap, master->layout->offset, &mckr); 191 + if (ret) 192 + return ret; 193 + 194 + mckr &= master->layout->mask; 195 + tmp = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; 196 + if (tmp == div_index) 197 + return 0; 198 + 199 + rate /= characteristics->divisors[div_index]; 200 + if (rate < characteristics->output.min) 201 + 
pr_warn("master clk div is underclocked"); 202 + else if (rate > characteristics->output.max) 203 + pr_warn("master clk div is overclocked"); 204 + 205 + mckr &= ~(MASTER_DIV_MASK << MASTER_DIV_SHIFT); 206 + mckr |= (div_index << MASTER_DIV_SHIFT); 207 + ret = regmap_write(master->regmap, master->layout->offset, mckr); 208 + if (ret) 209 + return ret; 210 + 211 + while (!clk_master_ready(master)) 212 + cpu_relax(); 213 + 214 + master->div = characteristics->divisors[div_index]; 194 215 195 216 return 0; 217 + } 218 + 219 + static unsigned long clk_master_div_recalc_rate_chg(struct clk_hw *hw, 220 + unsigned long parent_rate) 221 + { 222 + struct clk_master *master = to_clk_master(hw); 223 + 224 + return DIV_ROUND_CLOSEST_ULL(parent_rate, master->div); 225 + } 226 + 227 + static void clk_master_div_restore_context_chg(struct clk_hw *hw) 228 + { 229 + struct clk_master *master = to_clk_master(hw); 230 + unsigned long flags; 231 + int ret; 232 + 233 + spin_lock_irqsave(master->lock, flags); 234 + ret = clk_master_div_set(master, master->pms.parent_rate, 235 + DIV_ROUND_CLOSEST(master->pms.parent_rate, 236 + master->pms.rate)); 237 + spin_unlock_irqrestore(master->lock, flags); 238 + if (ret) 239 + pr_warn("Failed to restore MCK DIV clock\n"); 196 240 } 197 241 198 242 static const struct clk_ops master_div_ops_chg = { 199 243 .prepare = clk_master_prepare, 200 244 .is_prepared = clk_master_is_prepared, 201 - .recalc_rate = clk_master_div_recalc_rate, 202 - .determine_rate = clk_master_div_determine_rate, 203 - .set_rate = clk_master_div_set_rate, 245 + .recalc_rate = clk_master_div_recalc_rate_chg, 246 + .save_context = clk_master_div_save_context, 247 + .restore_context = clk_master_div_restore_context_chg, 248 + }; 249 + 250 + static int clk_master_div_notifier_fn(struct notifier_block *notifier, 251 + unsigned long code, void *data) 252 + { 253 + const struct clk_master_characteristics *characteristics = 254 + master_div->characteristics; 255 + struct 
clk_notifier_data *cnd = data; 256 + unsigned long flags, new_parent_rate, new_rate; 257 + unsigned int mckr, div, new_div = 0; 258 + int ret, i; 259 + long tmp_diff; 260 + long best_diff = -1; 261 + 262 + spin_lock_irqsave(master_div->lock, flags); 263 + switch (code) { 264 + case PRE_RATE_CHANGE: 265 + /* 266 + * We want to avoid any overclocking of MCK DIV domain. To do 267 + * this we set a safe divider (the underclocking is not of 268 + * interest as we can go as low as 32KHz). The relation 269 + * b/w this clock and its parents are as follows: 270 + * 271 + * FRAC PLL -> DIV PLL -> MCK DIV 272 + * 273 + * With the proper safe divider we should be good even with FRAC 274 + * PLL at its maximum value. 275 + */ 276 + ret = regmap_read(master_div->regmap, master_div->layout->offset, 277 + &mckr); 278 + if (ret) { 279 + ret = NOTIFY_STOP_MASK; 280 + goto unlock; 281 + } 282 + 283 + mckr &= master_div->layout->mask; 284 + div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; 285 + 286 + /* Switch to safe divider. */ 287 + clk_master_div_set(master_div, 288 + cnd->old_rate * characteristics->divisors[div], 289 + master_div->safe_div); 290 + break; 291 + 292 + case POST_RATE_CHANGE: 293 + /* 294 + * At this point we want to restore MCK DIV domain to its maximum 295 + * allowed rate. 
296 + */ 297 + ret = regmap_read(master_div->regmap, master_div->layout->offset, 298 + &mckr); 299 + if (ret) { 300 + ret = NOTIFY_STOP_MASK; 301 + goto unlock; 302 + } 303 + 304 + mckr &= master_div->layout->mask; 305 + div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; 306 + new_parent_rate = cnd->new_rate * characteristics->divisors[div]; 307 + 308 + for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) { 309 + if (!characteristics->divisors[i]) 310 + break; 311 + 312 + new_rate = DIV_ROUND_CLOSEST_ULL(new_parent_rate, 313 + characteristics->divisors[i]); 314 + 315 + tmp_diff = characteristics->output.max - new_rate; 316 + if (tmp_diff < 0) 317 + continue; 318 + 319 + if (best_diff < 0 || best_diff > tmp_diff) { 320 + new_div = characteristics->divisors[i]; 321 + best_diff = tmp_diff; 322 + } 323 + 324 + if (!tmp_diff) 325 + break; 326 + } 327 + 328 + if (!new_div) { 329 + ret = NOTIFY_STOP_MASK; 330 + goto unlock; 331 + } 332 + 333 + /* Update the div to preserve MCK DIV clock rate. 
*/ 334 + clk_master_div_set(master_div, new_parent_rate, 335 + new_div); 336 + 337 + ret = NOTIFY_OK; 338 + break; 339 + 340 + default: 341 + ret = NOTIFY_DONE; 342 + break; 343 + } 344 + 345 + unlock: 346 + spin_unlock_irqrestore(master_div->lock, flags); 347 + 348 + return ret; 349 + } 350 + 351 + static struct notifier_block clk_master_div_notifier = { 352 + .notifier_call = clk_master_div_notifier_fn, 204 353 }; 205 354 206 355 static void clk_sama7g5_master_best_diff(struct clk_rate_request *req, ··· 417 272 { 418 273 struct clk_master *master = to_clk_master(hw); 419 274 unsigned long flags; 420 - unsigned int pres; 275 + unsigned int pres, mckr, tmp; 276 + int ret; 421 277 422 278 pres = DIV_ROUND_CLOSEST(parent_rate, rate); 423 279 if (pres > MASTER_PRES_MAX) ··· 426 280 427 281 else if (pres == 3) 428 282 pres = MASTER_PRES_MAX; 429 - else 283 + else if (pres) 430 284 pres = ffs(pres) - 1; 431 285 432 286 spin_lock_irqsave(master->lock, flags); 433 - regmap_update_bits(master->regmap, master->layout->offset, 434 - (MASTER_PRES_MASK << master->layout->pres_shift), 435 - (pres << master->layout->pres_shift)); 287 + ret = regmap_read(master->regmap, master->layout->offset, &mckr); 288 + if (ret) 289 + goto unlock; 290 + 291 + mckr &= master->layout->mask; 292 + tmp = (mckr >> master->layout->pres_shift) & MASTER_PRES_MASK; 293 + if (pres == tmp) 294 + goto unlock; 295 + 296 + mckr &= ~(MASTER_PRES_MASK << master->layout->pres_shift); 297 + mckr |= (pres << master->layout->pres_shift); 298 + ret = regmap_write(master->regmap, master->layout->offset, mckr); 299 + if (ret) 300 + goto unlock; 436 301 437 302 while (!clk_master_ready(master)) 438 303 cpu_relax(); 304 + unlock: 439 305 spin_unlock_irqrestore(master->lock, flags); 440 306 441 - return 0; 307 + return ret; 442 308 } 443 309 444 310 static unsigned long clk_master_pres_recalc_rate(struct clk_hw *hw, ··· 466 308 regmap_read(master->regmap, master->layout->offset, &val); 467 309 
spin_unlock_irqrestore(master->lock, flags); 468 310 311 + val &= master->layout->mask; 469 312 pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK; 470 - if (pres == 3 && characteristics->have_div3_pres) 313 + if (pres == MASTER_PRES_MAX && characteristics->have_div3_pres) 471 314 pres = 3; 472 315 else 473 316 pres = (1 << pres); ··· 486 327 regmap_read(master->regmap, master->layout->offset, &mckr); 487 328 spin_unlock_irqrestore(master->lock, flags); 488 329 330 + mckr &= master->layout->mask; 331 + 489 332 return mckr & AT91_PMC_CSS; 333 + } 334 + 335 + static int clk_master_pres_save_context(struct clk_hw *hw) 336 + { 337 + struct clk_master *master = to_clk_master(hw); 338 + struct clk_hw *parent_hw = clk_hw_get_parent(hw); 339 + unsigned long flags; 340 + unsigned int val, pres; 341 + 342 + spin_lock_irqsave(master->lock, flags); 343 + regmap_read(master->regmap, master->layout->offset, &val); 344 + spin_unlock_irqrestore(master->lock, flags); 345 + 346 + val &= master->layout->mask; 347 + pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK; 348 + if (pres == MASTER_PRES_MAX && master->characteristics->have_div3_pres) 349 + pres = 3; 350 + else 351 + pres = (1 << pres); 352 + 353 + master->pms.parent = val & AT91_PMC_CSS; 354 + master->pms.parent_rate = clk_hw_get_rate(parent_hw); 355 + master->pms.rate = DIV_ROUND_CLOSEST_ULL(master->pms.parent_rate, pres); 356 + 357 + return 0; 358 + } 359 + 360 + static void clk_master_pres_restore_context(struct clk_hw *hw) 361 + { 362 + struct clk_master *master = to_clk_master(hw); 363 + unsigned long flags; 364 + unsigned int val, pres; 365 + 366 + spin_lock_irqsave(master->lock, flags); 367 + regmap_read(master->regmap, master->layout->offset, &val); 368 + spin_unlock_irqrestore(master->lock, flags); 369 + 370 + val &= master->layout->mask; 371 + pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK; 372 + if (pres == MASTER_PRES_MAX && master->characteristics->have_div3_pres) 373 + pres 
= 3; 374 + else 375 + pres = (1 << pres); 376 + 377 + if (master->pms.rate != 378 + DIV_ROUND_CLOSEST_ULL(master->pms.parent_rate, pres) || 379 + (master->pms.parent != (val & AT91_PMC_CSS))) 380 + pr_warn("MCKR PRES was not configured properly by firmware!\n"); 381 + } 382 + 383 + static void clk_master_pres_restore_context_chg(struct clk_hw *hw) 384 + { 385 + struct clk_master *master = to_clk_master(hw); 386 + 387 + clk_master_pres_set_rate(hw, master->pms.rate, master->pms.parent_rate); 490 388 } 491 389 492 390 static const struct clk_ops master_pres_ops = { ··· 551 335 .is_prepared = clk_master_is_prepared, 552 336 .recalc_rate = clk_master_pres_recalc_rate, 553 337 .get_parent = clk_master_pres_get_parent, 338 + .save_context = clk_master_pres_save_context, 339 + .restore_context = clk_master_pres_restore_context, 554 340 }; 555 341 556 342 static const struct clk_ops master_pres_ops_chg = { ··· 562 344 .recalc_rate = clk_master_pres_recalc_rate, 563 345 .get_parent = clk_master_pres_get_parent, 564 346 .set_rate = clk_master_pres_set_rate, 347 + .save_context = clk_master_pres_save_context, 348 + .restore_context = clk_master_pres_restore_context_chg, 565 349 }; 566 350 567 351 static struct clk_hw * __init ··· 578 358 struct clk_master *master; 579 359 struct clk_init_data init; 580 360 struct clk_hw *hw; 361 + unsigned int mckr; 362 + unsigned long irqflags; 581 363 int ret; 582 364 583 365 if (!name || !num_parents || !parent_names || !lock) ··· 601 379 master->regmap = regmap; 602 380 master->chg_pid = chg_pid; 603 381 master->lock = lock; 382 + 383 + if (ops == &master_div_ops_chg) { 384 + spin_lock_irqsave(master->lock, irqflags); 385 + regmap_read(master->regmap, master->layout->offset, &mckr); 386 + spin_unlock_irqrestore(master->lock, irqflags); 387 + 388 + mckr &= layout->mask; 389 + mckr = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; 390 + master->div = characteristics->divisors[mckr]; 391 + } 604 392 605 393 hw = &master->hw; 606 394 ret = 
clk_hw_register(NULL, &master->hw); ··· 648 416 const char *name, const char *parent_name, 649 417 const struct clk_master_layout *layout, 650 418 const struct clk_master_characteristics *characteristics, 651 - spinlock_t *lock, u32 flags) 419 + spinlock_t *lock, u32 flags, u32 safe_div) 652 420 { 653 421 const struct clk_ops *ops; 422 + struct clk_hw *hw; 654 423 655 424 if (flags & CLK_SET_RATE_GATE) 656 425 ops = &master_div_ops; 657 426 else 658 427 ops = &master_div_ops_chg; 659 428 660 - return at91_clk_register_master_internal(regmap, name, 1, 661 - &parent_name, layout, 662 - characteristics, ops, 663 - lock, flags, -EINVAL); 429 + hw = at91_clk_register_master_internal(regmap, name, 1, 430 + &parent_name, layout, 431 + characteristics, ops, 432 + lock, flags, -EINVAL); 433 + 434 + if (!IS_ERR(hw) && safe_div) { 435 + master_div = to_clk_master(hw); 436 + master_div->safe_div = safe_div; 437 + clk_notifier_register(hw->clk, 438 + &clk_master_div_notifier); 439 + } 440 + 441 + return hw; 664 442 } 665 443 666 444 static unsigned long ··· 781 539 return 0; 782 540 } 783 541 784 - static int clk_sama7g5_master_enable(struct clk_hw *hw) 542 + static void clk_sama7g5_master_set(struct clk_master *master, 543 + unsigned int status) 785 544 { 786 - struct clk_master *master = to_clk_master(hw); 787 545 unsigned long flags; 788 546 unsigned int val, cparent; 547 + unsigned int enable = status ? 
AT91_PMC_MCR_V2_EN : 0; 548 + unsigned int parent = master->parent << PMC_MCR_CSS_SHIFT; 549 + unsigned int div = master->div << MASTER_DIV_SHIFT; 789 550 790 551 spin_lock_irqsave(master->lock, flags); 791 552 792 - regmap_write(master->regmap, PMC_MCR, PMC_MCR_ID(master->id)); 793 - regmap_read(master->regmap, PMC_MCR, &val); 794 - regmap_update_bits(master->regmap, PMC_MCR, 795 - PMC_MCR_EN | PMC_MCR_CSS | PMC_MCR_DIV | 796 - PMC_MCR_CMD | PMC_MCR_ID_MSK, 797 - PMC_MCR_EN | (master->parent << PMC_MCR_CSS_SHIFT) | 798 - (master->div << MASTER_DIV_SHIFT) | 799 - PMC_MCR_CMD | PMC_MCR_ID(master->id)); 553 + regmap_write(master->regmap, AT91_PMC_MCR_V2, 554 + AT91_PMC_MCR_V2_ID(master->id)); 555 + regmap_read(master->regmap, AT91_PMC_MCR_V2, &val); 556 + regmap_update_bits(master->regmap, AT91_PMC_MCR_V2, 557 + enable | AT91_PMC_MCR_V2_CSS | AT91_PMC_MCR_V2_DIV | 558 + AT91_PMC_MCR_V2_CMD | AT91_PMC_MCR_V2_ID_MSK, 559 + enable | parent | div | AT91_PMC_MCR_V2_CMD | 560 + AT91_PMC_MCR_V2_ID(master->id)); 800 561 801 - cparent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT; 562 + cparent = (val & AT91_PMC_MCR_V2_CSS) >> PMC_MCR_CSS_SHIFT; 802 563 803 564 /* Wait here only if parent is being changed. 
*/ 804 565 while ((cparent != master->parent) && !clk_master_ready(master)) 805 566 cpu_relax(); 806 567 807 568 spin_unlock_irqrestore(master->lock, flags); 569 + } 570 + 571 + static int clk_sama7g5_master_enable(struct clk_hw *hw) 572 + { 573 + struct clk_master *master = to_clk_master(hw); 574 + 575 + clk_sama7g5_master_set(master, 1); 808 576 809 577 return 0; 810 578 } ··· 826 574 827 575 spin_lock_irqsave(master->lock, flags); 828 576 829 - regmap_write(master->regmap, PMC_MCR, master->id); 830 - regmap_update_bits(master->regmap, PMC_MCR, 831 - PMC_MCR_EN | PMC_MCR_CMD | PMC_MCR_ID_MSK, 832 - PMC_MCR_CMD | PMC_MCR_ID(master->id)); 577 + regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id); 578 + regmap_update_bits(master->regmap, AT91_PMC_MCR_V2, 579 + AT91_PMC_MCR_V2_EN | AT91_PMC_MCR_V2_CMD | 580 + AT91_PMC_MCR_V2_ID_MSK, 581 + AT91_PMC_MCR_V2_CMD | 582 + AT91_PMC_MCR_V2_ID(master->id)); 833 583 834 584 spin_unlock_irqrestore(master->lock, flags); 835 585 } ··· 844 590 845 591 spin_lock_irqsave(master->lock, flags); 846 592 847 - regmap_write(master->regmap, PMC_MCR, master->id); 848 - regmap_read(master->regmap, PMC_MCR, &val); 593 + regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id); 594 + regmap_read(master->regmap, AT91_PMC_MCR_V2, &val); 849 595 850 596 spin_unlock_irqrestore(master->lock, flags); 851 597 852 - return !!(val & PMC_MCR_EN); 598 + return !!(val & AT91_PMC_MCR_V2_EN); 853 599 } 854 600 855 601 static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate, ··· 864 610 865 611 if (div == 3) 866 612 div = MASTER_PRES_MAX; 867 - else 613 + else if (div) 868 614 div = ffs(div) - 1; 869 615 870 616 spin_lock_irqsave(master->lock, flags); ··· 872 618 spin_unlock_irqrestore(master->lock, flags); 873 619 874 620 return 0; 621 + } 622 + 623 + static int clk_sama7g5_master_save_context(struct clk_hw *hw) 624 + { 625 + struct clk_master *master = to_clk_master(hw); 626 + 627 + master->pms.status = 
clk_sama7g5_master_is_enabled(hw); 628 + 629 + return 0; 630 + } 631 + 632 + static void clk_sama7g5_master_restore_context(struct clk_hw *hw) 633 + { 634 + struct clk_master *master = to_clk_master(hw); 635 + 636 + if (master->pms.status) 637 + clk_sama7g5_master_set(master, master->pms.status); 875 638 } 876 639 877 640 static const struct clk_ops sama7g5_master_ops = { ··· 900 629 .set_rate = clk_sama7g5_master_set_rate, 901 630 .get_parent = clk_sama7g5_master_get_parent, 902 631 .set_parent = clk_sama7g5_master_set_parent, 632 + .save_context = clk_sama7g5_master_save_context, 633 + .restore_context = clk_sama7g5_master_restore_context, 903 634 }; 904 635 905 636 struct clk_hw * __init ··· 945 672 master->mux_table = mux_table; 946 673 947 674 spin_lock_irqsave(master->lock, flags); 948 - regmap_write(master->regmap, PMC_MCR, master->id); 949 - regmap_read(master->regmap, PMC_MCR, &val); 950 - master->parent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT; 951 - master->div = (val & PMC_MCR_DIV) >> MASTER_DIV_SHIFT; 675 + regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id); 676 + regmap_read(master->regmap, AT91_PMC_MCR_V2, &val); 677 + master->parent = (val & AT91_PMC_MCR_V2_CSS) >> PMC_MCR_CSS_SHIFT; 678 + master->div = (val & AT91_PMC_MCR_V2_DIV) >> MASTER_DIV_SHIFT; 952 679 spin_unlock_irqrestore(master->lock, flags); 953 680 954 681 hw = &master->hw;
+34 -6
drivers/clk/at91/clk-peripheral.c
··· 37 37 u32 id; 38 38 u32 div; 39 39 const struct clk_pcr_layout *layout; 40 + struct at91_clk_pms pms; 40 41 bool auto_div; 41 42 int chg_pid; 42 43 }; ··· 156 155 periph->div = shift; 157 156 } 158 157 159 - static int clk_sam9x5_peripheral_enable(struct clk_hw *hw) 158 + static int clk_sam9x5_peripheral_set(struct clk_sam9x5_peripheral *periph, 159 + unsigned int status) 160 160 { 161 - struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); 162 161 unsigned long flags; 162 + unsigned int enable = status ? AT91_PMC_PCR_EN : 0; 163 163 164 164 if (periph->id < PERIPHERAL_ID_MIN) 165 165 return 0; ··· 170 168 (periph->id & periph->layout->pid_mask)); 171 169 regmap_update_bits(periph->regmap, periph->layout->offset, 172 170 periph->layout->div_mask | periph->layout->cmd | 173 - AT91_PMC_PCR_EN, 171 + enable, 174 172 field_prep(periph->layout->div_mask, periph->div) | 175 - periph->layout->cmd | 176 - AT91_PMC_PCR_EN); 173 + periph->layout->cmd | enable); 177 174 spin_unlock_irqrestore(periph->lock, flags); 178 175 179 176 return 0; 177 + } 178 + 179 + static int clk_sam9x5_peripheral_enable(struct clk_hw *hw) 180 + { 181 + struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); 182 + 183 + return clk_sam9x5_peripheral_set(periph, 1); 180 184 } 181 185 182 186 static void clk_sam9x5_peripheral_disable(struct clk_hw *hw) ··· 401 393 return -EINVAL; 402 394 } 403 395 396 + static int clk_sam9x5_peripheral_save_context(struct clk_hw *hw) 397 + { 398 + struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); 399 + 400 + periph->pms.status = clk_sam9x5_peripheral_is_enabled(hw); 401 + 402 + return 0; 403 + } 404 + 405 + static void clk_sam9x5_peripheral_restore_context(struct clk_hw *hw) 406 + { 407 + struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); 408 + 409 + if (periph->pms.status) 410 + clk_sam9x5_peripheral_set(periph, periph->pms.status); 411 + } 412 + 404 413 static const struct clk_ops 
sam9x5_peripheral_ops = { 405 414 .enable = clk_sam9x5_peripheral_enable, 406 415 .disable = clk_sam9x5_peripheral_disable, ··· 425 400 .recalc_rate = clk_sam9x5_peripheral_recalc_rate, 426 401 .round_rate = clk_sam9x5_peripheral_round_rate, 427 402 .set_rate = clk_sam9x5_peripheral_set_rate, 403 + .save_context = clk_sam9x5_peripheral_save_context, 404 + .restore_context = clk_sam9x5_peripheral_restore_context, 428 405 }; 429 406 430 407 static const struct clk_ops sam9x5_peripheral_chg_ops = { ··· 436 409 .recalc_rate = clk_sam9x5_peripheral_recalc_rate, 437 410 .determine_rate = clk_sam9x5_peripheral_determine_rate, 438 411 .set_rate = clk_sam9x5_peripheral_set_rate, 412 + .save_context = clk_sam9x5_peripheral_save_context, 413 + .restore_context = clk_sam9x5_peripheral_restore_context, 439 414 }; 440 415 441 416 struct clk_hw * __init ··· 489 460 hw = ERR_PTR(ret); 490 461 } else { 491 462 clk_sam9x5_peripheral_autodiv(periph); 492 - pmc_register_id(id); 493 463 } 494 464 495 465 return hw;
+39
drivers/clk/at91/clk-pll.c
··· 40 40 u16 mul; 41 41 const struct clk_pll_layout *layout; 42 42 const struct clk_pll_characteristics *characteristics; 43 + struct at91_clk_pms pms; 43 44 }; 44 45 45 46 static inline bool clk_pll_ready(struct regmap *regmap, int id) ··· 261 260 return 0; 262 261 } 263 262 263 + static int clk_pll_save_context(struct clk_hw *hw) 264 + { 265 + struct clk_pll *pll = to_clk_pll(hw); 266 + struct clk_hw *parent_hw = clk_hw_get_parent(hw); 267 + 268 + pll->pms.parent_rate = clk_hw_get_rate(parent_hw); 269 + pll->pms.rate = clk_pll_recalc_rate(&pll->hw, pll->pms.parent_rate); 270 + pll->pms.status = clk_pll_ready(pll->regmap, PLL_REG(pll->id)); 271 + 272 + return 0; 273 + } 274 + 275 + static void clk_pll_restore_context(struct clk_hw *hw) 276 + { 277 + struct clk_pll *pll = to_clk_pll(hw); 278 + unsigned long calc_rate; 279 + unsigned int pllr, pllr_out, pllr_count; 280 + u8 out = 0; 281 + 282 + if (pll->characteristics->out) 283 + out = pll->characteristics->out[pll->range]; 284 + 285 + regmap_read(pll->regmap, PLL_REG(pll->id), &pllr); 286 + 287 + calc_rate = (pll->pms.parent_rate / PLL_DIV(pllr)) * 288 + (PLL_MUL(pllr, pll->layout) + 1); 289 + pllr_count = (pllr >> PLL_COUNT_SHIFT) & PLL_MAX_COUNT; 290 + pllr_out = (pllr >> PLL_OUT_SHIFT) & out; 291 + 292 + if (pll->pms.rate != calc_rate || 293 + pll->pms.status != clk_pll_ready(pll->regmap, PLL_REG(pll->id)) || 294 + pllr_count != PLL_MAX_COUNT || 295 + (out && pllr_out != out)) 296 + pr_warn("PLLAR was not configured properly by firmware\n"); 297 + } 298 + 264 299 static const struct clk_ops pll_ops = { 265 300 .prepare = clk_pll_prepare, 266 301 .unprepare = clk_pll_unprepare, ··· 304 267 .recalc_rate = clk_pll_recalc_rate, 305 268 .round_rate = clk_pll_round_rate, 306 269 .set_rate = clk_pll_set_rate, 270 + .save_context = clk_pll_save_context, 271 + .restore_context = clk_pll_restore_context, 307 272 }; 308 273 309 274 struct clk_hw * __init
+27 -2
drivers/clk/at91/clk-programmable.c
··· 24 24 u32 *mux_table; 25 25 u8 id; 26 26 const struct clk_programmable_layout *layout; 27 + struct at91_clk_pms pms; 27 28 }; 28 29 29 30 #define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw) ··· 178 177 return 0; 179 178 } 180 179 180 + static int clk_programmable_save_context(struct clk_hw *hw) 181 + { 182 + struct clk_programmable *prog = to_clk_programmable(hw); 183 + struct clk_hw *parent_hw = clk_hw_get_parent(hw); 184 + 185 + prog->pms.parent = clk_programmable_get_parent(hw); 186 + prog->pms.parent_rate = clk_hw_get_rate(parent_hw); 187 + prog->pms.rate = clk_programmable_recalc_rate(hw, prog->pms.parent_rate); 188 + 189 + return 0; 190 + } 191 + 192 + static void clk_programmable_restore_context(struct clk_hw *hw) 193 + { 194 + struct clk_programmable *prog = to_clk_programmable(hw); 195 + int ret; 196 + 197 + ret = clk_programmable_set_parent(hw, prog->pms.parent); 198 + if (ret) 199 + return; 200 + 201 + clk_programmable_set_rate(hw, prog->pms.rate, prog->pms.parent_rate); 202 + } 203 + 181 204 static const struct clk_ops programmable_ops = { 182 205 .recalc_rate = clk_programmable_recalc_rate, 183 206 .determine_rate = clk_programmable_determine_rate, 184 207 .get_parent = clk_programmable_get_parent, 185 208 .set_parent = clk_programmable_set_parent, 186 209 .set_rate = clk_programmable_set_rate, 210 + .save_context = clk_programmable_save_context, 211 + .restore_context = clk_programmable_restore_context, 187 212 }; 188 213 189 214 struct clk_hw * __init ··· 248 221 if (ret) { 249 222 kfree(prog); 250 223 hw = ERR_PTR(ret); 251 - } else { 252 - pmc_register_pck(id); 253 224 } 254 225 255 226 return hw;
+145 -29
drivers/clk/at91/clk-sam9x60-pll.c
··· 5 5 */ 6 6 7 7 #include <linux/bitfield.h> 8 + #include <linux/clk.h> 8 9 #include <linux/clk-provider.h> 9 10 #include <linux/clkdev.h> 10 11 #include <linux/clk/at91_pmc.h> ··· 39 38 40 39 struct sam9x60_frac { 41 40 struct sam9x60_pll_core core; 41 + struct at91_clk_pms pms; 42 42 u32 frac; 43 43 u16 mul; 44 44 }; 45 45 46 46 struct sam9x60_div { 47 47 struct sam9x60_pll_core core; 48 + struct at91_clk_pms pms; 48 49 u8 div; 50 + u8 safe_div; 49 51 }; 50 52 51 53 #define to_sam9x60_pll_core(hw) container_of(hw, struct sam9x60_pll_core, hw) 52 54 #define to_sam9x60_frac(core) container_of(core, struct sam9x60_frac, core) 53 55 #define to_sam9x60_div(core) container_of(core, struct sam9x60_div, core) 56 + 57 + static struct sam9x60_div *notifier_div; 54 58 55 59 static inline bool sam9x60_pll_ready(struct regmap *regmap, int id) 56 60 { ··· 77 71 struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 78 72 struct sam9x60_frac *frac = to_sam9x60_frac(core); 79 73 80 - return (parent_rate * (frac->mul + 1) + 81 - ((u64)parent_rate * frac->frac >> 22)); 74 + return parent_rate * (frac->mul + 1) + 75 + DIV_ROUND_CLOSEST_ULL((u64)parent_rate * frac->frac, (1 << 22)); 82 76 } 83 77 84 - static int sam9x60_frac_pll_prepare(struct clk_hw *hw) 78 + static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core) 85 79 { 86 - struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 87 80 struct sam9x60_frac *frac = to_sam9x60_frac(core); 88 81 struct regmap *regmap = core->regmap; 89 82 unsigned int val, cfrac, cmul; ··· 144 139 spin_unlock_irqrestore(core->lock, flags); 145 140 146 141 return 0; 142 + } 143 + 144 + static int sam9x60_frac_pll_prepare(struct clk_hw *hw) 145 + { 146 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 147 + 148 + return sam9x60_frac_pll_set(core); 147 149 } 148 150 149 151 static void sam9x60_frac_pll_unprepare(struct clk_hw *hw) ··· 292 280 return ret; 293 281 } 294 282 283 + static int sam9x60_frac_pll_save_context(struct clk_hw 
*hw) 284 + { 285 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 286 + struct sam9x60_frac *frac = to_sam9x60_frac(core); 287 + 288 + frac->pms.status = sam9x60_pll_ready(core->regmap, core->id); 289 + 290 + return 0; 291 + } 292 + 293 + static void sam9x60_frac_pll_restore_context(struct clk_hw *hw) 294 + { 295 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 296 + struct sam9x60_frac *frac = to_sam9x60_frac(core); 297 + 298 + if (frac->pms.status) 299 + sam9x60_frac_pll_set(core); 300 + } 301 + 295 302 static const struct clk_ops sam9x60_frac_pll_ops = { 296 303 .prepare = sam9x60_frac_pll_prepare, 297 304 .unprepare = sam9x60_frac_pll_unprepare, ··· 318 287 .recalc_rate = sam9x60_frac_pll_recalc_rate, 319 288 .round_rate = sam9x60_frac_pll_round_rate, 320 289 .set_rate = sam9x60_frac_pll_set_rate, 290 + .save_context = sam9x60_frac_pll_save_context, 291 + .restore_context = sam9x60_frac_pll_restore_context, 321 292 }; 322 293 323 294 static const struct clk_ops sam9x60_frac_pll_ops_chg = { ··· 329 296 .recalc_rate = sam9x60_frac_pll_recalc_rate, 330 297 .round_rate = sam9x60_frac_pll_round_rate, 331 298 .set_rate = sam9x60_frac_pll_set_rate_chg, 299 + .save_context = sam9x60_frac_pll_save_context, 300 + .restore_context = sam9x60_frac_pll_restore_context, 332 301 }; 333 302 334 - static int sam9x60_div_pll_prepare(struct clk_hw *hw) 303 + /* This function should be called with spinlock acquired. */ 304 + static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div, 305 + bool enable) 335 306 { 336 - struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 307 + struct regmap *regmap = core->regmap; 308 + u32 ena_msk = enable ? core->layout->endiv_mask : 0; 309 + u32 ena_val = enable ? 
(1 << core->layout->endiv_shift) : 0; 310 + 311 + regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, 312 + core->layout->div_mask | ena_msk, 313 + (div << core->layout->div_shift) | ena_val); 314 + 315 + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, 316 + AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, 317 + AT91_PMC_PLL_UPDT_UPDATE | core->id); 318 + 319 + while (!sam9x60_pll_ready(regmap, core->id)) 320 + cpu_relax(); 321 + } 322 + 323 + static int sam9x60_div_pll_set(struct sam9x60_pll_core *core) 324 + { 337 325 struct sam9x60_div *div = to_sam9x60_div(core); 338 326 struct regmap *regmap = core->regmap; 339 327 unsigned long flags; ··· 370 316 if (!!(val & core->layout->endiv_mask) && cdiv == div->div) 371 317 goto unlock; 372 318 373 - regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, 374 - core->layout->div_mask | core->layout->endiv_mask, 375 - (div->div << core->layout->div_shift) | 376 - (1 << core->layout->endiv_shift)); 377 - 378 - regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, 379 - AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, 380 - AT91_PMC_PLL_UPDT_UPDATE | core->id); 381 - 382 - while (!sam9x60_pll_ready(regmap, core->id)) 383 - cpu_relax(); 319 + sam9x60_div_pll_set_div(core, div->div, 1); 384 320 385 321 unlock: 386 322 spin_unlock_irqrestore(core->lock, flags); 387 323 388 324 return 0; 325 + } 326 + 327 + static int sam9x60_div_pll_prepare(struct clk_hw *hw) 328 + { 329 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 330 + 331 + return sam9x60_div_pll_set(core); 389 332 } 390 333 391 334 static void sam9x60_div_pll_unprepare(struct clk_hw *hw) ··· 516 465 if (cdiv == div->div) 517 466 goto unlock; 518 467 519 - regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, 520 - core->layout->div_mask, 521 - (div->div << core->layout->div_shift)); 522 - 523 - regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, 524 - AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, 525 - AT91_PMC_PLL_UPDT_UPDATE | core->id); 526 - 527 - while 
(!sam9x60_pll_ready(regmap, core->id)) 528 - cpu_relax(); 468 + sam9x60_div_pll_set_div(core, div->div, 0); 529 469 530 470 unlock: 531 471 spin_unlock_irqrestore(core->lock, irqflags); 532 472 533 473 return 0; 534 474 } 475 + 476 + static int sam9x60_div_pll_save_context(struct clk_hw *hw) 477 + { 478 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 479 + struct sam9x60_div *div = to_sam9x60_div(core); 480 + 481 + div->pms.status = sam9x60_div_pll_is_prepared(hw); 482 + 483 + return 0; 484 + } 485 + 486 + static void sam9x60_div_pll_restore_context(struct clk_hw *hw) 487 + { 488 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 489 + struct sam9x60_div *div = to_sam9x60_div(core); 490 + 491 + if (div->pms.status) 492 + sam9x60_div_pll_set(core); 493 + } 494 + 495 + static int sam9x60_div_pll_notifier_fn(struct notifier_block *notifier, 496 + unsigned long code, void *data) 497 + { 498 + struct sam9x60_div *div = notifier_div; 499 + struct sam9x60_pll_core core = div->core; 500 + struct regmap *regmap = core.regmap; 501 + unsigned long irqflags; 502 + u32 val, cdiv; 503 + int ret = NOTIFY_DONE; 504 + 505 + if (code != PRE_RATE_CHANGE) 506 + return ret; 507 + 508 + /* 509 + * We switch to safe divider to avoid overclocking of other domains 510 + * feed by us while the frac PLL (our parent) is changed. 511 + */ 512 + div->div = div->safe_div; 513 + 514 + spin_lock_irqsave(core.lock, irqflags); 515 + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK, 516 + core.id); 517 + regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val); 518 + cdiv = (val & core.layout->div_mask) >> core.layout->div_shift; 519 + 520 + /* Stop if nothing changed. 
*/ 521 + if (cdiv == div->safe_div) 522 + goto unlock; 523 + 524 + sam9x60_div_pll_set_div(&core, div->div, 0); 525 + ret = NOTIFY_OK; 526 + 527 + unlock: 528 + spin_unlock_irqrestore(core.lock, irqflags); 529 + 530 + return ret; 531 + } 532 + 533 + static struct notifier_block sam9x60_div_pll_notifier = { 534 + .notifier_call = sam9x60_div_pll_notifier_fn, 535 + }; 535 536 536 537 static const struct clk_ops sam9x60_div_pll_ops = { 537 538 .prepare = sam9x60_div_pll_prepare, ··· 592 489 .recalc_rate = sam9x60_div_pll_recalc_rate, 593 490 .round_rate = sam9x60_div_pll_round_rate, 594 491 .set_rate = sam9x60_div_pll_set_rate, 492 + .save_context = sam9x60_div_pll_save_context, 493 + .restore_context = sam9x60_div_pll_restore_context, 595 494 }; 596 495 597 496 static const struct clk_ops sam9x60_div_pll_ops_chg = { ··· 603 498 .recalc_rate = sam9x60_div_pll_recalc_rate, 604 499 .round_rate = sam9x60_div_pll_round_rate, 605 500 .set_rate = sam9x60_div_pll_set_rate_chg, 501 + .save_context = sam9x60_div_pll_save_context, 502 + .restore_context = sam9x60_div_pll_restore_context, 606 503 }; 607 504 608 505 struct clk_hw * __init ··· 694 587 sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock, 695 588 const char *name, const char *parent_name, u8 id, 696 589 const struct clk_pll_characteristics *characteristics, 697 - const struct clk_pll_layout *layout, u32 flags) 590 + const struct clk_pll_layout *layout, u32 flags, 591 + u32 safe_div) 698 592 { 699 593 struct sam9x60_div *div; 700 594 struct clk_hw *hw; ··· 704 596 unsigned int val; 705 597 int ret; 706 598 707 - if (id > PLL_MAX_ID || !lock) 599 + /* We only support one changeable PLL. 
*/ 600 + if (id > PLL_MAX_ID || !lock || (safe_div && notifier_div)) 708 601 return ERR_PTR(-EINVAL); 602 + 603 + if (safe_div >= PLL_DIV_MAX) 604 + safe_div = PLL_DIV_MAX - 1; 709 605 710 606 div = kzalloc(sizeof(*div), GFP_KERNEL); 711 607 if (!div) ··· 730 618 div->core.layout = layout; 731 619 div->core.regmap = regmap; 732 620 div->core.lock = lock; 621 + div->safe_div = safe_div; 733 622 734 623 spin_lock_irqsave(div->core.lock, irqflags); 735 624 ··· 746 633 if (ret) { 747 634 kfree(div); 748 635 hw = ERR_PTR(ret); 636 + } else if (div->safe_div) { 637 + notifier_div = div; 638 + clk_notifier_register(hw->clk, &sam9x60_div_pll_notifier); 749 639 } 750 640 751 641 return hw;
+20
drivers/clk/at91/clk-system.c
··· 20 20 struct clk_system { 21 21 struct clk_hw hw; 22 22 struct regmap *regmap; 23 + struct at91_clk_pms pms; 23 24 u8 id; 24 25 }; 25 26 ··· 78 77 return !!(status & (1 << sys->id)); 79 78 } 80 79 80 + static int clk_system_save_context(struct clk_hw *hw) 81 + { 82 + struct clk_system *sys = to_clk_system(hw); 83 + 84 + sys->pms.status = clk_system_is_prepared(hw); 85 + 86 + return 0; 87 + } 88 + 89 + static void clk_system_restore_context(struct clk_hw *hw) 90 + { 91 + struct clk_system *sys = to_clk_system(hw); 92 + 93 + if (sys->pms.status) 94 + clk_system_prepare(&sys->hw); 95 + } 96 + 81 97 static const struct clk_ops system_ops = { 82 98 .prepare = clk_system_prepare, 83 99 .unprepare = clk_system_unprepare, 84 100 .is_prepared = clk_system_is_prepared, 101 + .save_context = clk_system_save_context, 102 + .restore_context = clk_system_restore_context, 85 103 }; 86 104 87 105 struct clk_hw * __init
+27
drivers/clk/at91/clk-usb.c
··· 24 24 struct at91sam9x5_clk_usb { 25 25 struct clk_hw hw; 26 26 struct regmap *regmap; 27 + struct at91_clk_pms pms; 27 28 u32 usbs_mask; 28 29 u8 num_parents; 29 30 }; ··· 149 148 return 0; 150 149 } 151 150 151 + static int at91sam9x5_usb_save_context(struct clk_hw *hw) 152 + { 153 + struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); 154 + struct clk_hw *parent_hw = clk_hw_get_parent(hw); 155 + 156 + usb->pms.parent = at91sam9x5_clk_usb_get_parent(hw); 157 + usb->pms.parent_rate = clk_hw_get_rate(parent_hw); 158 + usb->pms.rate = at91sam9x5_clk_usb_recalc_rate(hw, usb->pms.parent_rate); 159 + 160 + return 0; 161 + } 162 + 163 + static void at91sam9x5_usb_restore_context(struct clk_hw *hw) 164 + { 165 + struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); 166 + int ret; 167 + 168 + ret = at91sam9x5_clk_usb_set_parent(hw, usb->pms.parent); 169 + if (ret) 170 + return; 171 + 172 + at91sam9x5_clk_usb_set_rate(hw, usb->pms.rate, usb->pms.parent_rate); 173 + } 174 + 152 175 static const struct clk_ops at91sam9x5_usb_ops = { 153 176 .recalc_rate = at91sam9x5_clk_usb_recalc_rate, 154 177 .determine_rate = at91sam9x5_clk_usb_determine_rate, 155 178 .get_parent = at91sam9x5_clk_usb_get_parent, 156 179 .set_parent = at91sam9x5_clk_usb_set_parent, 157 180 .set_rate = at91sam9x5_clk_usb_set_rate, 181 + .save_context = at91sam9x5_usb_save_context, 182 + .restore_context = at91sam9x5_usb_restore_context, 158 183 }; 159 184 160 185 static int at91sam9n12_clk_usb_enable(struct clk_hw *hw)
+39
drivers/clk/at91/clk-utmi.c
··· 23 23 struct clk_hw hw; 24 24 struct regmap *regmap_pmc; 25 25 struct regmap *regmap_sfr; 26 + struct at91_clk_pms pms; 26 27 }; 27 28 28 29 #define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw) ··· 114 113 return UTMI_RATE; 115 114 } 116 115 116 + static int clk_utmi_save_context(struct clk_hw *hw) 117 + { 118 + struct clk_utmi *utmi = to_clk_utmi(hw); 119 + 120 + utmi->pms.status = clk_utmi_is_prepared(hw); 121 + 122 + return 0; 123 + } 124 + 125 + static void clk_utmi_restore_context(struct clk_hw *hw) 126 + { 127 + struct clk_utmi *utmi = to_clk_utmi(hw); 128 + 129 + if (utmi->pms.status) 130 + clk_utmi_prepare(hw); 131 + } 132 + 117 133 static const struct clk_ops utmi_ops = { 118 134 .prepare = clk_utmi_prepare, 119 135 .unprepare = clk_utmi_unprepare, 120 136 .is_prepared = clk_utmi_is_prepared, 121 137 .recalc_rate = clk_utmi_recalc_rate, 138 + .save_context = clk_utmi_save_context, 139 + .restore_context = clk_utmi_restore_context, 122 140 }; 123 141 124 142 static struct clk_hw * __init ··· 252 232 return 0; 253 233 } 254 234 235 + static int clk_utmi_sama7g5_save_context(struct clk_hw *hw) 236 + { 237 + struct clk_utmi *utmi = to_clk_utmi(hw); 238 + 239 + utmi->pms.status = clk_utmi_sama7g5_is_prepared(hw); 240 + 241 + return 0; 242 + } 243 + 244 + static void clk_utmi_sama7g5_restore_context(struct clk_hw *hw) 245 + { 246 + struct clk_utmi *utmi = to_clk_utmi(hw); 247 + 248 + if (utmi->pms.status) 249 + clk_utmi_sama7g5_prepare(hw); 250 + } 251 + 255 252 static const struct clk_ops sama7g5_utmi_ops = { 256 253 .prepare = clk_utmi_sama7g5_prepare, 257 254 .is_prepared = clk_utmi_sama7g5_is_prepared, 258 255 .recalc_rate = clk_utmi_recalc_rate, 256 + .save_context = clk_utmi_sama7g5_save_context, 257 + .restore_context = clk_utmi_sama7g5_restore_context, 259 258 }; 260 259 261 260 struct clk_hw * __init
+1 -1
drivers/clk/at91/dt-compat.c
··· 399 399 400 400 hw = at91_clk_register_master_div(regmap, name, "masterck_pres", 401 401 layout, characteristics, 402 - &mck_lock, CLK_SET_RATE_GATE); 402 + &mck_lock, CLK_SET_RATE_GATE, 0); 403 403 if (IS_ERR(hw)) 404 404 goto out_free_characteristics; 405 405
+46 -134
drivers/clk/at91/pmc.c
··· 3 3 * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com> 4 4 */ 5 5 6 + #include <linux/clk.h> 6 7 #include <linux/clk-provider.h> 7 8 #include <linux/clkdev.h> 8 9 #include <linux/clk/at91_pmc.h> 9 10 #include <linux/of.h> 11 + #include <linux/of_address.h> 10 12 #include <linux/mfd/syscon.h> 11 13 #include <linux/platform_device.h> 12 14 #include <linux/regmap.h> 13 15 #include <linux/syscore_ops.h> 14 16 15 17 #include <asm/proc-fns.h> 16 - 17 - #include <dt-bindings/clock/at91.h> 18 18 19 19 #include "pmc.h" 20 20 ··· 111 111 } 112 112 113 113 #ifdef CONFIG_PM 114 - static struct regmap *pmcreg; 115 114 116 - static u8 registered_ids[PMC_MAX_IDS]; 117 - static u8 registered_pcks[PMC_MAX_PCKS]; 115 + /* Address in SECURAM that say if we suspend to backup mode. */ 116 + static void __iomem *at91_pmc_backup_suspend; 118 117 119 - static struct 118 + static int at91_pmc_suspend(void) 120 119 { 121 - u32 scsr; 122 - u32 pcsr0; 123 - u32 uckr; 124 - u32 mor; 125 - u32 mcfr; 126 - u32 pllar; 127 - u32 mckr; 128 - u32 usb; 129 - u32 imr; 130 - u32 pcsr1; 131 - u32 pcr[PMC_MAX_IDS]; 132 - u32 audio_pll0; 133 - u32 audio_pll1; 134 - u32 pckr[PMC_MAX_PCKS]; 135 - } pmc_cache; 120 + unsigned int backup; 136 121 137 - /* 138 - * As Peripheral ID 0 is invalid on AT91 chips, the identifier is stored 139 - * without alteration in the table, and 0 is for unused clocks. 140 - */ 141 - void pmc_register_id(u8 id) 142 - { 143 - int i; 122 + if (!at91_pmc_backup_suspend) 123 + return 0; 144 124 145 - for (i = 0; i < PMC_MAX_IDS; i++) { 146 - if (registered_ids[i] == 0) { 147 - registered_ids[i] = id; 148 - break; 149 - } 150 - if (registered_ids[i] == id) 151 - break; 152 - } 125 + backup = readl_relaxed(at91_pmc_backup_suspend); 126 + if (!backup) 127 + return 0; 128 + 129 + return clk_save_context(); 153 130 } 154 131 155 - /* 156 - * As Programmable Clock 0 is valid on AT91 chips, there is an offset 157 - * of 1 between the stored value and the real clock ID. 
158 - */ 159 - void pmc_register_pck(u8 pck) 132 + static void at91_pmc_resume(void) 160 133 { 161 - int i; 134 + unsigned int backup; 162 135 163 - for (i = 0; i < PMC_MAX_PCKS; i++) { 164 - if (registered_pcks[i] == 0) { 165 - registered_pcks[i] = pck + 1; 166 - break; 167 - } 168 - if (registered_pcks[i] == (pck + 1)) 169 - break; 170 - } 171 - } 136 + if (!at91_pmc_backup_suspend) 137 + return; 172 138 173 - static int pmc_suspend(void) 174 - { 175 - int i; 176 - u8 num; 139 + backup = readl_relaxed(at91_pmc_backup_suspend); 140 + if (!backup) 141 + return; 177 142 178 - regmap_read(pmcreg, AT91_PMC_SCSR, &pmc_cache.scsr); 179 - regmap_read(pmcreg, AT91_PMC_PCSR, &pmc_cache.pcsr0); 180 - regmap_read(pmcreg, AT91_CKGR_UCKR, &pmc_cache.uckr); 181 - regmap_read(pmcreg, AT91_CKGR_MOR, &pmc_cache.mor); 182 - regmap_read(pmcreg, AT91_CKGR_MCFR, &pmc_cache.mcfr); 183 - regmap_read(pmcreg, AT91_CKGR_PLLAR, &pmc_cache.pllar); 184 - regmap_read(pmcreg, AT91_PMC_MCKR, &pmc_cache.mckr); 185 - regmap_read(pmcreg, AT91_PMC_USB, &pmc_cache.usb); 186 - regmap_read(pmcreg, AT91_PMC_IMR, &pmc_cache.imr); 187 - regmap_read(pmcreg, AT91_PMC_PCSR1, &pmc_cache.pcsr1); 188 - 189 - for (i = 0; registered_ids[i]; i++) { 190 - regmap_write(pmcreg, AT91_PMC_PCR, 191 - (registered_ids[i] & AT91_PMC_PCR_PID_MASK)); 192 - regmap_read(pmcreg, AT91_PMC_PCR, 193 - &pmc_cache.pcr[registered_ids[i]]); 194 - } 195 - for (i = 0; registered_pcks[i]; i++) { 196 - num = registered_pcks[i] - 1; 197 - regmap_read(pmcreg, AT91_PMC_PCKR(num), &pmc_cache.pckr[num]); 198 - } 199 - 200 - return 0; 201 - } 202 - 203 - static bool pmc_ready(unsigned int mask) 204 - { 205 - unsigned int status; 206 - 207 - regmap_read(pmcreg, AT91_PMC_SR, &status); 208 - 209 - return ((status & mask) == mask) ? 
1 : 0; 210 - } 211 - 212 - static void pmc_resume(void) 213 - { 214 - int i; 215 - u8 num; 216 - u32 tmp; 217 - u32 mask = AT91_PMC_MCKRDY | AT91_PMC_LOCKA; 218 - 219 - regmap_read(pmcreg, AT91_PMC_MCKR, &tmp); 220 - if (pmc_cache.mckr != tmp) 221 - pr_warn("MCKR was not configured properly by the firmware\n"); 222 - regmap_read(pmcreg, AT91_CKGR_PLLAR, &tmp); 223 - if (pmc_cache.pllar != tmp) 224 - pr_warn("PLLAR was not configured properly by the firmware\n"); 225 - 226 - regmap_write(pmcreg, AT91_PMC_SCER, pmc_cache.scsr); 227 - regmap_write(pmcreg, AT91_PMC_PCER, pmc_cache.pcsr0); 228 - regmap_write(pmcreg, AT91_CKGR_UCKR, pmc_cache.uckr); 229 - regmap_write(pmcreg, AT91_CKGR_MOR, pmc_cache.mor); 230 - regmap_write(pmcreg, AT91_CKGR_MCFR, pmc_cache.mcfr); 231 - regmap_write(pmcreg, AT91_PMC_USB, pmc_cache.usb); 232 - regmap_write(pmcreg, AT91_PMC_IMR, pmc_cache.imr); 233 - regmap_write(pmcreg, AT91_PMC_PCER1, pmc_cache.pcsr1); 234 - 235 - for (i = 0; registered_ids[i]; i++) { 236 - regmap_write(pmcreg, AT91_PMC_PCR, 237 - pmc_cache.pcr[registered_ids[i]] | 238 - AT91_PMC_PCR_CMD); 239 - } 240 - for (i = 0; registered_pcks[i]; i++) { 241 - num = registered_pcks[i] - 1; 242 - regmap_write(pmcreg, AT91_PMC_PCKR(num), pmc_cache.pckr[num]); 243 - } 244 - 245 - if (pmc_cache.uckr & AT91_PMC_UPLLEN) 246 - mask |= AT91_PMC_LOCKU; 247 - 248 - while (!pmc_ready(mask)) 249 - cpu_relax(); 143 + clk_restore_context(); 250 144 } 251 145 252 146 static struct syscore_ops pmc_syscore_ops = { 253 - .suspend = pmc_suspend, 254 - .resume = pmc_resume, 147 + .suspend = at91_pmc_suspend, 148 + .resume = at91_pmc_resume, 255 149 }; 256 150 257 - static const struct of_device_id sama5d2_pmc_dt_ids[] = { 151 + static const struct of_device_id pmc_dt_ids[] = { 258 152 { .compatible = "atmel,sama5d2-pmc" }, 153 + { .compatible = "microchip,sama7g5-pmc", }, 259 154 { /* sentinel */ } 260 155 }; 261 156 ··· 158 263 { 159 264 struct device_node *np; 160 265 161 - np = 
of_find_matching_node(NULL, sama5d2_pmc_dt_ids); 266 + np = of_find_matching_node(NULL, pmc_dt_ids); 162 267 if (!np) 163 268 return -ENODEV; 164 269 165 - pmcreg = device_node_to_regmap(np); 270 + if (!of_device_is_available(np)) { 271 + of_node_put(np); 272 + return -ENODEV; 273 + } 166 274 of_node_put(np); 167 - if (IS_ERR(pmcreg)) 168 - return PTR_ERR(pmcreg); 275 + 276 + np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam"); 277 + if (!np) 278 + return -ENODEV; 279 + 280 + if (!of_device_is_available(np)) { 281 + of_node_put(np); 282 + return -ENODEV; 283 + } 284 + of_node_put(np); 285 + 286 + at91_pmc_backup_suspend = of_iomap(np, 0); 287 + if (!at91_pmc_backup_suspend) { 288 + pr_warn("%s(): unable to map securam\n", __func__); 289 + return -ENOMEM; 290 + } 169 291 170 292 register_syscore_ops(&pmc_syscore_ops); 171 293
+19 -10
drivers/clk/at91/pmc.h
··· 13 13 #include <linux/regmap.h> 14 14 #include <linux/spinlock.h> 15 15 16 + #include <dt-bindings/clock/at91.h> 17 + 16 18 extern spinlock_t pmc_pcr_lock; 17 19 18 20 struct pmc_data { ··· 100 98 u32 pid_mask; 101 99 }; 102 100 101 + /** 102 + * struct at91_clk_pms - Power management state for AT91 clock 103 + * @rate: clock rate 104 + * @parent_rate: clock parent rate 105 + * @status: clock status (enabled or disabled) 106 + * @parent: clock parent index 107 + */ 108 + struct at91_clk_pms { 109 + unsigned long rate; 110 + unsigned long parent_rate; 111 + unsigned int status; 112 + unsigned int parent; 113 + }; 114 + 103 115 #define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1)) 104 116 #define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask)) 105 117 ··· 182 166 const char *parent_names, 183 167 const struct clk_master_layout *layout, 184 168 const struct clk_master_characteristics *characteristics, 185 - spinlock_t *lock, u32 flags); 169 + spinlock_t *lock, u32 flags, u32 safe_div); 186 170 187 171 struct clk_hw * __init 188 172 at91_clk_sama7g5_register_master(struct regmap *regmap, ··· 214 198 sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock, 215 199 const char *name, const char *parent_name, u8 id, 216 200 const struct clk_pll_characteristics *characteristics, 217 - const struct clk_pll_layout *layout, u32 flags); 201 + const struct clk_pll_layout *layout, u32 flags, 202 + u32 safe_div); 218 203 219 204 struct clk_hw * __init 220 205 sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock, ··· 264 247 struct clk_hw * __init 265 248 at91_clk_sama7g5_register_utmi(struct regmap *regmap, const char *name, 266 249 const char *parent_name); 267 - 268 - #ifdef CONFIG_PM 269 - void pmc_register_id(u8 id); 270 - void pmc_register_pck(u8 pck); 271 - #else 272 - static inline void pmc_register_id(u8 id) {} 273 - static inline void pmc_register_pck(u8 pck) {} 274 - #endif 275 250 276 251 #endif /* 
__PMC_H_ */
+3 -3
drivers/clk/at91/sam9x60.c
··· 242 242 * This feeds CPU. It should not 243 243 * be disabled. 244 244 */ 245 - CLK_IS_CRITICAL | CLK_SET_RATE_GATE); 245 + CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0); 246 246 if (IS_ERR(hw)) 247 247 goto err_free; 248 248 ··· 260 260 &pll_div_layout, 261 261 CLK_SET_RATE_GATE | 262 262 CLK_SET_PARENT_GATE | 263 - CLK_SET_RATE_PARENT); 263 + CLK_SET_RATE_PARENT, 0); 264 264 if (IS_ERR(hw)) 265 265 goto err_free; 266 266 ··· 279 279 hw = at91_clk_register_master_div(regmap, "masterck_div", 280 280 "masterck_pres", &sam9x60_master_layout, 281 281 &mck_characteristics, &mck_lock, 282 - CLK_SET_RATE_GATE); 282 + CLK_SET_RATE_GATE, 0); 283 283 if (IS_ERR(hw)) 284 284 goto err_free; 285 285
+1 -1
drivers/clk/at91/sama5d2.c
··· 249 249 "masterck_pres", 250 250 &at91sam9x5_master_layout, 251 251 &mck_characteristics, &mck_lock, 252 - CLK_SET_RATE_GATE); 252 + CLK_SET_RATE_GATE, 0); 253 253 if (IS_ERR(hw)) 254 254 goto err_free; 255 255
+1 -1
drivers/clk/at91/sama5d3.c
··· 184 184 "masterck_pres", 185 185 &at91sam9x5_master_layout, 186 186 &mck_characteristics, &mck_lock, 187 - CLK_SET_RATE_GATE); 187 + CLK_SET_RATE_GATE, 0); 188 188 if (IS_ERR(hw)) 189 189 goto err_free; 190 190
+1 -1
drivers/clk/at91/sama5d4.c
··· 199 199 "masterck_pres", 200 200 &at91sam9x5_master_layout, 201 201 &mck_characteristics, &mck_lock, 202 - CLK_SET_RATE_GATE); 202 + CLK_SET_RATE_GATE, 0); 203 203 if (IS_ERR(hw)) 204 204 goto err_free; 205 205
+15 -14
drivers/clk/at91/sama7g5.c
··· 127 127 * @t: clock type 128 128 * @f: clock flags 129 129 * @eid: export index in sama7g5->chws[] array 130 + * @safe_div: intermediate divider need to be set on PRE_RATE_CHANGE 131 + * notification 130 132 */ 131 133 static const struct { 132 134 const char *n; ··· 138 136 unsigned long f; 139 137 u8 t; 140 138 u8 eid; 139 + u8 safe_div; 141 140 } sama7g5_plls[][PLL_ID_MAX] = { 142 141 [PLL_ID_CPU] = { 143 142 { .n = "cpupll_fracck", ··· 159 156 .t = PLL_TYPE_DIV, 160 157 /* This feeds CPU. It should not be disabled. */ 161 158 .f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 162 - .eid = PMC_CPUPLL, }, 159 + .eid = PMC_CPUPLL, 160 + /* 161 + * Safe div=15 should be safe even for switching b/w 1GHz and 162 + * 90MHz (frac pll might go up to 1.2GHz). 163 + */ 164 + .safe_div = 15, }, 163 165 }, 164 166 165 167 [PLL_ID_SYS] = { ··· 385 377 u8 id; 386 378 } sama7g5_periphck[] = { 387 379 { .n = "pioA_clk", .p = "mck0", .id = 11, }, 380 + { .n = "securam_clk", .p = "mck0", .id = 18, }, 388 381 { .n = "sfr_clk", .p = "mck1", .id = 19, }, 389 382 { .n = "hsmc_clk", .p = "mck1", .id = 21, }, 390 383 { .n = "xdmac0_clk", .p = "mck1", .id = 22, }, ··· 850 841 851 842 /* MCK0 characteristics. 
*/ 852 843 static const struct clk_master_characteristics mck0_characteristics = { 853 - .output = { .min = 50000000, .max = 200000000 }, 844 + .output = { .min = 32768, .max = 200000000 }, 854 845 .divisors = { 1, 2, 4, 3, 5 }, 855 846 .have_div3_pres = 1, 856 847 }; ··· 975 966 sama7g5_plls[i][j].p, i, 976 967 sama7g5_plls[i][j].c, 977 968 sama7g5_plls[i][j].l, 978 - sama7g5_plls[i][j].f); 969 + sama7g5_plls[i][j].f, 970 + sama7g5_plls[i][j].safe_div); 979 971 break; 980 972 981 973 default: ··· 992 982 } 993 983 994 984 parent_names[0] = "cpupll_divpmcck"; 995 - hw = at91_clk_register_master_pres(regmap, "cpuck", 1, parent_names, 996 - &mck0_layout, &mck0_characteristics, 997 - &pmc_mck0_lock, 998 - CLK_SET_RATE_PARENT, 0); 999 - if (IS_ERR(hw)) 1000 - goto err_free; 1001 - 1002 - sama7g5_pmc->chws[PMC_CPU] = hw; 1003 - 1004 - hw = at91_clk_register_master_div(regmap, "mck0", "cpuck", 985 + hw = at91_clk_register_master_div(regmap, "mck0", "cpupll_divpmcck", 1005 986 &mck0_layout, &mck0_characteristics, 1006 - &pmc_mck0_lock, 0); 987 + &pmc_mck0_lock, CLK_GET_RATE_NOCACHE, 5); 1007 988 if (IS_ERR(hw)) 1008 989 goto err_free; 1009 990
+11 -3
drivers/clk/mvebu/ap-cpu-clk.c
··· 256 256 int cpu, err; 257 257 258 258 err = of_property_read_u32(dn, "reg", &cpu); 259 - if (WARN_ON(err)) 259 + if (WARN_ON(err)) { 260 + of_node_put(dn); 260 261 return err; 262 + } 261 263 262 264 /* If cpu2 or cpu3 is enabled */ 263 265 if (cpu & APN806_CLUSTER_NUM_MASK) { 264 266 nclusters = 2; 267 + of_node_put(dn); 265 268 break; 266 269 } 267 270 } ··· 291 288 int cpu, err; 292 289 293 290 err = of_property_read_u32(dn, "reg", &cpu); 294 - if (WARN_ON(err)) 291 + if (WARN_ON(err)) { 292 + of_node_put(dn); 295 293 return err; 294 + } 296 295 297 296 cluster_index = cpu & APN806_CLUSTER_NUM_MASK; 298 297 cluster_index >>= APN806_CLUSTER_NUM_OFFSET; ··· 306 301 parent = of_clk_get(np, cluster_index); 307 302 if (IS_ERR(parent)) { 308 303 dev_err(dev, "Could not get the clock parent\n"); 304 + of_node_put(dn); 309 305 return -EINVAL; 310 306 } 311 307 parent_name = __clk_get_name(parent); ··· 325 319 init.parent_names = &parent_name; 326 320 327 321 ret = devm_clk_hw_register(dev, &ap_cpu_clk[cluster_index].hw); 328 - if (ret) 322 + if (ret) { 323 + of_node_put(dn); 329 324 return ret; 325 + } 330 326 ap_cpu_data->hws[cluster_index] = &ap_cpu_clk[cluster_index].hw; 331 327 } 332 328
+1
drivers/clk/renesas/r8a7795-cpg-mssr.c
··· 229 229 DEF_MOD("lvds", 727, R8A7795_CLK_S0D4), 230 230 DEF_MOD("hdmi1", 728, R8A7795_CLK_HDMI), 231 231 DEF_MOD("hdmi0", 729, R8A7795_CLK_HDMI), 232 + DEF_MOD("mlp", 802, R8A7795_CLK_S2D1), 232 233 DEF_MOD("vin7", 804, R8A7795_CLK_S0D2), 233 234 DEF_MOD("vin6", 805, R8A7795_CLK_S0D2), 234 235 DEF_MOD("vin5", 806, R8A7795_CLK_S0D2),
+1
drivers/clk/renesas/r8a7796-cpg-mssr.c
··· 207 207 DEF_MOD("du0", 724, R8A7796_CLK_S2D1), 208 208 DEF_MOD("lvds", 727, R8A7796_CLK_S2D1), 209 209 DEF_MOD("hdmi0", 729, R8A7796_CLK_HDMI), 210 + DEF_MOD("mlp", 802, R8A7796_CLK_S2D1), 210 211 DEF_MOD("vin7", 804, R8A7796_CLK_S0D2), 211 212 DEF_MOD("vin6", 805, R8A7796_CLK_S0D2), 212 213 DEF_MOD("vin5", 806, R8A7796_CLK_S0D2),
+1
drivers/clk/renesas/r8a77965-cpg-mssr.c
··· 205 205 DEF_MOD("lvds", 727, R8A77965_CLK_S2D1), 206 206 DEF_MOD("hdmi0", 729, R8A77965_CLK_HDMI), 207 207 208 + DEF_MOD("mlp", 802, R8A77965_CLK_S2D1), 208 209 DEF_MOD("vin7", 804, R8A77965_CLK_S0D2), 209 210 DEF_MOD("vin6", 805, R8A77965_CLK_S0D2), 210 211 DEF_MOD("vin5", 806, R8A77965_CLK_S0D2),
+191
drivers/clk/renesas/r8a779a0-cpg-mssr.c
··· 33 33 CLK_TYPE_R8A779A0_PLL1, 34 34 CLK_TYPE_R8A779A0_PLL2X_3X, /* PLL[23][01] */ 35 35 CLK_TYPE_R8A779A0_PLL5, 36 + CLK_TYPE_R8A779A0_Z, 36 37 CLK_TYPE_R8A779A0_SD, 37 38 CLK_TYPE_R8A779A0_MDSEL, /* Select parent/divider using mode pin */ 38 39 CLK_TYPE_R8A779A0_OSC, /* OSC EXTAL predivider and fixed divider */ 40 + CLK_TYPE_R8A779A0_RPCSRC, 41 + CLK_TYPE_R8A779A0_RPC, 42 + CLK_TYPE_R8A779A0_RPCD2, 39 43 }; 40 44 41 45 struct rcar_r8a779a0_cpg_pll_config { ··· 88 84 DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \ 89 85 .offset = _offset) 90 86 87 + #define DEF_Z(_name, _id, _parent, _div, _offset) \ 88 + DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_Z, _parent, .div = _div, \ 89 + .offset = _offset) 90 + 91 91 #define DEF_SD(_name, _id, _parent, _offset) \ 92 92 DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_SD, _parent, .offset = _offset) 93 93 ··· 128 120 DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 4, 1), 129 121 DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL5_DIV4, 1, 1), 130 122 DEF_RATE(".oco", CLK_OCO, 32768), 123 + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_R8A779A0_RPCSRC, CLK_PLL5), 124 + DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_R8A779A0_RPC, CLK_RPCSRC), 125 + DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_R8A779A0_RPCD2, 126 + R8A779A0_CLK_RPC), 131 127 132 128 /* Core Clock Outputs */ 129 + DEF_Z("z0", R8A779A0_CLK_Z0, CLK_PLL20, 2, 0), 130 + DEF_Z("z1", R8A779A0_CLK_Z1, CLK_PLL21, 2, 8), 133 131 DEF_FIXED("zx", R8A779A0_CLK_ZX, CLK_PLL20_DIV2, 2, 1), 134 132 DEF_FIXED("s1d1", R8A779A0_CLK_S1D1, CLK_S1, 1, 1), 135 133 DEF_FIXED("s1d2", R8A779A0_CLK_S1D2, CLK_S1, 2, 1), ··· 207 193 DEF_MOD("msi3", 621, R8A779A0_CLK_MSO), 208 194 DEF_MOD("msi4", 622, R8A779A0_CLK_MSO), 209 195 DEF_MOD("msi5", 623, R8A779A0_CLK_MSO), 196 + DEF_MOD("rpc-if", 629, R8A779A0_CLK_RPCD2), 210 197 DEF_MOD("scif0", 702, R8A779A0_CLK_S1D8), 211 198 DEF_MOD("scif1", 703, R8A779A0_CLK_S1D8), 212 199 DEF_MOD("scif3", 704, R8A779A0_CLK_S1D8), ··· 220 205 DEF_MOD("tmu2", 715, R8A779A0_CLK_S1D4), 
221 206 DEF_MOD("tmu3", 716, R8A779A0_CLK_S1D4), 222 207 DEF_MOD("tmu4", 717, R8A779A0_CLK_S1D4), 208 + DEF_MOD("tpu0", 718, R8A779A0_CLK_S1D8), 223 209 DEF_MOD("vin00", 730, R8A779A0_CLK_S1D1), 224 210 DEF_MOD("vin01", 731, R8A779A0_CLK_S1D1), 225 211 DEF_MOD("vin02", 800, R8A779A0_CLK_S1D1), ··· 275 259 static unsigned int cpg_clk_extalr __initdata; 276 260 static u32 cpg_mode __initdata; 277 261 262 + /* 263 + * Z0 Clock & Z1 Clock 264 + */ 265 + #define CPG_FRQCRB 0x00000804 266 + #define CPG_FRQCRB_KICK BIT(31) 267 + #define CPG_FRQCRC 0x00000808 268 + 269 + struct cpg_z_clk { 270 + struct clk_hw hw; 271 + void __iomem *reg; 272 + void __iomem *kick_reg; 273 + unsigned long max_rate; /* Maximum rate for normal mode */ 274 + unsigned int fixed_div; 275 + u32 mask; 276 + }; 277 + 278 + #define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw) 279 + 280 + static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw, 281 + unsigned long parent_rate) 282 + { 283 + struct cpg_z_clk *zclk = to_z_clk(hw); 284 + unsigned int mult; 285 + u32 val; 286 + 287 + val = readl(zclk->reg) & zclk->mask; 288 + mult = 32 - (val >> __ffs(zclk->mask)); 289 + 290 + return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, 291 + 32 * zclk->fixed_div); 292 + } 293 + 294 + static int cpg_z_clk_determine_rate(struct clk_hw *hw, 295 + struct clk_rate_request *req) 296 + { 297 + struct cpg_z_clk *zclk = to_z_clk(hw); 298 + unsigned int min_mult, max_mult, mult; 299 + unsigned long rate, prate; 300 + 301 + rate = min(req->rate, req->max_rate); 302 + if (rate <= zclk->max_rate) { 303 + /* Set parent rate to initial value for normal modes */ 304 + prate = zclk->max_rate; 305 + } else { 306 + /* Set increased parent rate for boost modes */ 307 + prate = rate; 308 + } 309 + req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 310 + prate * zclk->fixed_div); 311 + 312 + prate = req->best_parent_rate / zclk->fixed_div; 313 + min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 
1ULL); 314 + max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL); 315 + if (max_mult < min_mult) 316 + return -EINVAL; 317 + 318 + mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL, prate); 319 + mult = clamp(mult, min_mult, max_mult); 320 + 321 + req->rate = DIV_ROUND_CLOSEST_ULL((u64)prate * mult, 32); 322 + return 0; 323 + } 324 + 325 + static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate, 326 + unsigned long parent_rate) 327 + { 328 + struct cpg_z_clk *zclk = to_z_clk(hw); 329 + unsigned int mult; 330 + unsigned int i; 331 + 332 + mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div, 333 + parent_rate); 334 + mult = clamp(mult, 1U, 32U); 335 + 336 + if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK) 337 + return -EBUSY; 338 + 339 + cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask)); 340 + 341 + /* 342 + * Set KICK bit in FRQCRB to update hardware setting and wait for 343 + * clock change completion. 344 + */ 345 + cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK); 346 + 347 + /* 348 + * Note: There is no HW information about the worst case latency. 349 + * 350 + * Using experimental measurements, it seems that no more than 351 + * ~10 iterations are needed, independently of the CPU rate. 352 + * Since this value might be dependent on external xtal rate, pll1 353 + * rate or even the other emulation clocks rate, use 1000 as a 354 + * "super" safe value. 
355 + */ 356 + for (i = 1000; i; i--) { 357 + if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK)) 358 + return 0; 359 + 360 + cpu_relax(); 361 + } 362 + 363 + return -ETIMEDOUT; 364 + } 365 + 366 + static const struct clk_ops cpg_z_clk_ops = { 367 + .recalc_rate = cpg_z_clk_recalc_rate, 368 + .determine_rate = cpg_z_clk_determine_rate, 369 + .set_rate = cpg_z_clk_set_rate, 370 + }; 371 + 372 + static struct clk * __init cpg_z_clk_register(const char *name, 373 + const char *parent_name, 374 + void __iomem *reg, 375 + unsigned int div, 376 + unsigned int offset) 377 + { 378 + struct clk_init_data init = {}; 379 + struct cpg_z_clk *zclk; 380 + struct clk *clk; 381 + 382 + zclk = kzalloc(sizeof(*zclk), GFP_KERNEL); 383 + if (!zclk) 384 + return ERR_PTR(-ENOMEM); 385 + 386 + init.name = name; 387 + init.ops = &cpg_z_clk_ops; 388 + init.flags = CLK_SET_RATE_PARENT; 389 + init.parent_names = &parent_name; 390 + init.num_parents = 1; 391 + 392 + zclk->reg = reg + CPG_FRQCRC; 393 + zclk->kick_reg = reg + CPG_FRQCRB; 394 + zclk->hw.init = &init; 395 + zclk->mask = GENMASK(offset + 4, offset); 396 + zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */ 397 + 398 + clk = clk_register(NULL, &zclk->hw); 399 + if (IS_ERR(clk)) { 400 + kfree(zclk); 401 + return clk; 402 + } 403 + 404 + zclk->max_rate = clk_hw_get_rate(clk_hw_get_parent(&zclk->hw)) / 405 + zclk->fixed_div; 406 + return clk; 407 + } 408 + 409 + /* 410 + * RPC Clocks 411 + */ 412 + #define CPG_RPCCKCR 0x874 413 + 414 + static const struct clk_div_table cpg_rpcsrc_div_table[] = { 415 + { 0, 4 }, { 1, 6 }, { 2, 5 }, { 3, 6 }, { 0, 0 }, 416 + }; 417 + 278 418 static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev, 279 419 const struct cpg_core_clk *core, const struct cpg_mssr_info *info, 280 420 struct clk **clks, void __iomem *base, ··· 465 293 div = cpg_pll_config->pll5_div; 466 294 break; 467 295 296 + case CLK_TYPE_R8A779A0_Z: 297 + return cpg_z_clk_register(core->name, 
__clk_get_name(parent), 298 + base, core->div, core->offset); 299 + 468 300 case CLK_TYPE_R8A779A0_SD: 469 301 return cpg_sd_clk_register(core->name, base, core->offset, 470 302 __clk_get_name(parent), notifiers, ··· 497 321 */ 498 322 div = cpg_pll_config->osc_prediv * core->div; 499 323 break; 324 + 325 + case CLK_TYPE_R8A779A0_RPCSRC: 326 + return clk_register_divider_table(NULL, core->name, 327 + __clk_get_name(parent), 0, 328 + base + CPG_RPCCKCR, 3, 2, 0, 329 + cpg_rpcsrc_div_table, 330 + &cpg_lock); 331 + 332 + case CLK_TYPE_R8A779A0_RPC: 333 + return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR, 334 + __clk_get_name(parent), notifiers); 335 + 336 + case CLK_TYPE_R8A779A0_RPCD2: 337 + return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR, 338 + __clk_get_name(parent)); 500 339 501 340 default: 502 341 return ERR_PTR(-EINVAL);
+84 -1
drivers/clk/renesas/r9a07g044-cpg.c
··· 29 29 CLK_PLL2_DIV16, 30 30 CLK_PLL2_DIV20, 31 31 CLK_PLL3, 32 + CLK_PLL3_400, 33 + CLK_PLL3_533, 32 34 CLK_PLL3_DIV2, 33 35 CLK_PLL3_DIV2_4, 34 36 CLK_PLL3_DIV2_4_2, 35 37 CLK_PLL3_DIV4, 38 + CLK_SEL_PLL3_3, 39 + CLK_DIV_PLL3_C, 36 40 CLK_PLL4, 37 41 CLK_PLL5, 38 - CLK_PLL5_DIV2, 42 + CLK_PLL5_FOUT3, 43 + CLK_PLL5_250, 39 44 CLK_PLL6, 45 + CLK_PLL6_250, 40 46 CLK_P1_DIV2, 47 + CLK_PLL2_800, 48 + CLK_PLL2_SDHI_533, 49 + CLK_PLL2_SDHI_400, 50 + CLK_PLL2_SDHI_266, 51 + CLK_SD0_DIV4, 52 + CLK_SD1_DIV4, 41 53 42 54 /* Module Clocks */ 43 55 MOD_CLK_BASE, ··· 65 53 {0, 0}, 66 54 }; 67 55 56 + /* Mux clock tables */ 57 + static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" }; 58 + static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" }; 59 + static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" }; 60 + 68 61 static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = { 69 62 /* External Clock Inputs */ 70 63 DEF_INPUT("extal", CLK_EXTAL), ··· 80 63 DEF_SAMPLL(".pll1", CLK_PLL1, CLK_EXTAL, PLL146_CONF(0)), 81 64 DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 133, 2), 82 65 DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 133, 2), 66 + DEF_FIXED(".pll3_400", CLK_PLL3_400, CLK_PLL3, 1, 4), 67 + DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3), 68 + 69 + DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1), 70 + DEF_FIXED(".pll5_fout3", CLK_PLL5_FOUT3, CLK_PLL5, 1, 6), 71 + 72 + DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6), 83 73 84 74 DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 1, 2), 75 + DEF_FIXED(".clk_800", CLK_PLL2_800, CLK_PLL2, 1, 2), 76 + DEF_FIXED(".clk_533", CLK_PLL2_SDHI_533, CLK_PLL2, 1, 3), 77 + DEF_FIXED(".clk_400", CLK_PLL2_SDHI_400, CLK_PLL2_800, 1, 2), 78 + DEF_FIXED(".clk_266", CLK_PLL2_SDHI_266, CLK_PLL2_SDHI_533, 1, 2), 79 + 85 80 DEF_FIXED(".pll2_div16", CLK_PLL2_DIV16, CLK_PLL2, 1, 16), 86 81 DEF_FIXED(".pll2_div20", CLK_PLL2_DIV20, CLK_PLL2, 1, 20), 87 82 ··· 101 72 DEF_FIXED(".pll3_div2_4", 
CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4), 102 73 DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2), 103 74 DEF_FIXED(".pll3_div4", CLK_PLL3_DIV4, CLK_PLL3, 1, 4), 75 + DEF_MUX(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3, 76 + sel_pll3_3, ARRAY_SIZE(sel_pll3_3), 0, CLK_MUX_READ_ONLY), 77 + DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3, 78 + DIVPL3C, dtable_1_32, CLK_DIVIDER_HIWORD_MASK), 79 + 80 + DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_FOUT3, 1, 2), 81 + DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2), 104 82 105 83 /* Core output clk */ 106 84 DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1), ··· 120 84 DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G044_CLK_P1, 1, 2), 121 85 DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2, 122 86 DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK), 87 + DEF_FIXED("M0", R9A07G044_CLK_M0, CLK_PLL3_DIV2_4, 1, 1), 88 + DEF_FIXED("ZT", R9A07G044_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1), 89 + DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2, 90 + sel_pll6_2, ARRAY_SIZE(sel_pll6_2), 0, CLK_MUX_HIWORD_MASK), 91 + DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2), 92 + DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4), 93 + DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, 94 + sel_shdi, ARRAY_SIZE(sel_shdi)), 95 + DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, 96 + sel_shdi, ARRAY_SIZE(sel_shdi)), 97 + DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4), 98 + DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4), 123 99 }; 124 100 125 101 static struct rzg2l_mod_clk r9a07g044_mod_clks[] = { ··· 145 97 0x52c, 0), 146 98 DEF_MOD("dmac_pclk", R9A07G044_DMAC_PCLK, CLK_P1_DIV2, 147 99 0x52c, 1), 100 + DEF_MOD("spi_clk2", R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1, 101 + 0x550, 0), 102 + DEF_MOD("spi_clk", R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0, 103 + 0x550, 1), 104 + DEF_MOD("sdhi0_imclk", R9A07G044_SDHI0_IMCLK, CLK_SD0_DIV4, 105 + 0x554, 0), 106 + DEF_MOD("sdhi0_imclk2", R9A07G044_SDHI0_IMCLK2, CLK_SD0_DIV4, 
107 + 0x554, 1), 108 + DEF_MOD("sdhi0_clk_hs", R9A07G044_SDHI0_CLK_HS, R9A07G044_CLK_SD0, 109 + 0x554, 2), 110 + DEF_MOD("sdhi0_aclk", R9A07G044_SDHI0_ACLK, R9A07G044_CLK_P1, 111 + 0x554, 3), 112 + DEF_MOD("sdhi1_imclk", R9A07G044_SDHI1_IMCLK, CLK_SD1_DIV4, 113 + 0x554, 4), 114 + DEF_MOD("sdhi1_imclk2", R9A07G044_SDHI1_IMCLK2, CLK_SD1_DIV4, 115 + 0x554, 5), 116 + DEF_MOD("sdhi1_clk_hs", R9A07G044_SDHI1_CLK_HS, R9A07G044_CLK_SD1, 117 + 0x554, 6), 118 + DEF_MOD("sdhi1_aclk", R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1, 119 + 0x554, 7), 148 120 DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0, 149 121 0x570, 0), 150 122 DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0, ··· 189 121 0x578, 2), 190 122 DEF_MOD("usb_pclk", R9A07G044_USB_PCLK, R9A07G044_CLK_P1, 191 123 0x578, 3), 124 + DEF_COUPLED("eth0_axi", R9A07G044_ETH0_CLK_AXI, R9A07G044_CLK_M0, 125 + 0x57c, 0), 126 + DEF_COUPLED("eth0_chi", R9A07G044_ETH0_CLK_CHI, R9A07G044_CLK_ZT, 127 + 0x57c, 0), 128 + DEF_COUPLED("eth1_axi", R9A07G044_ETH1_CLK_AXI, R9A07G044_CLK_M0, 129 + 0x57c, 1), 130 + DEF_COUPLED("eth1_chi", R9A07G044_ETH1_CLK_CHI, R9A07G044_CLK_ZT, 131 + 0x57c, 1), 192 132 DEF_MOD("i2c0", R9A07G044_I2C0_PCLK, R9A07G044_CLK_P0, 193 133 0x580, 0), 194 134 DEF_MOD("i2c1", R9A07G044_I2C1_PCLK, R9A07G044_CLK_P0, ··· 233 157 DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0), 234 158 DEF_RST(R9A07G044_DMAC_ARESETN, 0x82c, 0), 235 159 DEF_RST(R9A07G044_DMAC_RST_ASYNC, 0x82c, 1), 160 + DEF_RST(R9A07G044_SPI_RST, 0x850, 0), 161 + DEF_RST(R9A07G044_SDHI0_IXRST, 0x854, 0), 162 + DEF_RST(R9A07G044_SDHI1_IXRST, 0x854, 1), 236 163 DEF_RST(R9A07G044_SSI0_RST_M2_REG, 0x870, 0), 237 164 DEF_RST(R9A07G044_SSI1_RST_M2_REG, 0x870, 1), 238 165 DEF_RST(R9A07G044_SSI2_RST_M2_REG, 0x870, 2), ··· 244 165 DEF_RST(R9A07G044_USB_U2H1_HRESETN, 0x878, 1), 245 166 DEF_RST(R9A07G044_USB_U2P_EXL_SYSRST, 0x878, 2), 246 167 DEF_RST(R9A07G044_USB_PRESETN, 0x878, 3), 168 + DEF_RST(R9A07G044_ETH0_RST_HW_N, 0x87c, 0), 169 + 
DEF_RST(R9A07G044_ETH1_RST_HW_N, 0x87c, 1), 247 170 DEF_RST(R9A07G044_I2C0_MRST, 0x880, 0), 248 171 DEF_RST(R9A07G044_I2C1_MRST, 0x880, 1), 249 172 DEF_RST(R9A07G044_I2C2_MRST, 0x880, 2), ··· 267 186 268 187 static const unsigned int r9a07g044_crit_mod_clks[] __initconst = { 269 188 MOD_CLK_BASE + R9A07G044_GIC600_GICCLK, 189 + MOD_CLK_BASE + R9A07G044_IA55_CLK, 190 + MOD_CLK_BASE + R9A07G044_DMAC_ACLK, 270 191 }; 271 192 272 193 const struct rzg2l_cpg_info r9a07g044_cpg_info = {
+83
drivers/clk/renesas/rcar-cpg-lib.c
··· 267 267 return clk; 268 268 } 269 269 270 + struct rpc_clock { 271 + struct clk_divider div; 272 + struct clk_gate gate; 273 + /* 274 + * One notifier covers both RPC and RPCD2 clocks as they are both 275 + * controlled by the same RPCCKCR register... 276 + */ 277 + struct cpg_simple_notifier csn; 278 + }; 279 + 280 + static const struct clk_div_table cpg_rpc_div_table[] = { 281 + { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 }, 282 + }; 283 + 284 + struct clk * __init cpg_rpc_clk_register(const char *name, 285 + void __iomem *rpcckcr, const char *parent_name, 286 + struct raw_notifier_head *notifiers) 287 + { 288 + struct rpc_clock *rpc; 289 + struct clk *clk; 290 + 291 + rpc = kzalloc(sizeof(*rpc), GFP_KERNEL); 292 + if (!rpc) 293 + return ERR_PTR(-ENOMEM); 294 + 295 + rpc->div.reg = rpcckcr; 296 + rpc->div.width = 3; 297 + rpc->div.table = cpg_rpc_div_table; 298 + rpc->div.lock = &cpg_lock; 299 + 300 + rpc->gate.reg = rpcckcr; 301 + rpc->gate.bit_idx = 8; 302 + rpc->gate.flags = CLK_GATE_SET_TO_DISABLE; 303 + rpc->gate.lock = &cpg_lock; 304 + 305 + rpc->csn.reg = rpcckcr; 306 + 307 + clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL, 308 + &rpc->div.hw, &clk_divider_ops, 309 + &rpc->gate.hw, &clk_gate_ops, 310 + CLK_SET_RATE_PARENT); 311 + if (IS_ERR(clk)) { 312 + kfree(rpc); 313 + return clk; 314 + } 315 + 316 + cpg_simple_notifier_register(notifiers, &rpc->csn); 317 + return clk; 318 + } 319 + 320 + struct rpcd2_clock { 321 + struct clk_fixed_factor fixed; 322 + struct clk_gate gate; 323 + }; 324 + 325 + struct clk * __init cpg_rpcd2_clk_register(const char *name, 326 + void __iomem *rpcckcr, 327 + const char *parent_name) 328 + { 329 + struct rpcd2_clock *rpcd2; 330 + struct clk *clk; 331 + 332 + rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL); 333 + if (!rpcd2) 334 + return ERR_PTR(-ENOMEM); 335 + 336 + rpcd2->fixed.mult = 1; 337 + rpcd2->fixed.div = 2; 338 + 339 + rpcd2->gate.reg = rpcckcr; 340 + rpcd2->gate.bit_idx = 9; 341 + 
rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE; 342 + rpcd2->gate.lock = &cpg_lock; 343 + 344 + clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL, 345 + &rpcd2->fixed.hw, &clk_fixed_factor_ops, 346 + &rpcd2->gate.hw, &clk_gate_ops, 347 + CLK_SET_RATE_PARENT); 348 + if (IS_ERR(clk)) 349 + kfree(rpcd2); 350 + 351 + return clk; 352 + } 270 353
+7
drivers/clk/renesas/rcar-cpg-lib.h
··· 30 30 void __iomem *base, unsigned int offset, const char *parent_name, 31 31 struct raw_notifier_head *notifiers, bool skip_first); 32 32 33 + struct clk * __init cpg_rpc_clk_register(const char *name, 34 + void __iomem *rpcckcr, const char *parent_name, 35 + struct raw_notifier_head *notifiers); 36 + 37 + struct clk * __init cpg_rpcd2_clk_register(const char *name, 38 + void __iomem *rpcckcr, 39 + const char *parent_name); 33 40 #endif
+2 -87
drivers/clk/renesas/rcar-gen3-cpg.c
··· 301 301 return clk; 302 302 } 303 303 304 - struct rpc_clock { 305 - struct clk_divider div; 306 - struct clk_gate gate; 307 - /* 308 - * One notifier covers both RPC and RPCD2 clocks as they are both 309 - * controlled by the same RPCCKCR register... 310 - */ 311 - struct cpg_simple_notifier csn; 312 - }; 313 - 314 304 static const struct clk_div_table cpg_rpcsrc_div_table[] = { 315 305 { 2, 5 }, { 3, 6 }, { 0, 0 }, 316 306 }; 317 - 318 - static const struct clk_div_table cpg_rpc_div_table[] = { 319 - { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 }, 320 - }; 321 - 322 - static struct clk * __init cpg_rpc_clk_register(const char *name, 323 - void __iomem *base, const char *parent_name, 324 - struct raw_notifier_head *notifiers) 325 - { 326 - struct rpc_clock *rpc; 327 - struct clk *clk; 328 - 329 - rpc = kzalloc(sizeof(*rpc), GFP_KERNEL); 330 - if (!rpc) 331 - return ERR_PTR(-ENOMEM); 332 - 333 - rpc->div.reg = base + CPG_RPCCKCR; 334 - rpc->div.width = 3; 335 - rpc->div.table = cpg_rpc_div_table; 336 - rpc->div.lock = &cpg_lock; 337 - 338 - rpc->gate.reg = base + CPG_RPCCKCR; 339 - rpc->gate.bit_idx = 8; 340 - rpc->gate.flags = CLK_GATE_SET_TO_DISABLE; 341 - rpc->gate.lock = &cpg_lock; 342 - 343 - rpc->csn.reg = base + CPG_RPCCKCR; 344 - 345 - clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL, 346 - &rpc->div.hw, &clk_divider_ops, 347 - &rpc->gate.hw, &clk_gate_ops, 348 - CLK_SET_RATE_PARENT); 349 - if (IS_ERR(clk)) { 350 - kfree(rpc); 351 - return clk; 352 - } 353 - 354 - cpg_simple_notifier_register(notifiers, &rpc->csn); 355 - return clk; 356 - } 357 - 358 - struct rpcd2_clock { 359 - struct clk_fixed_factor fixed; 360 - struct clk_gate gate; 361 - }; 362 - 363 - static struct clk * __init cpg_rpcd2_clk_register(const char *name, 364 - void __iomem *base, 365 - const char *parent_name) 366 - { 367 - struct rpcd2_clock *rpcd2; 368 - struct clk *clk; 369 - 370 - rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL); 371 - if (!rpcd2) 372 - return 
ERR_PTR(-ENOMEM); 373 - 374 - rpcd2->fixed.mult = 1; 375 - rpcd2->fixed.div = 2; 376 - 377 - rpcd2->gate.reg = base + CPG_RPCCKCR; 378 - rpcd2->gate.bit_idx = 9; 379 - rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE; 380 - rpcd2->gate.lock = &cpg_lock; 381 - 382 - clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL, 383 - &rpcd2->fixed.hw, &clk_fixed_factor_ops, 384 - &rpcd2->gate.hw, &clk_gate_ops, 385 - CLK_SET_RATE_PARENT); 386 - if (IS_ERR(clk)) 387 - kfree(rpcd2); 388 - 389 - return clk; 390 - } 391 - 392 307 393 308 static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata; 394 309 static unsigned int cpg_clk_extalr __initdata; ··· 515 600 break; 516 601 517 602 case CLK_TYPE_GEN3_RPC: 518 - return cpg_rpc_clk_register(core->name, base, 603 + return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR, 519 604 __clk_get_name(parent), notifiers); 520 605 521 606 case CLK_TYPE_GEN3_RPCD2: 522 - return cpg_rpcd2_clk_register(core->name, base, 607 + return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR, 523 608 __clk_get_name(parent)); 524 609 525 610 default:
+213 -1
drivers/clk/renesas/rzg2l-cpg.c
··· 17 17 #include <linux/delay.h> 18 18 #include <linux/device.h> 19 19 #include <linux/init.h> 20 + #include <linux/iopoll.h> 20 21 #include <linux/mod_devicetable.h> 21 22 #include <linux/module.h> 22 23 #include <linux/of_address.h> ··· 55 54 #define GET_REG_OFFSET(val) ((val >> 20) & 0xfff) 56 55 #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff) 57 56 #define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff) 57 + 58 + struct sd_hw_data { 59 + struct clk_hw hw; 60 + u32 conf; 61 + struct rzg2l_cpg_priv *priv; 62 + }; 63 + 64 + #define to_sd_hw_data(_hw) container_of(_hw, struct sd_hw_data, hw) 58 65 59 66 /** 60 67 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data ··· 135 126 136 127 if (IS_ERR(clk_hw)) 137 128 return ERR_CAST(clk_hw); 129 + 130 + return clk_hw->clk; 131 + } 132 + 133 + static struct clk * __init 134 + rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core, 135 + void __iomem *base, 136 + struct rzg2l_cpg_priv *priv) 137 + { 138 + const struct clk_hw *clk_hw; 139 + 140 + clk_hw = devm_clk_hw_register_mux(priv->dev, core->name, 141 + core->parent_names, core->num_parents, 142 + core->flag, 143 + base + GET_REG_OFFSET(core->conf), 144 + GET_SHIFT(core->conf), 145 + GET_WIDTH(core->conf), 146 + core->mux_flags, &priv->rmw_lock); 147 + if (IS_ERR(clk_hw)) 148 + return ERR_CAST(clk_hw); 149 + 150 + return clk_hw->clk; 151 + } 152 + 153 + static int rzg2l_cpg_sd_clk_mux_determine_rate(struct clk_hw *hw, 154 + struct clk_rate_request *req) 155 + { 156 + return clk_mux_determine_rate_flags(hw, req, 0); 157 + } 158 + 159 + static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index) 160 + { 161 + struct sd_hw_data *hwdata = to_sd_hw_data(hw); 162 + struct rzg2l_cpg_priv *priv = hwdata->priv; 163 + u32 off = GET_REG_OFFSET(hwdata->conf); 164 + u32 shift = GET_SHIFT(hwdata->conf); 165 + const u32 clk_src_266 = 2; 166 + u32 bitmask; 167 + 168 + /* 169 + * As per the HW manual, we should not directly switch from 533 MHz to 170 + 
* 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz) 171 + * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first, 172 + * and then switch to the target setting (2’b01 (533 MHz) or 2’b10 173 + * (400 MHz)). 174 + * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock 175 + * switching register is prohibited. 176 + * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and 177 + * the index to value mapping is done by adding 1 to the index. 178 + */ 179 + bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16; 180 + if (index != clk_src_266) { 181 + u32 msk, val; 182 + int ret; 183 + 184 + writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off); 185 + 186 + msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS; 187 + 188 + ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val, 189 + !(val & msk), 100, 190 + CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US); 191 + if (ret) { 192 + dev_err(priv->dev, "failed to switch clk source\n"); 193 + return ret; 194 + } 195 + } 196 + 197 + writel(bitmask | ((index + 1) << shift), priv->base + off); 198 + 199 + return 0; 200 + } 201 + 202 + static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw) 203 + { 204 + struct sd_hw_data *hwdata = to_sd_hw_data(hw); 205 + struct rzg2l_cpg_priv *priv = hwdata->priv; 206 + u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf)); 207 + 208 + val >>= GET_SHIFT(hwdata->conf); 209 + val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0); 210 + if (val) { 211 + val--; 212 + } else { 213 + /* Prohibited clk source, change it to 533 MHz(reset value) */ 214 + rzg2l_cpg_sd_clk_mux_set_parent(hw, 0); 215 + } 216 + 217 + return val; 218 + } 219 + 220 + static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = { 221 + .determine_rate = rzg2l_cpg_sd_clk_mux_determine_rate, 222 + .set_parent = rzg2l_cpg_sd_clk_mux_set_parent, 223 + .get_parent = rzg2l_cpg_sd_clk_mux_get_parent, 224 + }; 225 + 226 + static struct clk * __init 227 
+ rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core, 228 + void __iomem *base, 229 + struct rzg2l_cpg_priv *priv) 230 + { 231 + struct sd_hw_data *clk_hw_data; 232 + struct clk_init_data init; 233 + struct clk_hw *clk_hw; 234 + int ret; 235 + 236 + clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL); 237 + if (!clk_hw_data) 238 + return ERR_PTR(-ENOMEM); 239 + 240 + clk_hw_data->priv = priv; 241 + clk_hw_data->conf = core->conf; 242 + 243 + init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0"; 244 + init.ops = &rzg2l_cpg_sd_clk_mux_ops; 245 + init.flags = 0; 246 + init.num_parents = core->num_parents; 247 + init.parent_names = core->parent_names; 248 + 249 + clk_hw = &clk_hw_data->hw; 250 + clk_hw->init = &init; 251 + 252 + ret = devm_clk_hw_register(priv->dev, clk_hw); 253 + if (ret) 254 + return ERR_PTR(ret); 138 255 139 256 return clk_hw->clk; 140 257 } ··· 423 288 clk = rzg2l_cpg_div_clk_register(core, priv->clks, 424 289 priv->base, priv); 425 290 break; 291 + case CLK_TYPE_MUX: 292 + clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv); 293 + break; 294 + case CLK_TYPE_SD_MUX: 295 + clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv); 296 + break; 426 297 default: 427 298 goto fail; 428 299 } ··· 451 310 * @hw: handle between common and hardware-specific interfaces 452 311 * @off: register offset 453 312 * @bit: ON/MON bit 313 + * @enabled: soft state of the clock, if it is coupled with another clock 454 314 * @priv: CPG/MSTP private data 315 + * @sibling: pointer to the other coupled clock 455 316 */ 456 317 struct mstp_clock { 457 318 struct clk_hw hw; 458 319 u16 off; 459 320 u8 bit; 321 + bool enabled; 460 322 struct rzg2l_cpg_priv *priv; 323 + struct mstp_clock *sibling; 461 324 }; 462 325 463 326 #define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw) ··· 514 369 515 370 static int rzg2l_mod_clock_enable(struct clk_hw *hw) 516 371 { 372 + struct mstp_clock *clock = to_mod_clock(hw); 373 + 374 + if 
(clock->sibling) { 375 + struct rzg2l_cpg_priv *priv = clock->priv; 376 + unsigned long flags; 377 + bool enabled; 378 + 379 + spin_lock_irqsave(&priv->rmw_lock, flags); 380 + enabled = clock->sibling->enabled; 381 + clock->enabled = true; 382 + spin_unlock_irqrestore(&priv->rmw_lock, flags); 383 + if (enabled) 384 + return 0; 385 + } 386 + 517 387 return rzg2l_mod_clock_endisable(hw, true); 518 388 } 519 389 520 390 static void rzg2l_mod_clock_disable(struct clk_hw *hw) 521 391 { 392 + struct mstp_clock *clock = to_mod_clock(hw); 393 + 394 + if (clock->sibling) { 395 + struct rzg2l_cpg_priv *priv = clock->priv; 396 + unsigned long flags; 397 + bool enabled; 398 + 399 + spin_lock_irqsave(&priv->rmw_lock, flags); 400 + enabled = clock->sibling->enabled; 401 + clock->enabled = false; 402 + spin_unlock_irqrestore(&priv->rmw_lock, flags); 403 + if (enabled) 404 + return; 405 + } 406 + 522 407 rzg2l_mod_clock_endisable(hw, false); 523 408 } 524 409 ··· 564 389 return 1; 565 390 } 566 391 392 + if (clock->sibling) 393 + return clock->enabled; 394 + 567 395 value = readl(priv->base + CLK_MON_R(clock->off)); 568 396 569 - return !(value & bitmask); 397 + return value & bitmask; 570 398 } 571 399 572 400 static const struct clk_ops rzg2l_mod_clock_ops = { ··· 577 399 .disable = rzg2l_mod_clock_disable, 578 400 .is_enabled = rzg2l_mod_clock_is_enabled, 579 401 }; 402 + 403 + static struct mstp_clock 404 + *rzg2l_mod_clock__get_sibling(struct mstp_clock *clock, 405 + struct rzg2l_cpg_priv *priv) 406 + { 407 + struct clk_hw *hw; 408 + unsigned int i; 409 + 410 + for (i = 0; i < priv->num_mod_clks; i++) { 411 + struct mstp_clock *clk; 412 + 413 + if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT)) 414 + continue; 415 + 416 + hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]); 417 + clk = to_mod_clock(hw); 418 + if (clock->off == clk->off && clock->bit == clk->bit) 419 + return clk; 420 + } 421 + 422 + return NULL; 423 + } 580 424 581 425 static void __init 582 426 
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod, ··· 661 461 662 462 dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk)); 663 463 priv->clks[id] = clk; 464 + 465 + if (mod->is_coupled) { 466 + struct mstp_clock *sibling; 467 + 468 + clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw); 469 + sibling = rzg2l_mod_clock__get_sibling(clock, priv); 470 + if (sibling) { 471 + clock->sibling = sibling; 472 + sibling->sibling = clock; 473 + } 474 + } 475 + 664 476 return; 665 477 666 478 fail:
+44 -1
drivers/clk/renesas/rzg2l-cpg.h
··· 11 11 12 12 #define CPG_PL2_DDIV (0x204) 13 13 #define CPG_PL3A_DDIV (0x208) 14 + #define CPG_PL2SDHI_DSEL (0x218) 15 + #define CPG_CLKSTATUS (0x280) 16 + #define CPG_PL3_SSEL (0x408) 17 + #define CPG_PL6_ETH_SSEL (0x418) 18 + 19 + #define CPG_CLKSTATUS_SELSDHI0_STS BIT(28) 20 + #define CPG_CLKSTATUS_SELSDHI1_STS BIT(29) 21 + 22 + #define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 20000 14 23 15 24 /* n = 0/1/2 for PLL1/4/6 */ 16 25 #define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n)) ··· 32 23 #define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3) 33 24 #define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3) 34 25 #define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3) 26 + #define DIVPL3C DDIV_PACK(CPG_PL3A_DDIV, 8, 3) 27 + 28 + #define SEL_PLL_PACK(offset, bitpos, size) \ 29 + (((offset) << 20) | ((bitpos) << 12) | ((size) << 8)) 30 + 31 + #define SEL_PLL3_3 SEL_PLL_PACK(CPG_PL3_SSEL, 8, 1) 32 + #define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1) 33 + 34 + #define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2) 35 + #define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2) 35 36 36 37 /** 37 38 * Definitions of CPG Core Clocks ··· 62 43 const struct clk_div_table *dtable; 63 44 const char * const *parent_names; 64 45 int flag; 46 + int mux_flags; 65 47 int num_parents; 66 48 }; 67 49 ··· 74 54 75 55 /* Clock with divider */ 76 56 CLK_TYPE_DIV, 57 + 58 + /* Clock with clock source selector */ 59 + CLK_TYPE_MUX, 60 + 61 + /* Clock with SD clock source selector */ 62 + CLK_TYPE_SD_MUX, 77 63 }; 78 64 79 65 #define DEF_TYPE(_name, _id, _type...) 
\ ··· 95 69 #define DEF_DIV(_name, _id, _parent, _conf, _dtable, _flag) \ 96 70 DEF_TYPE(_name, _id, CLK_TYPE_DIV, .conf = _conf, \ 97 71 .parent = _parent, .dtable = _dtable, .flag = _flag) 72 + #define DEF_MUX(_name, _id, _conf, _parent_names, _num_parents, _flag, \ 73 + _mux_flags) \ 74 + DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = _conf, \ 75 + .parent_names = _parent_names, .num_parents = _num_parents, \ 76 + .flag = _flag, .mux_flags = _mux_flags) 77 + #define DEF_SD_MUX(_name, _id, _conf, _parent_names, _num_parents) \ 78 + DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, \ 79 + .parent_names = _parent_names, .num_parents = _num_parents) 98 80 99 81 /** 100 82 * struct rzg2l_mod_clk - Module Clocks definitions ··· 112 78 * @parent: id of parent clock 113 79 * @off: register offset 114 80 * @bit: ON/MON bit 81 + * @is_coupled: flag to indicate coupled clock 115 82 */ 116 83 struct rzg2l_mod_clk { 117 84 const char *name; ··· 120 85 unsigned int parent; 121 86 u16 off; 122 87 u8 bit; 88 + bool is_coupled; 123 89 }; 124 90 125 - #define DEF_MOD(_name, _id, _parent, _off, _bit) \ 91 + #define DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _is_coupled) \ 126 92 { \ 127 93 .name = _name, \ 128 94 .id = MOD_CLK_BASE + (_id), \ 129 95 .parent = (_parent), \ 130 96 .off = (_off), \ 131 97 .bit = (_bit), \ 98 + .is_coupled = (_is_coupled), \ 132 99 } 100 + 101 + #define DEF_MOD(_name, _id, _parent, _off, _bit) \ 102 + DEF_MOD_BASE(_name, _id, _parent, _off, _bit, false) 103 + 104 + #define DEF_COUPLED(_name, _id, _parent, _off, _bit) \ 105 + DEF_MOD_BASE(_name, _id, _parent, _off, _bit, true) 133 106 134 107 /** 135 108 * struct rzg2l_reset - Reset definitions
+10 -7
drivers/clk/rockchip/clk-rk3399.c
··· 481 481 COMPOSITE_NOMUX(0, "atclk_core_l", "armclkl", CLK_IGNORE_UNUSED, 482 482 RK3399_CLKSEL_CON(1), 0, 5, DFLAGS | CLK_DIVIDER_READ_ONLY, 483 483 RK3399_CLKGATE_CON(0), 5, GFLAGS), 484 - COMPOSITE_NOMUX(0, "pclk_dbg_core_l", "armclkl", CLK_IGNORE_UNUSED, 484 + COMPOSITE_NOMUX(PCLK_COREDBG_L, "pclk_dbg_core_l", "armclkl", CLK_IGNORE_UNUSED, 485 485 RK3399_CLKSEL_CON(1), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY, 486 486 RK3399_CLKGATE_CON(0), 6, GFLAGS), 487 487 ··· 531 531 GATE(ACLK_GIC_ADB400_CORE_B_2_GIC, "aclk_core_adb400_core_b_2_gic", "armclkb", CLK_IGNORE_UNUSED, 532 532 RK3399_CLKGATE_CON(14), 4, GFLAGS), 533 533 534 - DIV(0, "pclken_dbg_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED, 534 + DIV(PCLK_COREDBG_B, "pclken_dbg_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED, 535 535 RK3399_CLKSEL_CON(3), 13, 2, DFLAGS | CLK_DIVIDER_READ_ONLY), 536 536 537 537 GATE(0, "pclk_dbg_cxcs_pd_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED, ··· 1514 1514 "aclk_vio_noc", 1515 1515 1516 1516 /* ddrc */ 1517 - "sclk_ddrc" 1517 + "sclk_ddrc", 1518 + 1519 + "armclkl", 1520 + "armclkb", 1518 1521 }; 1519 1522 1520 1523 static const char *const rk3399_pmucru_critical_clocks[] __initconst = { ··· 1552 1549 rockchip_clk_register_branches(ctx, rk3399_clk_branches, 1553 1550 ARRAY_SIZE(rk3399_clk_branches)); 1554 1551 1555 - rockchip_clk_protect_critical(rk3399_cru_critical_clocks, 1556 - ARRAY_SIZE(rk3399_cru_critical_clocks)); 1557 - 1558 1552 rockchip_clk_register_armclk(ctx, ARMCLKL, "armclkl", 1559 1553 mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p), 1560 1554 &rk3399_cpuclkl_data, rk3399_cpuclkl_rates, ··· 1561 1561 mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p), 1562 1562 &rk3399_cpuclkb_data, rk3399_cpuclkb_rates, 1563 1563 ARRAY_SIZE(rk3399_cpuclkb_rates)); 1564 + 1565 + rockchip_clk_protect_critical(rk3399_cru_critical_clocks, 1566 + ARRAY_SIZE(rk3399_cru_critical_clocks)); 1564 1567 1565 1568 rockchip_register_softrst(np, 21, reg_base + RK3399_SOFTRST_CON(0), 1566 1569 
ROCKCHIP_SOFTRST_HIWORD_MASK); ··· 1656 1653 .suppress_bind_attrs = true, 1657 1654 }, 1658 1655 }; 1659 - builtin_platform_driver_probe(clk_rk3399_driver, clk_rk3399_probe); 1656 + module_platform_driver_probe(clk_rk3399_driver, clk_rk3399_probe); 1660 1657 1661 1658 MODULE_DESCRIPTION("Rockchip RK3399 Clock Driver"); 1662 1659 MODULE_LICENSE("GPL");
+1 -1
drivers/clk/rockchip/clk-rk3568.c
··· 1719 1719 .suppress_bind_attrs = true, 1720 1720 }, 1721 1721 }; 1722 - builtin_platform_driver_probe(clk_rk3568_driver, clk_rk3568_probe); 1722 + module_platform_driver_probe(clk_rk3568_driver, clk_rk3568_probe); 1723 1723 1724 1724 MODULE_DESCRIPTION("Rockchip RK3568 Clock Driver"); 1725 1725 MODULE_LICENSE("GPL");