Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

clk: at91: re-factor clocks suspend/resume

SAMA5D2 and SAMA7G5 have a special power saving mode (backup mode) where
most of the SoC's components are powered off (including PMC). Resuming
from this mode is done with the help of the bootloader. Peripherals are
not aware of the power saving mode, thus most of them disable clocks in
their suspend hooks and re-enable them in their resume hooks without
taking into account the previously configured rate. Moreover, some of
the peripherals act as wakeup sources and do not disable their clocks
when suspending in this scenario. Since backup mode cuts the power for
peripherals, these clocks need to be re-configured on resume.

The initial PMC suspend/resume code was designed only for SAMA5D2's PMC
(as it was the only one supporting backup mode). SAMA7G5 also supports
backup mode and its PMC is different (a few new functionalities,
different register offsets, different offsets in registers for each
functionality). To address both the SAMA5D2 and SAMA7G5 PMCs, add
.save_context()/.restore_context() support to each clock driver and call
these from the PMC driver.

Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
Link: https://lore.kernel.org/r/20211011112719.3951784-2-claudiu.beznea@microchip.com
Acked-by: Nicolas Ferre <nicolas.ferre@microchip.com>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>

authored by

Claudiu Beznea and committed by
Stephen Boyd
36971566 c405f5c1

+558 -181
+37 -9
drivers/clk/at91/clk-generated.c
··· 27 27 u32 id; 28 28 u32 gckdiv; 29 29 const struct clk_pcr_layout *layout; 30 + struct at91_clk_pms pms; 30 31 u8 parent_id; 31 32 int chg_pid; 32 33 }; ··· 35 34 #define to_clk_generated(hw) \ 36 35 container_of(hw, struct clk_generated, hw) 37 36 38 - static int clk_generated_enable(struct clk_hw *hw) 37 + static int clk_generated_set(struct clk_generated *gck, int status) 39 38 { 40 - struct clk_generated *gck = to_clk_generated(hw); 41 39 unsigned long flags; 42 - 43 - pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n", 44 - __func__, gck->gckdiv, gck->parent_id); 40 + unsigned int enable = status ? AT91_PMC_PCR_GCKEN : 0; 45 41 46 42 spin_lock_irqsave(gck->lock, flags); 47 43 regmap_write(gck->regmap, gck->layout->offset, 48 44 (gck->id & gck->layout->pid_mask)); 49 45 regmap_update_bits(gck->regmap, gck->layout->offset, 50 46 AT91_PMC_PCR_GCKDIV_MASK | gck->layout->gckcss_mask | 51 - gck->layout->cmd | AT91_PMC_PCR_GCKEN, 47 + gck->layout->cmd | enable, 52 48 field_prep(gck->layout->gckcss_mask, gck->parent_id) | 53 49 gck->layout->cmd | 54 50 FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gck->gckdiv) | 55 - AT91_PMC_PCR_GCKEN); 51 + enable); 56 52 spin_unlock_irqrestore(gck->lock, flags); 53 + 54 + return 0; 55 + } 56 + 57 + static int clk_generated_enable(struct clk_hw *hw) 58 + { 59 + struct clk_generated *gck = to_clk_generated(hw); 60 + 61 + pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n", 62 + __func__, gck->gckdiv, gck->parent_id); 63 + 64 + clk_generated_set(gck, 1); 65 + 57 66 return 0; 58 67 } 59 68 ··· 256 245 return 0; 257 246 } 258 247 248 + static int clk_generated_save_context(struct clk_hw *hw) 249 + { 250 + struct clk_generated *gck = to_clk_generated(hw); 251 + 252 + gck->pms.status = clk_generated_is_enabled(&gck->hw); 253 + 254 + return 0; 255 + } 256 + 257 + static void clk_generated_restore_context(struct clk_hw *hw) 258 + { 259 + struct clk_generated *gck = to_clk_generated(hw); 260 + 261 + if (gck->pms.status) 262 + 
clk_generated_set(gck, gck->pms.status); 263 + } 264 + 259 265 static const struct clk_ops generated_ops = { 260 266 .enable = clk_generated_enable, 261 267 .disable = clk_generated_disable, ··· 282 254 .get_parent = clk_generated_get_parent, 283 255 .set_parent = clk_generated_set_parent, 284 256 .set_rate = clk_generated_set_rate, 257 + .save_context = clk_generated_save_context, 258 + .restore_context = clk_generated_restore_context, 285 259 }; 286 260 287 261 /** ··· 350 320 if (ret) { 351 321 kfree(gck); 352 322 hw = ERR_PTR(ret); 353 - } else { 354 - pmc_register_id(id); 355 323 } 356 324 357 325 return hw;
+66
drivers/clk/at91/clk-main.c
··· 28 28 struct clk_main_osc { 29 29 struct clk_hw hw; 30 30 struct regmap *regmap; 31 + struct at91_clk_pms pms; 31 32 }; 32 33 33 34 #define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw) ··· 38 37 struct regmap *regmap; 39 38 unsigned long frequency; 40 39 unsigned long accuracy; 40 + struct at91_clk_pms pms; 41 41 }; 42 42 43 43 #define to_clk_main_rc_osc(hw) container_of(hw, struct clk_main_rc_osc, hw) ··· 53 51 struct clk_sam9x5_main { 54 52 struct clk_hw hw; 55 53 struct regmap *regmap; 54 + struct at91_clk_pms pms; 56 55 u8 parent; 57 56 }; 58 57 ··· 123 120 return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp); 124 121 } 125 122 123 + static int clk_main_osc_save_context(struct clk_hw *hw) 124 + { 125 + struct clk_main_osc *osc = to_clk_main_osc(hw); 126 + 127 + osc->pms.status = clk_main_osc_is_prepared(hw); 128 + 129 + return 0; 130 + } 131 + 132 + static void clk_main_osc_restore_context(struct clk_hw *hw) 133 + { 134 + struct clk_main_osc *osc = to_clk_main_osc(hw); 135 + 136 + if (osc->pms.status) 137 + clk_main_osc_prepare(hw); 138 + } 139 + 126 140 static const struct clk_ops main_osc_ops = { 127 141 .prepare = clk_main_osc_prepare, 128 142 .unprepare = clk_main_osc_unprepare, 129 143 .is_prepared = clk_main_osc_is_prepared, 144 + .save_context = clk_main_osc_save_context, 145 + .restore_context = clk_main_osc_restore_context, 130 146 }; 131 147 132 148 struct clk_hw * __init ··· 262 240 return osc->accuracy; 263 241 } 264 242 243 + static int clk_main_rc_osc_save_context(struct clk_hw *hw) 244 + { 245 + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); 246 + 247 + osc->pms.status = clk_main_rc_osc_is_prepared(hw); 248 + 249 + return 0; 250 + } 251 + 252 + static void clk_main_rc_osc_restore_context(struct clk_hw *hw) 253 + { 254 + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); 255 + 256 + if (osc->pms.status) 257 + clk_main_rc_osc_prepare(hw); 258 + } 259 + 265 260 static const struct clk_ops main_rc_osc_ops = { 
266 261 .prepare = clk_main_rc_osc_prepare, 267 262 .unprepare = clk_main_rc_osc_unprepare, 268 263 .is_prepared = clk_main_rc_osc_is_prepared, 269 264 .recalc_rate = clk_main_rc_osc_recalc_rate, 270 265 .recalc_accuracy = clk_main_rc_osc_recalc_accuracy, 266 + .save_context = clk_main_rc_osc_save_context, 267 + .restore_context = clk_main_rc_osc_restore_context, 271 268 }; 272 269 273 270 struct clk_hw * __init ··· 506 465 return clk_main_parent_select(status); 507 466 } 508 467 468 + static int clk_sam9x5_main_save_context(struct clk_hw *hw) 469 + { 470 + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); 471 + 472 + clkmain->pms.status = clk_main_rc_osc_is_prepared(&clkmain->hw); 473 + clkmain->pms.parent = clk_sam9x5_main_get_parent(&clkmain->hw); 474 + 475 + return 0; 476 + } 477 + 478 + static void clk_sam9x5_main_restore_context(struct clk_hw *hw) 479 + { 480 + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); 481 + int ret; 482 + 483 + ret = clk_sam9x5_main_set_parent(hw, clkmain->pms.parent); 484 + if (ret) 485 + return; 486 + 487 + if (clkmain->pms.status) 488 + clk_sam9x5_main_prepare(hw); 489 + } 490 + 509 491 static const struct clk_ops sam9x5_main_ops = { 510 492 .prepare = clk_sam9x5_main_prepare, 511 493 .is_prepared = clk_sam9x5_main_is_prepared, 512 494 .recalc_rate = clk_sam9x5_main_recalc_rate, 513 495 .set_parent = clk_sam9x5_main_set_parent, 514 496 .get_parent = clk_sam9x5_main_get_parent, 497 + .save_context = clk_sam9x5_main_save_context, 498 + .restore_context = clk_sam9x5_main_restore_context, 515 499 }; 516 500 517 501 struct clk_hw * __init
+182 -12
drivers/clk/at91/clk-master.c
··· 37 37 spinlock_t *lock; 38 38 const struct clk_master_layout *layout; 39 39 const struct clk_master_characteristics *characteristics; 40 + struct at91_clk_pms pms; 40 41 u32 *mux_table; 41 42 u32 mckr; 42 43 int chg_pid; ··· 113 112 return rate; 114 113 } 115 114 115 + static int clk_master_div_save_context(struct clk_hw *hw) 116 + { 117 + struct clk_master *master = to_clk_master(hw); 118 + struct clk_hw *parent_hw = clk_hw_get_parent(hw); 119 + unsigned long flags; 120 + unsigned int mckr, div; 121 + 122 + spin_lock_irqsave(master->lock, flags); 123 + regmap_read(master->regmap, master->layout->offset, &mckr); 124 + spin_unlock_irqrestore(master->lock, flags); 125 + 126 + mckr &= master->layout->mask; 127 + div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; 128 + div = master->characteristics->divisors[div]; 129 + 130 + master->pms.parent_rate = clk_hw_get_rate(parent_hw); 131 + master->pms.rate = DIV_ROUND_CLOSEST(master->pms.parent_rate, div); 132 + 133 + return 0; 134 + } 135 + 136 + static void clk_master_div_restore_context(struct clk_hw *hw) 137 + { 138 + struct clk_master *master = to_clk_master(hw); 139 + unsigned long flags; 140 + unsigned int mckr; 141 + u8 div; 142 + 143 + spin_lock_irqsave(master->lock, flags); 144 + regmap_read(master->regmap, master->layout->offset, &mckr); 145 + spin_unlock_irqrestore(master->lock, flags); 146 + 147 + mckr &= master->layout->mask; 148 + div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; 149 + div = master->characteristics->divisors[div]; 150 + 151 + if (div != DIV_ROUND_CLOSEST(master->pms.parent_rate, master->pms.rate)) 152 + pr_warn("MCKR DIV not configured properly by firmware!\n"); 153 + } 154 + 116 155 static const struct clk_ops master_div_ops = { 117 156 .prepare = clk_master_prepare, 118 157 .is_prepared = clk_master_is_prepared, 119 158 .recalc_rate = clk_master_div_recalc_rate, 159 + .save_context = clk_master_div_save_context, 160 + .restore_context = clk_master_div_restore_context, 120 161 }; 121 
162 122 163 static int clk_master_div_set_rate(struct clk_hw *hw, unsigned long rate, ··· 168 125 const struct clk_master_characteristics *characteristics = 169 126 master->characteristics; 170 127 unsigned long flags; 128 + unsigned int mckr, tmp; 171 129 int div, i; 130 + int ret; 172 131 173 132 div = DIV_ROUND_CLOSEST(parent_rate, rate); 174 133 if (div > ARRAY_SIZE(characteristics->divisors)) ··· 190 145 return -EINVAL; 191 146 192 147 spin_lock_irqsave(master->lock, flags); 193 - regmap_update_bits(master->regmap, master->layout->offset, 194 - (MASTER_DIV_MASK << MASTER_DIV_SHIFT), 195 - (div << MASTER_DIV_SHIFT)); 148 + ret = regmap_read(master->regmap, master->layout->offset, &mckr); 149 + if (ret) 150 + goto unlock; 151 + 152 + tmp = mckr & master->layout->mask; 153 + tmp = (tmp >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; 154 + if (tmp == div) 155 + goto unlock; 156 + 157 + mckr &= ~(MASTER_DIV_MASK << MASTER_DIV_SHIFT); 158 + mckr |= (div << MASTER_DIV_SHIFT); 159 + ret = regmap_write(master->regmap, master->layout->offset, mckr); 160 + if (ret) 161 + goto unlock; 162 + 196 163 while (!clk_master_ready(master)) 197 164 cpu_relax(); 165 + unlock: 198 166 spin_unlock_irqrestore(master->lock, flags); 199 167 200 168 return 0; ··· 255 197 return 0; 256 198 } 257 199 200 + static void clk_master_div_restore_context_chg(struct clk_hw *hw) 201 + { 202 + struct clk_master *master = to_clk_master(hw); 203 + int ret; 204 + 205 + ret = clk_master_div_set_rate(hw, master->pms.rate, 206 + master->pms.parent_rate); 207 + if (ret) 208 + pr_warn("Failed to restore MCK DIV clock\n"); 209 + } 210 + 258 211 static const struct clk_ops master_div_ops_chg = { 259 212 .prepare = clk_master_prepare, 260 213 .is_prepared = clk_master_is_prepared, 261 214 .recalc_rate = clk_master_div_recalc_rate, 262 215 .determine_rate = clk_master_div_determine_rate, 263 216 .set_rate = clk_master_div_set_rate, 217 + .save_context = clk_master_div_save_context, 218 + .restore_context = 
clk_master_div_restore_context_chg, 264 219 }; 265 220 266 221 static void clk_sama7g5_master_best_diff(struct clk_rate_request *req, ··· 343 272 { 344 273 struct clk_master *master = to_clk_master(hw); 345 274 unsigned long flags; 346 - unsigned int pres; 275 + unsigned int pres, mckr, tmp; 276 + int ret; 347 277 348 278 pres = DIV_ROUND_CLOSEST(parent_rate, rate); 349 279 if (pres > MASTER_PRES_MAX) ··· 356 284 pres = ffs(pres) - 1; 357 285 358 286 spin_lock_irqsave(master->lock, flags); 359 - regmap_update_bits(master->regmap, master->layout->offset, 360 - (MASTER_PRES_MASK << master->layout->pres_shift), 361 - (pres << master->layout->pres_shift)); 287 + ret = regmap_read(master->regmap, master->layout->offset, &mckr); 288 + if (ret) 289 + goto unlock; 290 + 291 + mckr &= master->layout->mask; 292 + tmp = (mckr >> master->layout->pres_shift) & MASTER_PRES_MASK; 293 + if (pres == tmp) 294 + goto unlock; 295 + 296 + mckr &= ~(MASTER_PRES_MASK << master->layout->pres_shift); 297 + mckr |= (pres << master->layout->pres_shift); 298 + ret = regmap_write(master->regmap, master->layout->offset, mckr); 299 + if (ret) 300 + goto unlock; 362 301 363 302 while (!clk_master_ready(master)) 364 303 cpu_relax(); 304 + unlock: 365 305 spin_unlock_irqrestore(master->lock, flags); 366 306 367 - return 0; 307 + return ret; 368 308 } 369 309 370 310 static unsigned long clk_master_pres_recalc_rate(struct clk_hw *hw, ··· 414 330 return mckr & AT91_PMC_CSS; 415 331 } 416 332 333 + static int clk_master_pres_save_context(struct clk_hw *hw) 334 + { 335 + struct clk_master *master = to_clk_master(hw); 336 + struct clk_hw *parent_hw = clk_hw_get_parent(hw); 337 + unsigned long flags; 338 + unsigned int val, pres; 339 + 340 + spin_lock_irqsave(master->lock, flags); 341 + regmap_read(master->regmap, master->layout->offset, &val); 342 + spin_unlock_irqrestore(master->lock, flags); 343 + 344 + val &= master->layout->mask; 345 + pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK; 
346 + if (pres == MASTER_PRES_MAX && master->characteristics->have_div3_pres) 347 + pres = 3; 348 + else 349 + pres = (1 << pres); 350 + 351 + master->pms.parent = val & AT91_PMC_CSS; 352 + master->pms.parent_rate = clk_hw_get_rate(parent_hw); 353 + master->pms.rate = DIV_ROUND_CLOSEST_ULL(master->pms.parent_rate, pres); 354 + 355 + return 0; 356 + } 357 + 358 + static void clk_master_pres_restore_context(struct clk_hw *hw) 359 + { 360 + struct clk_master *master = to_clk_master(hw); 361 + unsigned long flags; 362 + unsigned int val, pres; 363 + 364 + spin_lock_irqsave(master->lock, flags); 365 + regmap_read(master->regmap, master->layout->offset, &val); 366 + spin_unlock_irqrestore(master->lock, flags); 367 + 368 + val &= master->layout->mask; 369 + pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK; 370 + if (pres == MASTER_PRES_MAX && master->characteristics->have_div3_pres) 371 + pres = 3; 372 + else 373 + pres = (1 << pres); 374 + 375 + if (master->pms.rate != 376 + DIV_ROUND_CLOSEST_ULL(master->pms.parent_rate, pres) || 377 + (master->pms.parent != (val & AT91_PMC_CSS))) 378 + pr_warn("MCKR PRES was not configured properly by firmware!\n"); 379 + } 380 + 381 + static void clk_master_pres_restore_context_chg(struct clk_hw *hw) 382 + { 383 + struct clk_master *master = to_clk_master(hw); 384 + 385 + clk_master_pres_set_rate(hw, master->pms.rate, master->pms.parent_rate); 386 + } 387 + 417 388 static const struct clk_ops master_pres_ops = { 418 389 .prepare = clk_master_prepare, 419 390 .is_prepared = clk_master_is_prepared, 420 391 .recalc_rate = clk_master_pres_recalc_rate, 421 392 .get_parent = clk_master_pres_get_parent, 393 + .save_context = clk_master_pres_save_context, 394 + .restore_context = clk_master_pres_restore_context, 422 395 }; 423 396 424 397 static const struct clk_ops master_pres_ops_chg = { ··· 485 344 .recalc_rate = clk_master_pres_recalc_rate, 486 345 .get_parent = clk_master_pres_get_parent, 487 346 .set_rate = 
clk_master_pres_set_rate, 347 + .save_context = clk_master_pres_save_context, 348 + .restore_context = clk_master_pres_restore_context_chg, 488 349 }; 489 350 490 351 static struct clk_hw * __init ··· 682 539 return 0; 683 540 } 684 541 685 - static int clk_sama7g5_master_enable(struct clk_hw *hw) 542 + static void clk_sama7g5_master_set(struct clk_master *master, 543 + unsigned int status) 686 544 { 687 - struct clk_master *master = to_clk_master(hw); 688 545 unsigned long flags; 689 546 unsigned int val, cparent; 547 + unsigned int enable = status ? PMC_MCR_EN : 0; 690 548 691 549 spin_lock_irqsave(master->lock, flags); 692 550 693 551 regmap_write(master->regmap, PMC_MCR, PMC_MCR_ID(master->id)); 694 552 regmap_read(master->regmap, PMC_MCR, &val); 695 553 regmap_update_bits(master->regmap, PMC_MCR, 696 - PMC_MCR_EN | PMC_MCR_CSS | PMC_MCR_DIV | 554 + enable | PMC_MCR_CSS | PMC_MCR_DIV | 697 555 PMC_MCR_CMD | PMC_MCR_ID_MSK, 698 - PMC_MCR_EN | (master->parent << PMC_MCR_CSS_SHIFT) | 556 + enable | (master->parent << PMC_MCR_CSS_SHIFT) | 699 557 (master->div << MASTER_DIV_SHIFT) | 700 558 PMC_MCR_CMD | PMC_MCR_ID(master->id)); 701 559 ··· 707 563 cpu_relax(); 708 564 709 565 spin_unlock_irqrestore(master->lock, flags); 566 + } 567 + 568 + static int clk_sama7g5_master_enable(struct clk_hw *hw) 569 + { 570 + struct clk_master *master = to_clk_master(hw); 571 + 572 + clk_sama7g5_master_set(master, 1); 710 573 711 574 return 0; 712 575 } ··· 771 620 return 0; 772 621 } 773 622 623 + static int clk_sama7g5_master_save_context(struct clk_hw *hw) 624 + { 625 + struct clk_master *master = to_clk_master(hw); 626 + 627 + master->pms.status = clk_sama7g5_master_is_enabled(hw); 628 + 629 + return 0; 630 + } 631 + 632 + static void clk_sama7g5_master_restore_context(struct clk_hw *hw) 633 + { 634 + struct clk_master *master = to_clk_master(hw); 635 + 636 + if (master->pms.status) 637 + clk_sama7g5_master_set(master, master->pms.status); 638 + } 639 + 774 640 static const 
struct clk_ops sama7g5_master_ops = { 775 641 .enable = clk_sama7g5_master_enable, 776 642 .disable = clk_sama7g5_master_disable, ··· 797 629 .set_rate = clk_sama7g5_master_set_rate, 798 630 .get_parent = clk_sama7g5_master_get_parent, 799 631 .set_parent = clk_sama7g5_master_set_parent, 632 + .save_context = clk_sama7g5_master_save_context, 633 + .restore_context = clk_sama7g5_master_restore_context, 800 634 }; 801 635 802 636 struct clk_hw * __init
+34 -6
drivers/clk/at91/clk-peripheral.c
··· 37 37 u32 id; 38 38 u32 div; 39 39 const struct clk_pcr_layout *layout; 40 + struct at91_clk_pms pms; 40 41 bool auto_div; 41 42 int chg_pid; 42 43 }; ··· 156 155 periph->div = shift; 157 156 } 158 157 159 - static int clk_sam9x5_peripheral_enable(struct clk_hw *hw) 158 + static int clk_sam9x5_peripheral_set(struct clk_sam9x5_peripheral *periph, 159 + unsigned int status) 160 160 { 161 - struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); 162 161 unsigned long flags; 162 + unsigned int enable = status ? AT91_PMC_PCR_EN : 0; 163 163 164 164 if (periph->id < PERIPHERAL_ID_MIN) 165 165 return 0; ··· 170 168 (periph->id & periph->layout->pid_mask)); 171 169 regmap_update_bits(periph->regmap, periph->layout->offset, 172 170 periph->layout->div_mask | periph->layout->cmd | 173 - AT91_PMC_PCR_EN, 171 + enable, 174 172 field_prep(periph->layout->div_mask, periph->div) | 175 - periph->layout->cmd | 176 - AT91_PMC_PCR_EN); 173 + periph->layout->cmd | enable); 177 174 spin_unlock_irqrestore(periph->lock, flags); 178 175 179 176 return 0; 177 + } 178 + 179 + static int clk_sam9x5_peripheral_enable(struct clk_hw *hw) 180 + { 181 + struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); 182 + 183 + return clk_sam9x5_peripheral_set(periph, 1); 180 184 } 181 185 182 186 static void clk_sam9x5_peripheral_disable(struct clk_hw *hw) ··· 401 393 return -EINVAL; 402 394 } 403 395 396 + static int clk_sam9x5_peripheral_save_context(struct clk_hw *hw) 397 + { 398 + struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); 399 + 400 + periph->pms.status = clk_sam9x5_peripheral_is_enabled(hw); 401 + 402 + return 0; 403 + } 404 + 405 + static void clk_sam9x5_peripheral_restore_context(struct clk_hw *hw) 406 + { 407 + struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); 408 + 409 + if (periph->pms.status) 410 + clk_sam9x5_peripheral_set(periph, periph->pms.status); 411 + } 412 + 404 413 static const struct clk_ops 
sam9x5_peripheral_ops = { 405 414 .enable = clk_sam9x5_peripheral_enable, 406 415 .disable = clk_sam9x5_peripheral_disable, ··· 425 400 .recalc_rate = clk_sam9x5_peripheral_recalc_rate, 426 401 .round_rate = clk_sam9x5_peripheral_round_rate, 427 402 .set_rate = clk_sam9x5_peripheral_set_rate, 403 + .save_context = clk_sam9x5_peripheral_save_context, 404 + .restore_context = clk_sam9x5_peripheral_restore_context, 428 405 }; 429 406 430 407 static const struct clk_ops sam9x5_peripheral_chg_ops = { ··· 436 409 .recalc_rate = clk_sam9x5_peripheral_recalc_rate, 437 410 .determine_rate = clk_sam9x5_peripheral_determine_rate, 438 411 .set_rate = clk_sam9x5_peripheral_set_rate, 412 + .save_context = clk_sam9x5_peripheral_save_context, 413 + .restore_context = clk_sam9x5_peripheral_restore_context, 439 414 }; 440 415 441 416 struct clk_hw * __init ··· 489 460 hw = ERR_PTR(ret); 490 461 } else { 491 462 clk_sam9x5_peripheral_autodiv(periph); 492 - pmc_register_id(id); 493 463 } 494 464 495 465 return hw;
+39
drivers/clk/at91/clk-pll.c
··· 40 40 u16 mul; 41 41 const struct clk_pll_layout *layout; 42 42 const struct clk_pll_characteristics *characteristics; 43 + struct at91_clk_pms pms; 43 44 }; 44 45 45 46 static inline bool clk_pll_ready(struct regmap *regmap, int id) ··· 261 260 return 0; 262 261 } 263 262 263 + static int clk_pll_save_context(struct clk_hw *hw) 264 + { 265 + struct clk_pll *pll = to_clk_pll(hw); 266 + struct clk_hw *parent_hw = clk_hw_get_parent(hw); 267 + 268 + pll->pms.parent_rate = clk_hw_get_rate(parent_hw); 269 + pll->pms.rate = clk_pll_recalc_rate(&pll->hw, pll->pms.parent_rate); 270 + pll->pms.status = clk_pll_ready(pll->regmap, PLL_REG(pll->id)); 271 + 272 + return 0; 273 + } 274 + 275 + static void clk_pll_restore_context(struct clk_hw *hw) 276 + { 277 + struct clk_pll *pll = to_clk_pll(hw); 278 + unsigned long calc_rate; 279 + unsigned int pllr, pllr_out, pllr_count; 280 + u8 out = 0; 281 + 282 + if (pll->characteristics->out) 283 + out = pll->characteristics->out[pll->range]; 284 + 285 + regmap_read(pll->regmap, PLL_REG(pll->id), &pllr); 286 + 287 + calc_rate = (pll->pms.parent_rate / PLL_DIV(pllr)) * 288 + (PLL_MUL(pllr, pll->layout) + 1); 289 + pllr_count = (pllr >> PLL_COUNT_SHIFT) & PLL_MAX_COUNT; 290 + pllr_out = (pllr >> PLL_OUT_SHIFT) & out; 291 + 292 + if (pll->pms.rate != calc_rate || 293 + pll->pms.status != clk_pll_ready(pll->regmap, PLL_REG(pll->id)) || 294 + pllr_count != PLL_MAX_COUNT || 295 + (out && pllr_out != out)) 296 + pr_warn("PLLAR was not configured properly by firmware\n"); 297 + } 298 + 264 299 static const struct clk_ops pll_ops = { 265 300 .prepare = clk_pll_prepare, 266 301 .unprepare = clk_pll_unprepare, ··· 304 267 .recalc_rate = clk_pll_recalc_rate, 305 268 .round_rate = clk_pll_round_rate, 306 269 .set_rate = clk_pll_set_rate, 270 + .save_context = clk_pll_save_context, 271 + .restore_context = clk_pll_restore_context, 307 272 }; 308 273 309 274 struct clk_hw * __init
+27 -2
drivers/clk/at91/clk-programmable.c
··· 24 24 u32 *mux_table; 25 25 u8 id; 26 26 const struct clk_programmable_layout *layout; 27 + struct at91_clk_pms pms; 27 28 }; 28 29 29 30 #define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw) ··· 178 177 return 0; 179 178 } 180 179 180 + static int clk_programmable_save_context(struct clk_hw *hw) 181 + { 182 + struct clk_programmable *prog = to_clk_programmable(hw); 183 + struct clk_hw *parent_hw = clk_hw_get_parent(hw); 184 + 185 + prog->pms.parent = clk_programmable_get_parent(hw); 186 + prog->pms.parent_rate = clk_hw_get_rate(parent_hw); 187 + prog->pms.rate = clk_programmable_recalc_rate(hw, prog->pms.parent_rate); 188 + 189 + return 0; 190 + } 191 + 192 + static void clk_programmable_restore_context(struct clk_hw *hw) 193 + { 194 + struct clk_programmable *prog = to_clk_programmable(hw); 195 + int ret; 196 + 197 + ret = clk_programmable_set_parent(hw, prog->pms.parent); 198 + if (ret) 199 + return; 200 + 201 + clk_programmable_set_rate(hw, prog->pms.rate, prog->pms.parent_rate); 202 + } 203 + 181 204 static const struct clk_ops programmable_ops = { 182 205 .recalc_rate = clk_programmable_recalc_rate, 183 206 .determine_rate = clk_programmable_determine_rate, 184 207 .get_parent = clk_programmable_get_parent, 185 208 .set_parent = clk_programmable_set_parent, 186 209 .set_rate = clk_programmable_set_rate, 210 + .save_context = clk_programmable_save_context, 211 + .restore_context = clk_programmable_restore_context, 187 212 }; 188 213 189 214 struct clk_hw * __init ··· 248 221 if (ret) { 249 222 kfree(prog); 250 223 hw = ERR_PTR(ret); 251 - } else { 252 - pmc_register_pck(id); 253 224 } 254 225 255 226 return hw;
+64 -4
drivers/clk/at91/clk-sam9x60-pll.c
··· 38 38 39 39 struct sam9x60_frac { 40 40 struct sam9x60_pll_core core; 41 + struct at91_clk_pms pms; 41 42 u32 frac; 42 43 u16 mul; 43 44 }; 44 45 45 46 struct sam9x60_div { 46 47 struct sam9x60_pll_core core; 48 + struct at91_clk_pms pms; 47 49 u8 div; 48 50 }; 49 51 ··· 77 75 ((u64)parent_rate * frac->frac >> 22)); 78 76 } 79 77 80 - static int sam9x60_frac_pll_prepare(struct clk_hw *hw) 78 + static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core) 81 79 { 82 - struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 83 80 struct sam9x60_frac *frac = to_sam9x60_frac(core); 84 81 struct regmap *regmap = core->regmap; 85 82 unsigned int val, cfrac, cmul; ··· 140 139 spin_unlock_irqrestore(core->lock, flags); 141 140 142 141 return 0; 142 + } 143 + 144 + static int sam9x60_frac_pll_prepare(struct clk_hw *hw) 145 + { 146 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 147 + 148 + return sam9x60_frac_pll_set(core); 143 149 } 144 150 145 151 static void sam9x60_frac_pll_unprepare(struct clk_hw *hw) ··· 288 280 return ret; 289 281 } 290 282 283 + static int sam9x60_frac_pll_save_context(struct clk_hw *hw) 284 + { 285 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 286 + struct sam9x60_frac *frac = to_sam9x60_frac(core); 287 + 288 + frac->pms.status = sam9x60_pll_ready(core->regmap, core->id); 289 + 290 + return 0; 291 + } 292 + 293 + static void sam9x60_frac_pll_restore_context(struct clk_hw *hw) 294 + { 295 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 296 + struct sam9x60_frac *frac = to_sam9x60_frac(core); 297 + 298 + if (frac->pms.status) 299 + sam9x60_frac_pll_set(core); 300 + } 301 + 291 302 static const struct clk_ops sam9x60_frac_pll_ops = { 292 303 .prepare = sam9x60_frac_pll_prepare, 293 304 .unprepare = sam9x60_frac_pll_unprepare, ··· 314 287 .recalc_rate = sam9x60_frac_pll_recalc_rate, 315 288 .round_rate = sam9x60_frac_pll_round_rate, 316 289 .set_rate = sam9x60_frac_pll_set_rate, 290 + .save_context = 
sam9x60_frac_pll_save_context, 291 + .restore_context = sam9x60_frac_pll_restore_context, 317 292 }; 318 293 319 294 static const struct clk_ops sam9x60_frac_pll_ops_chg = { ··· 325 296 .recalc_rate = sam9x60_frac_pll_recalc_rate, 326 297 .round_rate = sam9x60_frac_pll_round_rate, 327 298 .set_rate = sam9x60_frac_pll_set_rate_chg, 299 + .save_context = sam9x60_frac_pll_save_context, 300 + .restore_context = sam9x60_frac_pll_restore_context, 328 301 }; 329 302 330 - static int sam9x60_div_pll_prepare(struct clk_hw *hw) 303 + static int sam9x60_div_pll_set(struct sam9x60_pll_core *core) 331 304 { 332 - struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 333 305 struct sam9x60_div *div = to_sam9x60_div(core); 334 306 struct regmap *regmap = core->regmap; 335 307 unsigned long flags; ··· 362 332 spin_unlock_irqrestore(core->lock, flags); 363 333 364 334 return 0; 335 + } 336 + 337 + static int sam9x60_div_pll_prepare(struct clk_hw *hw) 338 + { 339 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 340 + 341 + return sam9x60_div_pll_set(core); 365 342 } 366 343 367 344 static void sam9x60_div_pll_unprepare(struct clk_hw *hw) ··· 519 482 return 0; 520 483 } 521 484 485 + static int sam9x60_div_pll_save_context(struct clk_hw *hw) 486 + { 487 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 488 + struct sam9x60_div *div = to_sam9x60_div(core); 489 + 490 + div->pms.status = sam9x60_div_pll_is_prepared(hw); 491 + 492 + return 0; 493 + } 494 + 495 + static void sam9x60_div_pll_restore_context(struct clk_hw *hw) 496 + { 497 + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); 498 + struct sam9x60_div *div = to_sam9x60_div(core); 499 + 500 + if (div->pms.status) 501 + sam9x60_div_pll_set(core); 502 + } 503 + 522 504 static const struct clk_ops sam9x60_div_pll_ops = { 523 505 .prepare = sam9x60_div_pll_prepare, 524 506 .unprepare = sam9x60_div_pll_unprepare, ··· 545 489 .recalc_rate = sam9x60_div_pll_recalc_rate, 546 490 .round_rate = 
sam9x60_div_pll_round_rate, 547 491 .set_rate = sam9x60_div_pll_set_rate, 492 + .save_context = sam9x60_div_pll_save_context, 493 + .restore_context = sam9x60_div_pll_restore_context, 548 494 }; 549 495 550 496 static const struct clk_ops sam9x60_div_pll_ops_chg = { ··· 556 498 .recalc_rate = sam9x60_div_pll_recalc_rate, 557 499 .round_rate = sam9x60_div_pll_round_rate, 558 500 .set_rate = sam9x60_div_pll_set_rate_chg, 501 + .save_context = sam9x60_div_pll_save_context, 502 + .restore_context = sam9x60_div_pll_restore_context, 559 503 }; 560 504 561 505 struct clk_hw * __init
+20
drivers/clk/at91/clk-system.c
··· 20 20 struct clk_system { 21 21 struct clk_hw hw; 22 22 struct regmap *regmap; 23 + struct at91_clk_pms pms; 23 24 u8 id; 24 25 }; 25 26 ··· 78 77 return !!(status & (1 << sys->id)); 79 78 } 80 79 80 + static int clk_system_save_context(struct clk_hw *hw) 81 + { 82 + struct clk_system *sys = to_clk_system(hw); 83 + 84 + sys->pms.status = clk_system_is_prepared(hw); 85 + 86 + return 0; 87 + } 88 + 89 + static void clk_system_restore_context(struct clk_hw *hw) 90 + { 91 + struct clk_system *sys = to_clk_system(hw); 92 + 93 + if (sys->pms.status) 94 + clk_system_prepare(&sys->hw); 95 + } 96 + 81 97 static const struct clk_ops system_ops = { 82 98 .prepare = clk_system_prepare, 83 99 .unprepare = clk_system_unprepare, 84 100 .is_prepared = clk_system_is_prepared, 101 + .save_context = clk_system_save_context, 102 + .restore_context = clk_system_restore_context, 85 103 }; 86 104 87 105 struct clk_hw * __init
+27
drivers/clk/at91/clk-usb.c
··· 24 24 struct at91sam9x5_clk_usb { 25 25 struct clk_hw hw; 26 26 struct regmap *regmap; 27 + struct at91_clk_pms pms; 27 28 u32 usbs_mask; 28 29 u8 num_parents; 29 30 }; ··· 149 148 return 0; 150 149 } 151 150 151 + static int at91sam9x5_usb_save_context(struct clk_hw *hw) 152 + { 153 + struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); 154 + struct clk_hw *parent_hw = clk_hw_get_parent(hw); 155 + 156 + usb->pms.parent = at91sam9x5_clk_usb_get_parent(hw); 157 + usb->pms.parent_rate = clk_hw_get_rate(parent_hw); 158 + usb->pms.rate = at91sam9x5_clk_usb_recalc_rate(hw, usb->pms.parent_rate); 159 + 160 + return 0; 161 + } 162 + 163 + static void at91sam9x5_usb_restore_context(struct clk_hw *hw) 164 + { 165 + struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); 166 + int ret; 167 + 168 + ret = at91sam9x5_clk_usb_set_parent(hw, usb->pms.parent); 169 + if (ret) 170 + return; 171 + 172 + at91sam9x5_clk_usb_set_rate(hw, usb->pms.rate, usb->pms.parent_rate); 173 + } 174 + 152 175 static const struct clk_ops at91sam9x5_usb_ops = { 153 176 .recalc_rate = at91sam9x5_clk_usb_recalc_rate, 154 177 .determine_rate = at91sam9x5_clk_usb_determine_rate, 155 178 .get_parent = at91sam9x5_clk_usb_get_parent, 156 179 .set_parent = at91sam9x5_clk_usb_set_parent, 157 180 .set_rate = at91sam9x5_clk_usb_set_rate, 181 + .save_context = at91sam9x5_usb_save_context, 182 + .restore_context = at91sam9x5_usb_restore_context, 158 183 }; 159 184 160 185 static int at91sam9n12_clk_usb_enable(struct clk_hw *hw)
+39
drivers/clk/at91/clk-utmi.c
··· 23 23 struct clk_hw hw; 24 24 struct regmap *regmap_pmc; 25 25 struct regmap *regmap_sfr; 26 + struct at91_clk_pms pms; 26 27 }; 27 28 28 29 #define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw) ··· 114 113 return UTMI_RATE; 115 114 } 116 115 116 + static int clk_utmi_save_context(struct clk_hw *hw) 117 + { 118 + struct clk_utmi *utmi = to_clk_utmi(hw); 119 + 120 + utmi->pms.status = clk_utmi_is_prepared(hw); 121 + 122 + return 0; 123 + } 124 + 125 + static void clk_utmi_restore_context(struct clk_hw *hw) 126 + { 127 + struct clk_utmi *utmi = to_clk_utmi(hw); 128 + 129 + if (utmi->pms.status) 130 + clk_utmi_prepare(hw); 131 + } 132 + 117 133 static const struct clk_ops utmi_ops = { 118 134 .prepare = clk_utmi_prepare, 119 135 .unprepare = clk_utmi_unprepare, 120 136 .is_prepared = clk_utmi_is_prepared, 121 137 .recalc_rate = clk_utmi_recalc_rate, 138 + .save_context = clk_utmi_save_context, 139 + .restore_context = clk_utmi_restore_context, 122 140 }; 123 141 124 142 static struct clk_hw * __init ··· 252 232 return 0; 253 233 } 254 234 235 + static int clk_utmi_sama7g5_save_context(struct clk_hw *hw) 236 + { 237 + struct clk_utmi *utmi = to_clk_utmi(hw); 238 + 239 + utmi->pms.status = clk_utmi_sama7g5_is_prepared(hw); 240 + 241 + return 0; 242 + } 243 + 244 + static void clk_utmi_sama7g5_restore_context(struct clk_hw *hw) 245 + { 246 + struct clk_utmi *utmi = to_clk_utmi(hw); 247 + 248 + if (utmi->pms.status) 249 + clk_utmi_sama7g5_prepare(hw); 250 + } 251 + 255 252 static const struct clk_ops sama7g5_utmi_ops = { 256 253 .prepare = clk_utmi_sama7g5_prepare, 257 254 .is_prepared = clk_utmi_sama7g5_is_prepared, 258 255 .recalc_rate = clk_utmi_recalc_rate, 256 + .save_context = clk_utmi_sama7g5_save_context, 257 + .restore_context = clk_utmi_sama7g5_restore_context, 259 258 }; 260 259 261 260 struct clk_hw * __init
+7 -140
drivers/clk/at91/pmc.c
··· 3 3 * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com> 4 4 */ 5 5 6 + #include <linux/clk.h> 6 7 #include <linux/clk-provider.h> 7 8 #include <linux/clkdev.h> 8 9 #include <linux/clk/at91_pmc.h> ··· 14 13 #include <linux/syscore_ops.h> 15 14 16 15 #include <asm/proc-fns.h> 17 - 18 - #include <dt-bindings/clock/at91.h> 19 16 20 17 #include "pmc.h" 21 18 ··· 110 111 } 111 112 112 113 #ifdef CONFIG_PM 113 - static struct regmap *pmcreg; 114 - 115 - static u8 registered_ids[PMC_MAX_IDS]; 116 - static u8 registered_pcks[PMC_MAX_PCKS]; 117 - 118 - static struct 114 + static int at91_pmc_suspend(void) 119 115 { 120 - u32 scsr; 121 - u32 pcsr0; 122 - u32 uckr; 123 - u32 mor; 124 - u32 mcfr; 125 - u32 pllar; 126 - u32 mckr; 127 - u32 usb; 128 - u32 imr; 129 - u32 pcsr1; 130 - u32 pcr[PMC_MAX_IDS]; 131 - u32 audio_pll0; 132 - u32 audio_pll1; 133 - u32 pckr[PMC_MAX_PCKS]; 134 - } pmc_cache; 135 - 136 - /* 137 - * As Peripheral ID 0 is invalid on AT91 chips, the identifier is stored 138 - * without alteration in the table, and 0 is for unused clocks. 139 - */ 140 - void pmc_register_id(u8 id) 141 - { 142 - int i; 143 - 144 - for (i = 0; i < PMC_MAX_IDS; i++) { 145 - if (registered_ids[i] == 0) { 146 - registered_ids[i] = id; 147 - break; 148 - } 149 - if (registered_ids[i] == id) 150 - break; 151 - } 116 + return clk_save_context(); 152 117 } 153 118 154 - /* 155 - * As Programmable Clock 0 is valid on AT91 chips, there is an offset 156 - * of 1 between the stored value and the real clock ID. 
157 - */ 158 - void pmc_register_pck(u8 pck) 119 + static void at91_pmc_resume(void) 159 120 { 160 - int i; 161 - 162 - for (i = 0; i < PMC_MAX_PCKS; i++) { 163 - if (registered_pcks[i] == 0) { 164 - registered_pcks[i] = pck + 1; 165 - break; 166 - } 167 - if (registered_pcks[i] == (pck + 1)) 168 - break; 169 - } 170 - } 171 - 172 - static int pmc_suspend(void) 173 - { 174 - int i; 175 - u8 num; 176 - 177 - regmap_read(pmcreg, AT91_PMC_SCSR, &pmc_cache.scsr); 178 - regmap_read(pmcreg, AT91_PMC_PCSR, &pmc_cache.pcsr0); 179 - regmap_read(pmcreg, AT91_CKGR_UCKR, &pmc_cache.uckr); 180 - regmap_read(pmcreg, AT91_CKGR_MOR, &pmc_cache.mor); 181 - regmap_read(pmcreg, AT91_CKGR_MCFR, &pmc_cache.mcfr); 182 - regmap_read(pmcreg, AT91_CKGR_PLLAR, &pmc_cache.pllar); 183 - regmap_read(pmcreg, AT91_PMC_MCKR, &pmc_cache.mckr); 184 - regmap_read(pmcreg, AT91_PMC_USB, &pmc_cache.usb); 185 - regmap_read(pmcreg, AT91_PMC_IMR, &pmc_cache.imr); 186 - regmap_read(pmcreg, AT91_PMC_PCSR1, &pmc_cache.pcsr1); 187 - 188 - for (i = 0; registered_ids[i]; i++) { 189 - regmap_write(pmcreg, AT91_PMC_PCR, 190 - (registered_ids[i] & AT91_PMC_PCR_PID_MASK)); 191 - regmap_read(pmcreg, AT91_PMC_PCR, 192 - &pmc_cache.pcr[registered_ids[i]]); 193 - } 194 - for (i = 0; registered_pcks[i]; i++) { 195 - num = registered_pcks[i] - 1; 196 - regmap_read(pmcreg, AT91_PMC_PCKR(num), &pmc_cache.pckr[num]); 197 - } 198 - 199 - return 0; 200 - } 201 - 202 - static bool pmc_ready(unsigned int mask) 203 - { 204 - unsigned int status; 205 - 206 - regmap_read(pmcreg, AT91_PMC_SR, &status); 207 - 208 - return ((status & mask) == mask) ? 
1 : 0; 209 - } 210 - 211 - static void pmc_resume(void) 212 - { 213 - int i; 214 - u8 num; 215 - u32 tmp; 216 - u32 mask = AT91_PMC_MCKRDY | AT91_PMC_LOCKA; 217 - 218 - regmap_read(pmcreg, AT91_PMC_MCKR, &tmp); 219 - if (pmc_cache.mckr != tmp) 220 - pr_warn("MCKR was not configured properly by the firmware\n"); 221 - regmap_read(pmcreg, AT91_CKGR_PLLAR, &tmp); 222 - if (pmc_cache.pllar != tmp) 223 - pr_warn("PLLAR was not configured properly by the firmware\n"); 224 - 225 - regmap_write(pmcreg, AT91_PMC_SCER, pmc_cache.scsr); 226 - regmap_write(pmcreg, AT91_PMC_PCER, pmc_cache.pcsr0); 227 - regmap_write(pmcreg, AT91_CKGR_UCKR, pmc_cache.uckr); 228 - regmap_write(pmcreg, AT91_CKGR_MOR, pmc_cache.mor); 229 - regmap_write(pmcreg, AT91_CKGR_MCFR, pmc_cache.mcfr); 230 - regmap_write(pmcreg, AT91_PMC_USB, pmc_cache.usb); 231 - regmap_write(pmcreg, AT91_PMC_IMR, pmc_cache.imr); 232 - regmap_write(pmcreg, AT91_PMC_PCER1, pmc_cache.pcsr1); 233 - 234 - for (i = 0; registered_ids[i]; i++) { 235 - regmap_write(pmcreg, AT91_PMC_PCR, 236 - pmc_cache.pcr[registered_ids[i]] | 237 - AT91_PMC_PCR_CMD); 238 - } 239 - for (i = 0; registered_pcks[i]; i++) { 240 - num = registered_pcks[i] - 1; 241 - regmap_write(pmcreg, AT91_PMC_PCKR(num), pmc_cache.pckr[num]); 242 - } 243 - 244 - if (pmc_cache.uckr & AT91_PMC_UPLLEN) 245 - mask |= AT91_PMC_LOCKU; 246 - 247 - while (!pmc_ready(mask)) 248 - cpu_relax(); 121 + clk_restore_context(); 249 122 } 250 123 251 124 static struct syscore_ops pmc_syscore_ops = { 252 - .suspend = pmc_suspend, 253 - .resume = pmc_resume, 125 + .suspend = at91_pmc_suspend, 126 + .resume = at91_pmc_resume, 254 127 }; 255 128 256 129 static const struct of_device_id sama5d2_pmc_dt_ids[] = { ··· 142 271 of_node_put(np); 143 272 return -ENODEV; 144 273 } 145 - 146 - pmcreg = device_node_to_regmap(np); 147 274 of_node_put(np); 148 - if (IS_ERR(pmcreg)) 149 - return PTR_ERR(pmcreg); 150 275 151 276 register_syscore_ops(&pmc_syscore_ops); 152 277
+16 -8
drivers/clk/at91/pmc.h
··· 13 13 #include <linux/regmap.h> 14 14 #include <linux/spinlock.h> 15 15 16 + #include <dt-bindings/clock/at91.h> 17 + 16 18 extern spinlock_t pmc_pcr_lock; 17 19 18 20 struct pmc_data { ··· 98 96 u32 div_mask; 99 97 u32 gckcss_mask; 100 98 u32 pid_mask; 99 + }; 100 + 101 + /** 102 + * struct at91_clk_pms - Power management state for AT91 clock 103 + * @rate: clock rate 104 + * @parent_rate: clock parent rate 105 + * @status: clock status (enabled or disabled) 106 + * @parent: clock parent index 107 + */ 108 + struct at91_clk_pms { 109 + unsigned long rate; 110 + unsigned long parent_rate; 111 + unsigned int status; 112 + unsigned int parent; 101 113 }; 102 114 103 115 #define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1)) ··· 263 247 struct clk_hw * __init 264 248 at91_clk_sama7g5_register_utmi(struct regmap *regmap, const char *name, 265 249 const char *parent_name); 266 - 267 - #ifdef CONFIG_PM 268 - void pmc_register_id(u8 id); 269 - void pmc_register_pck(u8 pck); 270 - #else 271 - static inline void pmc_register_id(u8 id) {} 272 - static inline void pmc_register_pck(u8 pck) {} 273 - #endif 274 250 275 251 #endif /* __PMC_H_ */