Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

PM: clk: make PM clock layer compatible with clocks that must sleep

The clock API splits its interface into sleepable and atomic contexts:

- clk_prepare/clk_unprepare for stuff that might sleep

- clk_enable/clk_disable for anything that may be done in atomic context

The code handling runtime PM for clocks only calls clk_disable() on
suspend requests, and clk_enable() on resume requests. This means that
runtime PM with clock providers that only have the prepare/unprepare
methods implemented is basically useless.

Many clock implementations can't accommodate atomic contexts. This is
often the case when communication with the clock happens through another
subsystem like I2C or SCMI.

Let's make the clock PM code useful with such clocks by safely invoking
clk_prepare/clk_unprepare upon resume/suspend requests. Of course, when
such clocks are registered with the PM layer then pm_runtime_irq_safe()
can't be used, and neither pm_runtime_suspend() nor pm_runtime_resume()
may be invoked in atomic context.

For clocks that do implement the enable and disable methods then
everything just works as before.

A note on sparse:
According to https://lwn.net/Articles/109066/ there are things
that sparse can't cope with. In particular, pm_clk_op_lock() and
pm_clk_op_unlock() may or may not lock/unlock psd->lock depending on
some runtime condition. To work around that we tell it the lock is
always untaken for the purpose of static analysis.

Thanks to Naresh Kamboju for reporting issues with the initial patch.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

authored by

Nicolas Pitre and committed by
Rafael J. Wysocki
0bfa0820 6ee1d745

+228 -42
+182 -41
drivers/base/power/clock_ops.c
··· 23 23 enum pce_status { 24 24 PCE_STATUS_NONE = 0, 25 25 PCE_STATUS_ACQUIRED, 26 + PCE_STATUS_PREPARED, 26 27 PCE_STATUS_ENABLED, 27 28 PCE_STATUS_ERROR, 28 29 }; ··· 33 32 char *con_id; 34 33 struct clk *clk; 35 34 enum pce_status status; 35 + bool enabled_when_prepared; 36 36 }; 37 + 38 + /** 39 + * pm_clk_list_lock - ensure exclusive access for modifying the PM clock 40 + * entry list. 41 + * @psd: pm_subsys_data instance corresponding to the PM clock entry list 42 + * and clk_op_might_sleep count to be modified. 43 + * 44 + * Get exclusive access before modifying the PM clock entry list and the 45 + * clock_op_might_sleep count to guard against concurrent modifications. 46 + * This also protects against a concurrent clock_op_might_sleep and PM clock 47 + * entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not 48 + * happen in atomic context, hence both the mutex and the spinlock must be 49 + * taken here. 50 + */ 51 + static void pm_clk_list_lock(struct pm_subsys_data *psd) 52 + __acquires(&psd->lock) 53 + { 54 + mutex_lock(&psd->clock_mutex); 55 + spin_lock_irq(&psd->lock); 56 + } 57 + 58 + /** 59 + * pm_clk_list_unlock - counterpart to pm_clk_list_lock(). 60 + * @psd: the same pm_subsys_data instance previously passed to 61 + * pm_clk_list_lock(). 62 + */ 63 + static void pm_clk_list_unlock(struct pm_subsys_data *psd) 64 + __releases(&psd->lock) 65 + { 66 + spin_unlock_irq(&psd->lock); 67 + mutex_unlock(&psd->clock_mutex); 68 + } 69 + 70 + /** 71 + * pm_clk_op_lock - ensure exclusive access for performing clock operations. 72 + * @psd: pm_subsys_data instance corresponding to the PM clock entry list 73 + * and clk_op_might_sleep count being used. 74 + * @flags: stored irq flags. 75 + * @fn: string for the caller function's name. 76 + * 77 + * This is used by pm_clk_suspend() and pm_clk_resume() to guard 78 + * against concurrent modifications to the clock entry list and the 79 + * clock_op_might_sleep count. 
If clock_op_might_sleep is != 0 then 80 + * only the mutex can be locked and those functions can only be used in 81 + * non atomic context. If clock_op_might_sleep == 0 then these functions 82 + * may be used in any context and only the spinlock can be locked. 83 + * Returns -EINVAL if called in atomic context when clock ops might sleep. 84 + */ 85 + static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags, 86 + const char *fn) 87 + /* sparse annotations don't work here as exit state isn't static */ 88 + { 89 + bool atomic_context = in_atomic() || irqs_disabled(); 90 + 91 + try_again: 92 + spin_lock_irqsave(&psd->lock, *flags); 93 + if (!psd->clock_op_might_sleep) { 94 + /* the __release is there to work around sparse limitations */ 95 + __release(&psd->lock); 96 + return 0; 97 + } 98 + 99 + /* bail out if in atomic context */ 100 + if (atomic_context) { 101 + pr_err("%s: atomic context with clock_ops_might_sleep = %d", 102 + fn, psd->clock_op_might_sleep); 103 + spin_unlock_irqrestore(&psd->lock, *flags); 104 + might_sleep(); 105 + return -EPERM; 106 + } 107 + 108 + /* we must switch to the mutex */ 109 + spin_unlock_irqrestore(&psd->lock, *flags); 110 + mutex_lock(&psd->clock_mutex); 111 + 112 + /* 113 + * There was a possibility for psd->clock_op_might_sleep 114 + * to become 0 above. Keep the mutex only if not the case. 115 + */ 116 + if (likely(psd->clock_op_might_sleep)) 117 + return 0; 118 + 119 + mutex_unlock(&psd->clock_mutex); 120 + goto try_again; 121 + } 122 + 123 + /** 124 + * pm_clk_op_unlock - counterpart to pm_clk_op_lock(). 125 + * @psd: the same pm_subsys_data instance previously passed to 126 + * pm_clk_op_lock(). 127 + * @flags: irq flags provided by pm_clk_op_lock(). 
128 + */ 129 + static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags) 130 + /* sparse annotations don't work here as entry state isn't static */ 131 + { 132 + if (psd->clock_op_might_sleep) { 133 + mutex_unlock(&psd->clock_mutex); 134 + } else { 135 + /* the __acquire is there to work around sparse limitations */ 136 + __acquire(&psd->lock); 137 + spin_unlock_irqrestore(&psd->lock, *flags); 138 + } 139 + } 37 140 38 141 /** 39 142 * pm_clk_enable - Enable a clock, reporting any errors ··· 148 43 { 149 44 int ret; 150 45 151 - if (ce->status < PCE_STATUS_ERROR) { 46 + switch (ce->status) { 47 + case PCE_STATUS_ACQUIRED: 48 + ret = clk_prepare_enable(ce->clk); 49 + break; 50 + case PCE_STATUS_PREPARED: 152 51 ret = clk_enable(ce->clk); 153 - if (!ret) 154 - ce->status = PCE_STATUS_ENABLED; 155 - else 156 - dev_err(dev, "%s: failed to enable clk %p, error %d\n", 157 - __func__, ce->clk, ret); 52 + break; 53 + default: 54 + return; 158 55 } 56 + if (!ret) 57 + ce->status = PCE_STATUS_ENABLED; 58 + else 59 + dev_err(dev, "%s: failed to enable clk %p, error %d\n", 60 + __func__, ce->clk, ret); 159 61 } 160 62 161 63 /** ··· 176 64 ce->clk = clk_get(dev, ce->con_id); 177 65 if (IS_ERR(ce->clk)) { 178 66 ce->status = PCE_STATUS_ERROR; 67 + return; 68 + } else if (clk_is_enabled_when_prepared(ce->clk)) { 69 + /* we defer preparing the clock in that case */ 70 + ce->status = PCE_STATUS_ACQUIRED; 71 + ce->enabled_when_prepared = true; 72 + } else if (clk_prepare(ce->clk)) { 73 + ce->status = PCE_STATUS_ERROR; 74 + dev_err(dev, "clk_prepare() failed\n"); 75 + return; 179 76 } else { 180 - if (clk_prepare(ce->clk)) { 181 - ce->status = PCE_STATUS_ERROR; 182 - dev_err(dev, "clk_prepare() failed\n"); 183 - } else { 184 - ce->status = PCE_STATUS_ACQUIRED; 185 - dev_dbg(dev, 186 - "Clock %pC con_id %s managed by runtime PM.\n", 187 - ce->clk, ce->con_id); 188 - } 77 + ce->status = PCE_STATUS_PREPARED; 189 78 } 79 + dev_dbg(dev, "Clock %pC con_id %s managed 
by runtime PM.\n", 80 + ce->clk, ce->con_id); 190 81 } 191 82 192 83 static int __pm_clk_add(struct device *dev, const char *con_id, ··· 221 106 222 107 pm_clk_acquire(dev, ce); 223 108 224 - spin_lock_irq(&psd->lock); 109 + pm_clk_list_lock(psd); 225 110 list_add_tail(&ce->node, &psd->clock_list); 226 - spin_unlock_irq(&psd->lock); 111 + if (ce->enabled_when_prepared) 112 + psd->clock_op_might_sleep++; 113 + pm_clk_list_unlock(psd); 227 114 return 0; 228 115 } 229 116 ··· 356 239 if (!ce) 357 240 return; 358 241 359 - if (ce->status < PCE_STATUS_ERROR) { 360 - if (ce->status == PCE_STATUS_ENABLED) 361 - clk_disable(ce->clk); 362 - 363 - if (ce->status >= PCE_STATUS_ACQUIRED) { 364 - clk_unprepare(ce->clk); 242 + switch (ce->status) { 243 + case PCE_STATUS_ENABLED: 244 + clk_disable(ce->clk); 245 + fallthrough; 246 + case PCE_STATUS_PREPARED: 247 + clk_unprepare(ce->clk); 248 + fallthrough; 249 + case PCE_STATUS_ACQUIRED: 250 + case PCE_STATUS_ERROR: 251 + if (!IS_ERR(ce->clk)) 365 252 clk_put(ce->clk); 366 - } 253 + break; 254 + default: 255 + break; 367 256 } 368 257 369 258 kfree(ce->con_id); ··· 392 269 if (!psd) 393 270 return; 394 271 395 - spin_lock_irq(&psd->lock); 272 + pm_clk_list_lock(psd); 396 273 397 274 list_for_each_entry(ce, &psd->clock_list, node) { 398 275 if (!con_id && !ce->con_id) ··· 403 280 goto remove; 404 281 } 405 282 406 - spin_unlock_irq(&psd->lock); 283 + pm_clk_list_unlock(psd); 407 284 return; 408 285 409 286 remove: 410 287 list_del(&ce->node); 411 - spin_unlock_irq(&psd->lock); 288 + if (ce->enabled_when_prepared) 289 + psd->clock_op_might_sleep--; 290 + pm_clk_list_unlock(psd); 412 291 413 292 __pm_clk_remove(ce); 414 293 } ··· 432 307 if (!psd || !clk) 433 308 return; 434 309 435 - spin_lock_irq(&psd->lock); 310 + pm_clk_list_lock(psd); 436 311 437 312 list_for_each_entry(ce, &psd->clock_list, node) { 438 313 if (clk == ce->clk) 439 314 goto remove; 440 315 } 441 316 442 - spin_unlock_irq(&psd->lock); 317 + 
pm_clk_list_unlock(psd); 443 318 return; 444 319 445 320 remove: 446 321 list_del(&ce->node); 447 - spin_unlock_irq(&psd->lock); 322 + if (ce->enabled_when_prepared) 323 + psd->clock_op_might_sleep--; 324 + pm_clk_list_unlock(psd); 448 325 449 326 __pm_clk_remove(ce); 450 327 } ··· 457 330 * @dev: Device to initialize the list of PM clocks for. 458 331 * 459 332 * Initialize the lock and clock_list members of the device's pm_subsys_data 460 - * object. 333 + * object, set the count of clocks that might sleep to 0. 461 334 */ 462 335 void pm_clk_init(struct device *dev) 463 336 { 464 337 struct pm_subsys_data *psd = dev_to_psd(dev); 465 - if (psd) 338 + if (psd) { 466 339 INIT_LIST_HEAD(&psd->clock_list); 340 + mutex_init(&psd->clock_mutex); 341 + psd->clock_op_might_sleep = 0; 342 + } 467 343 } 468 344 EXPORT_SYMBOL_GPL(pm_clk_init); 469 345 ··· 502 372 503 373 INIT_LIST_HEAD(&list); 504 374 505 - spin_lock_irq(&psd->lock); 375 + pm_clk_list_lock(psd); 506 376 507 377 list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node) 508 378 list_move(&ce->node, &list); 379 + psd->clock_op_might_sleep = 0; 509 380 510 - spin_unlock_irq(&psd->lock); 381 + pm_clk_list_unlock(psd); 511 382 512 383 dev_pm_put_subsys_data(dev); 513 384 ··· 528 397 struct pm_subsys_data *psd = dev_to_psd(dev); 529 398 struct pm_clock_entry *ce; 530 399 unsigned long flags; 400 + int ret; 531 401 532 402 dev_dbg(dev, "%s()\n", __func__); 533 403 534 404 if (!psd) 535 405 return 0; 536 406 537 - spin_lock_irqsave(&psd->lock, flags); 407 + ret = pm_clk_op_lock(psd, &flags, __func__); 408 + if (ret) 409 + return ret; 538 410 539 411 list_for_each_entry_reverse(ce, &psd->clock_list, node) { 540 - if (ce->status < PCE_STATUS_ERROR) { 541 - if (ce->status == PCE_STATUS_ENABLED) 412 + if (ce->status == PCE_STATUS_ENABLED) { 413 + if (ce->enabled_when_prepared) { 414 + clk_disable_unprepare(ce->clk); 415 + ce->status = PCE_STATUS_ACQUIRED; 416 + } else { 542 417 clk_disable(ce->clk); 543 - 
ce->status = PCE_STATUS_ACQUIRED; 418 + ce->status = PCE_STATUS_PREPARED; 419 + } 544 420 } 545 421 } 546 422 547 - spin_unlock_irqrestore(&psd->lock, flags); 423 + pm_clk_op_unlock(psd, &flags); 548 424 549 425 return 0; 550 426 } ··· 566 428 struct pm_subsys_data *psd = dev_to_psd(dev); 567 429 struct pm_clock_entry *ce; 568 430 unsigned long flags; 431 + int ret; 569 432 570 433 dev_dbg(dev, "%s()\n", __func__); 571 434 572 435 if (!psd) 573 436 return 0; 574 437 575 - spin_lock_irqsave(&psd->lock, flags); 438 + ret = pm_clk_op_lock(psd, &flags, __func__); 439 + if (ret) 440 + return ret; 576 441 577 442 list_for_each_entry(ce, &psd->clock_list, node) 578 443 __pm_clk_enable(dev, ce); 579 444 580 - spin_unlock_irqrestore(&psd->lock, flags); 445 + pm_clk_op_unlock(psd, &flags); 581 446 582 447 return 0; 583 448 }
+21
drivers/clk/clk.c
··· 1164 1164 } 1165 1165 EXPORT_SYMBOL_GPL(clk_enable); 1166 1166 1167 + /** 1168 + * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. 1169 + * @clk: clock source 1170 + * 1171 + * Returns true if clk_prepare() implicitly enables the clock, effectively 1172 + * making clk_enable()/clk_disable() no-ops, false otherwise. 1173 + * 1174 + * This is of interest mainly to power management code where actually 1175 + * disabling the clock also requires unpreparing it to have any material 1176 + * effect. 1177 + * 1178 + * Regardless of the value returned here, the caller must always invoke 1179 + * clk_enable() or clk_prepare_enable() and counterparts for usage counts 1180 + * to be right. 1181 + */ 1182 + bool clk_is_enabled_when_prepared(struct clk *clk) 1183 + { 1184 + return clk && !(clk->core->ops->enable && clk->core->ops->disable); 1185 + } 1186 + EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared); 1187 + 1167 1188 static int clk_core_prepare_enable(struct clk_core *core) 1168 1189 { 1169 1190 int ret;
+23 -1
include/linux/clk.h
··· 238 238 239 239 #endif 240 240 241 + #ifdef CONFIG_HAVE_CLK_PREPARE 241 242 /** 242 243 * clk_prepare - prepare a clock source 243 244 * @clk: clock source ··· 247 246 * 248 247 * Must not be called from within atomic context. 249 248 */ 250 - #ifdef CONFIG_HAVE_CLK_PREPARE 251 249 int clk_prepare(struct clk *clk); 252 250 int __must_check clk_bulk_prepare(int num_clks, 253 251 const struct clk_bulk_data *clks); 252 + 253 + /** 254 + * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. 255 + * @clk: clock source 256 + * 257 + * Returns true if clk_prepare() implicitly enables the clock, effectively 258 + * making clk_enable()/clk_disable() no-ops, false otherwise. 259 + * 260 + * This is of interest mainly to the power management code where actually 261 + * disabling the clock also requires unpreparing it to have any material 262 + * effect. 263 + * 264 + * Regardless of the value returned here, the caller must always invoke 265 + * clk_enable() or clk_prepare_enable() and counterparts for usage counts 266 + * to be right. 267 + */ 268 + bool clk_is_enabled_when_prepared(struct clk *clk); 254 269 #else 255 270 static inline int clk_prepare(struct clk *clk) 256 271 { ··· 279 262 { 280 263 might_sleep(); 281 264 return 0; 265 + } 266 + 267 + static inline bool clk_is_enabled_when_prepared(struct clk *clk) 268 + { 269 + return false; 282 270 } 283 271 #endif 284 272
+2
include/linux/pm.h
··· 537 537 spinlock_t lock; 538 538 unsigned int refcount; 539 539 #ifdef CONFIG_PM_CLK 540 + unsigned int clock_op_might_sleep; 541 + struct mutex clock_mutex; 540 542 struct list_head clock_list; 541 543 #endif 542 544 #ifdef CONFIG_PM_GENERIC_DOMAINS