/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/include/linux/clk.h
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed.  Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change.  Callbacks may return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE. Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 *
 */
#define PRE_RATE_CHANGE		BIT(0)
#define POST_RATE_CHANGE	BIT(1)
#define ABORT_RATE_CHANGE	BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
        struct clk *clk;
        struct srcu_notifier_head notifier_head;
        struct list_head node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future.  For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
        struct clk *clk;
        unsigned long old_rate;
        unsigned long new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
        const char *id;
        struct clk *clk;
};

#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
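
/*
 * Example (illustrative sketch only, not part of this API): a consumer could
 * react to rate changes with a notifier callback along these lines; the
 * foo_* names are hypothetical and registration would typically happen in
 * the driver's probe path.
 *
 *	static int foo_clk_notifier_cb(struct notifier_block *nb,
 *				       unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		switch (event) {
 *		case PRE_RATE_CHANGE:
 *			// quiesce hardware before the rate moves from
 *			// ndata->old_rate to ndata->new_rate
 *			return NOTIFY_OK;
 *		case POST_RATE_CHANGE:
 *		case ABORT_RATE_CHANGE:
 *			// resume operation at the now-current rate
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block foo_clk_nb = {
 *		.notifier_call = foo_clk_notifier_cb,
 *	};
 *
 *	// in probe: ret = clk_notifier_register(clk, &foo_clk_nb);
 */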

/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);

/**
 * devm_clk_notifier_register - register a managed rate-change notifier callback
 * @dev: device for clock "consumer"
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * Returns 0 on success, or a negative errno otherwise.
 */
int devm_clk_notifier_register(struct device *dev, struct clk *clk,
                               struct notifier_block *nb);

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 *                    for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
 * success, or a negative errno otherwise.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, or a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns a negative errno.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
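
/*
 * Example (illustrative sketch only): requesting a 50% duty cycle and then
 * reading it back as a percentage by passing a scale of 100. The clk
 * variable and surrounding error handling are assumed to come from the
 * caller.
 *
 *	int ret, percent;
 *
 *	ret = clk_set_duty_cycle(clk, 1, 2);	// ask for a 1/2 (50%) ratio
 *	if (ret)
 *		return ret;
 *
 *	percent = clk_get_scaled_duty_cycle(clk, 100);
 *	if (percent < 0)
 *		return percent;			// negative errno on failure
 */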

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *                          producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider. It prevents any other consumer from executing, even indirectly,
 * any operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * devm_clk_rate_exclusive_get - devm variant of clk_rate_exclusive_get
 * @dev: device the exclusivity is bound to
 * @clk: clock source
 *
 * Calls clk_rate_exclusive_get() on @clk and registers a devm cleanup handler
 * on @dev to call clk_rate_exclusive_put().
 *
 * Must not be called from within atomic context.
 */
int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 *                          producer
 * @clk: clock source
 *
 * This function allows drivers to release the exclusivity they previously
 * got from clk_rate_exclusive_get().
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);
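
/*
 * Example (illustrative sketch only): pinning a producer's rate while an
 * operation that depends on it is in flight. The get/put pair must balance.
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *
 *	// ... the rate of clk cannot be changed by other consumers here ...
 *
 *	clk_rate_exclusive_put(clk);
 */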

#else

static inline int clk_notifier_register(struct clk *clk,
                                        struct notifier_block *nb)
{
        return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
                                          struct notifier_block *nb)
{
        return -ENOTSUPP;
}

static inline int devm_clk_notifier_register(struct device *dev,
                                             struct clk *clk,
                                             struct notifier_block *nb)
{
        return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
        return -ENOTSUPP;
}

static inline long clk_set_phase(struct clk *clk, int phase)
{
        return -ENOTSUPP;
}

static inline long clk_get_phase(struct clk *clk)
{
        return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
                                     unsigned int den)
{
        return -ENOTSUPP;
}

static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
                                                     unsigned int scale)
{
        return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
        return p == q;
}

static inline int clk_rate_exclusive_get(struct clk *clk)
{
        return 0;
}

static inline int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
{
        return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

#endif

#ifdef CONFIG_HAVE_CLK_PREPARE
/**
 * clk_prepare - prepare a clock source
 * @clk: clock source
 *
 * This prepares the clock source for use.
 *
 * Must not be called from within atomic context.
 */
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
                                  const struct clk_bulk_data *clks);

/**
 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
 * @clk: clock source
 *
 * Returns true if clk_prepare() implicitly enables the clock, effectively
 * making clk_enable()/clk_disable() no-ops, false otherwise.
 *
 * This is of interest mainly to the power management code where actually
 * disabling the clock also requires unpreparing it to have any material
 * effect.
 *
 * Regardless of the value returned here, the caller must always invoke
 * clk_enable() or clk_prepare_enable() and counterparts for usage counts
 * to be right.
 */
bool clk_is_enabled_when_prepared(struct clk *clk);
#else
static inline int clk_prepare(struct clk *clk)
{
        might_sleep();
        return 0;
}

static inline int __must_check
clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
        might_sleep();
        return 0;
}

static inline bool clk_is_enabled_when_prepared(struct clk *clk)
{
        return false;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock. The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
        might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks,
                                      const struct clk_bulk_data *clks)
{
        might_sleep();
}
#endif
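
/*
 * Example (illustrative sketch only): the prepare/unprepare pair may sleep,
 * so do it from process context (e.g. probe), and keep the enable/disable
 * pair for atomic paths. The calls must balance.
 *
 *	// probe, may sleep
 *	ret = clk_prepare(clk);
 *	if (ret)
 *		return ret;
 *
 *	// later, possibly in atomic context
 *	ret = clk_enable(clk);
 *	...
 *	clk_disable(clk);
 *
 *	// remove, may sleep
 *	clk_unprepare(clk);
 */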

#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);

/**
 * clk_bulk_get - lookup and obtain a number of references to clock producer.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation. If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully, or valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
                              struct clk_bulk_data *clks);
/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 *                    producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation. If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
                                  struct clk_bulk_data **clks);

/**
 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Behaves the same as clk_bulk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns 0 and
 * NULL for a clk for which a clock producer could not be determined.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
                                       struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
                                   struct clk_bulk_data *clks);
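
/*
 * Example (illustrative sketch only): grabbing a fixed set of clocks with a
 * clk_bulk_data table. The consumer IDs "bus" and "core" are made up for
 * this sketch.
 *
 *	static struct clk_bulk_data foo_clks[] = {
 *		{ .id = "bus" },
 *		{ .id = "core" },
 *	};
 *
 *	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_bulk_prepare_enable(ARRAY_SIZE(foo_clks), foo_clks);
 *	if (ret)
 *		return ret;
 *
 *	// ... on teardown ...
 *	clk_bulk_disable_unprepare(ARRAY_SIZE(foo_clks), foo_clks);
 */
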
/**
 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Behaves the same as devm_clk_bulk_get() except where there is no clock
 * producer.  In this case, instead of returning -ENOENT, the function returns
 * NULL for the given clk. It is assumed all clocks in clk_bulk_data are
 * optional.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully or for any clk there was no clk provider available, otherwise
 * returns valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_bulk_get_optional should not be called from within interrupt
 * context.
 */
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
                                            struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get_all(struct device *dev,
                                       struct clk_bulk_data **clks);

/**
 * devm_clk_bulk_get_all_enabled - Get and enable all clocks of the consumer (managed)
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get all clocks of the
 * consumer and enables them in one operation with management.
 * The clks will automatically be disabled and freed when the device
 * is unbound.
 */
int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
                                               struct clk_bulk_data **clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is neither prepared nor
 * enabled.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

/**
 * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * The returned clk (if valid) is prepared. Drivers must however assume
 * that the clock is not enabled.
 *
 * The clock will automatically be unprepared and freed when the device
 * is unbound from the bus.
 */
struct clk *devm_clk_get_prepared(struct device *dev, const char *id);

/**
 * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * The returned clk (if valid) is prepared and enabled.
 *
 * The clock will automatically be disabled, unprepared and freed
 * when the device is unbound from the bus.
 */
struct clk *devm_clk_get_enabled(struct device *dev, const char *id);
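
/*
 * Example (illustrative sketch only): a probe path that wants the clock up
 * for the whole lifetime of the binding can rely entirely on devm. The
 * "pclk" consumer ID is made up.
 *
 *	clk = devm_clk_get_enabled(&pdev->dev, "pclk");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	// no explicit clk_disable_unprepare()/clk_put() needed: the clock is
 *	// disabled, unprepared and freed automatically on unbind
 */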

/**
 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
 *                         clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  If no such clk is found, it returns NULL
 * which serves as a dummy clk.  That's the only difference compared
 * to devm_clk_get().
 *
 * Drivers must assume that the clock source is neither prepared nor
 * enabled.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get_optional(struct device *dev, const char *id);
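
/*
 * Example (illustrative sketch only): an optional clock can be handled
 * without special-casing its absence, since a NULL clk is accepted by the
 * rest of the API as a dummy. The "ref" consumer ID is made up.
 *
 *	clk = devm_clk_get_optional(dev, "ref");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);	// a real error, e.g. -EPROBE_DEFER
 *
 *	// clk may be NULL here; clk_prepare_enable(NULL) etc. are no-ops
 *	ret = clk_prepare_enable(clk);
 */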

/**
 * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  If no such clk is found, it returns NULL
 * which serves as a dummy clk.  That's the only difference compared
 * to devm_clk_get_prepared().
 *
 * The returned clk (if valid) is prepared. Drivers must however
 * assume that the clock is not enabled.
 *
 * The clock will automatically be unprepared and freed when the
 * device is unbound from the bus.
 */
struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);

/**
 * devm_clk_get_optional_enabled - devm_clk_get_optional() +
 *                                 clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  If no such clk is found, it returns NULL
 * which serves as a dummy clk.  That's the only difference compared
 * to devm_clk_get_enabled().
 *
 * The returned clk (if valid) is prepared and enabled.
 *
 * The clock will automatically be disabled, unprepared and freed
 * when the device is unbound from the bus.
 */
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);

/**
 * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() +
 *                                           clk_set_rate() +
 *                                           clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 * @rate: new clock rate
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  If no such clk is found, it returns NULL
 * which serves as a dummy clk.  That's the only difference compared
 * to devm_clk_get_enabled().
 *
 * The returned clk (if valid) has had its rate set, and is prepared and
 * enabled.
 *
 * The clock will automatically be disabled, unprepared and freed
 * when the device is unbound from the bus.
 */
struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
                                                    const char *id,
                                                    unsigned long rate);

/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *                           clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
                                    struct device_node *np, const char *con_id);

/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock cannot be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
                                 const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);
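
/*
 * Example (illustrative sketch only): because clk_enable()/clk_disable() are
 * usable from atomic context, a driver that prepared its clock at probe time
 * can gate it per transfer, for instance from an interrupt handler, as long
 * as the calls stay balanced. The error handling here is hypothetical.
 *
 *	// start of a transfer (atomic context is fine)
 *	if (clk_enable(clk))
 *		return IRQ_NONE;
 *
 *	// ... program the hardware ...
 *
 *	clk_disable(clk);	// balances the clk_enable() above
 */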

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 *                    longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 *                This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put - "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put - "free" the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put_all should not be called from within interrupt context.
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put - "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * devm_clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */


/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way.  In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Updating the rate starts at the top-most affected clock and then
 * walks the tree down to the bottom-most clock that needs updating.
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);
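
/*
 * Example (illustrative sketch only): probing what rate the hardware would
 * actually provide before committing to it. The rates are arbitrary.
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded < 0)
 *		return rounded;		// negative errno
 *
 *	// only change the hardware if the achievable rate is acceptable
 *	if (rounded >= 47000000) {
 *		ret = clk_set_rate(clk, rounded);
 *		if (ret)
 *			return ret;
 *	}
 */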

/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 *                          the clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get(). Caller must balance this call with a call to
 * clk_rate_exclusive_put().
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(const struct clk *clk, const struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);
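
/*
 * Example (illustrative sketch only): constraining a clock to a band the
 * consumer can tolerate, then dropping the constraint when done. The rates
 * are arbitrary.
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 *	if (ret)
 *		return ret;
 *
 *	// ... the framework keeps the rate within [100 MHz, 200 MHz] ...
 *
 *	clk_drop_range(clk);	// same as clk_set_rate_range(clk, 0, ULONG_MAX)
 */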

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);

/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer.  In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code so locking is not necessary.
 */
int clk_save_context(void);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * This occurs with all clocks enabled. Occurs deep within the resume code
 * so locking is not necessary.
 */
void clk_restore_context(void);

#else /* !CONFIG_HAVE_CLK */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
        return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
                                            struct clk_bulk_data *clks)
{
        return 0;
}

static inline int __must_check clk_bulk_get_optional(struct device *dev,
                                int num_clks, struct clk_bulk_data *clks)
{
        return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
                                                struct clk_bulk_data **clks)
{
        return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
        return NULL;
}

static inline struct clk *devm_clk_get_prepared(struct device *dev,
                                                const char *id)
{
        return NULL;
}

static inline struct clk *devm_clk_get_enabled(struct device *dev,
                                               const char *id)
{
        return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
                                                const char *id)
{
        return NULL;
}

static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
                                                          const char *id)
{
        return NULL;
}

static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
                                                         const char *id)
{
        return NULL;
}

static inline struct clk *
devm_clk_get_optional_enabled_with_rate(struct device *dev, const char *id,
                                        unsigned long rate)
{
        return NULL;
}

static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
                                                 struct clk_bulk_data *clks)
{
        return 0;
}

static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
                                int num_clks, struct clk_bulk_data *clks)
{
        return 0;
}

static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
                                                     struct clk_bulk_data **clks)
{
        return 0;
}

static inline int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
                                                              struct clk_bulk_data **clks)
{
        return 0;
}

static inline struct clk *devm_get_clk_from_child(struct device *dev,
                                struct device_node *np, const char *con_id)
{
        return NULL;
}

static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

static inline int clk_enable(struct clk *clk)
{
        return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks,
                                               const struct clk_bulk_data *clks)
{
        return 0;
}

static inline void clk_disable(struct clk *clk) {}

static inline void clk_bulk_disable(int num_clks,
                                    const struct clk_bulk_data *clks) {}

static inline unsigned long clk_get_rate(struct clk *clk)
{
        return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
        return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
        return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
        return 0;
}

static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
{
        return true;
}

static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
                                     unsigned long max)
{
        return 0;
}

static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
        return 0;
}

static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
        return 0;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
        return 0;
}

static inline struct clk *clk_get_parent(struct clk *clk)
{
        return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
        return NULL;
}

static inline int clk_save_context(void)
{
        return 0;
}

static inline void clk_restore_context(void) {}

#endif

/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
        int ret;

        ret = clk_prepare(clk);
        if (ret)
                return ret;
        ret = clk_enable(clk);
        if (ret)
                clk_unprepare(clk);

        return ret;
}

/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
        clk_disable(clk);
        clk_unprepare(clk);
}
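
/*
 * Example (illustrative sketch only): the usual non-devm lifecycle, pairing
 * clk_prepare_enable() in probe with clk_disable_unprepare() in remove and
 * in the probe error path.
 *
 *	// probe
 *	clk = clk_get(dev, NULL);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	ret = clk_prepare_enable(clk);
 *	if (ret) {
 *		clk_put(clk);
 *		return ret;
 *	}
 *
 *	// remove
 *	clk_disable_unprepare(clk);
 *	clk_put(clk);
 */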

static inline int __must_check
clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks)
{
        int ret;

        ret = clk_bulk_prepare(num_clks, clks);
        if (ret)
                return ret;
        ret = clk_bulk_enable(num_clks, clks);
        if (ret)
                clk_bulk_unprepare(num_clks, clks);

        return ret;
}

static inline void clk_bulk_disable_unprepare(int num_clks,
                                              const struct clk_bulk_data *clks)
{
        clk_bulk_disable(num_clks, clks);
        clk_bulk_unprepare(num_clks, clks);
}

/**
 * clk_drop_range - Reset any range set on that clock
 * @clk: clock source
 *
 * Returns success (0) or negative errno.
 */
static inline int clk_drop_range(struct clk *clk)
{
        return clk_set_rate_range(clk, 0, ULONG_MAX);
}

/**
 * clk_get_optional - lookup and obtain a reference to an optional clock
 *                    producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as clk_get() except where there is no clock producer.  In
 * this case, instead of returning -ENOENT, the function returns NULL.
 */
static inline struct clk *clk_get_optional(struct device *dev, const char *id)
{
        struct clk *clk = clk_get(dev, id);

        if (clk == ERR_PTR(-ENOENT))
                return NULL;

        return clk;
}

#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
#else
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
        return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
                                             const char *name)
{
        return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
        return ERR_PTR(-ENOENT);
}
#endif

#endif