at v6.12 37 kB view raw
1/* SPDX-License-Identifier: GPL-2.0-only */ 2/* 3 * linux/include/linux/clk.h 4 * 5 * Copyright (C) 2004 ARM Limited. 6 * Written by Deep Blue Solutions Limited. 7 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> 8 */ 9#ifndef __LINUX_CLK_H 10#define __LINUX_CLK_H 11 12#include <linux/err.h> 13#include <linux/kernel.h> 14#include <linux/notifier.h> 15 16struct device; 17struct clk; 18struct device_node; 19struct of_phandle_args; 20 21/** 22 * DOC: clk notifier callback types 23 * 24 * PRE_RATE_CHANGE - called immediately before the clk rate is changed, 25 * to indicate that the rate change will proceed. Drivers must 26 * immediately terminate any operations that will be affected by the 27 * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK, 28 * NOTIFY_STOP or NOTIFY_BAD. 29 * 30 * ABORT_RATE_CHANGE: called if the rate change failed for some reason 31 * after PRE_RATE_CHANGE. In this case, all registered notifiers on 32 * the clk will be called with ABORT_RATE_CHANGE. Callbacks must 33 * always return NOTIFY_DONE or NOTIFY_OK. 34 * 35 * POST_RATE_CHANGE - called after the clk rate change has successfully 36 * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK. 37 * 38 */ 39#define PRE_RATE_CHANGE BIT(0) 40#define POST_RATE_CHANGE BIT(1) 41#define ABORT_RATE_CHANGE BIT(2) 42 43/** 44 * struct clk_notifier - associate a clk with a notifier 45 * @clk: struct clk * to associate the notifier with 46 * @notifier_head: a blocking_notifier_head for this clk 47 * @node: linked list pointers 48 * 49 * A list of struct clk_notifier is maintained by the notifier code. 50 * An entry is created whenever code registers the first notifier on a 51 * particular @clk. Future notifiers on that @clk are added to the 52 * @notifier_head. 
53 */ 54struct clk_notifier { 55 struct clk *clk; 56 struct srcu_notifier_head notifier_head; 57 struct list_head node; 58}; 59 60/** 61 * struct clk_notifier_data - rate data to pass to the notifier callback 62 * @clk: struct clk * being changed 63 * @old_rate: previous rate of this clk 64 * @new_rate: new rate of this clk 65 * 66 * For a pre-notifier, old_rate is the clk's rate before this rate 67 * change, and new_rate is what the rate will be in the future. For a 68 * post-notifier, old_rate and new_rate are both set to the clk's 69 * current rate (this was done to optimize the implementation). 70 */ 71struct clk_notifier_data { 72 struct clk *clk; 73 unsigned long old_rate; 74 unsigned long new_rate; 75}; 76 77/** 78 * struct clk_bulk_data - Data used for bulk clk operations. 79 * 80 * @id: clock consumer ID 81 * @clk: struct clk * to store the associated clock 82 * 83 * The CLK APIs provide a series of clk_bulk_() API calls as 84 * a convenience to consumers which require multiple clks. This 85 * structure is used to manage data for these calls. 86 */ 87struct clk_bulk_data { 88 const char *id; 89 struct clk *clk; 90}; 91 92#ifdef CONFIG_COMMON_CLK 93 94/** 95 * clk_notifier_register - register a clock rate-change notifier callback 96 * @clk: clock whose rate we are interested in 97 * @nb: notifier block with callback function pointer 98 * 99 * ProTip: debugging across notifier chains can be frustrating. Make sure that 100 * your notifier callback function prints a nice big warning in case of 101 * failure. 
102 */ 103int clk_notifier_register(struct clk *clk, struct notifier_block *nb); 104 105/** 106 * clk_notifier_unregister - unregister a clock rate-change notifier callback 107 * @clk: clock whose rate we are no longer interested in 108 * @nb: notifier block which will be unregistered 109 */ 110int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); 111 112/** 113 * devm_clk_notifier_register - register a managed rate-change notifier callback 114 * @dev: device for clock "consumer" 115 * @clk: clock whose rate we are interested in 116 * @nb: notifier block with callback function pointer 117 * 118 * Returns 0 on success, -EERROR otherwise 119 */ 120int devm_clk_notifier_register(struct device *dev, struct clk *clk, 121 struct notifier_block *nb); 122 123/** 124 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion) 125 * for a clock source. 126 * @clk: clock source 127 * 128 * This gets the clock source accuracy expressed in ppb. 129 * A perfect clock returns 0. 130 */ 131long clk_get_accuracy(struct clk *clk); 132 133/** 134 * clk_set_phase - adjust the phase shift of a clock signal 135 * @clk: clock signal source 136 * @degrees: number of degrees the signal is shifted 137 * 138 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on 139 * success, -EERROR otherwise. 140 */ 141int clk_set_phase(struct clk *clk, int degrees); 142 143/** 144 * clk_get_phase - return the phase shift of a clock signal 145 * @clk: clock signal source 146 * 147 * Returns the phase shift of a clock node in degrees, otherwise returns 148 * -EERROR. 149 */ 150int clk_get_phase(struct clk *clk); 151 152/** 153 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal 154 * @clk: clock signal source 155 * @num: numerator of the duty cycle ratio to be applied 156 * @den: denominator of the duty cycle ratio to be applied 157 * 158 * Adjust the duty cycle of a clock signal by the specified ratio. 
Returns 0 on 159 * success, -EERROR otherwise. 160 */ 161int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den); 162 163/** 164 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal 165 * @clk: clock signal source 166 * @scale: scaling factor to be applied to represent the ratio as an integer 167 * 168 * Returns the duty cycle ratio multiplied by the scale provided, otherwise 169 * returns -EERROR. 170 */ 171int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); 172 173/** 174 * clk_is_match - check if two clk's point to the same hardware clock 175 * @p: clk compared against q 176 * @q: clk compared against p 177 * 178 * Returns true if the two struct clk pointers both point to the same hardware 179 * clock node. Put differently, returns true if @p and @q 180 * share the same &struct clk_core object. 181 * 182 * Returns false otherwise. Note that two NULL clks are treated as matching. 183 */ 184bool clk_is_match(const struct clk *p, const struct clk *q); 185 186/** 187 * clk_rate_exclusive_get - get exclusivity over the rate control of a 188 * producer 189 * @clk: clock source 190 * 191 * This function allows drivers to get exclusive control over the rate of a 192 * provider. It prevents any other consumer to execute, even indirectly, 193 * opereation which could alter the rate of the provider or cause glitches 194 * 195 * If exlusivity is claimed more than once on clock, even by the same driver, 196 * the rate effectively gets locked as exclusivity can't be preempted. 197 * 198 * Must not be called from within atomic context. 199 * 200 * Returns success (0) or negative errno. 
201 */ 202int clk_rate_exclusive_get(struct clk *clk); 203 204/** 205 * devm_clk_rate_exclusive_get - devm variant of clk_rate_exclusive_get 206 * @dev: device the exclusivity is bound to 207 * @clk: clock source 208 * 209 * Calls clk_rate_exclusive_get() on @clk and registers a devm cleanup handler 210 * on @dev to call clk_rate_exclusive_put(). 211 * 212 * Must not be called from within atomic context. 213 */ 214int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk); 215 216/** 217 * clk_rate_exclusive_put - release exclusivity over the rate control of a 218 * producer 219 * @clk: clock source 220 * 221 * This function allows drivers to release the exclusivity it previously got 222 * from clk_rate_exclusive_get() 223 * 224 * The caller must balance the number of clk_rate_exclusive_get() and 225 * clk_rate_exclusive_put() calls. 226 * 227 * Must not be called from within atomic context. 228 */ 229void clk_rate_exclusive_put(struct clk *clk); 230 231#else 232 233static inline int clk_notifier_register(struct clk *clk, 234 struct notifier_block *nb) 235{ 236 return -ENOTSUPP; 237} 238 239static inline int clk_notifier_unregister(struct clk *clk, 240 struct notifier_block *nb) 241{ 242 return -ENOTSUPP; 243} 244 245static inline int devm_clk_notifier_register(struct device *dev, 246 struct clk *clk, 247 struct notifier_block *nb) 248{ 249 return -ENOTSUPP; 250} 251 252static inline long clk_get_accuracy(struct clk *clk) 253{ 254 return -ENOTSUPP; 255} 256 257static inline long clk_set_phase(struct clk *clk, int phase) 258{ 259 return -ENOTSUPP; 260} 261 262static inline long clk_get_phase(struct clk *clk) 263{ 264 return -ENOTSUPP; 265} 266 267static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num, 268 unsigned int den) 269{ 270 return -ENOTSUPP; 271} 272 273static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk, 274 unsigned int scale) 275{ 276 return 0; 277} 278 279static inline bool clk_is_match(const struct clk *p, 
const struct clk *q) 280{ 281 return p == q; 282} 283 284static inline int clk_rate_exclusive_get(struct clk *clk) 285{ 286 return 0; 287} 288 289static inline int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk) 290{ 291 return 0; 292} 293 294static inline void clk_rate_exclusive_put(struct clk *clk) {} 295 296#endif 297 298#ifdef CONFIG_HAVE_CLK_PREPARE 299/** 300 * clk_prepare - prepare a clock source 301 * @clk: clock source 302 * 303 * This prepares the clock source for use. 304 * 305 * Must not be called from within atomic context. 306 */ 307int clk_prepare(struct clk *clk); 308int __must_check clk_bulk_prepare(int num_clks, 309 const struct clk_bulk_data *clks); 310 311/** 312 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. 313 * @clk: clock source 314 * 315 * Returns true if clk_prepare() implicitly enables the clock, effectively 316 * making clk_enable()/clk_disable() no-ops, false otherwise. 317 * 318 * This is of interest mainly to the power management code where actually 319 * disabling the clock also requires unpreparing it to have any material 320 * effect. 321 * 322 * Regardless of the value returned here, the caller must always invoke 323 * clk_enable() or clk_prepare_enable() and counterparts for usage counts 324 * to be right. 325 */ 326bool clk_is_enabled_when_prepared(struct clk *clk); 327#else 328static inline int clk_prepare(struct clk *clk) 329{ 330 might_sleep(); 331 return 0; 332} 333 334static inline int __must_check 335clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks) 336{ 337 might_sleep(); 338 return 0; 339} 340 341static inline bool clk_is_enabled_when_prepared(struct clk *clk) 342{ 343 return false; 344} 345#endif 346 347/** 348 * clk_unprepare - undo preparation of a clock source 349 * @clk: clock source 350 * 351 * This undoes a previously prepared clock. The caller must balance 352 * the number of prepare and unprepare calls. 
353 * 354 * Must not be called from within atomic context. 355 */ 356#ifdef CONFIG_HAVE_CLK_PREPARE 357void clk_unprepare(struct clk *clk); 358void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks); 359#else 360static inline void clk_unprepare(struct clk *clk) 361{ 362 might_sleep(); 363} 364static inline void clk_bulk_unprepare(int num_clks, 365 const struct clk_bulk_data *clks) 366{ 367 might_sleep(); 368} 369#endif 370 371#ifdef CONFIG_HAVE_CLK 372/** 373 * clk_get - lookup and obtain a reference to a clock producer. 374 * @dev: device for clock "consumer" 375 * @id: clock consumer ID 376 * 377 * Returns a struct clk corresponding to the clock producer, or 378 * valid IS_ERR() condition containing errno. The implementation 379 * uses @dev and @id to determine the clock consumer, and thereby 380 * the clock producer. (IOW, @id may be identical strings, but 381 * clk_get may return different clock producers depending on @dev.) 382 * 383 * Drivers must assume that the clock source is not enabled. 384 * 385 * clk_get should not be called from within interrupt context. 386 */ 387struct clk *clk_get(struct device *dev, const char *id); 388 389/** 390 * clk_bulk_get - lookup and obtain a number of references to clock producer. 391 * @dev: device for clock "consumer" 392 * @num_clks: the number of clk_bulk_data 393 * @clks: the clk_bulk_data table of consumer 394 * 395 * This helper function allows drivers to get several clk consumers in one 396 * operation. If any of the clk cannot be acquired then any clks 397 * that were obtained will be freed before returning to the caller. 398 * 399 * Returns 0 if all clocks specified in clk_bulk_data table are obtained 400 * successfully, or valid IS_ERR() condition containing errno. 401 * The implementation uses @dev and @clk_bulk_data.id to determine the 402 * clock consumer, and thereby the clock producer. 403 * The clock returned is stored in each @clk_bulk_data.clk field. 
404 * 405 * Drivers must assume that the clock source is not enabled. 406 * 407 * clk_bulk_get should not be called from within interrupt context. 408 */ 409int __must_check clk_bulk_get(struct device *dev, int num_clks, 410 struct clk_bulk_data *clks); 411/** 412 * clk_bulk_get_all - lookup and obtain all available references to clock 413 * producer. 414 * @dev: device for clock "consumer" 415 * @clks: pointer to the clk_bulk_data table of consumer 416 * 417 * This helper function allows drivers to get all clk consumers in one 418 * operation. If any of the clk cannot be acquired then any clks 419 * that were obtained will be freed before returning to the caller. 420 * 421 * Returns a positive value for the number of clocks obtained while the 422 * clock references are stored in the clk_bulk_data table in @clks field. 423 * Returns 0 if there're none and a negative value if something failed. 424 * 425 * Drivers must assume that the clock source is not enabled. 426 * 427 * clk_bulk_get should not be called from within interrupt context. 428 */ 429int __must_check clk_bulk_get_all(struct device *dev, 430 struct clk_bulk_data **clks); 431 432/** 433 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer 434 * @dev: device for clock "consumer" 435 * @num_clks: the number of clk_bulk_data 436 * @clks: the clk_bulk_data table of consumer 437 * 438 * Behaves the same as clk_bulk_get() except where there is no clock producer. 439 * In this case, instead of returning -ENOENT, the function returns 0 and 440 * NULL for a clk for which a clock producer could not be determined. 441 */ 442int __must_check clk_bulk_get_optional(struct device *dev, int num_clks, 443 struct clk_bulk_data *clks); 444/** 445 * devm_clk_bulk_get - managed get multiple clk consumers 446 * @dev: device for clock "consumer" 447 * @num_clks: the number of clk_bulk_data 448 * @clks: the clk_bulk_data table of consumer 449 * 450 * Return 0 on success, an errno on failure. 
451 * 452 * This helper function allows drivers to get several clk 453 * consumers in one operation with management, the clks will 454 * automatically be freed when the device is unbound. 455 */ 456int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, 457 struct clk_bulk_data *clks); 458/** 459 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks 460 * @dev: device for clock "consumer" 461 * @num_clks: the number of clk_bulk_data 462 * @clks: pointer to the clk_bulk_data table of consumer 463 * 464 * Behaves the same as devm_clk_bulk_get() except where there is no clock 465 * producer. In this case, instead of returning -ENOENT, the function returns 466 * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional. 467 * 468 * Returns 0 if all clocks specified in clk_bulk_data table are obtained 469 * successfully or for any clk there was no clk provider available, otherwise 470 * returns valid IS_ERR() condition containing errno. 471 * The implementation uses @dev and @clk_bulk_data.id to determine the 472 * clock consumer, and thereby the clock producer. 473 * The clock returned is stored in each @clk_bulk_data.clk field. 474 * 475 * Drivers must assume that the clock source is not enabled. 476 * 477 * clk_bulk_get should not be called from within interrupt context. 478 */ 479int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, 480 struct clk_bulk_data *clks); 481/** 482 * devm_clk_bulk_get_all - managed get multiple clk consumers 483 * @dev: device for clock "consumer" 484 * @clks: pointer to the clk_bulk_data table of consumer 485 * 486 * Returns a positive value for the number of clocks obtained while the 487 * clock references are stored in the clk_bulk_data table in @clks field. 488 * Returns 0 if there're none and a negative value if something failed. 
489 * 490 * This helper function allows drivers to get several clk 491 * consumers in one operation with management, the clks will 492 * automatically be freed when the device is unbound. 493 */ 494 495int __must_check devm_clk_bulk_get_all(struct device *dev, 496 struct clk_bulk_data **clks); 497 498/** 499 * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed) 500 * @dev: device for clock "consumer" 501 * @clks: pointer to the clk_bulk_data table of consumer 502 * 503 * Returns success (0) or negative errno. 504 * 505 * This helper function allows drivers to get all clocks of the 506 * consumer and enables them in one operation with management. 507 * The clks will automatically be disabled and freed when the device 508 * is unbound. 509 */ 510 511int __must_check devm_clk_bulk_get_all_enable(struct device *dev, 512 struct clk_bulk_data **clks); 513 514/** 515 * devm_clk_get - lookup and obtain a managed reference to a clock producer. 516 * @dev: device for clock "consumer" 517 * @id: clock consumer ID 518 * 519 * Context: May sleep. 520 * 521 * Return: a struct clk corresponding to the clock producer, or 522 * valid IS_ERR() condition containing errno. The implementation 523 * uses @dev and @id to determine the clock consumer, and thereby 524 * the clock producer. (IOW, @id may be identical strings, but 525 * clk_get may return different clock producers depending on @dev.) 526 * 527 * Drivers must assume that the clock source is neither prepared nor 528 * enabled. 529 * 530 * The clock will automatically be freed when the device is unbound 531 * from the bus. 532 */ 533struct clk *devm_clk_get(struct device *dev, const char *id); 534 535/** 536 * devm_clk_get_prepared - devm_clk_get() + clk_prepare() 537 * @dev: device for clock "consumer" 538 * @id: clock consumer ID 539 * 540 * Context: May sleep. 541 * 542 * Return: a struct clk corresponding to the clock producer, or 543 * valid IS_ERR() condition containing errno. 
The implementation 544 * uses @dev and @id to determine the clock consumer, and thereby 545 * the clock producer. (IOW, @id may be identical strings, but 546 * clk_get may return different clock producers depending on @dev.) 547 * 548 * The returned clk (if valid) is prepared. Drivers must however assume 549 * that the clock is not enabled. 550 * 551 * The clock will automatically be unprepared and freed when the device 552 * is unbound from the bus. 553 */ 554struct clk *devm_clk_get_prepared(struct device *dev, const char *id); 555 556/** 557 * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable() 558 * @dev: device for clock "consumer" 559 * @id: clock consumer ID 560 * 561 * Context: May sleep. 562 * 563 * Return: a struct clk corresponding to the clock producer, or 564 * valid IS_ERR() condition containing errno. The implementation 565 * uses @dev and @id to determine the clock consumer, and thereby 566 * the clock producer. (IOW, @id may be identical strings, but 567 * clk_get may return different clock producers depending on @dev.) 568 * 569 * The returned clk (if valid) is prepared and enabled. 570 * 571 * The clock will automatically be disabled, unprepared and freed 572 * when the device is unbound from the bus. 573 */ 574struct clk *devm_clk_get_enabled(struct device *dev, const char *id); 575 576/** 577 * devm_clk_get_optional - lookup and obtain a managed reference to an optional 578 * clock producer. 579 * @dev: device for clock "consumer" 580 * @id: clock consumer ID 581 * 582 * Context: May sleep. 583 * 584 * Return: a struct clk corresponding to the clock producer, or 585 * valid IS_ERR() condition containing errno. The implementation 586 * uses @dev and @id to determine the clock consumer, and thereby 587 * the clock producer. If no such clk is found, it returns NULL 588 * which serves as a dummy clk. That's the only difference compared 589 * to devm_clk_get(). 
590 * 591 * Drivers must assume that the clock source is neither prepared nor 592 * enabled. 593 * 594 * The clock will automatically be freed when the device is unbound 595 * from the bus. 596 */ 597struct clk *devm_clk_get_optional(struct device *dev, const char *id); 598 599/** 600 * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare() 601 * @dev: device for clock "consumer" 602 * @id: clock consumer ID 603 * 604 * Context: May sleep. 605 * 606 * Return: a struct clk corresponding to the clock producer, or 607 * valid IS_ERR() condition containing errno. The implementation 608 * uses @dev and @id to determine the clock consumer, and thereby 609 * the clock producer. If no such clk is found, it returns NULL 610 * which serves as a dummy clk. That's the only difference compared 611 * to devm_clk_get_prepared(). 612 * 613 * The returned clk (if valid) is prepared. Drivers must however 614 * assume that the clock is not enabled. 615 * 616 * The clock will automatically be unprepared and freed when the 617 * device is unbound from the bus. 618 */ 619struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id); 620 621/** 622 * devm_clk_get_optional_enabled - devm_clk_get_optional() + 623 * clk_prepare_enable() 624 * @dev: device for clock "consumer" 625 * @id: clock consumer ID 626 * 627 * Context: May sleep. 628 * 629 * Return: a struct clk corresponding to the clock producer, or 630 * valid IS_ERR() condition containing errno. The implementation 631 * uses @dev and @id to determine the clock consumer, and thereby 632 * the clock producer. If no such clk is found, it returns NULL 633 * which serves as a dummy clk. That's the only difference compared 634 * to devm_clk_get_enabled(). 635 * 636 * The returned clk (if valid) is prepared and enabled. 637 * 638 * The clock will automatically be disabled, unprepared and freed 639 * when the device is unbound from the bus. 
640 */ 641struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); 642 643/** 644 * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() + 645 * clk_set_rate() + 646 * clk_prepare_enable() 647 * @dev: device for clock "consumer" 648 * @id: clock consumer ID 649 * @rate: new clock rate 650 * 651 * Context: May sleep. 652 * 653 * Return: a struct clk corresponding to the clock producer, or 654 * valid IS_ERR() condition containing errno. The implementation 655 * uses @dev and @id to determine the clock consumer, and thereby 656 * the clock producer. If no such clk is found, it returns NULL 657 * which serves as a dummy clk. That's the only difference compared 658 * to devm_clk_get_enabled(). 659 * 660 * The returned clk (if valid) is prepared and enabled and rate was set. 661 * 662 * The clock will automatically be disabled, unprepared and freed 663 * when the device is unbound from the bus. 664 */ 665struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev, 666 const char *id, 667 unsigned long rate); 668 669/** 670 * devm_get_clk_from_child - lookup and obtain a managed reference to a 671 * clock producer from child node. 672 * @dev: device for clock "consumer" 673 * @np: pointer to clock consumer node 674 * @con_id: clock consumer ID 675 * 676 * This function parses the clocks, and uses them to look up the 677 * struct clk from the registered list of clock providers by using 678 * @np and @con_id 679 * 680 * The clock will automatically be freed when the device is unbound 681 * from the bus. 682 */ 683struct clk *devm_get_clk_from_child(struct device *dev, 684 struct device_node *np, const char *con_id); 685 686/** 687 * clk_enable - inform the system when the clock source should be running. 688 * @clk: clock source 689 * 690 * If the clock can not be enabled/disabled, this should return success. 691 * 692 * May be called from atomic contexts. 693 * 694 * Returns success (0) or negative errno. 
695 */ 696int clk_enable(struct clk *clk); 697 698/** 699 * clk_bulk_enable - inform the system when the set of clks should be running. 700 * @num_clks: the number of clk_bulk_data 701 * @clks: the clk_bulk_data table of consumer 702 * 703 * May be called from atomic contexts. 704 * 705 * Returns success (0) or negative errno. 706 */ 707int __must_check clk_bulk_enable(int num_clks, 708 const struct clk_bulk_data *clks); 709 710/** 711 * clk_disable - inform the system when the clock source is no longer required. 712 * @clk: clock source 713 * 714 * Inform the system that a clock source is no longer required by 715 * a driver and may be shut down. 716 * 717 * May be called from atomic contexts. 718 * 719 * Implementation detail: if the clock source is shared between 720 * multiple drivers, clk_enable() calls must be balanced by the 721 * same number of clk_disable() calls for the clock source to be 722 * disabled. 723 */ 724void clk_disable(struct clk *clk); 725 726/** 727 * clk_bulk_disable - inform the system when the set of clks is no 728 * longer required. 729 * @num_clks: the number of clk_bulk_data 730 * @clks: the clk_bulk_data table of consumer 731 * 732 * Inform the system that a set of clks is no longer required by 733 * a driver and may be shut down. 734 * 735 * May be called from atomic contexts. 736 * 737 * Implementation detail: if the set of clks is shared between 738 * multiple drivers, clk_bulk_enable() calls must be balanced by the 739 * same number of clk_bulk_disable() calls for the clock source to be 740 * disabled. 741 */ 742void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks); 743 744/** 745 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source. 746 * This is only valid once the clock source has been enabled. 
747 * @clk: clock source 748 */ 749unsigned long clk_get_rate(struct clk *clk); 750 751/** 752 * clk_put - "free" the clock source 753 * @clk: clock source 754 * 755 * Note: drivers must ensure that all clk_enable calls made on this 756 * clock source are balanced by clk_disable calls prior to calling 757 * this function. 758 * 759 * clk_put should not be called from within interrupt context. 760 */ 761void clk_put(struct clk *clk); 762 763/** 764 * clk_bulk_put - "free" the clock source 765 * @num_clks: the number of clk_bulk_data 766 * @clks: the clk_bulk_data table of consumer 767 * 768 * Note: drivers must ensure that all clk_bulk_enable calls made on this 769 * clock source are balanced by clk_bulk_disable calls prior to calling 770 * this function. 771 * 772 * clk_bulk_put should not be called from within interrupt context. 773 */ 774void clk_bulk_put(int num_clks, struct clk_bulk_data *clks); 775 776/** 777 * clk_bulk_put_all - "free" all the clock source 778 * @num_clks: the number of clk_bulk_data 779 * @clks: the clk_bulk_data table of consumer 780 * 781 * Note: drivers must ensure that all clk_bulk_enable calls made on this 782 * clock source are balanced by clk_bulk_disable calls prior to calling 783 * this function. 784 * 785 * clk_bulk_put_all should not be called from within interrupt context. 786 */ 787void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks); 788 789/** 790 * devm_clk_put - "free" a managed clock source 791 * @dev: device used to acquire the clock 792 * @clk: clock source acquired with devm_clk_get() 793 * 794 * Note: drivers must ensure that all clk_enable calls made on this 795 * clock source are balanced by clk_disable calls prior to calling 796 * this function. 797 * 798 * clk_put should not be called from within interrupt context. 799 */ 800void devm_clk_put(struct device *dev, struct clk *clk); 801 802/* 803 * The remaining APIs are optional for machine class support. 
804 */ 805 806 807/** 808 * clk_round_rate - adjust a rate to the exact rate a clock can provide 809 * @clk: clock source 810 * @rate: desired clock rate in Hz 811 * 812 * This answers the question "if I were to pass @rate to clk_set_rate(), 813 * what clock rate would I end up with?" without changing the hardware 814 * in any way. In other words: 815 * 816 * rate = clk_round_rate(clk, r); 817 * 818 * and: 819 * 820 * clk_set_rate(clk, r); 821 * rate = clk_get_rate(clk); 822 * 823 * are equivalent except the former does not modify the clock hardware 824 * in any way. 825 * 826 * Returns rounded clock rate in Hz, or negative errno. 827 */ 828long clk_round_rate(struct clk *clk, unsigned long rate); 829 830/** 831 * clk_set_rate - set the clock rate for a clock source 832 * @clk: clock source 833 * @rate: desired clock rate in Hz 834 * 835 * Updating the rate starts at the top-most affected clock and then 836 * walks the tree down to the bottom-most clock that needs updating. 837 * 838 * Returns success (0) or negative errno. 839 */ 840int clk_set_rate(struct clk *clk, unsigned long rate); 841 842/** 843 * clk_set_rate_exclusive- set the clock rate and claim exclusivity over 844 * clock source 845 * @clk: clock source 846 * @rate: desired clock rate in Hz 847 * 848 * This helper function allows drivers to atomically set the rate of a producer 849 * and claim exclusivity over the rate control of the producer. 850 * 851 * It is essentially a combination of clk_set_rate() and 852 * clk_rate_exclusite_get(). Caller must balance this call with a call to 853 * clk_rate_exclusive_put() 854 * 855 * Returns success (0) or negative errno. 
856 */ 857int clk_set_rate_exclusive(struct clk *clk, unsigned long rate); 858 859/** 860 * clk_has_parent - check if a clock is a possible parent for another 861 * @clk: clock source 862 * @parent: parent clock source 863 * 864 * This function can be used in drivers that need to check that a clock can be 865 * the parent of another without actually changing the parent. 866 * 867 * Returns true if @parent is a possible parent for @clk, false otherwise. 868 */ 869bool clk_has_parent(const struct clk *clk, const struct clk *parent); 870 871/** 872 * clk_set_rate_range - set a rate range for a clock source 873 * @clk: clock source 874 * @min: desired minimum clock rate in Hz, inclusive 875 * @max: desired maximum clock rate in Hz, inclusive 876 * 877 * Returns success (0) or negative errno. 878 */ 879int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max); 880 881/** 882 * clk_set_min_rate - set a minimum clock rate for a clock source 883 * @clk: clock source 884 * @rate: desired minimum clock rate in Hz, inclusive 885 * 886 * Returns success (0) or negative errno. 887 */ 888int clk_set_min_rate(struct clk *clk, unsigned long rate); 889 890/** 891 * clk_set_max_rate - set a maximum clock rate for a clock source 892 * @clk: clock source 893 * @rate: desired maximum clock rate in Hz, inclusive 894 * 895 * Returns success (0) or negative errno. 896 */ 897int clk_set_max_rate(struct clk *clk, unsigned long rate); 898 899/** 900 * clk_set_parent - set the parent clock source for this clock 901 * @clk: clock source 902 * @parent: parent clock source 903 * 904 * Returns success (0) or negative errno. 905 */ 906int clk_set_parent(struct clk *clk, struct clk *parent); 907 908/** 909 * clk_get_parent - get the parent clock source for this clock 910 * @clk: clock source 911 * 912 * Returns struct clk corresponding to parent clock source, or 913 * valid IS_ERR() condition containing errno. 
914 */ 915struct clk *clk_get_parent(struct clk *clk); 916 917/** 918 * clk_get_sys - get a clock based upon the device name 919 * @dev_id: device name 920 * @con_id: connection ID 921 * 922 * Returns a struct clk corresponding to the clock producer, or 923 * valid IS_ERR() condition containing errno. The implementation 924 * uses @dev_id and @con_id to determine the clock consumer, and 925 * thereby the clock producer. In contrast to clk_get() this function 926 * takes the device name instead of the device itself for identification. 927 * 928 * Drivers must assume that the clock source is not enabled. 929 * 930 * clk_get_sys should not be called from within interrupt context. 931 */ 932struct clk *clk_get_sys(const char *dev_id, const char *con_id); 933 934/** 935 * clk_save_context - save clock context for poweroff 936 * 937 * Saves the context of the clock register for powerstates in which the 938 * contents of the registers will be lost. Occurs deep within the suspend 939 * code so locking is not necessary. 940 */ 941int clk_save_context(void); 942 943/** 944 * clk_restore_context - restore clock context after poweroff 945 * 946 * This occurs with all clocks enabled. Occurs deep within the resume code 947 * so locking is not necessary. 
 */
void clk_restore_context(void);

#else /* !CONFIG_HAVE_CLK */

/*
 * Stub implementations for kernels built without clock framework support:
 * every lookup "succeeds" with a NULL clock and every operation is a
 * trivially successful no-op, so consumers build without #ifdefs.
 *
 * NOTE(review): this #else is labelled !CONFIG_HAVE_CLK while the section
 * visible near the top of the file is guarded by CONFIG_COMMON_CLK; the
 * matching #if lies outside this view -- confirm label and guard agree.
 */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_optional(struct device *dev,
					int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_prepared(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_enabled(struct device *dev,
					       const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
							 const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
							const char *id)
{
	return NULL;
}

static inline struct clk *
devm_clk_get_optional_enabled_with_rate(struct device *dev, const char *id,
					unsigned long rate)
{
	return NULL;
}

static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
					int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
						     struct clk_bulk_data **clks)
{

	return 0;
}

static
inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev, 1038 struct clk_bulk_data **clks) 1039{ 1040 return 0; 1041} 1042 1043static inline struct clk *devm_get_clk_from_child(struct device *dev, 1044 struct device_node *np, const char *con_id) 1045{ 1046 return NULL; 1047} 1048 1049static inline void clk_put(struct clk *clk) {} 1050 1051static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} 1052 1053static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} 1054 1055static inline void devm_clk_put(struct device *dev, struct clk *clk) {} 1056 1057static inline int clk_enable(struct clk *clk) 1058{ 1059 return 0; 1060} 1061 1062static inline int __must_check clk_bulk_enable(int num_clks, 1063 const struct clk_bulk_data *clks) 1064{ 1065 return 0; 1066} 1067 1068static inline void clk_disable(struct clk *clk) {} 1069 1070 1071static inline void clk_bulk_disable(int num_clks, 1072 const struct clk_bulk_data *clks) {} 1073 1074static inline unsigned long clk_get_rate(struct clk *clk) 1075{ 1076 return 0; 1077} 1078 1079static inline int clk_set_rate(struct clk *clk, unsigned long rate) 1080{ 1081 return 0; 1082} 1083 1084static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) 1085{ 1086 return 0; 1087} 1088 1089static inline long clk_round_rate(struct clk *clk, unsigned long rate) 1090{ 1091 return 0; 1092} 1093 1094static inline bool clk_has_parent(struct clk *clk, struct clk *parent) 1095{ 1096 return true; 1097} 1098 1099static inline int clk_set_rate_range(struct clk *clk, unsigned long min, 1100 unsigned long max) 1101{ 1102 return 0; 1103} 1104 1105static inline int clk_set_min_rate(struct clk *clk, unsigned long rate) 1106{ 1107 return 0; 1108} 1109 1110static inline int clk_set_max_rate(struct clk *clk, unsigned long rate) 1111{ 1112 return 0; 1113} 1114 1115static inline int clk_set_parent(struct clk *clk, struct clk *parent) 1116{ 1117 return 0; 1118} 1119 1120static inline 
struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}

static inline int clk_save_context(void)
{
	return 0;
}

static inline void clk_restore_context(void) {}

#endif

/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);	/* undo the prepare if enable failed */

	return ret;
}

/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}

/* Bulk counterpart of clk_prepare_enable(): prepare then enable @clks. */
static inline int __must_check
clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks)
{
	int ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		return ret;
	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		clk_bulk_unprepare(num_clks, clks);	/* roll back on failure */

	return ret;
}

/* Bulk counterpart of clk_disable_unprepare(). */
static inline void clk_bulk_disable_unprepare(int num_clks,
					      const struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}

/**
 * clk_drop_range - Reset any range set on that clock
 * @clk: clock source
 *
 * Equivalent to requesting the full [0, ULONG_MAX] rate range again.
 *
 * Returns success (0) or negative errno.
 */
static inline int clk_drop_range(struct clk *clk)
{
	return clk_set_rate_range(clk, 0, ULONG_MAX);
}

/**
 * clk_get_optional - lookup and obtain a reference to an optional clock
 * producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as clk_get() except where there is no clock producer.
In 1201 * this case, instead of returning -ENOENT, the function returns NULL. 1202 */ 1203static inline struct clk *clk_get_optional(struct device *dev, const char *id) 1204{ 1205 struct clk *clk = clk_get(dev, id); 1206 1207 if (clk == ERR_PTR(-ENOENT)) 1208 return NULL; 1209 1210 return clk; 1211} 1212 1213#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) 1214struct clk *of_clk_get(struct device_node *np, int index); 1215struct clk *of_clk_get_by_name(struct device_node *np, const char *name); 1216struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec); 1217#else 1218static inline struct clk *of_clk_get(struct device_node *np, int index) 1219{ 1220 return ERR_PTR(-ENOENT); 1221} 1222static inline struct clk *of_clk_get_by_name(struct device_node *np, 1223 const char *name) 1224{ 1225 return ERR_PTR(-ENOENT); 1226} 1227static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 1228{ 1229 return ERR_PTR(-ENOENT); 1230} 1231#endif 1232 1233#endif