/*
 * linux/include/linux/clk.h
 *
 * Copyright (C) 2004 ARM Limited.
 * Written by Deep Blue Solutions Limited.
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 * to indicate that the rate change will proceed. Drivers must
 * immediately terminate any operations that will be affected by the
 * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 * NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 * after PRE_RATE_CHANGE. In this case, all registered notifiers on
 * the clk will be called with ABORT_RATE_CHANGE. Callbacks must
 * always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 *
 */
#define PRE_RATE_CHANGE			BIT(0)
#define POST_RATE_CHANGE		BIT(1)
#define ABORT_RATE_CHANGE		BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk. Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk			*clk;
	struct srcu_notifier_head	notifier_head;
	struct list_head		node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future. For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk		*clk;
	unsigned long		old_rate;
	unsigned long		new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_*() API calls as
 * a convenience to consumers which require multiple clks. This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char		*id;
	struct clk		*clk;
};

#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
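/*
 * Illustrative sketch (not part of the original header): a minimal
 * rate-change notifier. The names my_rate_nb_cb, my_clk_nb and
 * my_watch_rate are hypothetical, as is the 200 MHz limit.
 */
static int my_rate_nb_cb(struct notifier_block *nb, unsigned long event,
			 void *data)
{
	struct clk_notifier_data *ndata = data;

	/* Veto the change before it happens if the new rate is too high. */
	if (event == PRE_RATE_CHANGE && ndata->new_rate > 200000000UL)
		return NOTIFY_BAD;

	/* ABORT_RATE_CHANGE and POST_RATE_CHANGE must not fail. */
	return NOTIFY_OK;
}

static struct notifier_block my_clk_nb = {
	.notifier_call = my_rate_nb_cb,
};

static inline int my_watch_rate(struct clk *clk)
{
	/* Balanced later by clk_notifier_unregister(clk, &my_clk_nb). */
	return clk_notifier_register(clk, &my_clk_nb);
}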
/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 * for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
 * success, a negative errno otherwise.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns a negative errno.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
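/*
 * Illustrative sketch (not part of the original header): request a 25% duty
 * cycle and read it back as a percentage. my_set_quarter_duty is a
 * hypothetical helper.
 */
static inline int my_set_quarter_duty(struct clk *clk)
{
	int ret;

	/* num/den ratio: one quarter of the period spent high */
	ret = clk_set_duty_cycle(clk, 1, 4);
	if (ret)
		return ret;

	/* a scale of 100 yields an integer percentage, here expected ~25 */
	return clk_get_scaled_duty_cycle(clk, 100);
}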
/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

#else

static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_phase(struct clk *clk, int phase)
{
	return -ENOTSUPP;
}

static inline int clk_get_phase(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}

static inline int clk_get_scaled_duty_cycle(struct clk *clk,
					    unsigned int scale)
{
	return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;
}

#endif

/**
 * clk_prepare - prepare a clock source
 * @clk: clock source
 *
 * This prepares the clock source for use.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
				  const struct clk_bulk_data *clks);
#else
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}

static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock. The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks)
{
	might_sleep();
}
#endif

#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer. (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);
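/*
 * Illustrative sketch (not part of the original header): classic clk_get()
 * error handling. The connection id "apb_pclk" and the helper name
 * my_acquire_bus_clk are hypothetical.
 */
static inline int my_acquire_bus_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, "apb_pclk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* e.g. -ENOENT or -EPROBE_DEFER */

	/* Not yet prepared or enabled; release later with clk_put(). */
	*out = clk;
	return 0;
}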
/**
 * clk_bulk_get - lookup and obtain a number of references to clock producer.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation. If any of the clocks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in the clk_bulk_data table are obtained
 * successfully, or a negative errno otherwise.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);
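/*
 * Illustrative sketch (not part of the original header): acquiring two
 * clocks in one call. The ids "axi" and "ahb" are hypothetical; in a real
 * driver the table would typically live in the driver's private data.
 */
static inline int my_get_bulk_clks(struct device *dev,
				   struct clk_bulk_data clks[2])
{
	clks[0].id = "axi";
	clks[1].id = "ahb";

	/*
	 * On failure nothing is left half-acquired; on success each
	 * clks[i].clk has been filled in.
	 */
	return clk_bulk_get(dev, 2, clks);
}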
/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 * producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation. If any of the clocks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks);
/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
				   struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get_all(struct device *dev,
				       struct clk_bulk_data **clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer. (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_get should not be called from within interrupt context.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

/**
 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
 * clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as devm_clk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns NULL.
 */
struct clk *devm_clk_get_optional(struct device *dev, const char *id);
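/*
 * Illustrative sketch (not part of the original header): handling an optional
 * clock. The id "ref" and the helper name my_prepare_optional_ref are
 * hypothetical.
 */
static inline int my_prepare_optional_ref(struct device *dev)
{
	struct clk *ref = devm_clk_get_optional(dev, "ref");

	if (IS_ERR(ref))
		return PTR_ERR(ref);	/* real lookup failure, e.g. -EPROBE_DEFER */

	/*
	 * ref is NULL when no "ref" clock is described for this device;
	 * a NULL clk is accepted by the consumer API and acts as a no-op.
	 */
	return clk_prepare(ref);
}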
/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 * clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id);
/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 * producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider. It prevents any other consumer from performing, even indirectly,
 * operations which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 * producer
 * @clk: clock source
 *
 * This function allows drivers to release the exclusivity they previously
 * claimed with clk_rate_exclusive_get().
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);

/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock can not be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
				 const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 * longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 * This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put - "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);
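/*
 * Illustrative sketch (not part of the original header): the usual consumer
 * lifecycle around a short burst of activity. my_run_with_clock is a
 * hypothetical helper; obtaining and releasing the clk itself is assumed to
 * be done by the caller.
 */
static inline int my_run_with_clock(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);			/* may sleep */
	if (ret)
		return ret;

	ret = clk_enable(clk);			/* safe in atomic context */
	if (ret) {
		clk_unprepare(clk);
		return ret;
	}

	pr_info("clock is running at %lu Hz\n", clk_get_rate(clk));

	clk_disable(clk);			/* balance clk_enable() */
	clk_unprepare(clk);			/* balance clk_prepare() */
	return 0;
}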
/**
 * clk_bulk_put - "free" the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put_all should not be called from within interrupt context.
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put - "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * devm_clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */


/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way. In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);
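/*
 * Illustrative sketch (not part of the original header): probe what the
 * hardware can actually do near 48 MHz before committing to it.
 * my_set_rate_near_48mhz is a hypothetical helper.
 */
static inline int my_set_rate_near_48mhz(struct clk *clk)
{
	long rounded = clk_round_rate(clk, 48000000);

	if (rounded < 0)
		return rounded;

	/* clk_set_rate(clk, 48000000) would end up at this same rate */
	return clk_set_rate(clk, rounded);
}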
/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 * clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get(). Caller must balance this call with a call to
 * clk_rate_exclusive_put().
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);
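/*
 * Illustrative sketch (not part of the original header): reparent a clock
 * only when the mux actually supports the new parent. my_switch_parent is
 * a hypothetical helper.
 */
static inline int my_switch_parent(struct clk *clk, struct clk *fast_parent)
{
	if (!clk_has_parent(clk, fast_parent))
		return -EINVAL;

	return clk_set_parent(clk, fast_parent);
}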
/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);

/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer. In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code so locking is not necessary.
 */
int clk_save_context(void);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * This occurs with all clocks enabled. Occurs deep within the resume code
 * so locking is not necessary.
 */
void clk_restore_context(void);

#else /* !CONFIG_HAVE_CLK */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
						     struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_get_clk_from_child(struct device *dev,
				struct device_node *np, const char *con_id)
{
	return NULL;
}

static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

static inline int clk_rate_exclusive_get(struct clk *clk)
{
	return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

static inline int clk_enable(struct clk *clk)
{
	return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline void clk_disable(struct clk *clk) {}

static inline void clk_bulk_disable(int num_clks,
				    struct clk_bulk_data *clks) {}

static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	return true;
}

static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
				     unsigned long max)
{
	return 0;
}

static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}

static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}

static inline int clk_save_context(void)
{
	return 0;
}

static inline void clk_restore_context(void) {}

#endif

/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);

	return ret;
}

/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}

static inline int __must_check clk_bulk_prepare_enable(int num_clks,
					struct clk_bulk_data *clks)
{
	int ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		return ret;
	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		clk_bulk_unprepare(num_clks, clks);

	return ret;
}

static inline void clk_bulk_disable_unprepare(int num_clks,
					      struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}

/**
 * clk_get_optional - lookup and obtain a reference to an optional clock
 * producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as clk_get() except where there is no clock producer. In
 * this case, instead of returning -ENOENT, the function returns NULL.
 */
static inline struct clk *clk_get_optional(struct device *dev, const char *id)
{
	struct clk *clk = clk_get(dev, id);

	if (clk == ERR_PTR(-ENOENT))
		return NULL;

	return clk;
}

#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
#else
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
					     const char *name)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

#endif