/* drivers/base/power/opp/core.c — Linux v4.4 (raw export header, 1316 lines, 38 kB) */
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/export.h>

#include "opp.h"

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(dev_opp_list_lock);

/*
 * Warn (via lockdep) unless the caller holds either rcu_read_lock() (reader
 * side) or dev_opp_list_lock (updater side) -- the two legal ways to walk
 * the OPP lists.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
				!lockdep_is_held(&dev_opp_list_lock),	\
			   "Missing rcu_read_lock() or "		\
			   "dev_opp_list_lock protection");		\
} while (0)

/* Find the entry tracking @dev inside @dev_opp's per-device list, if any. */
static struct device_list_opp *_find_list_dev(const struct device *dev,
					      struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	list_for_each_entry(list_dev, &dev_opp->dev_list, node)
		if (list_dev->dev == dev)
			return list_dev;

	return NULL;
}

/*
 * Look up an existing OPP table created from device-tree node @np.
 * Returns the table only when its OPPs are marked shared ("opp-shared"
 * property present), otherwise NULL.
 */
static struct device_opp *_managed_opp(const struct device_node *np)
{
	struct device_opp *dev_opp;

	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
		if (dev_opp->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains a "opp-shared" property.
			 */
			return dev_opp->shared_opp ? dev_opp : NULL;
		}
	}

	return NULL;
}

/**
 * _find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search list of device OPPs for one containing matching device. Does a RCU
 * reader operation to grab the pointer needed.
 *
 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: For readers, this function must be called under rcu_read_lock().
 * device_opp is a RCU protected pointer, which means that device_opp is valid
 * as long as we are under RCU lock.
 *
 * For Writers, this function must be called with dev_opp_list_lock held.
 */
struct device_opp *_find_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;

	opp_rcu_lockdep_assert();

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
		if (_find_list_dev(dev, dev_opp))
			return dev_opp;

	return ERR_PTR(-ENODEV);
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp:	opp for which voltage has to be returned for
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long v = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp))
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which frequency has to be returned for
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long f = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	/* an unavailable OPP reports frequency 0, same as a bad pointer */
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);

/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp:	opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short duration of times to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return tmp_opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct device_opp *dev_opp;
	unsigned long clock_latency_ns;

	rcu_read_lock();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		clock_latency_ns = 0;	/* no table: report zero latency */
	else
		clock_latency_ns = dev_opp->clock_latency_ns_max;

	rcu_read_unlock();
	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_suspend_opp() - Get suspend opp
 * @dev:	device for which we do this operation
 *
 * Return: This function returns pointer to the suspend opp if it is
 * defined and available, otherwise it returns NULL.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
	struct device_opp *dev_opp;

	opp_rcu_lockdep_assert();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
	    !dev_opp->suspend_opp->available)
		return NULL;

	return dev_opp->suspend_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp;
	int count = 0;

	rcu_read_lock();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		/* propagate -ENODEV/-EINVAL from the lookup as the count */
		count = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n",
			__func__, count);
		goto out_unlock;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

out_unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:	device for which we do this operation
 * @freq:	frequency to search for
 * @available:	true/false - match for available opp
 *
 * Return: Searches for exact match in the opp list and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. if available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. if false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);

/**
 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	/* opp_list is sorted by frequency: first match >= *freq is the ceil */
	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

/* List-dev Helpers */
static void _kfree_list_dev_rcu(struct rcu_head *head)
{
	struct device_list_opp *list_dev;

	list_dev = container_of(head, struct device_list_opp, rcu_head);
	kfree_rcu(list_dev, rcu_head);
}

/* Unlink @list_dev and free it after the SRCU grace period elapses. */
static void _remove_list_dev(struct device_list_opp *list_dev,
			     struct device_opp *dev_opp)
{
	list_del(&list_dev->node);
	call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
		  _kfree_list_dev_rcu);
}

/*
 * Allocate a tracking entry for @dev and publish it on @dev_opp's device
 * list. Returns NULL on allocation failure.
 */
struct device_list_opp *_add_list_dev(const struct device *dev,
				      struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
	if (!list_dev)
		return NULL;

	/* Initialize list-dev */
	list_dev->dev = dev;
	list_add_rcu(&list_dev->node, &dev_opp->dev_list);

	return list_dev;
}

/**
 * _add_device_opp() - Find device OPP table or allocate a new one
 * @dev:	device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid device_opp pointer if success, else NULL.
 */
static struct device_opp *_add_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;
	struct device_list_opp *list_dev;

	/* Check for existing list for 'dev' first */
	dev_opp = _find_device_opp(dev);
	if (!IS_ERR(dev_opp))
		return dev_opp;

	/*
	 * Allocate a new device OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
	if (!dev_opp)
		return NULL;

	INIT_LIST_HEAD(&dev_opp->dev_list);

	list_dev = _add_list_dev(dev, dev_opp);
	if (!list_dev) {
		kfree(dev_opp);
		return NULL;
	}

	srcu_init_notifier_head(&dev_opp->srcu_head);
	INIT_LIST_HEAD(&dev_opp->opp_list);

	/* Secure the device list modification */
	list_add_rcu(&dev_opp->node, &dev_opp_list);
	return dev_opp;
}

/**
 * _kfree_device_rcu() - Free device_opp RCU handler
 * @head:	RCU head
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);

	kfree_rcu(device_opp, rcu_head);
}

/**
 * _remove_device_opp() - Removes a device OPP table
 * @dev_opp: device OPP table to be removed.
 *
 * Removes/frees device OPP table if it doesn't contain any OPPs.
 */
static void _remove_device_opp(struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	/* Still has OPPs: table stays alive */
	if (!list_empty(&dev_opp->opp_list))
		return;

	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
				    node);

	_remove_list_dev(list_dev, dev_opp);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&dev_opp->dev_list));

	list_del_rcu(&dev_opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
		  _kfree_device_rcu);
}

/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head:	RCU head
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}

/**
 * _opp_remove() - Remove an OPP from a table definition
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 * @opp:	pointer to the OPP to remove
 * @notify:	OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
static void _opp_remove(struct device_opp *dev_opp,
			struct dev_pm_opp *opp, bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
	list_del_rcu(&opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* drop the table itself if this was the last OPP */
	_remove_device_opp(dev_opp);
}

/**
 * dev_pm_opp_remove() - Remove an OPP from OPP list
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_opp *dev_opp;
	bool found = false;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		goto unlock;

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
		goto unlock;
	}

	_opp_remove(dev_opp, opp, true);
unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

/*
 * Allocate a zeroed OPP node and make sure a device OPP table exists for
 * @dev (creating one if needed); *dev_opp is set to that table. Returns
 * NULL if either allocation fails.
 */
static struct dev_pm_opp *_allocate_opp(struct device *dev,
					struct device_opp **dev_opp)
{
	struct dev_pm_opp *opp;

	/* allocate new OPP node */
	opp = kzalloc(sizeof(*opp), GFP_KERNEL);
	if (!opp)
		return NULL;

	INIT_LIST_HEAD(&opp->node);

	*dev_opp = _add_device_opp(dev);
	if (!*dev_opp) {
		kfree(opp);
		return NULL;
	}

	return opp;
}

/*
 * Link @new_opp into @dev_opp's frequency-sorted opp_list. Returns 0 on
 * success (including the benign duplicate case), -EEXIST on a conflicting
 * duplicate.
 */
static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
		    struct device_opp *dev_opp)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &dev_opp->opp_list;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &dev_opp->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);

		/* identical & available duplicate is tolerated, else -EEXIST */
		return opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;
	}

	new_opp->dev_opp = dev_opp;
	list_add_rcu(&new_opp->node, head);

	return 0;
}

/**
 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 * @dynamic:	Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
728 * 729 * Return: 730 * 0 On success OR 731 * Duplicate OPPs (both freq and volt are same) and opp->available 732 * -EEXIST Freq are same and volt are different OR 733 * Duplicate OPPs (both freq and volt are same) and !opp->available 734 * -ENOMEM Memory allocation failure 735 */ 736static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, 737 bool dynamic) 738{ 739 struct device_opp *dev_opp; 740 struct dev_pm_opp *new_opp; 741 int ret; 742 743 /* Hold our list modification lock here */ 744 mutex_lock(&dev_opp_list_lock); 745 746 new_opp = _allocate_opp(dev, &dev_opp); 747 if (!new_opp) { 748 ret = -ENOMEM; 749 goto unlock; 750 } 751 752 /* populate the opp table */ 753 new_opp->rate = freq; 754 new_opp->u_volt = u_volt; 755 new_opp->available = true; 756 new_opp->dynamic = dynamic; 757 758 ret = _opp_add(dev, new_opp, dev_opp); 759 if (ret) 760 goto free_opp; 761 762 mutex_unlock(&dev_opp_list_lock); 763 764 /* 765 * Notify the changes in the availability of the operable 766 * frequency/voltage list. 
767 */ 768 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp); 769 return 0; 770 771free_opp: 772 _opp_remove(dev_opp, new_opp, false); 773unlock: 774 mutex_unlock(&dev_opp_list_lock); 775 return ret; 776} 777 778/* TODO: Support multiple regulators */ 779static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev) 780{ 781 u32 microvolt[3] = {0}; 782 u32 val; 783 int count, ret; 784 785 /* Missing property isn't a problem, but an invalid entry is */ 786 if (!of_find_property(opp->np, "opp-microvolt", NULL)) 787 return 0; 788 789 count = of_property_count_u32_elems(opp->np, "opp-microvolt"); 790 if (count < 0) { 791 dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", 792 __func__, count); 793 return count; 794 } 795 796 /* There can be one or three elements here */ 797 if (count != 1 && count != 3) { 798 dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", 799 __func__, count); 800 return -EINVAL; 801 } 802 803 ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt, 804 count); 805 if (ret) { 806 dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__, 807 ret); 808 return -EINVAL; 809 } 810 811 opp->u_volt = microvolt[0]; 812 opp->u_volt_min = microvolt[1]; 813 opp->u_volt_max = microvolt[2]; 814 815 if (!of_property_read_u32(opp->np, "opp-microamp", &val)) 816 opp->u_amp = val; 817 818 return 0; 819} 820 821/** 822 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) 823 * @dev: device for which we do this operation 824 * @np: device node 825 * 826 * This function adds an opp definition to the opp list and returns status. The 827 * opp can be controlled using dev_pm_opp_enable/disable functions and may be 828 * removed by dev_pm_opp_remove. 829 * 830 * Locking: The internal device_opp and opp structures are RCU protected. 831 * Hence this function internally uses RCU updater strategy with mutex locks 832 * to keep the integrity of the internal data structures. 
Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		/* "opp-hz" is required by the v2 binding */
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev);
	if (ret)
		goto free_opp;

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (dev_opp->suspend_opp)
			/* only the first opp-suspend entry is honoured */
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, dev_opp->suspend_opp->rate,
				 new_opp->rate);
		else
			dev_opp->suspend_opp = new_opp;
	}

	if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
		dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&dev_opp_list_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}

/**
 * dev_pm_opp_add() - Add an OPP table from a table definitions
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	return _opp_add_v1(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);

/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/*
	 * keep the node allocated: grab the replacement copy up front so the
	 * locked section below cannot fail on allocation
	 */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);	/* still -ENODEV: freq not in the list */
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;		/* no-op, returns success (r == 0) */
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/*
	 * RCU-style update: publish the modified copy in place of the old
	 * node, then defer freeing the old node past an SRCU grace period.
	 */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	kfree(new_opp);		/* preallocated copy was not used */
	return r;
}

/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
1079 */ 1080int dev_pm_opp_disable(struct device *dev, unsigned long freq) 1081{ 1082 return _opp_set_availability(dev, freq, false); 1083} 1084EXPORT_SYMBOL_GPL(dev_pm_opp_disable); 1085 1086/** 1087 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp 1088 * @dev: device pointer used to lookup device OPPs. 1089 * 1090 * Return: pointer to notifier head if found, otherwise -ENODEV or 1091 * -EINVAL based on type of error casted as pointer. value must be checked 1092 * with IS_ERR to determine valid pointer or error result. 1093 * 1094 * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU 1095 * protected pointer. The reason for the same is that the opp pointer which is 1096 * returned will remain valid for use with opp_get_{voltage, freq} only while 1097 * under the locked area. The pointer returned must be used prior to unlocking 1098 * with rcu_read_unlock() to maintain the integrity of the pointer. 1099 */ 1100struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) 1101{ 1102 struct device_opp *dev_opp = _find_device_opp(dev); 1103 1104 if (IS_ERR(dev_opp)) 1105 return ERR_CAST(dev_opp); /* matching type */ 1106 1107 return &dev_opp->srcu_head; 1108} 1109EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); 1110 1111#ifdef CONFIG_OF 1112/** 1113 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT 1114 * entries 1115 * @dev: device pointer used to lookup device OPPs. 1116 * 1117 * Free OPPs created using static entries present in DT. 1118 * 1119 * Locking: The internal device_opp and opp structures are RCU protected. 1120 * Hence this function indirectly uses RCU updater strategy with mutex locks 1121 * to keep the integrity of the internal data structures. Callers should ensure 1122 * that this function is *NOT* called under RCU protection or in contexts where 1123 * mutex cannot be locked. 
1124 */ 1125void dev_pm_opp_of_remove_table(struct device *dev) 1126{ 1127 struct device_opp *dev_opp; 1128 struct dev_pm_opp *opp, *tmp; 1129 1130 /* Hold our list modification lock here */ 1131 mutex_lock(&dev_opp_list_lock); 1132 1133 /* Check for existing list for 'dev' */ 1134 dev_opp = _find_device_opp(dev); 1135 if (IS_ERR(dev_opp)) { 1136 int error = PTR_ERR(dev_opp); 1137 1138 if (error != -ENODEV) 1139 WARN(1, "%s: dev_opp: %d\n", 1140 IS_ERR_OR_NULL(dev) ? 1141 "Invalid device" : dev_name(dev), 1142 error); 1143 goto unlock; 1144 } 1145 1146 /* Find if dev_opp manages a single device */ 1147 if (list_is_singular(&dev_opp->dev_list)) { 1148 /* Free static OPPs */ 1149 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { 1150 if (!opp->dynamic) 1151 _opp_remove(dev_opp, opp, true); 1152 } 1153 } else { 1154 _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp); 1155 } 1156 1157unlock: 1158 mutex_unlock(&dev_opp_list_lock); 1159} 1160EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); 1161 1162/* Returns opp descriptor node for a device, caller must do of_node_put() */ 1163struct device_node *_of_get_opp_desc_node(struct device *dev) 1164{ 1165 /* 1166 * TODO: Support for multiple OPP tables. 1167 * 1168 * There should be only ONE phandle present in "operating-points-v2" 1169 * property. 
1170 */ 1171 1172 return of_parse_phandle(dev->of_node, "operating-points-v2", 0); 1173} 1174 1175/* Initializes OPP tables based on new bindings */ 1176static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) 1177{ 1178 struct device_node *np; 1179 struct device_opp *dev_opp; 1180 int ret = 0, count = 0; 1181 1182 mutex_lock(&dev_opp_list_lock); 1183 1184 dev_opp = _managed_opp(opp_np); 1185 if (dev_opp) { 1186 /* OPPs are already managed */ 1187 if (!_add_list_dev(dev, dev_opp)) 1188 ret = -ENOMEM; 1189 mutex_unlock(&dev_opp_list_lock); 1190 return ret; 1191 } 1192 mutex_unlock(&dev_opp_list_lock); 1193 1194 /* We have opp-list node now, iterate over it and add OPPs */ 1195 for_each_available_child_of_node(opp_np, np) { 1196 count++; 1197 1198 ret = _opp_add_static_v2(dev, np); 1199 if (ret) { 1200 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, 1201 ret); 1202 goto free_table; 1203 } 1204 } 1205 1206 /* There should be one of more OPP defined */ 1207 if (WARN_ON(!count)) 1208 return -ENOENT; 1209 1210 mutex_lock(&dev_opp_list_lock); 1211 1212 dev_opp = _find_device_opp(dev); 1213 if (WARN_ON(IS_ERR(dev_opp))) { 1214 ret = PTR_ERR(dev_opp); 1215 mutex_unlock(&dev_opp_list_lock); 1216 goto free_table; 1217 } 1218 1219 dev_opp->np = opp_np; 1220 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared"); 1221 1222 mutex_unlock(&dev_opp_list_lock); 1223 1224 return 0; 1225 1226free_table: 1227 dev_pm_opp_of_remove_table(dev); 1228 1229 return ret; 1230} 1231 1232/* Initializes OPP tables based on old-deprecated bindings */ 1233static int _of_add_opp_table_v1(struct device *dev) 1234{ 1235 const struct property *prop; 1236 const __be32 *val; 1237 int nr; 1238 1239 prop = of_find_property(dev->of_node, "operating-points", NULL); 1240 if (!prop) 1241 return -ENODEV; 1242 if (!prop->value) 1243 return -ENODATA; 1244 1245 /* 1246 * Each OPP is a set of tuples consisting of frequency and 1247 * voltage like <freq-kHz vol-uV>. 
 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		/* An odd cell count cannot be a list of <freq volt> pairs */
		dev_err(dev, "%s: Invalid OPP list\n", __func__);
		return -EINVAL;
	}

	/*
	 * NOTE(review): a present but zero-length "operating-points" property
	 * passes the check above and returns 0 with no OPPs added — confirm
	 * whether that should instead be treated as an error.
	 */
	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000; /* kHz -> Hz */
		unsigned long volt = be32_to_cpup(val++);

		/* Best effort: a failed entry is logged but does not abort */
		if (_opp_add_v1(dev, freq, volt, false))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		nr -= 2;
	}

	return 0;
}

/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct device_node *opp_np;
	int ret;

	/*
	 * OPPs have two versions of bindings now. The older one is deprecated,
	 * try for the new binding first.
	 */
	opp_np = _of_get_opp_desc_node(dev);
	if (!opp_np) {
		/*
		 * Try old-deprecated bindings for backward compatibility with
		 * older dtbs.
		 */
		return _of_add_opp_table_v1(dev);
	}

	ret = _of_add_opp_table_v2(dev, opp_np);
	of_node_put(opp_np);	/* balance the get in _of_get_opp_desc_node() */

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
#endif