Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v4.5-rc1 1618 lines 47 kB view raw
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/export.h>

#include "opp.h"

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(dev_opp_list_lock);

/*
 * Warn (via lockdep) unless the caller holds either the RCU read lock or
 * dev_opp_list_lock: one of the two protects every access to the OPP lists.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
				!lockdep_is_held(&dev_opp_list_lock),	\
			   "Missing rcu_read_lock() or "		\
			   "dev_opp_list_lock protection");		\
} while (0)

/*
 * _find_list_dev() - find the device_list_opp entry of @dev in @dev_opp
 *
 * Return: the matching entry, or NULL if @dev is not registered with this
 * OPP table. Caller must hold rcu_read_lock() or dev_opp_list_lock.
 */
static struct device_list_opp *_find_list_dev(const struct device *dev,
					      struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	list_for_each_entry(list_dev, &dev_opp->dev_list, node)
		if (list_dev->dev == dev)
			return list_dev;

	return NULL;
}

/*
 * _managed_opp() - find a shared OPP table created from DT node @np
 *
 * Return: the device_opp whose table was parsed from @np, but only if that
 * table is marked as shared; NULL otherwise.
 */
static struct device_opp *_managed_opp(const struct device_node *np)
{
	struct device_opp *dev_opp;

	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
		if (dev_opp->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains a "opp-shared" property.
			 */
			return dev_opp->shared_opp ? dev_opp : NULL;
		}
	}

	return NULL;
}

/**
 * _find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search list of device OPPs for one containing matching device. Does a RCU
 * reader operation to grab the pointer needed.
 *
 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: For readers, this function must be called under rcu_read_lock().
 * device_opp is a RCU protected pointer, which means that device_opp is valid
 * as long as we are under RCU lock.
 *
 * For Writers, this function must be called with dev_opp_list_lock held.
 */
struct device_opp *_find_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;

	opp_rcu_lockdep_assert();

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
		if (_find_list_dev(dev, dev_opp))
			return dev_opp;

	return ERR_PTR(-ENODEV);
}
/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp:	opp for which voltage has to be returned for
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long v = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp))
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which frequency has to be returned for
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long f = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	/* An unavailable OPP does not report a frequency */
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);

/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp:	opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short duration of times to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return tmp_opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct device_opp *dev_opp;
	unsigned long clock_latency_ns;

	rcu_read_lock();

	/* No table for this device: report zero latency rather than an error */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		clock_latency_ns = 0;
	else
		clock_latency_ns = dev_opp->clock_latency_ns_max;

	rcu_read_unlock();
	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
/**
 * dev_pm_opp_get_suspend_opp() - Get suspend opp
 * @dev:	device for which we do this operation
 *
 * Return: This function returns pointer to the suspend opp if it is
 * defined and available, otherwise it returns NULL.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
	struct device_opp *dev_opp;

	opp_rcu_lockdep_assert();

	/* The suspend OPP is only reported when both set and still available */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
	    !dev_opp->suspend_opp->available)
		return NULL;

	return dev_opp->suspend_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp;
	int count = 0;

	rcu_read_lock();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		/* Propagate -ENODEV/-EINVAL from the lookup as the count */
		count = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n",
			__func__, count);
		goto out_unlock;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

out_unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:	device for which we do this operation
 * @freq:	frequency to search for
 * @available:	true/false - match for available opp
 *
 * Return: Searches for exact match in the opp list and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. if available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. if false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);

/**
 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	/* List is sorted by rate, so the first match is the ceiling */
	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	/* List is sorted by rate: remember the last OPP not above *freq */
	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

/* List-dev Helpers */

/*
 * _kfree_list_dev_rcu() - SRCU callback freeing a device_list_opp entry.
 * @head:	RCU head embedded in the entry being freed
 */
static void _kfree_list_dev_rcu(struct rcu_head *head)
{
	struct device_list_opp *list_dev;

	list_dev = container_of(head, struct device_list_opp, rcu_head);
	kfree_rcu(list_dev, rcu_head);
}

/*
 * _remove_list_dev() - unlink @list_dev from @dev_opp and free it after a
 * grace period. Caller must hold dev_opp_list_lock.
 */
static void _remove_list_dev(struct device_list_opp *list_dev,
			     struct device_opp *dev_opp)
{
	opp_debug_unregister(list_dev, dev_opp);
	list_del(&list_dev->node);
	call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
		  _kfree_list_dev_rcu);
}

/*
 * _add_list_dev() - register @dev with OPP table @dev_opp.
 *
 * Return: the new device_list_opp entry, or NULL on allocation failure.
 * A debugfs registration failure is logged but not treated as fatal.
 * Caller must hold dev_opp_list_lock.
 */
struct device_list_opp *_add_list_dev(const struct device *dev,
				      struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;
	int ret;

	list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
	if (!list_dev)
		return NULL;

	/* Initialize list-dev */
	list_dev->dev = dev;
	list_add_rcu(&list_dev->node, &dev_opp->dev_list);

	/* Create debugfs entries for the dev_opp */
	ret = opp_debug_register(list_dev, dev_opp);
	if (ret)
		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
			__func__, ret);

	return list_dev;
}
/**
 * _add_device_opp() - Find device OPP table or allocate a new one
 * @dev:	device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid device_opp pointer if success, else NULL.
 */
static struct device_opp *_add_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;
	struct device_list_opp *list_dev;

	/* Check for existing list for 'dev' first */
	dev_opp = _find_device_opp(dev);
	if (!IS_ERR(dev_opp))
		return dev_opp;

	/*
	 * Allocate a new device OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
	if (!dev_opp)
		return NULL;

	INIT_LIST_HEAD(&dev_opp->dev_list);

	list_dev = _add_list_dev(dev, dev_opp);
	if (!list_dev) {
		kfree(dev_opp);
		return NULL;
	}

	srcu_init_notifier_head(&dev_opp->srcu_head);
	INIT_LIST_HEAD(&dev_opp->opp_list);

	/* Secure the device list modification */
	list_add_rcu(&dev_opp->node, &dev_opp_list);
	return dev_opp;
}

/**
 * _kfree_device_rcu() - Free device_opp RCU handler
 * @head:	RCU head
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);

	kfree_rcu(device_opp, rcu_head);
}
/**
 * _remove_device_opp() - Removes a device OPP table
 * @dev_opp: device OPP table to be removed.
 *
 * Removes/frees device OPP table if it doesn't contain any OPPs.
 */
static void _remove_device_opp(struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	/* The table stays alive while OPPs or blocking resources remain */
	if (!list_empty(&dev_opp->opp_list))
		return;

	if (dev_opp->supported_hw)
		return;

	if (dev_opp->prop_name)
		return;

	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
				    node);

	_remove_list_dev(list_dev, dev_opp);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&dev_opp->dev_list));

	list_del_rcu(&dev_opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
		  _kfree_device_rcu);
}

/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head:	RCU head
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}

/**
 * _opp_remove()  - Remove an OPP from a table definition
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 * @opp:	pointer to the OPP to remove
 * @notify:	OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
static void _opp_remove(struct device_opp *dev_opp,
			struct dev_pm_opp *opp, bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	list_del_rcu(&opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Drop the table itself if this was its last OPP */
	_remove_device_opp(dev_opp);
}

/**
 * dev_pm_opp_remove()  - Remove an OPP from OPP list
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_opp *dev_opp;
	bool found = false;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		goto unlock;

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
		goto unlock;
	}

	_opp_remove(dev_opp, opp, true);
unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

/*
 * _allocate_opp() - allocate a zeroed OPP and ensure @dev has an OPP table.
 *
 * On success *@dev_opp points to the (possibly newly created) table.
 * Return: the new OPP, or NULL on allocation failure. Caller must hold
 * dev_opp_list_lock.
 */
static struct dev_pm_opp *_allocate_opp(struct device *dev,
					struct device_opp **dev_opp)
{
	struct dev_pm_opp *opp;

	/* allocate new OPP node */
	opp = kzalloc(sizeof(*opp), GFP_KERNEL);
	if (!opp)
		return NULL;

	INIT_LIST_HEAD(&opp->node);

	*dev_opp = _add_device_opp(dev);
	if (!*dev_opp) {
		kfree(opp);
		return NULL;
	}

	return opp;
}
686static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, 687 struct device_opp *dev_opp) 688{ 689 struct dev_pm_opp *opp; 690 struct list_head *head = &dev_opp->opp_list; 691 int ret; 692 693 /* 694 * Insert new OPP in order of increasing frequency and discard if 695 * already present. 696 * 697 * Need to use &dev_opp->opp_list in the condition part of the 'for' 698 * loop, don't replace it with head otherwise it will become an infinite 699 * loop. 700 */ 701 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { 702 if (new_opp->rate > opp->rate) { 703 head = &opp->node; 704 continue; 705 } 706 707 if (new_opp->rate < opp->rate) 708 break; 709 710 /* Duplicate OPPs */ 711 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", 712 __func__, opp->rate, opp->u_volt, opp->available, 713 new_opp->rate, new_opp->u_volt, new_opp->available); 714 715 return opp->available && new_opp->u_volt == opp->u_volt ? 716 0 : -EEXIST; 717 } 718 719 new_opp->dev_opp = dev_opp; 720 list_add_rcu(&new_opp->node, head); 721 722 ret = opp_debug_create_one(new_opp, dev_opp); 723 if (ret) 724 dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n", 725 __func__, ret); 726 727 return 0; 728} 729 730/** 731 * _opp_add_v1() - Allocate a OPP based on v1 bindings. 732 * @dev: device for which we do this operation 733 * @freq: Frequency in Hz for this OPP 734 * @u_volt: Voltage in uVolts for this OPP 735 * @dynamic: Dynamically added OPPs. 736 * 737 * This function adds an opp definition to the opp list and returns status. 738 * The opp is made available by default and it can be controlled using 739 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove. 740 * 741 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table 742 * and freed by dev_pm_opp_of_remove_table. 743 * 744 * Locking: The internal device_opp and opp structures are RCU protected. 
745 * Hence this function internally uses RCU updater strategy with mutex locks 746 * to keep the integrity of the internal data structures. Callers should ensure 747 * that this function is *NOT* called under RCU protection or in contexts where 748 * mutex cannot be locked. 749 * 750 * Return: 751 * 0 On success OR 752 * Duplicate OPPs (both freq and volt are same) and opp->available 753 * -EEXIST Freq are same and volt are different OR 754 * Duplicate OPPs (both freq and volt are same) and !opp->available 755 * -ENOMEM Memory allocation failure 756 */ 757static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, 758 bool dynamic) 759{ 760 struct device_opp *dev_opp; 761 struct dev_pm_opp *new_opp; 762 int ret; 763 764 /* Hold our list modification lock here */ 765 mutex_lock(&dev_opp_list_lock); 766 767 new_opp = _allocate_opp(dev, &dev_opp); 768 if (!new_opp) { 769 ret = -ENOMEM; 770 goto unlock; 771 } 772 773 /* populate the opp table */ 774 new_opp->rate = freq; 775 new_opp->u_volt = u_volt; 776 new_opp->available = true; 777 new_opp->dynamic = dynamic; 778 779 ret = _opp_add(dev, new_opp, dev_opp); 780 if (ret) 781 goto free_opp; 782 783 mutex_unlock(&dev_opp_list_lock); 784 785 /* 786 * Notify the changes in the availability of the operable 787 * frequency/voltage list. 
788 */ 789 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp); 790 return 0; 791 792free_opp: 793 _opp_remove(dev_opp, new_opp, false); 794unlock: 795 mutex_unlock(&dev_opp_list_lock); 796 return ret; 797} 798 799/* TODO: Support multiple regulators */ 800static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, 801 struct device_opp *dev_opp) 802{ 803 u32 microvolt[3] = {0}; 804 u32 val; 805 int count, ret; 806 struct property *prop = NULL; 807 char name[NAME_MAX]; 808 809 /* Search for "opp-microvolt-<name>" */ 810 if (dev_opp->prop_name) { 811 snprintf(name, sizeof(name), "opp-microvolt-%s", 812 dev_opp->prop_name); 813 prop = of_find_property(opp->np, name, NULL); 814 } 815 816 if (!prop) { 817 /* Search for "opp-microvolt" */ 818 sprintf(name, "opp-microvolt"); 819 prop = of_find_property(opp->np, name, NULL); 820 821 /* Missing property isn't a problem, but an invalid entry is */ 822 if (!prop) 823 return 0; 824 } 825 826 count = of_property_count_u32_elems(opp->np, name); 827 if (count < 0) { 828 dev_err(dev, "%s: Invalid %s property (%d)\n", 829 __func__, name, count); 830 return count; 831 } 832 833 /* There can be one or three elements here */ 834 if (count != 1 && count != 3) { 835 dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n", 836 __func__, name, count); 837 return -EINVAL; 838 } 839 840 ret = of_property_read_u32_array(opp->np, name, microvolt, count); 841 if (ret) { 842 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret); 843 return -EINVAL; 844 } 845 846 opp->u_volt = microvolt[0]; 847 opp->u_volt_min = microvolt[1]; 848 opp->u_volt_max = microvolt[2]; 849 850 /* Search for "opp-microamp-<name>" */ 851 prop = NULL; 852 if (dev_opp->prop_name) { 853 snprintf(name, sizeof(name), "opp-microamp-%s", 854 dev_opp->prop_name); 855 prop = of_find_property(opp->np, name, NULL); 856 } 857 858 if (!prop) { 859 /* Search for "opp-microamp" */ 860 sprintf(name, "opp-microamp"); 861 prop = 
of_find_property(opp->np, name, NULL); 862 } 863 864 if (prop && !of_property_read_u32(opp->np, name, &val)) 865 opp->u_amp = val; 866 867 return 0; 868} 869 870/** 871 * dev_pm_opp_set_supported_hw() - Set supported platforms 872 * @dev: Device for which supported-hw has to be set. 873 * @versions: Array of hierarchy of versions to match. 874 * @count: Number of elements in the array. 875 * 876 * This is required only for the V2 bindings, and it enables a platform to 877 * specify the hierarchy of versions it supports. OPP layer will then enable 878 * OPPs, which are available for those versions, based on its 'opp-supported-hw' 879 * property. 880 * 881 * Locking: The internal device_opp and opp structures are RCU protected. 882 * Hence this function internally uses RCU updater strategy with mutex locks 883 * to keep the integrity of the internal data structures. Callers should ensure 884 * that this function is *NOT* called under RCU protection or in contexts where 885 * mutex cannot be locked. 886 */ 887int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, 888 unsigned int count) 889{ 890 struct device_opp *dev_opp; 891 int ret = 0; 892 893 /* Hold our list modification lock here */ 894 mutex_lock(&dev_opp_list_lock); 895 896 dev_opp = _add_device_opp(dev); 897 if (!dev_opp) { 898 ret = -ENOMEM; 899 goto unlock; 900 } 901 902 /* Make sure there are no concurrent readers while updating dev_opp */ 903 WARN_ON(!list_empty(&dev_opp->opp_list)); 904 905 /* Do we already have a version hierarchy associated with dev_opp? 
/**
 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
 * @dev: Device for which supported-hw has to be set.
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure
 * will not be freed.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_put_supported_hw(struct device *dev)
{
	struct device_opp *dev_opp;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' first */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating dev_opp */
	WARN_ON(!list_empty(&dev_opp->opp_list));

	if (!dev_opp->supported_hw) {
		dev_err(dev, "%s: Doesn't have supported hardware list\n",
			__func__);
		goto unlock;
	}

	kfree(dev_opp->supported_hw);
	dev_opp->supported_hw = NULL;
	dev_opp->supported_hw_count = 0;

	/* Try freeing device_opp if this was the last blocking resource */
	_remove_device_opp(dev_opp);

unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
997 */ 998int dev_pm_opp_set_prop_name(struct device *dev, const char *name) 999{ 1000 struct device_opp *dev_opp; 1001 int ret = 0; 1002 1003 /* Hold our list modification lock here */ 1004 mutex_lock(&dev_opp_list_lock); 1005 1006 dev_opp = _add_device_opp(dev); 1007 if (!dev_opp) { 1008 ret = -ENOMEM; 1009 goto unlock; 1010 } 1011 1012 /* Make sure there are no concurrent readers while updating dev_opp */ 1013 WARN_ON(!list_empty(&dev_opp->opp_list)); 1014 1015 /* Do we already have a prop-name associated with dev_opp? */ 1016 if (dev_opp->prop_name) { 1017 dev_err(dev, "%s: Already have prop-name %s\n", __func__, 1018 dev_opp->prop_name); 1019 ret = -EBUSY; 1020 goto err; 1021 } 1022 1023 dev_opp->prop_name = kstrdup(name, GFP_KERNEL); 1024 if (!dev_opp->prop_name) { 1025 ret = -ENOMEM; 1026 goto err; 1027 } 1028 1029 mutex_unlock(&dev_opp_list_lock); 1030 return 0; 1031 1032err: 1033 _remove_device_opp(dev_opp); 1034unlock: 1035 mutex_unlock(&dev_opp_list_lock); 1036 1037 return ret; 1038} 1039EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name); 1040 1041/** 1042 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name 1043 * @dev: Device for which the regulator has to be set. 1044 * 1045 * This is required only for the V2 bindings, and is called for a matching 1046 * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure 1047 * will not be freed. 1048 * 1049 * Locking: The internal device_opp and opp structures are RCU protected. 1050 * Hence this function internally uses RCU updater strategy with mutex locks 1051 * to keep the integrity of the internal data structures. Callers should ensure 1052 * that this function is *NOT* called under RCU protection or in contexts where 1053 * mutex cannot be locked. 
 */
void dev_pm_opp_put_prop_name(struct device *dev)
{
	struct device_opp *dev_opp;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' first */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
		goto unlock;
	}

	/*
	 * This must only be called after the OPPs themselves were removed,
	 * mirroring dev_pm_opp_set_prop_name() which must be called before
	 * any OPP is added.
	 */
	WARN_ON(!list_empty(&dev_opp->opp_list));

	/* Nothing to release if no prop-name was ever set */
	if (!dev_opp->prop_name) {
		dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
		goto unlock;
	}

	kfree(dev_opp->prop_name);
	dev_opp->prop_name = NULL;

	/* Try freeing device_opp if this was the last blocking resource */
	_remove_device_opp(dev_opp);

unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);

/*
 * _opp_is_supported() - check an OPP node against the platform's hw versions
 * @dev: device for which we do this operation
 * @dev_opp: device_opp carrying the supported_hw masks (may be unset)
 * @np: OPP device-tree node to test
 *
 * Each cell of the node's "opp-supported-hw" property is matched (bitwise
 * AND) against the corresponding entry of dev_opp->supported_hw. The OPP is
 * usable only if every pair of masks shares at least one set bit. When no
 * supported_hw list was provided, every OPP is considered supported.
 *
 * Return: true if the OPP is usable on this hardware, false otherwise.
 */
static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
			      struct device_node *np)
{
	unsigned int count = dev_opp->supported_hw_count;
	u32 version;
	int ret;

	/* No hw-version restriction set up: all OPPs are supported */
	if (!dev_opp->supported_hw)
		return true;

	while (count--) {
		ret = of_property_read_u32_index(np, "opp-supported-hw", count,
						 &version);
		if (ret) {
			dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
				 __func__, count, ret);
			return false;
		}

		/* Both of these are bitwise masks of the versions */
		if (!(version & dev_opp->supported_hw[count]))
			return false;
	}

	return true;
}

/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev: device for which we do this operation
 * @np: device node
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp can be controlled using dev_pm_opp_enable/disable functions and
 * may be removed by dev_pm_opp_remove.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 *		OR the OPP is not supported by the hardware (silently skipped)
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Allocates the opp and (if needed) the device_opp it hangs off */
	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, dev_opp, np)) {
		/* ret is 0 here: an unsupported OPP is skipped, not an error */
		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev, dev_opp);
	if (ret)
		goto free_opp;

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (dev_opp->suspend_opp) {
			/* First "opp-suspend" node wins; warn on the rest */
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, dev_opp->suspend_opp->rate,
				 new_opp->rate);
		} else {
			new_opp->suspend = true;
			dev_opp->suspend_opp = new_opp;
		}
	}

	/* Track the worst-case transition latency across all OPPs */
	if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
		dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&dev_opp_list_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}

/**
 * dev_pm_opp_add() - Add an OPP table from a table definitions
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	/* 'true' marks the OPP as dynamic: removable via dev_pm_opp_remove() */
	return _opp_add_v1(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);

/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/*
	 * RCU update: allocate the replacement node up front, outside the
	 * lock, so the locked section cannot fail on allocation.
	 */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	/* 'opp' still holds ERR_PTR(-ENODEV) when the freq wasn't found */
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/*
	 * Publish the copy and retire the old node only after a grace
	 * period, so concurrent RCU readers never see a torn update.
	 */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	/* Error or nothing to do: the preallocated node is not needed */
	mutex_unlock(&dev_opp_list_lock);
	kfree(new_opp);
	return r;
}

/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value.
 * It is meant to make an OPP available again
 * after it was temporarily made unavailable with dev_pm_opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);

/**
 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
 * @dev: device pointer used to lookup device OPPs.
 *
 * Return: pointer to notifier head if found, otherwise -ENODEV or
 * -EINVAL based on type of error casted as pointer. value must be checked
 * with IS_ERR to determine valid pointer or error result.
 *
 * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
	struct device_opp *dev_opp = _find_device_opp(dev);

	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp); /* matching type */

	return &dev_opp->srcu_head;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);

#ifdef CONFIG_OF
/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 *				  entries
 * @dev: device pointer used to lookup device OPPs.
 *
 * Free OPPs created using static entries present in DT.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
1426 */ 1427void dev_pm_opp_of_remove_table(struct device *dev) 1428{ 1429 struct device_opp *dev_opp; 1430 struct dev_pm_opp *opp, *tmp; 1431 1432 /* Hold our list modification lock here */ 1433 mutex_lock(&dev_opp_list_lock); 1434 1435 /* Check for existing list for 'dev' */ 1436 dev_opp = _find_device_opp(dev); 1437 if (IS_ERR(dev_opp)) { 1438 int error = PTR_ERR(dev_opp); 1439 1440 if (error != -ENODEV) 1441 WARN(1, "%s: dev_opp: %d\n", 1442 IS_ERR_OR_NULL(dev) ? 1443 "Invalid device" : dev_name(dev), 1444 error); 1445 goto unlock; 1446 } 1447 1448 /* Find if dev_opp manages a single device */ 1449 if (list_is_singular(&dev_opp->dev_list)) { 1450 /* Free static OPPs */ 1451 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { 1452 if (!opp->dynamic) 1453 _opp_remove(dev_opp, opp, true); 1454 } 1455 } else { 1456 _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp); 1457 } 1458 1459unlock: 1460 mutex_unlock(&dev_opp_list_lock); 1461} 1462EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); 1463 1464/* Returns opp descriptor node for a device, caller must do of_node_put() */ 1465struct device_node *_of_get_opp_desc_node(struct device *dev) 1466{ 1467 /* 1468 * TODO: Support for multiple OPP tables. 1469 * 1470 * There should be only ONE phandle present in "operating-points-v2" 1471 * property. 
1472 */ 1473 1474 return of_parse_phandle(dev->of_node, "operating-points-v2", 0); 1475} 1476 1477/* Initializes OPP tables based on new bindings */ 1478static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) 1479{ 1480 struct device_node *np; 1481 struct device_opp *dev_opp; 1482 int ret = 0, count = 0; 1483 1484 mutex_lock(&dev_opp_list_lock); 1485 1486 dev_opp = _managed_opp(opp_np); 1487 if (dev_opp) { 1488 /* OPPs are already managed */ 1489 if (!_add_list_dev(dev, dev_opp)) 1490 ret = -ENOMEM; 1491 mutex_unlock(&dev_opp_list_lock); 1492 return ret; 1493 } 1494 mutex_unlock(&dev_opp_list_lock); 1495 1496 /* We have opp-list node now, iterate over it and add OPPs */ 1497 for_each_available_child_of_node(opp_np, np) { 1498 count++; 1499 1500 ret = _opp_add_static_v2(dev, np); 1501 if (ret) { 1502 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, 1503 ret); 1504 goto free_table; 1505 } 1506 } 1507 1508 /* There should be one of more OPP defined */ 1509 if (WARN_ON(!count)) 1510 return -ENOENT; 1511 1512 mutex_lock(&dev_opp_list_lock); 1513 1514 dev_opp = _find_device_opp(dev); 1515 if (WARN_ON(IS_ERR(dev_opp))) { 1516 ret = PTR_ERR(dev_opp); 1517 mutex_unlock(&dev_opp_list_lock); 1518 goto free_table; 1519 } 1520 1521 dev_opp->np = opp_np; 1522 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared"); 1523 1524 mutex_unlock(&dev_opp_list_lock); 1525 1526 return 0; 1527 1528free_table: 1529 dev_pm_opp_of_remove_table(dev); 1530 1531 return ret; 1532} 1533 1534/* Initializes OPP tables based on old-deprecated bindings */ 1535static int _of_add_opp_table_v1(struct device *dev) 1536{ 1537 const struct property *prop; 1538 const __be32 *val; 1539 int nr; 1540 1541 prop = of_find_property(dev->of_node, "operating-points", NULL); 1542 if (!prop) 1543 return -ENODEV; 1544 if (!prop->value) 1545 return -ENODATA; 1546 1547 /* 1548 * Each OPP is a set of tuples consisting of frequency and 1549 * voltage like <freq-kHz vol-uV>. 
1550 */ 1551 nr = prop->length / sizeof(u32); 1552 if (nr % 2) { 1553 dev_err(dev, "%s: Invalid OPP list\n", __func__); 1554 return -EINVAL; 1555 } 1556 1557 val = prop->value; 1558 while (nr) { 1559 unsigned long freq = be32_to_cpup(val++) * 1000; 1560 unsigned long volt = be32_to_cpup(val++); 1561 1562 if (_opp_add_v1(dev, freq, volt, false)) 1563 dev_warn(dev, "%s: Failed to add OPP %ld\n", 1564 __func__, freq); 1565 nr -= 2; 1566 } 1567 1568 return 0; 1569} 1570 1571/** 1572 * dev_pm_opp_of_add_table() - Initialize opp table from device tree 1573 * @dev: device pointer used to lookup device OPPs. 1574 * 1575 * Register the initial OPP table with the OPP library for given device. 1576 * 1577 * Locking: The internal device_opp and opp structures are RCU protected. 1578 * Hence this function indirectly uses RCU updater strategy with mutex locks 1579 * to keep the integrity of the internal data structures. Callers should ensure 1580 * that this function is *NOT* called under RCU protection or in contexts where 1581 * mutex cannot be locked. 1582 * 1583 * Return: 1584 * 0 On success OR 1585 * Duplicate OPPs (both freq and volt are same) and opp->available 1586 * -EEXIST Freq are same and volt are different OR 1587 * Duplicate OPPs (both freq and volt are same) and !opp->available 1588 * -ENOMEM Memory allocation failure 1589 * -ENODEV when 'operating-points' property is not found or is invalid data 1590 * in device node. 1591 * -ENODATA when empty 'operating-points' property is found 1592 * -EINVAL when invalid entries are found in opp-v2 table 1593 */ 1594int dev_pm_opp_of_add_table(struct device *dev) 1595{ 1596 struct device_node *opp_np; 1597 int ret; 1598 1599 /* 1600 * OPPs have two version of bindings now. The older one is deprecated, 1601 * try for the new binding first. 1602 */ 1603 opp_np = _of_get_opp_desc_node(dev); 1604 if (!opp_np) { 1605 /* 1606 * Try old-deprecated bindings for backward compatibility with 1607 * older dtbs. 
1608 */ 1609 return _of_add_opp_table_v1(dev); 1610 } 1611 1612 ret = _of_add_opp_table_v2(dev, opp_np); 1613 of_node_put(opp_np); 1614 1615 return ret; 1616} 1617EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); 1618#endif