/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/opp.h>
#include <linux/of.h>
#include <linux/export.h>

/*
 * Internal data structure organization within the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	|- opp 2 ..
 *	...	...
 *	|	`- opp n ..
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by dev_opp structure while each opp
 * is represented by the opp structure.
 */

/**
 * struct opp - Generic OPP description structure
 * @node:	opp list node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected only an optimal set of OPPs are
 *		added to the library by the SoC framework.
 *		RCU usage: opp list is traversed with RCU locks. node
 *		modification is possible at runtime, hence the modifications
 *		are protected by the dev_opp_list_lock for integrity.
 *		IMPORTANT: the opp nodes should be maintained in increasing
 *		order.
 * @available:	true/false - marks whether this OPP is available or not
 * @rate:	Frequency in hertz
 * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 * @head:	RCU callback head used for deferred freeing of the opp
 *
 * This structure stores the OPP information for a given device.
 */
struct opp {
	struct list_head node;

	bool available;
	unsigned long rate;
	unsigned long u_volt;

	struct device_opp *dev_opp;
	struct rcu_head head;
};

/**
 * struct device_opp - Device opp structure
 * @node:	list node - contains the devices with OPPs that
 *		have been registered. Nodes once added are not modified in this
 *		list.
 *		RCU usage: nodes are not modified in the list of device_opp,
 *		however addition is possible and is secured by dev_opp_list_lock
 * @dev:	device pointer
 * @head:	notifier head to notify the OPP availability changes.
 * @opp_list:	list of opps
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared with users as it is
 * meant for bookkeeping and private to the OPP library.
 */
struct device_opp {
	struct list_head node;

	struct device *dev;
	struct srcu_notifier_head head;
	struct list_head opp_list;
};

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);

/**
 * find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search list of device OPPs for one containing matching device. Does an RCU
 * reader operation to grab the pointer needed.
 *
 * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: This function must be called under rcu_read_lock(). device_opp
 * is an RCU protected pointer. This means that device_opp is valid as long
 * as we are under RCU lock.
 */
static struct device_opp *find_device_opp(struct device *dev)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
		if (tmp_dev_opp->dev == dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}

	return dev_opp;
}

/**
 * opp_get_voltage() - Gets the voltage corresponding to an available opp
 * @opp:	opp for which the voltage has to be returned
 *
 * Returns the voltage in microvolts corresponding to the opp, else
 * returns 0.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that an opp which could have been fetched by
 * the opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same RCU read-side section as this function, prior to unlocking
 * with rcu_read_unlock(), to maintain the integrity of the pointer.
 */
unsigned long opp_get_voltage(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long v = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
EXPORT_SYMBOL(opp_get_voltage);

/**
 * opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which the frequency has to be returned
 *
 * Returns the frequency in hertz corresponding to the opp, else
 * returns 0.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that an opp which could have been fetched by
 * the opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same RCU read-side section as this function, prior to unlocking
 * with rcu_read_unlock(), to maintain the integrity of the pointer.
 */
unsigned long opp_get_freq(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long f = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
EXPORT_SYMBOL(opp_get_freq);

/**
 * opp_get_opp_count() - Get number of opps available in the opp list
 * @dev:	device for which we do this operation
 *
 * This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 *
 * Locking: This function must be called under rcu_read_lock(). This function
 * internally references two RCU protected structures: device_opp and opp which
 * are safe as long as we are under a common RCU locked section.
 */
int opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp;
	int count = 0;

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return r;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

	return count;
}
EXPORT_SYMBOL(opp_get_opp_count);

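/*
 * Usage sketch (an assumption, not part of the kernel source): a consumer
 * sizing its own frequency table from the registered OPPs would hold the
 * RCU read lock around the count, since opp_get_opp_count() walks the
 * RCU-protected lists. The device pointer "dev" and the surrounding driver
 * context are hypothetical.
 *
 *	int num_opps;
 *
 *	rcu_read_lock();
 *	num_opps = opp_get_opp_count(dev);
 *	rcu_read_unlock();
 *	if (num_opps <= 0)
 *		return num_opps ? num_opps : -ENODATA;
 */
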
/**
 * opp_find_freq_exact() - search for an exact frequency
 * @dev:	device for which we do this operation
 * @freq:	frequency to search for
 * @available:	true/false - match for available opp
 *
 * Searches for exact match in the opp list and returns pointer to the matching
 * opp if found, else returns ERR_PTR in case of error and should be handled
 * using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not currently
 * available, or the other way around.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
				bool available)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL(opp_find_freq_exact);

/**
 * opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL(opp_find_freq_ceil);

/**
 * opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL(opp_find_freq_floor);

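/*
 * Usage sketch (an assumption, not part of the kernel source): a typical
 * DVFS transition looks up the OPP for a target rate and reads its voltage
 * inside a single RCU read-side section, because the returned opp pointer
 * is only guaranteed valid until rcu_read_unlock(). "dev" and "target_freq"
 * are hypothetical.
 *
 *	struct opp *opp;
 *	unsigned long freq = target_freq;
 *	unsigned long volt;
 *
 *	rcu_read_lock();
 *	opp = opp_find_freq_ceil(dev, &freq);
 *	if (IS_ERR(opp)) {
 *		rcu_read_unlock();
 *		return PTR_ERR(opp);
 *	}
 *	volt = opp_get_voltage(opp);
 *	rcu_read_unlock();
 *
 * The caller would then program the regulator to "volt" and the clock to
 * "freq", in whichever order the direction of the change requires.
 */
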
/**
 * opp_add() - Add an OPP definition to the device's OPP list
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * the opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * a mutex cannot be locked.
 */
int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	struct device_opp *dev_opp = NULL;
	struct opp *opp, *new_opp;
	struct list_head *head;

	/* allocate new OPP node */
	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
		return -ENOMEM;
	}

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		/*
		 * Allocate a new device OPP table. In the infrequent case
		 * where a new device needs to be added, we pay this
		 * penalty.
		 */
		dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
		if (!dev_opp) {
			mutex_unlock(&dev_opp_list_lock);
			kfree(new_opp);
			dev_warn(dev,
				"%s: Unable to create device OPP structure\n",
				__func__);
			return -ENOMEM;
		}

		dev_opp->dev = dev;
		srcu_init_notifier_head(&dev_opp->head);
		INIT_LIST_HEAD(&dev_opp->opp_list);

		/* Secure the device list modification */
		list_add_rcu(&dev_opp->node, &dev_opp_list);
	}

	/* populate the opp table */
	new_opp->dev_opp = dev_opp;
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;

	/* Insert new OPP in order of increasing frequency */
	head = &dev_opp->opp_list;
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate < opp->rate)
			break;
		else
			head = &opp->node;
	}

	list_add_rcu(&new_opp->node, head);
	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
	return 0;
}

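/*
 * Usage sketch (an assumption, not part of the kernel source): a SoC
 * framework or platform driver typically registers its OPPs once during
 * init/probe, before any consumer calls the opp_find_freq_* helpers. The
 * frequencies and voltages below are made up; real code would also check
 * each return value, since every call can fail independently (e.g. -ENOMEM).
 *
 *	opp_add(dev,  600000000,  990000);	600 MHz at  990 mV
 *	opp_add(dev,  800000000, 1100000);	800 MHz at 1100 mV
 *	opp_add(dev, 1000000000, 1260000);	  1 GHz at 1260 mV
 */
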
/**
 * opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation; opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int opp_set_availability(struct device *dev, unsigned long freq,
		bool availability_req)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create OPP\n", __func__);
		return -ENOMEM;
	}

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
		if (dev == tmp_dev_opp->dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	kfree_rcu(opp, head);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	kfree(new_opp);
	return r;
}

/**
 * opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it has been temporarily made unavailable with
 * opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_enable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL(opp_enable);

/**
 * opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_disable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL(opp_disable);

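/*
 * Usage sketch (an assumption, not part of the kernel source): a thermal or
 * platform policy could temporarily park the highest OPP while a constraint
 * applies and restore it once the constraint is lifted. "dev" and the 1 GHz
 * rate are hypothetical.
 *
 *	opp_disable(dev, 1000000000);	constraint in effect
 *	...
 *	opp_enable(dev, 1000000000);	constraint lifted
 *
 * Both helpers take dev_opp_list_lock internally, so they must not be
 * called from atomic context or from within an RCU read-side section.
 */
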
#ifdef CONFIG_CPU_FREQ
/**
 * opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	Cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * opp list is already initialized and ready for usage.
 *
 * This function allocates required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENOMEM if no memory is available for the operation (table is not
 * populated), and 0 if successful and the table is populated.
 *
 * WARNING: It is important for the callers to ensure refreshing their copy of
 * the table if any of the opp_enable/opp_disable/opp_add functions have been
 * invoked in the interim.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * To simplify the logic, we pretend we are an updater and hold the relevant
 * mutex here. Callers should ensure that this function is *NOT* called under
 * RCU protection or in contexts where mutex locking cannot be used.
 */
int opp_init_cpufreq_table(struct device *dev,
			    struct cpufreq_frequency_table **table)
{
	struct device_opp *dev_opp;
	struct opp *opp;
	struct cpufreq_frequency_table *freq_table;
	int i = 0;

	/* Pretend as if I am an updater */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		mutex_unlock(&dev_opp_list_lock);
		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
			     (opp_get_opp_count(dev) + 1), GFP_KERNEL);
	if (!freq_table) {
		mutex_unlock(&dev_opp_list_lock);
		dev_warn(dev, "%s: Unable to allocate frequency table\n",
			__func__);
		return -ENOMEM;
	}

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->available) {
			freq_table[i].index = i;
			freq_table[i].frequency = opp->rate / 1000;
			i++;
		}
	}
	mutex_unlock(&dev_opp_list_lock);

	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

	return 0;
}

/**
 * opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by opp_init_cpufreq_table
 */
void opp_free_cpufreq_table(struct device *dev,
				struct cpufreq_frequency_table **table)
{
	if (!table)
		return;

	kfree(*table);
	*table = NULL;
}
#endif		/* CONFIG_CPU_FREQ */

/**
 * opp_get_notifier() - find notifier_head of the device with opp
 * @dev:	device pointer used to lookup device OPPs.
 */
struct srcu_notifier_head *opp_get_notifier(struct device *dev)
{
	struct device_opp *dev_opp = find_device_opp(dev);

	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp); /* matching type */

	return &dev_opp->head;
}

#ifdef CONFIG_OF
/**
 * of_init_opp_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for given device.
 */
int of_init_opp_table(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP list\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		/*
		 * Decrement nr unconditionally so a failed opp_add() cannot
		 * leave this loop spinning on the same pair forever.
		 */
		if (opp_add(dev, freq, volt))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		nr -= 2;
	}

	return 0;
}
#endif
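
/*
 * Device tree sketch (an assumption, not part of the kernel source),
 * matching the <freq-kHz vol-uV> pairs parsed by of_init_opp_table() above.
 * Frequencies are listed in kHz and voltages in uV; the node name and the
 * values are made up:
 *
 *	cpu0: cpu@0 {
 *		operating-points = <
 *			 600000  990000
 *			 800000 1100000
 *			1000000 1260000
 *		>;
 *	};
 */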