/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *      Nishanth Menon
 *      Romit Dasgupta
 *      Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/opp.h>
#include <linux/of.h>

/*
 * Internal data structure organization within the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *      |- device 1 (represents voltage domain 1)
 *      |       |- opp 1 (availability, freq, voltage)
 *      |       |- opp 2 ..
 *      ...     ...
 *      |       `- opp n ..
 *      |- device 2 (represents the next voltage domain)
 *      ...
 *      `- device m (represents the mth voltage domain)
 * device 1, 2.. are represented by the dev_opp structure while each opp
 * is represented by the opp structure.
 */

/**
 * struct opp - Generic OPP description structure
 * @node:      opp list node. The nodes are maintained throughout the lifetime
 *             of boot. It is expected that only an optimal set of OPPs are
 *             added to the library by the SoC framework.
 *             RCU usage: the opp list is traversed with RCU locks. Node
 *             modification is possible at runtime, hence the modifications
 *             are protected by the dev_opp_list_lock for integrity.
 *             IMPORTANT: the opp nodes should be maintained in increasing
 *             order.
 * @available: true/false - marks if this OPP is available or not
 * @rate:      Frequency in hertz
 * @u_volt:    Nominal voltage in microvolts corresponding to this OPP
 * @dev_opp:   points back to the device_opp struct this opp belongs to
 *
 * This structure stores the OPP information for a given device.
 */
struct opp {
        struct list_head node;

        bool available;
        unsigned long rate;
        unsigned long u_volt;

        struct device_opp *dev_opp;
};

/**
 * struct device_opp - Device opp structure
 * @node:     list node - contains the devices with OPPs that
 *            have been registered. Nodes once added are not modified in this
 *            list.
 *            RCU usage: nodes are not modified in the list of device_opp,
 *            however addition is possible and is secured by dev_opp_list_lock
 * @dev:      device pointer
 * @head:     notifier head to notify the OPP availability changes.
 * @opp_list: list of opps
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared with users as it is
 * meant for book keeping and is private to the OPP library.
 */
struct device_opp {
        struct list_head node;

        struct device *dev;
        struct srcu_notifier_head head;
        struct list_head opp_list;
};

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opps it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);

/**
 * find_device_opp() - find device_opp struct using device pointer
 * @dev: device pointer used to lookup device OPPs
 *
 * Search the list of device OPPs for one containing the matching device. Does
 * an RCU reader operation to grab the pointer needed.
 *
 * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: This function must be called under rcu_read_lock(). device_opp
 * is an RCU protected pointer. This means that device_opp is valid as long
 * as we are under RCU lock.
 */
static struct device_opp *find_device_opp(struct device *dev)
{
        struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);

        if (unlikely(IS_ERR_OR_NULL(dev))) {
                pr_err("%s: Invalid parameters\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
                if (tmp_dev_opp->dev == dev) {
                        dev_opp = tmp_dev_opp;
                        break;
                }
        }

        return dev_opp;
}

/**
 * opp_get_voltage() - Gets the voltage corresponding to an available opp
 * @opp: opp for which the voltage has to be returned
 *
 * Return the voltage in microvolts corresponding to the opp, else
 * return 0.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that the opp which could have been fetched by
 * the opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long opp_get_voltage(struct opp *opp)
{
        struct opp *tmp_opp;
        unsigned long v = 0;

        tmp_opp = rcu_dereference(opp);
        if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
                pr_err("%s: Invalid parameters\n", __func__);
        else
                v = tmp_opp->u_volt;

        return v;
}

/**
 * opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp: opp for which the frequency has to be returned
 *
 * Return the frequency in hertz corresponding to the opp, else
 * return 0.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that the opp which could have been fetched by
 * the opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long opp_get_freq(struct opp *opp)
{
        struct opp *tmp_opp;
        unsigned long f = 0;

        tmp_opp = rcu_dereference(opp);
        if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
                pr_err("%s: Invalid parameters\n", __func__);
        else
                f = tmp_opp->rate;

        return f;
}
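
/*
 * Illustrative example of the reader contract described above: a hypothetical
 * consumer looks up an OPP and reads its voltage. The names my_dev and
 * target_freq are placeholders; the lookup and the opp_get_voltage() call must
 * share the same RCU read-side critical section:
 *
 *      unsigned long target_freq = 1000000000, u_volt = 0;
 *      struct opp *opp;
 *
 *      rcu_read_lock();
 *      opp = opp_find_freq_ceil(my_dev, &target_freq);
 *      if (!IS_ERR(opp))
 *              u_volt = opp_get_voltage(opp);
 *      rcu_read_unlock();
 */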

/**
 * opp_get_opp_count() - Get number of opps available in the opp list
 * @dev: device for which we do this operation
 *
 * This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 *
 * Locking: This function must be called under rcu_read_lock(). This function
 * internally references two RCU protected structures: device_opp and opp which
 * are safe as long as we are under a common RCU locked section.
 */
int opp_get_opp_count(struct device *dev)
{
        struct device_opp *dev_opp;
        struct opp *temp_opp;
        int count = 0;

        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
                int r = PTR_ERR(dev_opp);
                dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
                return r;
        }

        list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
                if (temp_opp->available)
                        count++;
        }

        return count;
}

/**
 * opp_find_freq_exact() - search for an exact frequency
 * @dev:       device for which we do this operation
 * @freq:      frequency to search for
 * @available: true/false - match for available opp
 *
 * Searches for an exact match in the opp list and returns a pointer to the
 * matching opp if found, else returns ERR_PTR in case of error which should
 * be handled using IS_ERR.
 *
 * Note: available is a modifier for the search. If available == true, then the
 * match is for an exact frequency which is available in the stored OPP table.
 * If false, the match is for an exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
                                bool available)
{
        struct device_opp *dev_opp;
        struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
                int r = PTR_ERR(dev_opp);
                dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
                return ERR_PTR(r);
        }

        list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
                if (temp_opp->available == available &&
                                temp_opp->rate == freq) {
                        opp = temp_opp;
                        break;
                }
        }

        return opp;
}
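
/*
 * Illustrative example: checking whether a specific, currently available
 * frequency exists before requesting it. my_dev and freq are placeholder
 * names:
 *
 *      rcu_read_lock();
 *      opp = opp_find_freq_exact(my_dev, freq, true);
 *      rcu_read_unlock();
 *      if (IS_ERR(opp))
 *              dev_warn(my_dev, "%lu Hz is not an available OPP\n", freq);
 */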

/**
 * opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:  device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Returns the matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error which should be handled using IS_ERR.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
        struct device_opp *dev_opp;
        struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

        if (!dev || !freq) {
                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
                return ERR_PTR(-EINVAL);
        }

        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp))
                return opp;

        list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
                if (temp_opp->available && temp_opp->rate >= *freq) {
                        opp = temp_opp;
                        *freq = opp->rate;
                        break;
                }
        }

        return opp;
}

/**
 * opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:  device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Returns the matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error which should be handled using IS_ERR.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
        struct device_opp *dev_opp;
        struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

        if (!dev || !freq) {
                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
                return ERR_PTR(-EINVAL);
        }

        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp))
                return opp;

        list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
                if (temp_opp->available) {
                        /* go to the next node, before choosing prev */
                        if (temp_opp->rate > *freq)
                                break;
                        else
                                opp = temp_opp;
                }
        }
        if (!IS_ERR(opp))
                *freq = opp->rate;

        return opp;
}
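
/*
 * Illustrative example: walking all available OPPs of a device in ascending
 * frequency order by repeatedly rounding up from freq + 1. The device pointer
 * my_dev is a placeholder:
 *
 *      unsigned long freq = 0;
 *      struct opp *opp;
 *
 *      rcu_read_lock();
 *      while (!IS_ERR(opp = opp_find_freq_ceil(my_dev, &freq))) {
 *              pr_info("OPP: %lu Hz @ %lu uV\n", freq, opp_get_voltage(opp));
 *              freq++;
 *      }
 *      rcu_read_unlock();
 */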

/**
 * opp_add() - Add an OPP to the device's OPP table
 * @dev:    device for which we do this operation
 * @freq:   Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using the
 * opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses the RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * the mutex cannot be locked.
 */
int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
        struct device_opp *dev_opp = NULL;
        struct opp *opp, *new_opp;
        struct list_head *head;

        /* allocate new OPP node */
        new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
        if (!new_opp) {
                dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
                return -ENOMEM;
        }

        /* Hold our list modification lock here */
        mutex_lock(&dev_opp_list_lock);

        /* Check for existing list for 'dev' */
        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
                /*
                 * Allocate a new device OPP table. In the infrequent case
                 * where a new device needs to be added, we pay this
                 * penalty.
                 */
                dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
                if (!dev_opp) {
                        mutex_unlock(&dev_opp_list_lock);
                        kfree(new_opp);
                        dev_warn(dev,
                                 "%s: Unable to create device OPP structure\n",
                                 __func__);
                        return -ENOMEM;
                }

                dev_opp->dev = dev;
                srcu_init_notifier_head(&dev_opp->head);
                INIT_LIST_HEAD(&dev_opp->opp_list);

                /* Secure the device list modification */
                list_add_rcu(&dev_opp->node, &dev_opp_list);
        }

        /* populate the opp table */
        new_opp->dev_opp = dev_opp;
        new_opp->rate = freq;
        new_opp->u_volt = u_volt;
        new_opp->available = true;

        /* Insert new OPP in order of increasing frequency */
        head = &dev_opp->opp_list;
        list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
                if (new_opp->rate < opp->rate)
                        break;
                else
                        head = &opp->node;
        }

        list_add_rcu(&new_opp->node, head);
        mutex_unlock(&dev_opp_list_lock);

        /*
         * Notify the changes in the availability of the operable
         * frequency/voltage list.
         */
        srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
        return 0;
}
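
/*
 * Illustrative example: SoC initialization code registering a small OPP table
 * for a device, 600 MHz at 1.05 V and 800 MHz at 1.20 V. The device pointer
 * mpu_dev and the values are placeholders:
 *
 *      opp_add(mpu_dev, 600000000, 1050000);
 *      opp_add(mpu_dev, 800000000, 1200000);
 */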

/**
 * opp_set_availability() - helper to set the availability of an opp
 * @dev:              device for which we do this operation
 * @freq:             OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation; opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Returns -EINVAL for bad pointers, -ENOMEM if no memory is available for the
 * copy operation, and 0 if no modification was needed OR the modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses the RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int opp_set_availability(struct device *dev, unsigned long freq,
                                bool availability_req)
{
        struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
        struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
        int r = 0;

        /* keep the node allocated */
        new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
        if (!new_opp) {
                dev_warn(dev, "%s: Unable to create OPP\n", __func__);
                return -ENOMEM;
        }

        mutex_lock(&dev_opp_list_lock);

        /* Find the device_opp */
        list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
                if (dev == tmp_dev_opp->dev) {
                        dev_opp = tmp_dev_opp;
                        break;
                }
        }
        if (IS_ERR(dev_opp)) {
                r = PTR_ERR(dev_opp);
                dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
                goto unlock;
        }

        /* Do we have the frequency? */
        list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
                if (tmp_opp->rate == freq) {
                        opp = tmp_opp;
                        break;
                }
        }
        if (IS_ERR(opp)) {
                r = PTR_ERR(opp);
                goto unlock;
        }

        /* Is update really needed? */
        if (opp->available == availability_req)
                goto unlock;
        /* copy the old data over */
        *new_opp = *opp;

        /* plug in new node */
        new_opp->available = availability_req;

        list_replace_rcu(&opp->node, &new_opp->node);
        mutex_unlock(&dev_opp_list_lock);
        synchronize_rcu();

        /* Notify the change of the OPP availability */
        if (availability_req)
                srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
                                         new_opp);
        else
                srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
                                         new_opp);

        /* clean up old opp */
        new_opp = opp;
        goto out;

unlock:
        mutex_unlock(&dev_opp_list_lock);
out:
        kfree(new_opp);
        return r;
}

/**
 * opp_enable() - Enable a specific OPP
 * @dev:  device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_enable(struct device *dev, unsigned long freq)
{
        return opp_set_availability(dev, freq, true);
}

/**
 * opp_disable() - Disable a specific OPP
 * @dev:  device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_disable(struct device *dev, unsigned long freq)
{
        return opp_set_availability(dev, freq, false);
}
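
/*
 * Illustrative example: a hypothetical thermal handler temporarily removing
 * the highest OPP and restoring it later; consumers registered through
 * opp_get_notifier() see the OPP_EVENT_DISABLE/ENABLE notifications. my_dev
 * and the frequency are placeholders:
 *
 *      On an over-temperature event:
 *              opp_disable(my_dev, 800000000);
 *      Once the temperature recovers:
 *              opp_enable(my_dev, 800000000);
 */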

#ifdef CONFIG_CPU_FREQ
/**
 * opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:   device for which we do this operation
 * @table: cpufreq table returned to the caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * opp list is already initialized and ready for usage.
 *
 * This function allocates the required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
 * if no memory is available for the operation (table is not populated), and 0
 * if successful and the table is populated.
 *
 * WARNING: It is important for the callers to refresh their copy of the table
 * if any of the opp_{add,enable,disable} functions have been invoked in the
 * interim.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * To simplify the logic, we pretend we are the updater and hold the relevant
 * mutex here. Callers should ensure that this function is *NOT* called under
 * RCU protection or in contexts where mutex locking cannot be used.
 */
int opp_init_cpufreq_table(struct device *dev,
                           struct cpufreq_frequency_table **table)
{
        struct device_opp *dev_opp;
        struct opp *opp;
        struct cpufreq_frequency_table *freq_table;
        int i = 0;

        /* Pretend as if I am an updater */
        mutex_lock(&dev_opp_list_lock);

        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
                int r = PTR_ERR(dev_opp);
                mutex_unlock(&dev_opp_list_lock);
                dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
                return r;
        }

        freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
                             (opp_get_opp_count(dev) + 1), GFP_KERNEL);
        if (!freq_table) {
                mutex_unlock(&dev_opp_list_lock);
                dev_warn(dev, "%s: Unable to allocate frequency table\n",
                         __func__);
                return -ENOMEM;
        }

        list_for_each_entry(opp, &dev_opp->opp_list, node) {
                if (opp->available) {
                        freq_table[i].index = i;
                        freq_table[i].frequency = opp->rate / 1000;
                        i++;
                }
        }
        mutex_unlock(&dev_opp_list_lock);

        freq_table[i].index = i;
        freq_table[i].frequency = CPUFREQ_TABLE_END;

        *table = &freq_table[0];

        return 0;
}

/**
 * opp_free_cpufreq_table() - free the cpufreq table
 * @dev:   device for which we do this operation
 * @table: table to free
 *
 * Free up the table allocated by opp_init_cpufreq_table
 */
void opp_free_cpufreq_table(struct device *dev,
                            struct cpufreq_frequency_table **table)
{
        if (!table)
                return;

        kfree(*table);
        *table = NULL;
}
#endif          /* CONFIG_CPU_FREQ */

/**
 * opp_get_notifier() - find notifier_head of the device with opp
 * @dev: device pointer used to lookup device OPPs.
 */
struct srcu_notifier_head *opp_get_notifier(struct device *dev)
{
        struct device_opp *dev_opp = find_device_opp(dev);

        if (IS_ERR(dev_opp))
                return ERR_CAST(dev_opp); /* matching type */

        return &dev_opp->head;
}

#ifdef CONFIG_OF
/**
 * of_init_opp_table() - Initialize opp table from device tree
 * @dev: device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for the given device.
 */
int of_init_opp_table(struct device *dev)
{
        const struct property *prop;
        const __be32 *val;
        int nr;

        prop = of_find_property(dev->of_node, "operating-points", NULL);
        if (!prop)
                return -ENODEV;
        if (!prop->value)
                return -ENODATA;

        /*
         * Each OPP is a set of tuples consisting of frequency and
         * voltage like <freq-kHz volt-uV>.
         */
        nr = prop->length / sizeof(u32);
        if (nr % 2) {
                dev_err(dev, "%s: Invalid OPP list\n", __func__);
                return -EINVAL;
        }

        val = prop->value;
        while (nr) {
                unsigned long freq = be32_to_cpup(val++) * 1000;
                unsigned long volt = be32_to_cpup(val++);

                /* warn and move on to the next tuple on failure */
                if (opp_add(dev, freq, volt))
                        dev_warn(dev, "%s: Failed to add OPP %lu\n",
                                 __func__, freq);
                nr -= 2;
        }

        return 0;
}
#endif          /* CONFIG_OF */
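
/*
 * Illustrative example: a device tree node describing two OPPs in the
 * <frequency-kHz voltage-uV> tuple format parsed above, together with the
 * matching driver-side calls. The node and device names are placeholders and
 * error handling is omitted:
 *
 *      cpu0: cpu@0 {
 *              operating-points = <
 *                      600000  1050000
 *                      800000  1200000
 *              >;
 *      };
 *
 *      struct cpufreq_frequency_table *freq_table;
 *
 *      of_init_opp_table(cpu_dev);
 *      opp_init_cpufreq_table(cpu_dev, &freq_table);
 *      ...
 *      opp_free_cpufreq_table(cpu_dev, &freq_table);
 */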