Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

PM / OPP: Move away from RCU locking

The RCU locking isn't well suited for the OPP core. RCU locking fits
better for reader-heavy workloads, while the OPP core has at most one or
two readers at a time.

On top of that, the way RCU locking was used with the OPP core was
getting very confusing. The individual OPPs were mostly handled well,
i.e. for an update a new structure was created and then replaced the
older one. But the OPP tables were updated directly all the time from
various parts of the core. Though they were mostly used from within
RCU-locked regions, they didn't have much to do with RCU and were
governed by the mutex instead.

And that mixed with the 'opp_table_lock' has made the core even more
confusing.

Now that we are already managing the OPPs and the OPP tables with the
kernel reference-counting infrastructure, we can get rid of RCU locking
completely and simplify the code a lot.

Remove all RCU references from code and comments.

Acquire opp_table->lock while parsing the list of OPPs though.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

authored by

Viresh Kumar and committed by
Rafael J. Wysocki
052c6f19 5b650b38

+80 -294
+75 -219
drivers/base/power/opp/core.c
··· 32 32 /* Lock to allow exclusive modification to the device and opp lists */ 33 33 DEFINE_MUTEX(opp_table_lock); 34 34 35 - #define opp_rcu_lockdep_assert() \ 36 - do { \ 37 - RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 38 - !lockdep_is_held(&opp_table_lock), \ 39 - "Missing rcu_read_lock() or " \ 40 - "opp_table_lock protection"); \ 41 - } while (0) 42 - 43 35 static void dev_pm_opp_get(struct dev_pm_opp *opp); 44 36 45 37 static struct opp_device *_find_opp_dev(const struct device *dev, ··· 65 73 * _find_opp_table() - find opp_table struct using device pointer 66 74 * @dev: device pointer used to lookup OPP table 67 75 * 68 - * Search OPP table for one containing matching device. Does a RCU reader 69 - * operation to grab the pointer needed. 76 + * Search OPP table for one containing matching device. 70 77 * 71 78 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or 72 79 * -EINVAL based on type of error. ··· 99 108 */ 100 109 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) 101 110 { 102 - struct dev_pm_opp *tmp_opp; 103 - unsigned long v = 0; 104 - 105 - rcu_read_lock(); 106 - 107 - tmp_opp = rcu_dereference(opp); 108 - if (IS_ERR_OR_NULL(tmp_opp)) 111 + if (IS_ERR_OR_NULL(opp)) { 109 112 pr_err("%s: Invalid parameters\n", __func__); 110 - else 111 - v = tmp_opp->supplies[0].u_volt; 113 + return 0; 114 + } 112 115 113 - rcu_read_unlock(); 114 - return v; 116 + return opp->supplies[0].u_volt; 115 117 } 116 118 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); 117 119 ··· 117 133 */ 118 134 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) 119 135 { 120 - struct dev_pm_opp *tmp_opp; 121 - unsigned long f = 0; 122 - 123 - rcu_read_lock(); 124 - 125 - tmp_opp = rcu_dereference(opp); 126 - if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) 136 + if (IS_ERR_OR_NULL(opp) || !opp->available) { 127 137 pr_err("%s: Invalid parameters\n", __func__); 128 - else 129 - f = tmp_opp->rate; 138 + return 0; 139 + } 130 140 131 - 
rcu_read_unlock(); 132 - return f; 141 + return opp->rate; 133 142 } 134 143 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); 135 144 ··· 138 161 */ 139 162 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) 140 163 { 141 - struct dev_pm_opp *tmp_opp; 142 - bool turbo; 143 - 144 - rcu_read_lock(); 145 - 146 - tmp_opp = rcu_dereference(opp); 147 - if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { 164 + if (IS_ERR_OR_NULL(opp) || !opp->available) { 148 165 pr_err("%s: Invalid parameters\n", __func__); 149 166 return false; 150 167 } 151 168 152 - turbo = tmp_opp->turbo; 153 - 154 - rcu_read_unlock(); 155 - return turbo; 169 + return opp->turbo; 156 170 } 157 171 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); 158 172 ··· 191 223 * @dev: device for which we do this operation 192 224 * 193 225 * Return: This function returns the max voltage latency in nanoseconds. 194 - * 195 - * Locking: This function takes rcu_read_lock(). 196 226 */ 197 227 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) 198 228 { ··· 222 256 if (IS_ERR(opp_table)) 223 257 goto free_uV; 224 258 225 - rcu_read_lock(); 226 - 227 259 memcpy(regulators, opp_table->regulators, count * sizeof(*regulators)); 260 + 261 + mutex_lock(&opp_table->lock); 228 262 229 263 for (i = 0; i < count; i++) { 230 264 uV[i].min = ~0; 231 265 uV[i].max = 0; 232 266 233 - list_for_each_entry_rcu(opp, &opp_table->opp_list, node) { 267 + list_for_each_entry(opp, &opp_table->opp_list, node) { 234 268 if (!opp->available) 235 269 continue; 236 270 ··· 241 275 } 242 276 } 243 277 244 - rcu_read_unlock(); 278 + mutex_unlock(&opp_table->lock); 245 279 dev_pm_opp_put_opp_table(opp_table); 246 280 247 281 /* ··· 270 304 * 271 305 * Return: This function returns the max transition latency, in nanoseconds, to 272 306 * switch from one OPP to other. 273 - * 274 - * Locking: This function takes rcu_read_lock(). 
275 307 */ 276 308 unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev) 277 309 { ··· 309 345 * 310 346 * Return: This function returns the number of available opps if there are any, 311 347 * else returns 0 if none or the corresponding error value. 312 - * 313 - * Locking: This function takes rcu_read_lock(). 314 348 */ 315 349 int dev_pm_opp_get_opp_count(struct device *dev) 316 350 { ··· 324 362 return count; 325 363 } 326 364 327 - rcu_read_lock(); 365 + mutex_lock(&opp_table->lock); 328 366 329 - list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { 367 + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { 330 368 if (temp_opp->available) 331 369 count++; 332 370 } 333 371 334 - rcu_read_unlock(); 372 + mutex_unlock(&opp_table->lock); 335 373 dev_pm_opp_put_opp_table(opp_table); 336 374 337 375 return count; ··· 376 414 return ERR_PTR(r); 377 415 } 378 416 379 - rcu_read_lock(); 417 + mutex_lock(&opp_table->lock); 380 418 381 - list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { 419 + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { 382 420 if (temp_opp->available == available && 383 421 temp_opp->rate == freq) { 384 422 opp = temp_opp; ··· 389 427 } 390 428 } 391 429 392 - rcu_read_unlock(); 430 + mutex_unlock(&opp_table->lock); 393 431 dev_pm_opp_put_opp_table(opp_table); 394 432 395 433 return opp; ··· 401 439 { 402 440 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); 403 441 404 - list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { 442 + mutex_lock(&opp_table->lock); 443 + 444 + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { 405 445 if (temp_opp->available && temp_opp->rate >= *freq) { 406 446 opp = temp_opp; 407 447 *freq = opp->rate; ··· 413 449 break; 414 450 } 415 451 } 452 + 453 + mutex_unlock(&opp_table->lock); 416 454 417 455 return opp; 418 456 } ··· 452 486 if (IS_ERR(opp_table)) 453 487 return ERR_CAST(opp_table); 454 488 455 - rcu_read_lock(); 456 - 457 489 
opp = _find_freq_ceil(opp_table, freq); 458 490 459 - rcu_read_unlock(); 460 491 dev_pm_opp_put_opp_table(opp_table); 461 492 462 493 return opp; ··· 493 530 if (IS_ERR(opp_table)) 494 531 return ERR_CAST(opp_table); 495 532 496 - rcu_read_lock(); 533 + mutex_lock(&opp_table->lock); 497 534 498 - list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { 535 + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { 499 536 if (temp_opp->available) { 500 537 /* go to the next node, before choosing prev */ 501 538 if (temp_opp->rate > *freq) ··· 508 545 /* Increment the reference count of OPP */ 509 546 if (!IS_ERR(opp)) 510 547 dev_pm_opp_get(opp); 511 - rcu_read_unlock(); 548 + mutex_unlock(&opp_table->lock); 512 549 dev_pm_opp_put_opp_table(opp_table); 513 550 514 551 if (!IS_ERR(opp)) ··· 517 554 return opp; 518 555 } 519 556 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); 520 - 521 - /* 522 - * The caller needs to ensure that opp_table (and hence the clk) isn't freed, 523 - * while clk returned here is used. 524 - */ 525 - static struct clk *_get_opp_clk(struct device *dev) 526 - { 527 - struct opp_table *opp_table; 528 - struct clk *clk; 529 - 530 - opp_table = _find_opp_table(dev); 531 - if (IS_ERR(opp_table)) { 532 - dev_err(dev, "%s: device opp doesn't exist\n", __func__); 533 - return ERR_CAST(opp_table); 534 - } 535 - 536 - clk = opp_table->clk; 537 - if (IS_ERR(clk)) 538 - dev_err(dev, "%s: No clock available for the device\n", 539 - __func__); 540 - dev_pm_opp_put_opp_table(opp_table); 541 - 542 - return clk; 543 - } 544 557 545 558 static int _set_opp_voltage(struct device *dev, struct regulator *reg, 546 559 struct dev_pm_opp_supply *supply) ··· 613 674 * 614 675 * This configures the power-supplies and clock source to the levels specified 615 676 * by the OPP corresponding to the target_freq. 616 - * 617 - * Locking: This function takes rcu_read_lock(). 
618 677 */ 619 678 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) 620 679 { ··· 631 694 return -EINVAL; 632 695 } 633 696 634 - clk = _get_opp_clk(dev); 635 - if (IS_ERR(clk)) 636 - return PTR_ERR(clk); 697 + opp_table = _find_opp_table(dev); 698 + if (IS_ERR(opp_table)) { 699 + dev_err(dev, "%s: device opp doesn't exist\n", __func__); 700 + return PTR_ERR(opp_table); 701 + } 702 + 703 + clk = opp_table->clk; 704 + if (IS_ERR(clk)) { 705 + dev_err(dev, "%s: No clock available for the device\n", 706 + __func__); 707 + ret = PTR_ERR(clk); 708 + goto put_opp_table; 709 + } 637 710 638 711 freq = clk_round_rate(clk, target_freq); 639 712 if ((long)freq <= 0) ··· 655 708 if (old_freq == freq) { 656 709 dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n", 657 710 __func__, freq); 658 - return 0; 711 + ret = 0; 712 + goto put_opp_table; 659 713 } 660 - 661 - opp_table = _find_opp_table(dev); 662 - if (IS_ERR(opp_table)) { 663 - dev_err(dev, "%s: device opp doesn't exist\n", __func__); 664 - return PTR_ERR(opp_table); 665 - } 666 - 667 - rcu_read_lock(); 668 714 669 715 old_opp = _find_freq_ceil(opp_table, &old_freq); 670 716 if (IS_ERR(old_opp)) { ··· 670 730 ret = PTR_ERR(opp); 671 731 dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", 672 732 __func__, freq, ret); 673 - if (!IS_ERR(old_opp)) 674 - dev_pm_opp_put(old_opp); 675 - rcu_read_unlock(); 676 - dev_pm_opp_put_opp_table(opp_table); 677 - return ret; 733 + goto put_old_opp; 678 734 } 679 735 680 736 dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__, ··· 680 744 681 745 /* Only frequency scaling */ 682 746 if (!regulators) { 683 - dev_pm_opp_put(opp); 684 - if (!IS_ERR(old_opp)) 685 - dev_pm_opp_put(old_opp); 686 - rcu_read_unlock(); 687 - dev_pm_opp_put_opp_table(opp_table); 688 - return _generic_set_opp_clk_only(dev, clk, old_freq, freq); 747 + ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); 748 + goto put_opps; 689 749 } 690 750 691 
751 if (opp_table->set_opp) ··· 705 773 data->new_opp.rate = freq; 706 774 memcpy(data->new_opp.supplies, opp->supplies, size); 707 775 776 + ret = set_opp(data); 777 + 778 + put_opps: 708 779 dev_pm_opp_put(opp); 780 + put_old_opp: 709 781 if (!IS_ERR(old_opp)) 710 782 dev_pm_opp_put(old_opp); 711 - rcu_read_unlock(); 783 + put_opp_table: 712 784 dev_pm_opp_put_opp_table(opp_table); 713 - 714 - return set_opp(data); 785 + return ret; 715 786 } 716 787 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); 717 788 718 789 /* OPP-dev Helpers */ 719 - static void _kfree_opp_dev_rcu(struct rcu_head *head) 720 - { 721 - struct opp_device *opp_dev; 722 - 723 - opp_dev = container_of(head, struct opp_device, rcu_head); 724 - kfree_rcu(opp_dev, rcu_head); 725 - } 726 - 727 790 static void _remove_opp_dev(struct opp_device *opp_dev, 728 791 struct opp_table *opp_table) 729 792 { 730 793 opp_debug_unregister(opp_dev, opp_table); 731 794 list_del(&opp_dev->node); 732 - call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head, 733 - _kfree_opp_dev_rcu); 795 + kfree(opp_dev); 734 796 } 735 797 736 798 struct opp_device *_add_opp_dev(const struct device *dev, ··· 739 813 740 814 /* Initialize opp-dev */ 741 815 opp_dev->dev = dev; 742 - list_add_rcu(&opp_dev->node, &opp_table->dev_list); 816 + list_add(&opp_dev->node, &opp_table->dev_list); 743 817 744 818 /* Create debugfs entries for the opp_table */ 745 819 ret = opp_debug_register(opp_dev, opp_table); ··· 783 857 ret); 784 858 } 785 859 786 - srcu_init_notifier_head(&opp_table->srcu_head); 860 + BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); 787 861 INIT_LIST_HEAD(&opp_table->opp_list); 788 862 mutex_init(&opp_table->lock); 789 863 kref_init(&opp_table->kref); 790 864 791 865 /* Secure the device table modification */ 792 - list_add_rcu(&opp_table->node, &opp_tables); 866 + list_add(&opp_table->node, &opp_tables); 793 867 return opp_table; 794 - } 795 - 796 - /** 797 - * _kfree_device_rcu() - Free opp_table RCU handler 798 - * @head: RCU 
head 799 - */ 800 - static void _kfree_device_rcu(struct rcu_head *head) 801 - { 802 - struct opp_table *opp_table = container_of(head, struct opp_table, 803 - rcu_head); 804 - 805 - kfree_rcu(opp_table, rcu_head); 806 868 } 807 869 808 870 void _get_opp_table_kref(struct opp_table *opp_table) ··· 836 922 WARN_ON(!list_empty(&opp_table->dev_list)); 837 923 838 924 mutex_destroy(&opp_table->lock); 839 - list_del_rcu(&opp_table->node); 840 - call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head, 841 - _kfree_device_rcu); 925 + list_del(&opp_table->node); 926 + kfree(opp_table); 842 927 843 928 mutex_unlock(&opp_table_lock); 844 929 } ··· 854 941 kfree(opp); 855 942 } 856 943 857 - /** 858 - * _kfree_opp_rcu() - Free OPP RCU handler 859 - * @head: RCU head 860 - */ 861 - static void _kfree_opp_rcu(struct rcu_head *head) 862 - { 863 - struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head); 864 - 865 - kfree_rcu(opp, rcu_head); 866 - } 867 - 868 944 static void _opp_kref_release(struct kref *kref) 869 945 { 870 946 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); ··· 863 961 * Notify the changes in the availability of the operable 864 962 * frequency/voltage list. 865 963 */ 866 - srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_REMOVE, opp); 964 + blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp); 867 965 opp_debug_remove_one(opp); 868 - list_del_rcu(&opp->node); 869 - call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); 966 + list_del(&opp->node); 967 + kfree(opp); 870 968 871 969 mutex_unlock(&opp_table->lock); 872 970 dev_pm_opp_put_opp_table(opp_table); ··· 889 987 * @freq: OPP to remove with matching 'freq' 890 988 * 891 989 * This function removes an opp from the opp table. 892 - * 893 - * Locking: The internal opp_table and opp structures are RCU protected. 
894 - * Hence this function internally uses RCU updater strategy with mutex locks 895 - * to keep the integrity of the internal data structures. Callers should ensure 896 - * that this function is *NOT* called under RCU protection or in contexts where 897 - * mutex cannot be locked. 898 990 */ 899 991 void dev_pm_opp_remove(struct device *dev, unsigned long freq) 900 992 { ··· 993 1097 mutex_lock(&opp_table->lock); 994 1098 head = &opp_table->opp_list; 995 1099 996 - list_for_each_entry_rcu(opp, &opp_table->opp_list, node) { 1100 + list_for_each_entry(opp, &opp_table->opp_list, node) { 997 1101 if (new_opp->rate > opp->rate) { 998 1102 head = &opp->node; 999 1103 continue; ··· 1016 1120 return ret; 1017 1121 } 1018 1122 1019 - list_add_rcu(&new_opp->node, head); 1123 + list_add(&new_opp->node, head); 1020 1124 mutex_unlock(&opp_table->lock); 1021 1125 1022 1126 new_opp->opp_table = opp_table; ··· 1053 1157 * 1054 1158 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table 1055 1159 * and freed by dev_pm_opp_of_remove_table. 1056 - * 1057 - * Locking: The internal opp_table and opp structures are RCU protected. 1058 - * Hence this function internally uses RCU updater strategy with mutex locks 1059 - * to keep the integrity of the internal data structures. Callers should ensure 1060 - * that this function is *NOT* called under RCU protection or in contexts where 1061 - * mutex cannot be locked. 1062 1160 * 1063 1161 * Return: 1064 1162 * 0 On success OR ··· 1093 1203 * Notify the changes in the availability of the operable 1094 1204 * frequency/voltage list. 1095 1205 */ 1096 - srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp); 1206 + blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); 1097 1207 return 0; 1098 1208 1099 1209 free_opp: ··· 1470 1580 * The opp is made available by default and it can be controlled using 1471 1581 * dev_pm_opp_enable/disable functions. 
1472 1582 * 1473 - * Locking: The internal opp_table and opp structures are RCU protected. 1474 - * Hence this function internally uses RCU updater strategy with mutex locks 1475 - * to keep the integrity of the internal data structures. Callers should ensure 1476 - * that this function is *NOT* called under RCU protection or in contexts where 1477 - * mutex cannot be locked. 1478 - * 1479 1583 * Return: 1480 1584 * 0 On success OR 1481 1585 * Duplicate OPPs (both freq and volt are same) and opp->available ··· 1499 1615 * @freq: OPP frequency to modify availability 1500 1616 * @availability_req: availability status requested for this opp 1501 1617 * 1502 - * Set the availability of an OPP with an RCU operation, opp_{enable,disable} 1503 - * share a common logic which is isolated here. 1618 + * Set the availability of an OPP, opp_{enable,disable} share a common logic 1619 + * which is isolated here. 1504 1620 * 1505 1621 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 1506 1622 * copy operation, returns 0 if no modification was done OR modification was 1507 1623 * successful. 1508 - * 1509 - * Locking: The internal opp_table and opp structures are RCU protected. 1510 - * Hence this function internally uses RCU updater strategy with mutex locks to 1511 - * keep the integrity of the internal data structures. Callers should ensure 1512 - * that this function is *NOT* called under RCU protection or in contexts where 1513 - * mutex locking or synchronize_rcu() blocking calls cannot be used. 
1514 1624 */ 1515 1625 static int _opp_set_availability(struct device *dev, unsigned long freq, 1516 1626 bool availability_req) ··· 1550 1672 /* plug in new node */ 1551 1673 new_opp->available = availability_req; 1552 1674 1553 - list_replace_rcu(&opp->node, &new_opp->node); 1554 - call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); 1675 + list_replace(&opp->node, &new_opp->node); 1676 + kfree(opp); 1555 1677 1556 1678 /* Notify the change of the OPP availability */ 1557 1679 if (availability_req) 1558 - srcu_notifier_call_chain(&opp_table->srcu_head, 1559 - OPP_EVENT_ENABLE, new_opp); 1680 + blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, 1681 + new_opp); 1560 1682 else 1561 - srcu_notifier_call_chain(&opp_table->srcu_head, 1562 - OPP_EVENT_DISABLE, new_opp); 1683 + blocking_notifier_call_chain(&opp_table->head, 1684 + OPP_EVENT_DISABLE, new_opp); 1563 1685 1564 1686 mutex_unlock(&opp_table->lock); 1565 1687 dev_pm_opp_put_opp_table(opp_table); ··· 1582 1704 * corresponding error value. It is meant to be used for users an OPP available 1583 1705 * after being temporarily made unavailable with dev_pm_opp_disable. 1584 1706 * 1585 - * Locking: The internal opp_table and opp structures are RCU protected. 1586 - * Hence this function indirectly uses RCU and mutex locks to keep the 1587 - * integrity of the internal data structures. Callers should ensure that 1588 - * this function is *NOT* called under RCU protection or in contexts where 1589 - * mutex locking or synchronize_rcu() blocking calls cannot be used. 1590 - * 1591 1707 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 1592 1708 * copy operation, returns 0 if no modification was done OR modification was 1593 1709 * successful. ··· 1601 1729 * 0, else the corresponding error value. 
It is meant to be a temporary 1602 1730 * control by users to make this OPP not available until the circumstances are 1603 1731 * right to make it available again (with a call to dev_pm_opp_enable). 1604 - * 1605 - * Locking: The internal opp_table and opp structures are RCU protected. 1606 - * Hence this function indirectly uses RCU and mutex locks to keep the 1607 - * integrity of the internal data structures. Callers should ensure that 1608 - * this function is *NOT* called under RCU protection or in contexts where 1609 - * mutex locking or synchronize_rcu() blocking calls cannot be used. 1610 1732 * 1611 1733 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 1612 1734 * copy operation, returns 0 if no modification was done OR modification was ··· 1628 1762 if (IS_ERR(opp_table)) 1629 1763 return PTR_ERR(opp_table); 1630 1764 1631 - rcu_read_lock(); 1765 + ret = blocking_notifier_chain_register(&opp_table->head, nb); 1632 1766 1633 - ret = srcu_notifier_chain_register(&opp_table->srcu_head, nb); 1634 - 1635 - rcu_read_unlock(); 1636 1767 dev_pm_opp_put_opp_table(opp_table); 1637 1768 1638 1769 return ret; ··· 1653 1790 if (IS_ERR(opp_table)) 1654 1791 return PTR_ERR(opp_table); 1655 1792 1656 - ret = srcu_notifier_chain_unregister(&opp_table->srcu_head, nb); 1793 + ret = blocking_notifier_chain_unregister(&opp_table->head, nb); 1657 1794 1658 - rcu_read_unlock(); 1659 1795 dev_pm_opp_put_opp_table(opp_table); 1660 1796 1661 1797 return ret; ··· 1710 1848 * 1711 1849 * Free both OPPs created using static entries present in DT and the 1712 1850 * dynamically added entries. 1713 - * 1714 - * Locking: The internal opp_table and opp structures are RCU protected. 1715 - * Hence this function indirectly uses RCU updater strategy with mutex locks 1716 - * to keep the integrity of the internal data structures. 
Callers should ensure 1717 - * that this function is *NOT* called under RCU protection or in contexts where 1718 - * mutex cannot be locked. 1719 1851 */ 1720 1852 void dev_pm_opp_remove_table(struct device *dev) 1721 1853 {
-18
drivers/base/power/opp/cpu.c
··· 137 137 * This removes the OPP tables for CPUs present in the @cpumask. 138 138 * This should be used to remove all the OPPs entries associated with 139 139 * the cpus in @cpumask. 140 - * 141 - * Locking: The internal opp_table and opp structures are RCU protected. 142 - * Hence this function internally uses RCU updater strategy with mutex locks 143 - * to keep the integrity of the internal data structures. Callers should ensure 144 - * that this function is *NOT* called under RCU protection or in contexts where 145 - * mutex cannot be locked. 146 140 */ 147 141 void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) 148 142 { ··· 153 159 * @cpumask. 154 160 * 155 161 * Returns -ENODEV if OPP table isn't already present. 156 - * 157 - * Locking: The internal opp_table and opp structures are RCU protected. 158 - * Hence this function internally uses RCU updater strategy with mutex locks 159 - * to keep the integrity of the internal data structures. Callers should ensure 160 - * that this function is *NOT* called under RCU protection or in contexts where 161 - * mutex cannot be locked. 162 162 */ 163 163 int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, 164 164 const struct cpumask *cpumask) ··· 203 215 * 204 216 * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP 205 217 * table's status is access-unknown. 206 - * 207 - * Locking: The internal opp_table and opp structures are RCU protected. 208 - * Hence this function internally uses RCU updater strategy with mutex locks 209 - * to keep the integrity of the internal data structures. Callers should ensure 210 - * that this function is *NOT* called under RCU protection or in contexts where 211 - * mutex cannot be locked. 212 218 */ 213 219 int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) 214 220 {
+2 -38
drivers/base/power/opp/of.c
··· 28 28 29 29 mutex_lock(&opp_table_lock); 30 30 31 - list_for_each_entry_rcu(opp_table, &opp_tables, node) { 31 + list_for_each_entry(opp_table, &opp_tables, node) { 32 32 if (opp_table->np == np) { 33 33 /* 34 34 * Multiple devices can point to the same OPP table and ··· 235 235 * @dev: device pointer used to lookup OPP table. 236 236 * 237 237 * Free OPPs created using static entries present in DT. 238 - * 239 - * Locking: The internal opp_table and opp structures are RCU protected. 240 - * Hence this function indirectly uses RCU updater strategy with mutex locks 241 - * to keep the integrity of the internal data structures. Callers should ensure 242 - * that this function is *NOT* called under RCU protection or in contexts where 243 - * mutex cannot be locked. 244 238 */ 245 239 void dev_pm_opp_of_remove_table(struct device *dev) 246 240 { ··· 262 268 * This function adds an opp definition to the opp table and returns status. The 263 269 * opp can be controlled using dev_pm_opp_enable/disable functions and may be 264 270 * removed by dev_pm_opp_remove. 265 - * 266 - * Locking: The internal opp_table and opp structures are RCU protected. 267 - * Hence this function internally uses RCU updater strategy with mutex locks 268 - * to keep the integrity of the internal data structures. Callers should ensure 269 - * that this function is *NOT* called under RCU protection or in contexts where 270 - * mutex cannot be locked. 271 271 * 272 272 * Return: 273 273 * 0 On success OR ··· 346 358 * Notify the changes in the availability of the operable 347 359 * frequency/voltage list. 348 360 */ 349 - srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp); 361 + blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); 350 362 return 0; 351 363 352 364 free_opp: ··· 458 470 * 459 471 * Register the initial OPP table with the OPP library for given device. 460 472 * 461 - * Locking: The internal opp_table and opp structures are RCU protected. 
462 - * Hence this function indirectly uses RCU updater strategy with mutex locks 463 - * to keep the integrity of the internal data structures. Callers should ensure 464 - * that this function is *NOT* called under RCU protection or in contexts where 465 - * mutex cannot be locked. 466 - * 467 473 * Return: 468 474 * 0 On success OR 469 475 * Duplicate OPPs (both freq and volt are same) and opp->available ··· 502 520 * 503 521 * This removes the OPP tables for CPUs present in the @cpumask. 504 522 * This should be used only to remove static entries created from DT. 505 - * 506 - * Locking: The internal opp_table and opp structures are RCU protected. 507 - * Hence this function internally uses RCU updater strategy with mutex locks 508 - * to keep the integrity of the internal data structures. Callers should ensure 509 - * that this function is *NOT* called under RCU protection or in contexts where 510 - * mutex cannot be locked. 511 523 */ 512 524 void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) 513 525 { ··· 514 538 * @cpumask: cpumask for which OPP table needs to be added. 515 539 * 516 540 * This adds the OPP tables for CPUs present in the @cpumask. 517 - * 518 - * Locking: The internal opp_table and opp structures are RCU protected. 519 - * Hence this function internally uses RCU updater strategy with mutex locks 520 - * to keep the integrity of the internal data structures. Callers should ensure 521 - * that this function is *NOT* called under RCU protection or in contexts where 522 - * mutex cannot be locked. 523 541 */ 524 542 int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) 525 543 { ··· 561 591 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. 562 592 * 563 593 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev. 564 - * 565 - * Locking: The internal opp_table and opp structures are RCU protected. 
566 - * Hence this function internally uses RCU updater strategy with mutex locks 567 - * to keep the integrity of the internal data structures. Callers should ensure 568 - * that this function is *NOT* called under RCU protection or in contexts where 569 - * mutex cannot be locked. 570 594 */ 571 595 int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, 572 596 struct cpumask *cpumask)
+3 -19
drivers/base/power/opp/opp.h
··· 20 20 #include <linux/list.h> 21 21 #include <linux/limits.h> 22 22 #include <linux/pm_opp.h> 23 - #include <linux/rculist.h> 24 - #include <linux/rcupdate.h> 23 + #include <linux/notifier.h> 25 24 26 25 struct clk; 27 26 struct regulator; ··· 51 52 * @node: opp table node. The nodes are maintained throughout the lifetime 52 53 * of boot. It is expected only an optimal set of OPPs are 53 54 * added to the library by the SoC framework. 54 - * RCU usage: opp table is traversed with RCU locks. node 55 - * modification is possible realtime, hence the modifications 56 - * are protected by the opp_table_lock for integrity. 57 55 * IMPORTANT: the opp nodes should be maintained in increasing 58 56 * order. 59 57 * @kref: for reference count of the OPP. ··· 63 67 * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's 64 68 * frequency from any other OPP's frequency. 65 69 * @opp_table: points back to the opp_table struct this opp belongs to 66 - * @rcu_head: RCU callback head used for deferred freeing 67 70 * @np: OPP's device node. 68 71 * @dentry: debugfs dentry pointer (per opp) 69 72 * ··· 83 88 unsigned long clock_latency_ns; 84 89 85 90 struct opp_table *opp_table; 86 - struct rcu_head rcu_head; 87 91 88 92 struct device_node *np; 89 93 ··· 95 101 * struct opp_device - devices managed by 'struct opp_table' 96 102 * @node: list node 97 103 * @dev: device to which the struct object belongs 98 - * @rcu_head: RCU callback head used for deferred freeing 99 104 * @dentry: debugfs dentry pointer (per device) 100 105 * 101 106 * This is an internal data structure maintaining the devices that are managed ··· 103 110 struct opp_device { 104 111 struct list_head node; 105 112 const struct device *dev; 106 - struct rcu_head rcu_head; 107 113 108 114 #ifdef CONFIG_DEBUG_FS 109 115 struct dentry *dentry; ··· 120 128 * @node: table node - contains the devices with OPPs that 121 129 * have been registered. 
Nodes once added are not modified in this 122 130 * table. 123 - * RCU usage: nodes are not modified in the table of opp_table, 124 - * however addition is possible and is secured by opp_table_lock 125 - * @srcu_head: notifier head to notify the OPP availability changes. 126 - * @rcu_head: RCU callback head used for deferred freeing 131 + * @head: notifier head to notify the OPP availability changes. 127 132 * @dev_list: list of devices that share these OPPs 128 133 * @opp_list: table of opps 129 134 * @kref: for reference count of the table. ··· 145 156 * This is an internal data structure maintaining the link to opps attached to 146 157 * a device. This structure is not meant to be shared to users as it is 147 158 * meant for book keeping and private to OPP library. 148 - * 149 - * Because the opp structures can be used from both rcu and srcu readers, we 150 - * need to wait for the grace period of both of them before freeing any 151 - * resources. And so we have used kfree_rcu() from within call_srcu() handlers. 152 159 */ 153 160 struct opp_table { 154 161 struct list_head node; 155 162 156 - struct srcu_notifier_head srcu_head; 157 - struct rcu_head rcu_head; 163 + struct blocking_notifier_head head; 158 164 struct list_head dev_list; 159 165 struct list_head opp_list; 160 166 struct kref kref;