Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

OPP: Use mutex locking guards

Use mutex locking guards in the OPP core.

No intentional functional impact.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

+105 -164
+79 -122
drivers/opp/core.c
··· 40 40 static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table) 41 41 { 42 42 struct opp_device *opp_dev; 43 - bool found = false; 44 43 45 - mutex_lock(&opp_table->lock); 44 + guard(mutex)(&opp_table->lock); 45 + 46 46 list_for_each_entry(opp_dev, &opp_table->dev_list, node) 47 - if (opp_dev->dev == dev) { 48 - found = true; 49 - break; 50 - } 47 + if (opp_dev->dev == dev) 48 + return true; 51 49 52 - mutex_unlock(&opp_table->lock); 53 - return found; 50 + return false; 54 51 } 55 52 56 53 static struct opp_table *_find_opp_table_unlocked(struct device *dev) ··· 75 78 */ 76 79 struct opp_table *_find_opp_table(struct device *dev) 77 80 { 78 - struct opp_table *opp_table; 79 - 80 81 if (IS_ERR_OR_NULL(dev)) { 81 82 pr_err("%s: Invalid parameters\n", __func__); 82 83 return ERR_PTR(-EINVAL); 83 84 } 84 85 85 - mutex_lock(&opp_table_lock); 86 - opp_table = _find_opp_table_unlocked(dev); 87 - mutex_unlock(&opp_table_lock); 88 - 89 - return opp_table; 86 + guard(mutex)(&opp_table_lock); 87 + return _find_opp_table_unlocked(dev); 90 88 } 91 89 92 90 /* ··· 351 359 if (!uV) 352 360 return 0; 353 361 354 - mutex_lock(&opp_table->lock); 362 + scoped_guard(mutex, &opp_table->lock) { 363 + for (i = 0; i < count; i++) { 364 + uV[i].min = ~0; 365 + uV[i].max = 0; 355 366 356 - for (i = 0; i < count; i++) { 357 - uV[i].min = ~0; 358 - uV[i].max = 0; 367 + list_for_each_entry(opp, &opp_table->opp_list, node) { 368 + if (!opp->available) 369 + continue; 359 370 360 - list_for_each_entry(opp, &opp_table->opp_list, node) { 361 - if (!opp->available) 362 - continue; 363 - 364 - if (opp->supplies[i].u_volt_min < uV[i].min) 365 - uV[i].min = opp->supplies[i].u_volt_min; 366 - if (opp->supplies[i].u_volt_max > uV[i].max) 367 - uV[i].max = opp->supplies[i].u_volt_max; 371 + if (opp->supplies[i].u_volt_min < uV[i].min) 372 + uV[i].min = opp->supplies[i].u_volt_min; 373 + if (opp->supplies[i].u_volt_max > uV[i].max) 374 + uV[i].max = opp->supplies[i].u_volt_max; 
375 + } 368 376 } 369 377 } 370 - 371 - mutex_unlock(&opp_table->lock); 372 378 373 379 /* 374 380 * The caller needs to ensure that opp_table (and hence the regulator) ··· 428 438 struct dev_pm_opp *opp; 429 439 int count = 0; 430 440 431 - mutex_lock(&opp_table->lock); 441 + guard(mutex)(&opp_table->lock); 432 442 433 443 list_for_each_entry(opp, &opp_table->opp_list, node) { 434 444 if (opp->available) 435 445 count++; 436 446 } 437 - 438 - mutex_unlock(&opp_table->lock); 439 447 440 448 return count; 441 449 } ··· 523 535 if (assert && !assert(opp_table, index)) 524 536 return ERR_PTR(-EINVAL); 525 537 526 - mutex_lock(&opp_table->lock); 538 + guard(mutex)(&opp_table->lock); 527 539 528 540 list_for_each_entry(temp_opp, &opp_table->opp_list, node) { 529 541 if (temp_opp->available == available) { ··· 537 549 *key = read(opp, index); 538 550 dev_pm_opp_get(opp); 539 551 } 540 - 541 - mutex_unlock(&opp_table->lock); 542 552 543 553 return opp; 544 554 } ··· 1152 1166 * make special checks to validate current_opp. 
1153 1167 */ 1154 1168 if (IS_ERR(opp)) { 1155 - mutex_lock(&opp_table->lock); 1169 + guard(mutex)(&opp_table->lock); 1156 1170 opp = dev_pm_opp_get(list_first_entry(&opp_table->opp_list, 1157 1171 struct dev_pm_opp, node)); 1158 - mutex_unlock(&opp_table->lock); 1159 1172 } 1160 1173 1161 1174 opp_table->current_opp = opp; ··· 1411 1426 /* Initialize opp-dev */ 1412 1427 opp_dev->dev = dev; 1413 1428 1414 - mutex_lock(&opp_table->lock); 1415 - list_add(&opp_dev->node, &opp_table->dev_list); 1416 - mutex_unlock(&opp_table->lock); 1429 + scoped_guard(mutex, &opp_table->lock) 1430 + list_add(&opp_dev->node, &opp_table->dev_list); 1417 1431 1418 1432 /* Create debugfs entries for the opp_table */ 1419 1433 opp_debug_register(opp_dev, opp_table); ··· 1705 1721 if (!assert_single_clk(opp_table, 0)) 1706 1722 return; 1707 1723 1708 - mutex_lock(&opp_table->lock); 1709 - 1710 - list_for_each_entry(iter, &opp_table->opp_list, node) { 1711 - if (iter->rates[0] == freq) { 1712 - opp = iter; 1713 - break; 1724 + scoped_guard(mutex, &opp_table->lock) { 1725 + list_for_each_entry(iter, &opp_table->opp_list, node) { 1726 + if (iter->rates[0] == freq) { 1727 + opp = iter; 1728 + break; 1729 + } 1714 1730 } 1715 1731 } 1716 - 1717 - mutex_unlock(&opp_table->lock); 1718 1732 1719 1733 if (opp) { 1720 1734 dev_pm_opp_put(opp); ··· 1729 1747 static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table, 1730 1748 bool dynamic) 1731 1749 { 1732 - struct dev_pm_opp *opp = NULL, *temp; 1750 + struct dev_pm_opp *opp; 1733 1751 1734 - mutex_lock(&opp_table->lock); 1735 - list_for_each_entry(temp, &opp_table->opp_list, node) { 1752 + guard(mutex)(&opp_table->lock); 1753 + 1754 + list_for_each_entry(opp, &opp_table->opp_list, node) { 1736 1755 /* 1737 1756 * Refcount must be dropped only once for each OPP by OPP core, 1738 1757 * do that with help of "removed" flag. 
1739 1758 */ 1740 - if (!temp->removed && dynamic == temp->dynamic) { 1741 - opp = temp; 1742 - break; 1743 - } 1759 + if (!opp->removed && dynamic == opp->dynamic) 1760 + return opp; 1744 1761 } 1745 1762 1746 - mutex_unlock(&opp_table->lock); 1747 - return opp; 1763 + return NULL; 1748 1764 } 1749 1765 1750 1766 /* ··· 1766 1786 1767 1787 bool _opp_remove_all_static(struct opp_table *opp_table) 1768 1788 { 1769 - mutex_lock(&opp_table->lock); 1789 + scoped_guard(mutex, &opp_table->lock) { 1790 + if (!opp_table->parsed_static_opps) 1791 + return false; 1770 1792 1771 - if (!opp_table->parsed_static_opps) { 1772 - mutex_unlock(&opp_table->lock); 1773 - return false; 1793 + if (--opp_table->parsed_static_opps) 1794 + return true; 1774 1795 } 1775 - 1776 - if (--opp_table->parsed_static_opps) { 1777 - mutex_unlock(&opp_table->lock); 1778 - return true; 1779 - } 1780 - 1781 - mutex_unlock(&opp_table->lock); 1782 1796 1783 1797 _opp_remove_all(opp_table, false); 1784 1798 return true; ··· 1977 2003 struct list_head *head; 1978 2004 int ret; 1979 2005 1980 - mutex_lock(&opp_table->lock); 1981 - head = &opp_table->opp_list; 2006 + scoped_guard(mutex, &opp_table->lock) { 2007 + head = &opp_table->opp_list; 1982 2008 1983 - ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); 1984 - if (ret) { 1985 - mutex_unlock(&opp_table->lock); 1986 - return ret; 2009 + ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); 2010 + if (ret) 2011 + return ret; 2012 + 2013 + list_add(&new_opp->node, head); 1987 2014 } 1988 - 1989 - list_add(&new_opp->node, head); 1990 - mutex_unlock(&opp_table->lock); 1991 2015 1992 2016 new_opp->opp_table = opp_table; 1993 2017 kref_init(&new_opp->kref); ··· 2632 2660 return ERR_PTR(-EBUSY); 2633 2661 2634 2662 for (i = 0; i < src_table->required_opp_count; i++) { 2635 - if (src_table->required_opp_tables[i] == dst_table) { 2636 - mutex_lock(&src_table->lock); 2663 + if (src_table->required_opp_tables[i] != dst_table) 2664 + continue; 2637 2665 
2666 + scoped_guard(mutex, &src_table->lock) { 2638 2667 list_for_each_entry(opp, &src_table->opp_list, node) { 2639 2668 if (opp == src_opp) { 2640 2669 dest_opp = dev_pm_opp_get(opp->required_opps[i]); 2641 2670 break; 2642 2671 } 2643 2672 } 2644 - 2645 - mutex_unlock(&src_table->lock); 2646 2673 break; 2647 2674 } 2648 2675 } ··· 2673 2702 unsigned int pstate) 2674 2703 { 2675 2704 struct dev_pm_opp *opp; 2676 - int dest_pstate = -EINVAL; 2677 2705 int i; 2678 2706 2679 2707 /* ··· 2706 2736 return -EINVAL; 2707 2737 } 2708 2738 2709 - mutex_lock(&src_table->lock); 2739 + guard(mutex)(&src_table->lock); 2710 2740 2711 2741 list_for_each_entry(opp, &src_table->opp_list, node) { 2712 - if (opp->level == pstate) { 2713 - dest_pstate = opp->required_opps[i]->level; 2714 - goto unlock; 2715 - } 2742 + if (opp->level == pstate) 2743 + return opp->required_opps[i]->level; 2716 2744 } 2717 2745 2718 2746 pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table, 2719 2747 dst_table); 2720 2748 2721 - unlock: 2722 - mutex_unlock(&src_table->lock); 2723 - 2724 - return dest_pstate; 2749 + return -EINVAL; 2725 2750 } 2726 2751 2727 2752 /** ··· 2785 2820 if (!assert_single_clk(opp_table, 0)) 2786 2821 return -EINVAL; 2787 2822 2788 - mutex_lock(&opp_table->lock); 2823 + scoped_guard(mutex, &opp_table->lock) { 2824 + /* Do we have the frequency? */ 2825 + list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { 2826 + if (tmp_opp->rates[0] == freq) { 2827 + opp = dev_pm_opp_get(tmp_opp); 2789 2828 2790 - /* Do we have the frequency? */ 2791 - list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { 2792 - if (tmp_opp->rates[0] == freq) { 2793 - opp = dev_pm_opp_get(tmp_opp); 2829 + /* Is update really needed? */ 2830 + if (opp->available == availability_req) 2831 + return 0; 2794 2832 2795 - /* Is update really needed? 
*/ 2796 - if (opp->available == availability_req) { 2797 - mutex_unlock(&opp_table->lock); 2798 - return 0; 2833 + opp->available = availability_req; 2834 + break; 2799 2835 } 2800 - 2801 - opp->available = availability_req; 2802 - break; 2803 2836 } 2804 2837 } 2805 - 2806 - mutex_unlock(&opp_table->lock); 2807 2838 2808 2839 if (IS_ERR(opp)) 2809 2840 return PTR_ERR(opp); ··· 2847 2886 if (!assert_single_clk(opp_table, 0)) 2848 2887 return -EINVAL; 2849 2888 2850 - mutex_lock(&opp_table->lock); 2889 + scoped_guard(mutex, &opp_table->lock) { 2890 + /* Do we have the frequency? */ 2891 + list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { 2892 + if (tmp_opp->rates[0] == freq) { 2893 + opp = dev_pm_opp_get(tmp_opp); 2851 2894 2852 - /* Do we have the frequency? */ 2853 - list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { 2854 - if (tmp_opp->rates[0] == freq) { 2855 - opp = dev_pm_opp_get(tmp_opp); 2895 + /* Is update really needed? */ 2896 + if (opp->supplies->u_volt == u_volt) 2897 + return 0; 2856 2898 2857 - /* Is update really needed? */ 2858 - if (opp->supplies->u_volt == u_volt) { 2859 - mutex_unlock(&opp_table->lock); 2860 - return 0; 2899 + opp->supplies->u_volt = u_volt; 2900 + opp->supplies->u_volt_min = u_volt_min; 2901 + opp->supplies->u_volt_max = u_volt_max; 2902 + 2903 + break; 2861 2904 } 2862 - 2863 - opp->supplies->u_volt = u_volt; 2864 - opp->supplies->u_volt_min = u_volt_min; 2865 - opp->supplies->u_volt_max = u_volt_max; 2866 - 2867 - break; 2868 2905 } 2869 2906 } 2870 - 2871 - mutex_unlock(&opp_table->lock); 2872 2907 2873 2908 if (IS_ERR(opp)) 2874 2909 return PTR_ERR(opp);
+1 -2
drivers/opp/cpu.c
··· 214 214 cpumask_clear(cpumask); 215 215 216 216 if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { 217 - mutex_lock(&opp_table->lock); 217 + guard(mutex)(&opp_table->lock); 218 218 list_for_each_entry(opp_dev, &opp_table->dev_list, node) 219 219 cpumask_set_cpu(opp_dev->dev->id, cpumask); 220 - mutex_unlock(&opp_table->lock); 221 220 } else { 222 221 cpumask_set_cpu(cpu_dev->id, cpumask); 223 222 }
+25 -40
drivers/opp/of.c
··· 76 76 { 77 77 struct dev_pm_opp *opp; 78 78 79 - mutex_lock(&opp_table->lock); 79 + guard(mutex)(&opp_table->lock); 80 80 81 81 list_for_each_entry(opp, &opp_table->opp_list, node) { 82 - if (opp->np == opp_np) { 83 - dev_pm_opp_get(opp); 84 - mutex_unlock(&opp_table->lock); 85 - return opp; 86 - } 82 + if (opp->np == opp_np) 83 + return dev_pm_opp_get(opp); 87 84 } 88 - 89 - mutex_unlock(&opp_table->lock); 90 85 91 86 return NULL; 92 87 } ··· 100 105 101 106 opp_table_np = of_get_parent(opp_np); 102 107 if (!opp_table_np) 103 - goto err; 108 + return ERR_PTR(-ENODEV); 104 109 105 - mutex_lock(&opp_table_lock); 110 + guard(mutex)(&opp_table_lock); 111 + 106 112 list_for_each_entry(opp_table, &opp_tables, node) { 107 - if (opp_table_np == opp_table->np) { 108 - dev_pm_opp_get_opp_table_ref(opp_table); 109 - mutex_unlock(&opp_table_lock); 110 - return opp_table; 111 - } 113 + if (opp_table_np == opp_table->np) 114 + return dev_pm_opp_get_opp_table_ref(opp_table); 112 115 } 113 - mutex_unlock(&opp_table_lock); 114 116 115 - err: 116 117 return ERR_PTR(-ENODEV); 117 118 } 118 119 ··· 133 142 opp_table->required_opp_count = 0; 134 143 opp_table->required_opp_tables = NULL; 135 144 136 - mutex_lock(&opp_table_lock); 145 + guard(mutex)(&opp_table_lock); 137 146 list_del(&opp_table->lazy); 138 - mutex_unlock(&opp_table_lock); 139 147 } 140 148 141 149 /* ··· 191 201 * The OPP table is not held while allocating the table, take it 192 202 * now to avoid corruption to the lazy_opp_tables list. 
193 203 */ 194 - mutex_lock(&opp_table_lock); 204 + guard(mutex)(&opp_table_lock); 195 205 list_add(&opp_table->lazy, &lazy_opp_tables); 196 - mutex_unlock(&opp_table_lock); 197 206 } 198 207 } 199 208 ··· 346 357 struct dev_pm_opp *opp; 347 358 int i, ret; 348 359 349 - mutex_lock(&opp_table_lock); 360 + guard(mutex)(&opp_table_lock); 350 361 351 362 list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) { 352 363 struct device_node *opp_np __free(device_node); ··· 397 408 _required_opps_available(opp, opp_table->required_opp_count); 398 409 } 399 410 } 400 - 401 - mutex_unlock(&opp_table_lock); 402 411 } 403 412 404 413 static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) ··· 957 970 struct dev_pm_opp *opp; 958 971 959 972 /* OPP table is already initialized for the device */ 960 - mutex_lock(&opp_table->lock); 961 - if (opp_table->parsed_static_opps) { 962 - opp_table->parsed_static_opps++; 963 - mutex_unlock(&opp_table->lock); 964 - return 0; 965 - } 973 + scoped_guard(mutex, &opp_table->lock) { 974 + if (opp_table->parsed_static_opps) { 975 + opp_table->parsed_static_opps++; 976 + return 0; 977 + } 966 978 967 - opp_table->parsed_static_opps = 1; 968 - mutex_unlock(&opp_table->lock); 979 + opp_table->parsed_static_opps = 1; 980 + } 969 981 970 982 /* We have opp-table node now, iterate over it and add OPPs */ 971 983 for_each_available_child_of_node(opp_table->np, np) { ··· 1004 1018 const __be32 *val; 1005 1019 int nr, ret = 0; 1006 1020 1007 - mutex_lock(&opp_table->lock); 1008 - if (opp_table->parsed_static_opps) { 1009 - opp_table->parsed_static_opps++; 1010 - mutex_unlock(&opp_table->lock); 1011 - return 0; 1012 - } 1021 + scoped_guard(mutex, &opp_table->lock) { 1022 + if (opp_table->parsed_static_opps) { 1023 + opp_table->parsed_static_opps++; 1024 + return 0; 1025 + } 1013 1026 1014 - opp_table->parsed_static_opps = 1; 1015 - mutex_unlock(&opp_table->lock); 1027 + opp_table->parsed_static_opps = 1; 1028 + } 1016 
1029 1017 1030 prop = of_find_property(dev->of_node, "operating-points", NULL); 1018 1031 if (!prop) {