Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge back new material for v4.7.

+763 -544
+1
drivers/base/power/opp/Makefile
··· 1 1 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG 2 2 obj-y += core.o cpu.o 3 + obj-$(CONFIG_OF) += of.o 3 4 obj-$(CONFIG_DEBUG_FS) += debugfs.o
+21 -419
drivers/base/power/opp/core.c
··· 18 18 #include <linux/err.h> 19 19 #include <linux/slab.h> 20 20 #include <linux/device.h> 21 - #include <linux/of.h> 22 21 #include <linux/export.h> 23 22 #include <linux/regulator/consumer.h> 24 23 ··· 28 29 * from here, with each opp_table containing the list of opps it supports in 29 30 * various states of availability. 30 31 */ 31 - static LIST_HEAD(opp_tables); 32 + LIST_HEAD(opp_tables); 32 33 /* Lock to allow exclusive modification to the device and opp lists */ 33 34 DEFINE_MUTEX(opp_table_lock); 34 35 ··· 48 49 list_for_each_entry(opp_dev, &opp_table->dev_list, node) 49 50 if (opp_dev->dev == dev) 50 51 return opp_dev; 51 - 52 - return NULL; 53 - } 54 - 55 - static struct opp_table *_managed_opp(const struct device_node *np) 56 - { 57 - struct opp_table *opp_table; 58 - 59 - list_for_each_entry_rcu(opp_table, &opp_tables, node) { 60 - if (opp_table->np == np) { 61 - /* 62 - * Multiple devices can point to the same OPP table and 63 - * so will have same node-pointer, np. 64 - * 65 - * But the OPPs will be considered as shared only if the 66 - * OPP table contains a "opp-shared" property. 67 - */ 68 - return opp_table->shared_opp ? opp_table : NULL; 69 - } 70 - } 71 52 72 53 return NULL; 73 54 } ··· 736 757 { 737 758 struct opp_table *opp_table; 738 759 struct opp_device *opp_dev; 739 - struct device_node *np; 740 760 int ret; 741 761 742 762 /* Check for existing table for 'dev' first */ ··· 759 781 return NULL; 760 782 } 761 783 762 - /* 763 - * Only required for backward compatibility with v1 bindings, but isn't 764 - * harmful for other cases. And so we do it unconditionally. 
765 - */ 766 - np = of_node_get(dev->of_node); 767 - if (np) { 768 - u32 val; 769 - 770 - if (!of_property_read_u32(np, "clock-latency", &val)) 771 - opp_table->clock_latency_ns_max = val; 772 - of_property_read_u32(np, "voltage-tolerance", 773 - &opp_table->voltage_tolerance_v1); 774 - of_node_put(np); 775 - } 784 + _of_init_opp_table(opp_table, dev); 776 785 777 786 /* Set regulator to a non-NULL error value */ 778 787 opp_table->regulator = ERR_PTR(-ENXIO); ··· 855 890 * It is assumed that the caller holds required mutex for an RCU updater 856 891 * strategy. 857 892 */ 858 - static void _opp_remove(struct opp_table *opp_table, 859 - struct dev_pm_opp *opp, bool notify) 893 + void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, 894 + bool notify) 860 895 { 861 896 /* 862 897 * Notify the changes in the availability of the operable ··· 917 952 } 918 953 EXPORT_SYMBOL_GPL(dev_pm_opp_remove); 919 954 920 - static struct dev_pm_opp *_allocate_opp(struct device *dev, 921 - struct opp_table **opp_table) 955 + struct dev_pm_opp *_allocate_opp(struct device *dev, 956 + struct opp_table **opp_table) 922 957 { 923 958 struct dev_pm_opp *opp; 924 959 ··· 954 989 return true; 955 990 } 956 991 957 - static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, 958 - struct opp_table *opp_table) 992 + int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, 993 + struct opp_table *opp_table) 959 994 { 960 995 struct dev_pm_opp *opp; 961 996 struct list_head *head = &opp_table->opp_list; ··· 1031 1066 * Duplicate OPPs (both freq and volt are same) and !opp->available 1032 1067 * -ENOMEM Memory allocation failure 1033 1068 */ 1034 - static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, 1035 - bool dynamic) 1069 + int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, 1070 + bool dynamic) 1036 1071 { 1037 1072 struct opp_table *opp_table; 1038 1073 struct dev_pm_opp *new_opp; ··· 1075 1110 unlock: 1076 1111 
mutex_unlock(&opp_table_lock); 1077 1112 return ret; 1078 - } 1079 - 1080 - /* TODO: Support multiple regulators */ 1081 - static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, 1082 - struct opp_table *opp_table) 1083 - { 1084 - u32 microvolt[3] = {0}; 1085 - u32 val; 1086 - int count, ret; 1087 - struct property *prop = NULL; 1088 - char name[NAME_MAX]; 1089 - 1090 - /* Search for "opp-microvolt-<name>" */ 1091 - if (opp_table->prop_name) { 1092 - snprintf(name, sizeof(name), "opp-microvolt-%s", 1093 - opp_table->prop_name); 1094 - prop = of_find_property(opp->np, name, NULL); 1095 - } 1096 - 1097 - if (!prop) { 1098 - /* Search for "opp-microvolt" */ 1099 - sprintf(name, "opp-microvolt"); 1100 - prop = of_find_property(opp->np, name, NULL); 1101 - 1102 - /* Missing property isn't a problem, but an invalid entry is */ 1103 - if (!prop) 1104 - return 0; 1105 - } 1106 - 1107 - count = of_property_count_u32_elems(opp->np, name); 1108 - if (count < 0) { 1109 - dev_err(dev, "%s: Invalid %s property (%d)\n", 1110 - __func__, name, count); 1111 - return count; 1112 - } 1113 - 1114 - /* There can be one or three elements here */ 1115 - if (count != 1 && count != 3) { 1116 - dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n", 1117 - __func__, name, count); 1118 - return -EINVAL; 1119 - } 1120 - 1121 - ret = of_property_read_u32_array(opp->np, name, microvolt, count); 1122 - if (ret) { 1123 - dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret); 1124 - return -EINVAL; 1125 - } 1126 - 1127 - opp->u_volt = microvolt[0]; 1128 - 1129 - if (count == 1) { 1130 - opp->u_volt_min = opp->u_volt; 1131 - opp->u_volt_max = opp->u_volt; 1132 - } else { 1133 - opp->u_volt_min = microvolt[1]; 1134 - opp->u_volt_max = microvolt[2]; 1135 - } 1136 - 1137 - /* Search for "opp-microamp-<name>" */ 1138 - prop = NULL; 1139 - if (opp_table->prop_name) { 1140 - snprintf(name, sizeof(name), "opp-microamp-%s", 1141 - opp_table->prop_name); 1142 - prop 
= of_find_property(opp->np, name, NULL); 1143 - } 1144 - 1145 - if (!prop) { 1146 - /* Search for "opp-microamp" */ 1147 - sprintf(name, "opp-microamp"); 1148 - prop = of_find_property(opp->np, name, NULL); 1149 - } 1150 - 1151 - if (prop && !of_property_read_u32(opp->np, name, &val)) 1152 - opp->u_amp = val; 1153 - 1154 - return 0; 1155 1113 } 1156 1114 1157 1115 /** ··· 1405 1517 } 1406 1518 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator); 1407 1519 1408 - static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, 1409 - struct device_node *np) 1410 - { 1411 - unsigned int count = opp_table->supported_hw_count; 1412 - u32 version; 1413 - int ret; 1414 - 1415 - if (!opp_table->supported_hw) 1416 - return true; 1417 - 1418 - while (count--) { 1419 - ret = of_property_read_u32_index(np, "opp-supported-hw", count, 1420 - &version); 1421 - if (ret) { 1422 - dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n", 1423 - __func__, count, ret); 1424 - return false; 1425 - } 1426 - 1427 - /* Both of these are bitwise masks of the versions */ 1428 - if (!(version & opp_table->supported_hw[count])) 1429 - return false; 1430 - } 1431 - 1432 - return true; 1433 - } 1434 - 1435 - /** 1436 - * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) 1437 - * @dev: device for which we do this operation 1438 - * @np: device node 1439 - * 1440 - * This function adds an opp definition to the opp table and returns status. The 1441 - * opp can be controlled using dev_pm_opp_enable/disable functions and may be 1442 - * removed by dev_pm_opp_remove. 1443 - * 1444 - * Locking: The internal opp_table and opp structures are RCU protected. 1445 - * Hence this function internally uses RCU updater strategy with mutex locks 1446 - * to keep the integrity of the internal data structures. Callers should ensure 1447 - * that this function is *NOT* called under RCU protection or in contexts where 1448 - * mutex cannot be locked. 
1449 - * 1450 - * Return: 1451 - * 0 On success OR 1452 - * Duplicate OPPs (both freq and volt are same) and opp->available 1453 - * -EEXIST Freq are same and volt are different OR 1454 - * Duplicate OPPs (both freq and volt are same) and !opp->available 1455 - * -ENOMEM Memory allocation failure 1456 - * -EINVAL Failed parsing the OPP node 1457 - */ 1458 - static int _opp_add_static_v2(struct device *dev, struct device_node *np) 1459 - { 1460 - struct opp_table *opp_table; 1461 - struct dev_pm_opp *new_opp; 1462 - u64 rate; 1463 - u32 val; 1464 - int ret; 1465 - 1466 - /* Hold our table modification lock here */ 1467 - mutex_lock(&opp_table_lock); 1468 - 1469 - new_opp = _allocate_opp(dev, &opp_table); 1470 - if (!new_opp) { 1471 - ret = -ENOMEM; 1472 - goto unlock; 1473 - } 1474 - 1475 - ret = of_property_read_u64(np, "opp-hz", &rate); 1476 - if (ret < 0) { 1477 - dev_err(dev, "%s: opp-hz not found\n", __func__); 1478 - goto free_opp; 1479 - } 1480 - 1481 - /* Check if the OPP supports hardware's hierarchy of versions or not */ 1482 - if (!_opp_is_supported(dev, opp_table, np)) { 1483 - dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate); 1484 - goto free_opp; 1485 - } 1486 - 1487 - /* 1488 - * Rate is defined as an unsigned long in clk API, and so casting 1489 - * explicitly to its type. Must be fixed once rate is 64 bit 1490 - * guaranteed in clk API. 
1491 - */ 1492 - new_opp->rate = (unsigned long)rate; 1493 - new_opp->turbo = of_property_read_bool(np, "turbo-mode"); 1494 - 1495 - new_opp->np = np; 1496 - new_opp->dynamic = false; 1497 - new_opp->available = true; 1498 - 1499 - if (!of_property_read_u32(np, "clock-latency-ns", &val)) 1500 - new_opp->clock_latency_ns = val; 1501 - 1502 - ret = opp_parse_supplies(new_opp, dev, opp_table); 1503 - if (ret) 1504 - goto free_opp; 1505 - 1506 - ret = _opp_add(dev, new_opp, opp_table); 1507 - if (ret) 1508 - goto free_opp; 1509 - 1510 - /* OPP to select on device suspend */ 1511 - if (of_property_read_bool(np, "opp-suspend")) { 1512 - if (opp_table->suspend_opp) { 1513 - dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n", 1514 - __func__, opp_table->suspend_opp->rate, 1515 - new_opp->rate); 1516 - } else { 1517 - new_opp->suspend = true; 1518 - opp_table->suspend_opp = new_opp; 1519 - } 1520 - } 1521 - 1522 - if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max) 1523 - opp_table->clock_latency_ns_max = new_opp->clock_latency_ns; 1524 - 1525 - mutex_unlock(&opp_table_lock); 1526 - 1527 - pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", 1528 - __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt, 1529 - new_opp->u_volt_min, new_opp->u_volt_max, 1530 - new_opp->clock_latency_ns); 1531 - 1532 - /* 1533 - * Notify the changes in the availability of the operable 1534 - * frequency/voltage list. 
1535 - */ 1536 - srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp); 1537 - return 0; 1538 - 1539 - free_opp: 1540 - _opp_remove(opp_table, new_opp, false); 1541 - unlock: 1542 - mutex_unlock(&opp_table_lock); 1543 - return ret; 1544 - } 1545 - 1546 1520 /** 1547 1521 * dev_pm_opp_add() - Add an OPP table from a table definitions 1548 1522 * @dev: device for which we do this operation ··· 1592 1842 } 1593 1843 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); 1594 1844 1595 - #ifdef CONFIG_OF 1596 - /** 1597 - * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT 1598 - * entries 1599 - * @dev: device pointer used to lookup OPP table. 1600 - * 1601 - * Free OPPs created using static entries present in DT. 1602 - * 1603 - * Locking: The internal opp_table and opp structures are RCU protected. 1604 - * Hence this function indirectly uses RCU updater strategy with mutex locks 1605 - * to keep the integrity of the internal data structures. Callers should ensure 1606 - * that this function is *NOT* called under RCU protection or in contexts where 1607 - * mutex cannot be locked. 1845 + /* 1846 + * Free OPPs either created using static entries present in DT or even the 1847 + * dynamically added entries based on remove_all param. 
1608 1848 */ 1609 - void dev_pm_opp_of_remove_table(struct device *dev) 1849 + void _dev_pm_opp_remove_table(struct device *dev, bool remove_all) 1610 1850 { 1611 1851 struct opp_table *opp_table; 1612 1852 struct dev_pm_opp *opp, *tmp; ··· 1621 1881 if (list_is_singular(&opp_table->dev_list)) { 1622 1882 /* Free static OPPs */ 1623 1883 list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { 1624 - if (!opp->dynamic) 1884 + if (remove_all || !opp->dynamic) 1625 1885 _opp_remove(opp_table, opp, true); 1626 1886 } 1627 1887 } else { ··· 1631 1891 unlock: 1632 1892 mutex_unlock(&opp_table_lock); 1633 1893 } 1634 - EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); 1635 - 1636 - /* Returns opp descriptor node for a device, caller must do of_node_put() */ 1637 - struct device_node *_of_get_opp_desc_node(struct device *dev) 1638 - { 1639 - /* 1640 - * TODO: Support for multiple OPP tables. 1641 - * 1642 - * There should be only ONE phandle present in "operating-points-v2" 1643 - * property. 
1644 - */ 1645 - 1646 - return of_parse_phandle(dev->of_node, "operating-points-v2", 0); 1647 - } 1648 - 1649 - /* Initializes OPP tables based on new bindings */ 1650 - static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) 1651 - { 1652 - struct device_node *np; 1653 - struct opp_table *opp_table; 1654 - int ret = 0, count = 0; 1655 - 1656 - mutex_lock(&opp_table_lock); 1657 - 1658 - opp_table = _managed_opp(opp_np); 1659 - if (opp_table) { 1660 - /* OPPs are already managed */ 1661 - if (!_add_opp_dev(dev, opp_table)) 1662 - ret = -ENOMEM; 1663 - mutex_unlock(&opp_table_lock); 1664 - return ret; 1665 - } 1666 - mutex_unlock(&opp_table_lock); 1667 - 1668 - /* We have opp-table node now, iterate over it and add OPPs */ 1669 - for_each_available_child_of_node(opp_np, np) { 1670 - count++; 1671 - 1672 - ret = _opp_add_static_v2(dev, np); 1673 - if (ret) { 1674 - dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, 1675 - ret); 1676 - goto free_table; 1677 - } 1678 - } 1679 - 1680 - /* There should be one of more OPP defined */ 1681 - if (WARN_ON(!count)) 1682 - return -ENOENT; 1683 - 1684 - mutex_lock(&opp_table_lock); 1685 - 1686 - opp_table = _find_opp_table(dev); 1687 - if (WARN_ON(IS_ERR(opp_table))) { 1688 - ret = PTR_ERR(opp_table); 1689 - mutex_unlock(&opp_table_lock); 1690 - goto free_table; 1691 - } 1692 - 1693 - opp_table->np = opp_np; 1694 - opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared"); 1695 - 1696 - mutex_unlock(&opp_table_lock); 1697 - 1698 - return 0; 1699 - 1700 - free_table: 1701 - dev_pm_opp_of_remove_table(dev); 1702 - 1703 - return ret; 1704 - } 1705 - 1706 - /* Initializes OPP tables based on old-deprecated bindings */ 1707 - static int _of_add_opp_table_v1(struct device *dev) 1708 - { 1709 - const struct property *prop; 1710 - const __be32 *val; 1711 - int nr; 1712 - 1713 - prop = of_find_property(dev->of_node, "operating-points", NULL); 1714 - if (!prop) 1715 - return -ENODEV; 1716 - if 
(!prop->value) 1717 - return -ENODATA; 1718 - 1719 - /* 1720 - * Each OPP is a set of tuples consisting of frequency and 1721 - * voltage like <freq-kHz vol-uV>. 1722 - */ 1723 - nr = prop->length / sizeof(u32); 1724 - if (nr % 2) { 1725 - dev_err(dev, "%s: Invalid OPP table\n", __func__); 1726 - return -EINVAL; 1727 - } 1728 - 1729 - val = prop->value; 1730 - while (nr) { 1731 - unsigned long freq = be32_to_cpup(val++) * 1000; 1732 - unsigned long volt = be32_to_cpup(val++); 1733 - 1734 - if (_opp_add_v1(dev, freq, volt, false)) 1735 - dev_warn(dev, "%s: Failed to add OPP %ld\n", 1736 - __func__, freq); 1737 - nr -= 2; 1738 - } 1739 - 1740 - return 0; 1741 - } 1742 1894 1743 1895 /** 1744 - * dev_pm_opp_of_add_table() - Initialize opp table from device tree 1896 + * dev_pm_opp_remove_table() - Free all OPPs associated with the device 1745 1897 * @dev: device pointer used to lookup OPP table. 1746 1898 * 1747 - * Register the initial OPP table with the OPP library for given device. 1899 + * Free both OPPs created using static entries present in DT and the 1900 + * dynamically added entries. 1748 1901 * 1749 1902 * Locking: The internal opp_table and opp structures are RCU protected. 1750 1903 * Hence this function indirectly uses RCU updater strategy with mutex locks 1751 1904 * to keep the integrity of the internal data structures. Callers should ensure 1752 1905 * that this function is *NOT* called under RCU protection or in contexts where 1753 1906 * mutex cannot be locked. 1754 - * 1755 - * Return: 1756 - * 0 On success OR 1757 - * Duplicate OPPs (both freq and volt are same) and opp->available 1758 - * -EEXIST Freq are same and volt are different OR 1759 - * Duplicate OPPs (both freq and volt are same) and !opp->available 1760 - * -ENOMEM Memory allocation failure 1761 - * -ENODEV when 'operating-points' property is not found or is invalid data 1762 - * in device node. 
1763 - * -ENODATA when empty 'operating-points' property is found 1764 - * -EINVAL when invalid entries are found in opp-v2 table 1765 1907 */ 1766 - int dev_pm_opp_of_add_table(struct device *dev) 1908 + void dev_pm_opp_remove_table(struct device *dev) 1767 1909 { 1768 - struct device_node *opp_np; 1769 - int ret; 1770 - 1771 - /* 1772 - * OPPs have two version of bindings now. The older one is deprecated, 1773 - * try for the new binding first. 1774 - */ 1775 - opp_np = _of_get_opp_desc_node(dev); 1776 - if (!opp_np) { 1777 - /* 1778 - * Try old-deprecated bindings for backward compatibility with 1779 - * older dtbs. 1780 - */ 1781 - return _of_add_opp_table_v1(dev); 1782 - } 1783 - 1784 - ret = _of_add_opp_table_v2(dev, opp_np); 1785 - of_node_put(opp_np); 1786 - 1787 - return ret; 1910 + _dev_pm_opp_remove_table(dev, true); 1788 1911 } 1789 - EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); 1790 - #endif 1912 + EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
+97 -102
drivers/base/power/opp/cpu.c
··· 18 18 #include <linux/err.h> 19 19 #include <linux/errno.h> 20 20 #include <linux/export.h> 21 - #include <linux/of.h> 22 21 #include <linux/slab.h> 23 22 24 23 #include "opp.h" ··· 118 119 EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); 119 120 #endif /* CONFIG_CPU_FREQ */ 120 121 121 - /* Required only for V1 bindings, as v2 can manage it from DT itself */ 122 - int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) 122 + void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of) 123 + { 124 + struct device *cpu_dev; 125 + int cpu; 126 + 127 + WARN_ON(cpumask_empty(cpumask)); 128 + 129 + for_each_cpu(cpu, cpumask) { 130 + cpu_dev = get_cpu_device(cpu); 131 + if (!cpu_dev) { 132 + pr_err("%s: failed to get cpu%d device\n", __func__, 133 + cpu); 134 + continue; 135 + } 136 + 137 + if (of) 138 + dev_pm_opp_of_remove_table(cpu_dev); 139 + else 140 + dev_pm_opp_remove_table(cpu_dev); 141 + } 142 + } 143 + 144 + /** 145 + * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask 146 + * @cpumask: cpumask for which OPP table needs to be removed 147 + * 148 + * This removes the OPP tables for CPUs present in the @cpumask. 149 + * This should be used to remove all the OPPs entries associated with 150 + * the cpus in @cpumask. 151 + * 152 + * Locking: The internal opp_table and opp structures are RCU protected. 153 + * Hence this function internally uses RCU updater strategy with mutex locks 154 + * to keep the integrity of the internal data structures. Callers should ensure 155 + * that this function is *NOT* called under RCU protection or in contexts where 156 + * mutex cannot be locked. 
157 + */ 158 + void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) 159 + { 160 + _dev_pm_opp_cpumask_remove_table(cpumask, false); 161 + } 162 + EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table); 163 + 164 + /** 165 + * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs 166 + * @cpu_dev: CPU device for which we do this operation 167 + * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev 168 + * 169 + * This marks OPP table of the @cpu_dev as shared by the CPUs present in 170 + * @cpumask. 171 + * 172 + * Returns -ENODEV if OPP table isn't already present. 173 + * 174 + * Locking: The internal opp_table and opp structures are RCU protected. 175 + * Hence this function internally uses RCU updater strategy with mutex locks 176 + * to keep the integrity of the internal data structures. Callers should ensure 177 + * that this function is *NOT* called under RCU protection or in contexts where 178 + * mutex cannot be locked. 179 + */ 180 + int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, 181 + const struct cpumask *cpumask) 123 182 { 124 183 struct opp_device *opp_dev; 125 184 struct opp_table *opp_table; ··· 188 131 189 132 opp_table = _find_opp_table(cpu_dev); 190 133 if (IS_ERR(opp_table)) { 191 - ret = -EINVAL; 134 + ret = PTR_ERR(opp_table); 192 135 goto unlock; 193 136 } 194 137 ··· 209 152 __func__, cpu); 210 153 continue; 211 154 } 155 + 156 + /* Mark opp-table as multiple CPUs are sharing it now */ 157 + opp_table->shared_opp = true; 212 158 } 213 159 unlock: 214 160 mutex_unlock(&opp_table_lock); ··· 220 160 } 221 161 EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); 222 162 223 - #ifdef CONFIG_OF 224 - void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask) 225 - { 226 - struct device *cpu_dev; 227 - int cpu; 228 - 229 - WARN_ON(cpumask_empty(cpumask)); 230 - 231 - for_each_cpu(cpu, cpumask) { 232 - cpu_dev = get_cpu_device(cpu); 233 - if (!cpu_dev) { 234 - pr_err("%s: failed to get cpu%d 
device\n", __func__, 235 - cpu); 236 - continue; 237 - } 238 - 239 - dev_pm_opp_of_remove_table(cpu_dev); 240 - } 241 - } 242 - EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table); 243 - 244 - int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask) 245 - { 246 - struct device *cpu_dev; 247 - int cpu, ret = 0; 248 - 249 - WARN_ON(cpumask_empty(cpumask)); 250 - 251 - for_each_cpu(cpu, cpumask) { 252 - cpu_dev = get_cpu_device(cpu); 253 - if (!cpu_dev) { 254 - pr_err("%s: failed to get cpu%d device\n", __func__, 255 - cpu); 256 - continue; 257 - } 258 - 259 - ret = dev_pm_opp_of_add_table(cpu_dev); 260 - if (ret) { 261 - pr_err("%s: couldn't find opp table for cpu:%d, %d\n", 262 - __func__, cpu, ret); 263 - 264 - /* Free all other OPPs */ 265 - dev_pm_opp_of_cpumask_remove_table(cpumask); 266 - break; 267 - } 268 - } 269 - 270 - return ret; 271 - } 272 - EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); 273 - 274 - /* 275 - * Works only for OPP v2 bindings. 163 + /** 164 + * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev 165 + * @cpu_dev: CPU device for which we do this operation 166 + * @cpumask: cpumask to update with information of sharing CPUs 276 167 * 277 - * Returns -ENOENT if operating-points-v2 bindings aren't supported. 168 + * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. 169 + * 170 + * Returns -ENODEV if OPP table isn't already present. 171 + * 172 + * Locking: The internal opp_table and opp structures are RCU protected. 173 + * Hence this function internally uses RCU updater strategy with mutex locks 174 + * to keep the integrity of the internal data structures. Callers should ensure 175 + * that this function is *NOT* called under RCU protection or in contexts where 176 + * mutex cannot be locked. 
278 177 */ 279 - int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) 178 + int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) 280 179 { 281 - struct device_node *np, *tmp_np; 282 - struct device *tcpu_dev; 283 - int cpu, ret = 0; 180 + struct opp_device *opp_dev; 181 + struct opp_table *opp_table; 182 + int ret = 0; 284 183 285 - /* Get OPP descriptor node */ 286 - np = _of_get_opp_desc_node(cpu_dev); 287 - if (!np) { 288 - dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__); 289 - return -ENOENT; 184 + mutex_lock(&opp_table_lock); 185 + 186 + opp_table = _find_opp_table(cpu_dev); 187 + if (IS_ERR(opp_table)) { 188 + ret = PTR_ERR(opp_table); 189 + goto unlock; 290 190 } 291 191 292 - cpumask_set_cpu(cpu_dev->id, cpumask); 192 + cpumask_clear(cpumask); 293 193 294 - /* OPPs are shared ? */ 295 - if (!of_property_read_bool(np, "opp-shared")) 296 - goto put_cpu_node; 297 - 298 - for_each_possible_cpu(cpu) { 299 - if (cpu == cpu_dev->id) 300 - continue; 301 - 302 - tcpu_dev = get_cpu_device(cpu); 303 - if (!tcpu_dev) { 304 - dev_err(cpu_dev, "%s: failed to get cpu%d device\n", 305 - __func__, cpu); 306 - ret = -ENODEV; 307 - goto put_cpu_node; 308 - } 309 - 310 - /* Get OPP descriptor node */ 311 - tmp_np = _of_get_opp_desc_node(tcpu_dev); 312 - if (!tmp_np) { 313 - dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n", 314 - __func__); 315 - ret = -ENOENT; 316 - goto put_cpu_node; 317 - } 318 - 319 - /* CPUs are sharing opp node */ 320 - if (np == tmp_np) 321 - cpumask_set_cpu(cpu, cpumask); 322 - 323 - of_node_put(tmp_np); 194 + if (opp_table->shared_opp) { 195 + list_for_each_entry(opp_dev, &opp_table->dev_list, node) 196 + cpumask_set_cpu(opp_dev->dev->id, cpumask); 197 + } else { 198 + cpumask_set_cpu(cpu_dev->id, cpumask); 324 199 } 325 200 326 - put_cpu_node: 327 - of_node_put(np); 201 + unlock: 202 + mutex_unlock(&opp_table_lock); 203 + 328 204 return ret; 329 205 } 330 - 
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); 331 - #endif 206 + EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
+591
drivers/base/power/opp/of.c
··· 1 + /* 2 + * Generic OPP OF helpers 3 + * 4 + * Copyright (C) 2009-2010 Texas Instruments Incorporated. 5 + * Nishanth Menon 6 + * Romit Dasgupta 7 + * Kevin Hilman 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 + 16 + #include <linux/cpu.h> 17 + #include <linux/errno.h> 18 + #include <linux/device.h> 19 + #include <linux/of.h> 20 + #include <linux/export.h> 21 + 22 + #include "opp.h" 23 + 24 + static struct opp_table *_managed_opp(const struct device_node *np) 25 + { 26 + struct opp_table *opp_table; 27 + 28 + list_for_each_entry_rcu(opp_table, &opp_tables, node) { 29 + if (opp_table->np == np) { 30 + /* 31 + * Multiple devices can point to the same OPP table and 32 + * so will have same node-pointer, np. 33 + * 34 + * But the OPPs will be considered as shared only if the 35 + * OPP table contains a "opp-shared" property. 36 + */ 37 + return opp_table->shared_opp ? opp_table : NULL; 38 + } 39 + } 40 + 41 + return NULL; 42 + } 43 + 44 + void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) 45 + { 46 + struct device_node *np; 47 + 48 + /* 49 + * Only required for backward compatibility with v1 bindings, but isn't 50 + * harmful for other cases. And so we do it unconditionally. 
51 + */ 52 + np = of_node_get(dev->of_node); 53 + if (np) { 54 + u32 val; 55 + 56 + if (!of_property_read_u32(np, "clock-latency", &val)) 57 + opp_table->clock_latency_ns_max = val; 58 + of_property_read_u32(np, "voltage-tolerance", 59 + &opp_table->voltage_tolerance_v1); 60 + of_node_put(np); 61 + } 62 + } 63 + 64 + static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, 65 + struct device_node *np) 66 + { 67 + unsigned int count = opp_table->supported_hw_count; 68 + u32 version; 69 + int ret; 70 + 71 + if (!opp_table->supported_hw) 72 + return true; 73 + 74 + while (count--) { 75 + ret = of_property_read_u32_index(np, "opp-supported-hw", count, 76 + &version); 77 + if (ret) { 78 + dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n", 79 + __func__, count, ret); 80 + return false; 81 + } 82 + 83 + /* Both of these are bitwise masks of the versions */ 84 + if (!(version & opp_table->supported_hw[count])) 85 + return false; 86 + } 87 + 88 + return true; 89 + } 90 + 91 + /* TODO: Support multiple regulators */ 92 + static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, 93 + struct opp_table *opp_table) 94 + { 95 + u32 microvolt[3] = {0}; 96 + u32 val; 97 + int count, ret; 98 + struct property *prop = NULL; 99 + char name[NAME_MAX]; 100 + 101 + /* Search for "opp-microvolt-<name>" */ 102 + if (opp_table->prop_name) { 103 + snprintf(name, sizeof(name), "opp-microvolt-%s", 104 + opp_table->prop_name); 105 + prop = of_find_property(opp->np, name, NULL); 106 + } 107 + 108 + if (!prop) { 109 + /* Search for "opp-microvolt" */ 110 + sprintf(name, "opp-microvolt"); 111 + prop = of_find_property(opp->np, name, NULL); 112 + 113 + /* Missing property isn't a problem, but an invalid entry is */ 114 + if (!prop) 115 + return 0; 116 + } 117 + 118 + count = of_property_count_u32_elems(opp->np, name); 119 + if (count < 0) { 120 + dev_err(dev, "%s: Invalid %s property (%d)\n", 121 + __func__, name, count); 122 + 
return count; 123 + } 124 + 125 + /* There can be one or three elements here */ 126 + if (count != 1 && count != 3) { 127 + dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n", 128 + __func__, name, count); 129 + return -EINVAL; 130 + } 131 + 132 + ret = of_property_read_u32_array(opp->np, name, microvolt, count); 133 + if (ret) { 134 + dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret); 135 + return -EINVAL; 136 + } 137 + 138 + opp->u_volt = microvolt[0]; 139 + 140 + if (count == 1) { 141 + opp->u_volt_min = opp->u_volt; 142 + opp->u_volt_max = opp->u_volt; 143 + } else { 144 + opp->u_volt_min = microvolt[1]; 145 + opp->u_volt_max = microvolt[2]; 146 + } 147 + 148 + /* Search for "opp-microamp-<name>" */ 149 + prop = NULL; 150 + if (opp_table->prop_name) { 151 + snprintf(name, sizeof(name), "opp-microamp-%s", 152 + opp_table->prop_name); 153 + prop = of_find_property(opp->np, name, NULL); 154 + } 155 + 156 + if (!prop) { 157 + /* Search for "opp-microamp" */ 158 + sprintf(name, "opp-microamp"); 159 + prop = of_find_property(opp->np, name, NULL); 160 + } 161 + 162 + if (prop && !of_property_read_u32(opp->np, name, &val)) 163 + opp->u_amp = val; 164 + 165 + return 0; 166 + } 167 + 168 + /** 169 + * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT 170 + * entries 171 + * @dev: device pointer used to lookup OPP table. 172 + * 173 + * Free OPPs created using static entries present in DT. 174 + * 175 + * Locking: The internal opp_table and opp structures are RCU protected. 176 + * Hence this function indirectly uses RCU updater strategy with mutex locks 177 + * to keep the integrity of the internal data structures. Callers should ensure 178 + * that this function is *NOT* called under RCU protection or in contexts where 179 + * mutex cannot be locked. 
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	/* remove_all=false: only static (DT-created) OPPs are freed here. */
	_dev_pm_opp_remove_table(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);

/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *_of_get_opp_desc_node(struct device *dev)
{
	/*
	 * TODO: Support for multiple OPP tables.
	 *
	 * There should be only ONE phandle present in "operating-points-v2"
	 * property.
	 */

	return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}

/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev:	device for which we do this operation
 * @np:		OPP device-tree node to parse
 *
 * This function adds an opp definition to the opp table and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Also looks up (or creates) the opp_table for @dev. */
	new_opp = _allocate_opp(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* "opp-hz" is mandatory for every v2 OPP node. */
	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, opp_table, np)) {
		/*
		 * Not a failure: ret is still 0 here, so an unsupported node
		 * is skipped silently rather than failing the whole table.
		 */
		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;	/* static: created from DT */
	new_opp->available = true;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	/* Parse opp-microvolt/opp-microamp (possibly prop_name variants). */
	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret)
		goto free_opp;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret)
		goto free_opp;

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			/* Only the first opp-suspend entry wins. */
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, opp_table->suspend_opp->rate,
				 new_opp->rate);
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	/* Track the worst-case transition latency across all OPPs. */
	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&opp_table_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	/* notify=false: don't send OPP_EVENT_REMOVE for an OPP never added. */
	_opp_remove(opp_table, new_opp, false);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}

/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
	struct device_node *np;
	struct opp_table *opp_table;
	int ret = 0, count = 0;

	mutex_lock(&opp_table_lock);

	/* Another device may already manage this (shared) OPP table. */
	opp_table = _managed_opp(opp_np);
	if (opp_table) {
		/* OPPs are already managed */
		if (!_add_opp_dev(dev, opp_table))
			ret = -ENOMEM;
		mutex_unlock(&opp_table_lock);
		return ret;
	}
	mutex_unlock(&opp_table_lock);

	/* We have opp-table node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_np, np) {
		count++;

		ret = _opp_add_static_v2(dev, np);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			goto free_table;
		}
	}

	/* There should be one or more OPPs defined */
	if (WARN_ON(!count))
		return -ENOENT;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(dev);
	if (WARN_ON(IS_ERR(opp_table))) {
		ret = PTR_ERR(opp_table);
		mutex_unlock(&opp_table_lock);
		goto free_table;
	}

	/* Recording np marks this table as "managed" for _managed_opp(). */
	opp_table->np = opp_np;
	opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");

	mutex_unlock(&opp_table_lock);

	return 0;

free_table:
	dev_pm_opp_of_remove_table(dev);

	return ret;
}

/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP table\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		/* DT stores kHz; the OPP library works in Hz. */
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		/* Duplicates are only warned about, not treated as fatal. */
		if (_opp_add_v1(dev, freq, volt, false))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		nr -= 2;
	}

	return 0;
}

/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
425 + * -ENODATA when empty 'operating-points' property is found 426 + * -EINVAL when invalid entries are found in opp-v2 table 427 + */ 428 + int dev_pm_opp_of_add_table(struct device *dev) 429 + { 430 + struct device_node *opp_np; 431 + int ret; 432 + 433 + /* 434 + * OPPs have two version of bindings now. The older one is deprecated, 435 + * try for the new binding first. 436 + */ 437 + opp_np = _of_get_opp_desc_node(dev); 438 + if (!opp_np) { 439 + /* 440 + * Try old-deprecated bindings for backward compatibility with 441 + * older dtbs. 442 + */ 443 + return _of_add_opp_table_v1(dev); 444 + } 445 + 446 + ret = _of_add_opp_table_v2(dev, opp_np); 447 + of_node_put(opp_np); 448 + 449 + return ret; 450 + } 451 + EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); 452 + 453 + /* CPU device specific helpers */ 454 + 455 + /** 456 + * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask 457 + * @cpumask: cpumask for which OPP table needs to be removed 458 + * 459 + * This removes the OPP tables for CPUs present in the @cpumask. 460 + * This should be used only to remove static entries created from DT. 461 + * 462 + * Locking: The internal opp_table and opp structures are RCU protected. 463 + * Hence this function internally uses RCU updater strategy with mutex locks 464 + * to keep the integrity of the internal data structures. Callers should ensure 465 + * that this function is *NOT* called under RCU protection or in contexts where 466 + * mutex cannot be locked. 467 + */ 468 + void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) 469 + { 470 + _dev_pm_opp_cpumask_remove_table(cpumask, true); 471 + } 472 + EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table); 473 + 474 + /** 475 + * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask 476 + * @cpumask: cpumask for which OPP table needs to be added. 477 + * 478 + * This adds the OPP tables for CPUs present in the @cpumask. 
479 + * 480 + * Locking: The internal opp_table and opp structures are RCU protected. 481 + * Hence this function internally uses RCU updater strategy with mutex locks 482 + * to keep the integrity of the internal data structures. Callers should ensure 483 + * that this function is *NOT* called under RCU protection or in contexts where 484 + * mutex cannot be locked. 485 + */ 486 + int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) 487 + { 488 + struct device *cpu_dev; 489 + int cpu, ret = 0; 490 + 491 + WARN_ON(cpumask_empty(cpumask)); 492 + 493 + for_each_cpu(cpu, cpumask) { 494 + cpu_dev = get_cpu_device(cpu); 495 + if (!cpu_dev) { 496 + pr_err("%s: failed to get cpu%d device\n", __func__, 497 + cpu); 498 + continue; 499 + } 500 + 501 + ret = dev_pm_opp_of_add_table(cpu_dev); 502 + if (ret) { 503 + pr_err("%s: couldn't find opp table for cpu:%d, %d\n", 504 + __func__, cpu, ret); 505 + 506 + /* Free all other OPPs */ 507 + dev_pm_opp_of_cpumask_remove_table(cpumask); 508 + break; 509 + } 510 + } 511 + 512 + return ret; 513 + } 514 + EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); 515 + 516 + /* 517 + * Works only for OPP v2 bindings. 518 + * 519 + * Returns -ENOENT if operating-points-v2 bindings aren't supported. 520 + */ 521 + /** 522 + * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with 523 + * @cpu_dev using operating-points-v2 524 + * bindings. 525 + * 526 + * @cpu_dev: CPU device for which we do this operation 527 + * @cpumask: cpumask to update with information of sharing CPUs 528 + * 529 + * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. 530 + * 531 + * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev. 532 + * 533 + * Locking: The internal opp_table and opp structures are RCU protected. 534 + * Hence this function internally uses RCU updater strategy with mutex locks 535 + * to keep the integrity of the internal data structures. 
Callers should ensure 536 + * that this function is *NOT* called under RCU protection or in contexts where 537 + * mutex cannot be locked. 538 + */ 539 + int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, 540 + struct cpumask *cpumask) 541 + { 542 + struct device_node *np, *tmp_np; 543 + struct device *tcpu_dev; 544 + int cpu, ret = 0; 545 + 546 + /* Get OPP descriptor node */ 547 + np = _of_get_opp_desc_node(cpu_dev); 548 + if (!np) { 549 + dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__); 550 + return -ENOENT; 551 + } 552 + 553 + cpumask_set_cpu(cpu_dev->id, cpumask); 554 + 555 + /* OPPs are shared ? */ 556 + if (!of_property_read_bool(np, "opp-shared")) 557 + goto put_cpu_node; 558 + 559 + for_each_possible_cpu(cpu) { 560 + if (cpu == cpu_dev->id) 561 + continue; 562 + 563 + tcpu_dev = get_cpu_device(cpu); 564 + if (!tcpu_dev) { 565 + dev_err(cpu_dev, "%s: failed to get cpu%d device\n", 566 + __func__, cpu); 567 + ret = -ENODEV; 568 + goto put_cpu_node; 569 + } 570 + 571 + /* Get OPP descriptor node */ 572 + tmp_np = _of_get_opp_desc_node(tcpu_dev); 573 + if (!tmp_np) { 574 + dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n", 575 + __func__); 576 + ret = -ENOENT; 577 + goto put_cpu_node; 578 + } 579 + 580 + /* CPUs are sharing opp node */ 581 + if (np == tmp_np) 582 + cpumask_set_cpu(cpu, cpumask); 583 + 584 + of_node_put(tmp_np); 585 + } 586 + 587 + put_cpu_node: 588 + of_node_put(np); 589 + return ret; 590 + } 591 + EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
+14
drivers/base/power/opp/opp.h
··· 28 28 /* Lock to allow exclusive modification to the device and opp lists */ 29 29 extern struct mutex opp_table_lock; 30 30 31 + extern struct list_head opp_tables; 32 + 31 33 /* 32 34 * Internal data structure organization with the OPP layer library is as 33 35 * follows: ··· 185 183 struct opp_table *_find_opp_table(struct device *dev); 186 184 struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table); 187 185 struct device_node *_of_get_opp_desc_node(struct device *dev); 186 + void _dev_pm_opp_remove_table(struct device *dev, bool remove_all); 187 + struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table); 188 + int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table); 189 + void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, bool notify); 190 + int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, bool dynamic); 191 + void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of); 192 + 193 + #ifdef CONFIG_OF 194 + void _of_init_opp_table(struct opp_table *opp_table, struct device *dev); 195 + #else 196 + static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {} 197 + #endif 188 198 189 199 #ifdef CONFIG_DEBUG_FS 190 200 void opp_debug_remove_one(struct dev_pm_opp *opp);
+39 -23
include/linux/pm_opp.h
··· 65 65 int dev_pm_opp_set_regulator(struct device *dev, const char *name); 66 66 void dev_pm_opp_put_regulator(struct device *dev); 67 67 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); 68 + int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); 69 + int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); 70 + void dev_pm_opp_remove_table(struct device *dev); 71 + void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); 68 72 #else 69 73 static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) 70 74 { ··· 113 109 static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 114 110 unsigned long freq, bool available) 115 111 { 116 - return ERR_PTR(-EINVAL); 112 + return ERR_PTR(-ENOTSUPP); 117 113 } 118 114 119 115 static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, 120 116 unsigned long *freq) 121 117 { 122 - return ERR_PTR(-EINVAL); 118 + return ERR_PTR(-ENOTSUPP); 123 119 } 124 120 125 121 static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, 126 122 unsigned long *freq) 127 123 { 128 - return ERR_PTR(-EINVAL); 124 + return ERR_PTR(-ENOTSUPP); 129 125 } 130 126 131 127 static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, 132 128 unsigned long u_volt) 133 129 { 134 - return -EINVAL; 130 + return -ENOTSUPP; 135 131 } 136 132 137 133 static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) ··· 151 147 static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( 152 148 struct device *dev) 153 149 { 154 - return ERR_PTR(-EINVAL); 150 + return ERR_PTR(-ENOTSUPP); 155 151 } 156 152 157 153 static inline int dev_pm_opp_set_supported_hw(struct device *dev, 158 154 const u32 *versions, 159 155 unsigned int count) 160 156 { 161 - return -EINVAL; 157 + return -ENOTSUPP; 162 158 } 163 159 164 160 static inline void 
dev_pm_opp_put_supported_hw(struct device *dev) {} 165 161 166 162 static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) 167 163 { 168 - return -EINVAL; 164 + return -ENOTSUPP; 169 165 } 170 166 171 167 static inline void dev_pm_opp_put_prop_name(struct device *dev) {} 172 168 173 169 static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name) 174 170 { 175 - return -EINVAL; 171 + return -ENOTSUPP; 176 172 } 177 173 178 174 static inline void dev_pm_opp_put_regulator(struct device *dev) {} 179 175 180 176 static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) 181 177 { 178 + return -ENOTSUPP; 179 + } 180 + 181 + static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) 182 + { 183 + return -ENOTSUPP; 184 + } 185 + 186 + static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) 187 + { 182 188 return -EINVAL; 189 + } 190 + 191 + static inline void dev_pm_opp_remove_table(struct device *dev) 192 + { 193 + } 194 + 195 + static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) 196 + { 183 197 } 184 198 185 199 #endif /* CONFIG_PM_OPP */ ··· 205 183 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) 206 184 int dev_pm_opp_of_add_table(struct device *dev); 207 185 void dev_pm_opp_of_remove_table(struct device *dev); 208 - int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask); 209 - void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask); 210 - int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); 211 - int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); 186 + int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); 187 + void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); 188 + int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); 212 189 #else 213 190 static 
inline int dev_pm_opp_of_add_table(struct device *dev) 214 191 { 215 - return -EINVAL; 192 + return -ENOTSUPP; 216 193 } 217 194 218 195 static inline void dev_pm_opp_of_remove_table(struct device *dev) 219 196 { 220 197 } 221 198 222 - static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask) 199 + static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) 223 200 { 224 - return -ENOSYS; 201 + return -ENOTSUPP; 225 202 } 226 203 227 - static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask) 204 + static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) 228 205 { 229 206 } 230 207 231 - static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) 208 + static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) 232 209 { 233 - return -ENOSYS; 234 - } 235 - 236 - static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) 237 - { 238 - return -ENOSYS; 210 + return -ENOTSUPP; 239 211 } 240 212 #endif 241 213