Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'pm+acpi-4.4-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management and ACPI updates from Rafael Wysocki:
"The only new feature in this batch is support for the ACPI _CCA device
configuration object, which is a pre-requisite for future ACPI PCI
support on ARM64, but should not affect the other architectures.

The rest is fixes and cleanups, mostly in cpufreq (including
intel_pstate), the Operating Performance Points (OPP) framework and
tools (cpupower and turbostat).

Specifics:

- Support for the ACPI _CCA configuration object intended to tell the
OS whether or not a bus master device supports hardware managed
cache coherency and a new set of functions to allow drivers to
check the cache coherency support for devices in a platform
firmware interface agnostic way (Suravee Suthikulpanit, Jeremy
Linton).

- ACPI backlight quirks for ESPRIMO Mobile M9410 and Dell XPS L421X
(Aaron Lu, Hans de Goede).

- Fixes for the arm_big_little and s5pv210-cpufreq cpufreq drivers
(Jon Medhurst, Nicolas Pitre).

- kfree()-related fixup for the recently introduced CPPC cpufreq
frontend (Markus Elfring).

- intel_pstate fix reducing kernel log noise on systems where
P-states are managed by hardware (Prarit Bhargava).

- intel_pstate maintainers information update (Srinivas Pandruvada).

- cpufreq core optimization related to the handling of delayed work
items used by governors (Viresh Kumar).

- Locking fixes and cleanups of the Operating Performance Points
(OPP) framework (Viresh Kumar).

- Generic power domains framework cleanups (Lina Iyer).

- cpupower tool updates (Jacob Tanenbaum, Sriram Raghunathan, Thomas
Renninger).

- turbostat tool updates (Len Brown)"

* tag 'pm+acpi-4.4-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (32 commits)
PCI: ACPI: Add support for PCI device DMA coherency
PCI: OF: Move of_pci_dma_configure() to pci_dma_configure()
of/pci: Fix pci_get_host_bridge_device leak
device property: ACPI: Remove unused DMA APIs
device property: ACPI: Make use of the new DMA Attribute APIs
device property: Adding DMA Attribute APIs for Generic Devices
ACPI: Adding DMA Attribute APIs for ACPI Device
device property: Introducing enum dev_dma_attr
ACPI: Honor ACPI _CCA attribute setting
cpufreq: CPPC: Delete an unnecessary check before the function call kfree()
PM / OPP: Add opp_rcu_lockdep_assert() to _find_device_opp()
PM / OPP: Hold dev_opp_list_lock for writers
PM / OPP: Protect updates to list_dev with mutex
PM / OPP: Propagate error properly from dev_pm_opp_set_sharing_cpus()
cpufreq: s5pv210-cpufreq: fix wrong do_div() usage
MAINTAINERS: update for intel P-state driver
Creating a common structure initialization pattern for struct option
cpupower: Enable disabled Cstates if they are below max latency
cpupower: Remove debug message when using cpupower idle-set -D switch
cpupower: cpupower monitor reports uninitialized values for offline cpus
...

+407 -211
+2 -1
MAINTAINERS
··· 5505 5505 F: drivers/idle/intel_idle.c 5506 5506 5507 5507 INTEL PSTATE DRIVER 5508 - M: Kristen Carlson Accardi <kristen@linux.intel.com> 5508 + M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> 5509 + M: Len Brown <lenb@kernel.org> 5509 5510 L: linux-pm@vger.kernel.org 5510 5511 S: Supported 5511 5512 F: drivers/cpufreq/intel_pstate.c
+6 -1
drivers/acpi/acpi_platform.c
··· 103 103 pdevinfo.res = resources; 104 104 pdevinfo.num_res = count; 105 105 pdevinfo.fwnode = acpi_fwnode_handle(adev); 106 - pdevinfo.dma_mask = acpi_check_dma(adev, NULL) ? DMA_BIT_MASK(32) : 0; 106 + 107 + if (acpi_dma_supported(adev)) 108 + pdevinfo.dma_mask = DMA_BIT_MASK(32); 109 + else 110 + pdevinfo.dma_mask = 0; 111 + 107 112 pdev = platform_device_register_full(&pdevinfo); 108 113 if (IS_ERR(pdev)) 109 114 dev_err(&adev->dev, "platform device creation failed: %ld\n",
+66 -12
drivers/acpi/acpi_video.c
··· 77 77 static int disable_backlight_sysfs_if = -1; 78 78 module_param(disable_backlight_sysfs_if, int, 0444); 79 79 80 + static bool device_id_scheme = false; 81 + module_param(device_id_scheme, bool, 0444); 82 + 83 + static bool only_lcd = false; 84 + module_param(only_lcd, bool, 0444); 85 + 80 86 static int register_count; 81 87 static DEFINE_MUTEX(register_count_mutex); 82 88 static struct mutex video_list_lock; ··· 400 394 return 0; 401 395 } 402 396 397 + static int video_set_device_id_scheme(const struct dmi_system_id *d) 398 + { 399 + device_id_scheme = true; 400 + return 0; 401 + } 402 + 403 + static int video_enable_only_lcd(const struct dmi_system_id *d) 404 + { 405 + only_lcd = true; 406 + return 0; 407 + } 408 + 403 409 static struct dmi_system_id video_dmi_table[] = { 404 410 /* 405 411 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 ··· 471 453 .matches = { 472 454 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 473 455 DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"), 456 + }, 457 + }, 458 + /* 459 + * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set 460 + * but the IDs actually follow the Device ID Scheme. 461 + */ 462 + { 463 + /* https://bugzilla.kernel.org/show_bug.cgi?id=104121 */ 464 + .callback = video_set_device_id_scheme, 465 + .ident = "ESPRIMO Mobile M9410", 466 + .matches = { 467 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), 468 + DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile M9410"), 469 + }, 470 + }, 471 + /* 472 + * Some machines have multiple video output devices, but only the one 473 + * that is the type of LCD can do the backlight control so we should not 474 + * register backlight interface for other video output devices. 
475 + */ 476 + { 477 + /* https://bugzilla.kernel.org/show_bug.cgi?id=104121 */ 478 + .callback = video_enable_only_lcd, 479 + .ident = "ESPRIMO Mobile M9410", 480 + .matches = { 481 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), 482 + DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile M9410"), 474 483 }, 475 484 }, 476 485 {} ··· 1048 1003 1049 1004 attribute = acpi_video_get_device_attr(video, device_id); 1050 1005 1051 - if (attribute && attribute->device_id_scheme) { 1006 + if (attribute && (attribute->device_id_scheme || device_id_scheme)) { 1052 1007 switch (attribute->display_type) { 1053 1008 case ACPI_VIDEO_DISPLAY_CRT: 1054 1009 data->flags.crt = 1; ··· 1613 1568 static int count; 1614 1569 char *name; 1615 1570 1616 - /* 1617 - * Do not create backlight device for video output 1618 - * device that is not in the enumerated list. 1619 - */ 1620 - if (!acpi_video_device_in_dod(device)) { 1621 - dev_dbg(&device->dev->dev, "not in _DOD list, ignore\n"); 1622 - return; 1623 - } 1624 - 1625 1571 result = acpi_video_init_brightness(device); 1626 1572 if (result) 1627 1573 return; ··· 1693 1657 mutex_unlock(&video->device_list_lock); 1694 1658 } 1695 1659 1660 + static bool acpi_video_should_register_backlight(struct acpi_video_device *dev) 1661 + { 1662 + /* 1663 + * Do not create backlight device for video output 1664 + * device that is not in the enumerated list. 
1665 + */ 1666 + if (!acpi_video_device_in_dod(dev)) { 1667 + dev_dbg(&dev->dev->dev, "not in _DOD list, ignore\n"); 1668 + return false; 1669 + } 1670 + 1671 + if (only_lcd) 1672 + return dev->flags.lcd; 1673 + return true; 1674 + } 1675 + 1696 1676 static int acpi_video_bus_register_backlight(struct acpi_video_bus *video) 1697 1677 { 1698 1678 struct acpi_video_device *dev; ··· 1722 1670 return 0; 1723 1671 1724 1672 mutex_lock(&video->device_list_lock); 1725 - list_for_each_entry(dev, &video->video_device_list, entry) 1726 - acpi_video_dev_register_backlight(dev); 1673 + list_for_each_entry(dev, &video->video_device_list, entry) { 1674 + if (acpi_video_should_register_backlight(dev)) 1675 + acpi_video_dev_register_backlight(dev); 1676 + } 1727 1677 mutex_unlock(&video->device_list_lock); 1728 1678 1729 1679 video->backlight_registered = true;
+5 -3
drivers/acpi/glue.c
··· 168 168 struct list_head *physnode_list; 169 169 unsigned int node_id; 170 170 int retval = -EINVAL; 171 - bool coherent; 171 + enum dev_dma_attr attr; 172 172 173 173 if (has_acpi_companion(dev)) { 174 174 if (acpi_dev) { ··· 225 225 if (!has_acpi_companion(dev)) 226 226 ACPI_COMPANION_SET(dev, acpi_dev); 227 227 228 - if (acpi_check_dma(acpi_dev, &coherent)) 229 - arch_setup_dma_ops(dev, 0, 0, NULL, coherent); 228 + attr = acpi_get_dma_attr(acpi_dev); 229 + if (attr != DEV_DMA_NOT_SUPPORTED) 230 + arch_setup_dma_ops(dev, 0, 0, NULL, 231 + attr == DEV_DMA_COHERENT); 230 232 231 233 acpi_physnode_link_name(physical_node_name, node_id); 232 234 retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
+42
drivers/acpi/scan.c
··· 1308 1308 kfree(pnp->unique_id); 1309 1309 } 1310 1310 1311 + /** 1312 + * acpi_dma_supported - Check DMA support for the specified device. 1313 + * @adev: The pointer to acpi device 1314 + * 1315 + * Return false if DMA is not supported. Otherwise, return true 1316 + */ 1317 + bool acpi_dma_supported(struct acpi_device *adev) 1318 + { 1319 + if (!adev) 1320 + return false; 1321 + 1322 + if (adev->flags.cca_seen) 1323 + return true; 1324 + 1325 + /* 1326 + * Per ACPI 6.0 sec 6.2.17, assume devices can do cache-coherent 1327 + * DMA on "Intel platforms". Presumably that includes all x86 and 1328 + * ia64, and other arches will set CONFIG_ACPI_CCA_REQUIRED=y. 1329 + */ 1330 + if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED)) 1331 + return true; 1332 + 1333 + return false; 1334 + } 1335 + 1336 + /** 1337 + * acpi_get_dma_attr - Check the supported DMA attr for the specified device. 1338 + * @adev: The pointer to acpi device 1339 + * 1340 + * Return enum dev_dma_attr. 1341 + */ 1342 + enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) 1343 + { 1344 + if (!acpi_dma_supported(adev)) 1345 + return DEV_DMA_NOT_SUPPORTED; 1346 + 1347 + if (adev->flags.coherent_dma) 1348 + return DEV_DMA_COHERENT; 1349 + else 1350 + return DEV_DMA_NON_COHERENT; 1351 + } 1352 + 1311 1353 static void acpi_init_coherency(struct acpi_device *adev) 1312 1354 { 1313 1355 unsigned long long cca = 0;
+9
drivers/acpi/video_detect.c
··· 233 233 }, 234 234 }, 235 235 { 236 + /* https://bugzilla.redhat.com/show_bug.cgi?id=1272633 */ 237 + .callback = video_detect_force_video, 238 + .ident = "Dell XPS14 L421X", 239 + .matches = { 240 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 241 + DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"), 242 + }, 243 + }, 244 + { 236 245 /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ 237 246 .callback = video_detect_force_video, 238 247 .ident = "Dell XPS15 L521X",
+10 -11
drivers/base/power/domain.c
··· 321 321 if (stat > PM_QOS_FLAGS_NONE) 322 322 return -EBUSY; 323 323 324 - if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) 325 - || pdd->dev->power.irq_safe)) 324 + if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe) 326 325 not_suspended++; 327 326 } 328 327 ··· 1311 1312 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 1312 1313 struct generic_pm_domain *subdomain) 1313 1314 { 1314 - struct gpd_link *link; 1315 + struct gpd_link *link, *itr; 1315 1316 int ret = 0; 1316 1317 1317 1318 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) 1318 1319 || genpd == subdomain) 1319 1320 return -EINVAL; 1321 + 1322 + link = kzalloc(sizeof(*link), GFP_KERNEL); 1323 + if (!link) 1324 + return -ENOMEM; 1320 1325 1321 1326 mutex_lock(&genpd->lock); 1322 1327 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); ··· 1331 1328 goto out; 1332 1329 } 1333 1330 1334 - list_for_each_entry(link, &genpd->master_links, master_node) { 1335 - if (link->slave == subdomain && link->master == genpd) { 1331 + list_for_each_entry(itr, &genpd->master_links, master_node) { 1332 + if (itr->slave == subdomain && itr->master == genpd) { 1336 1333 ret = -EINVAL; 1337 1334 goto out; 1338 1335 } 1339 1336 } 1340 1337 1341 - link = kzalloc(sizeof(*link), GFP_KERNEL); 1342 - if (!link) { 1343 - ret = -ENOMEM; 1344 - goto out; 1345 - } 1346 1338 link->master = genpd; 1347 1339 list_add_tail(&link->master_node, &genpd->master_links); 1348 1340 link->slave = subdomain; ··· 1348 1350 out: 1349 1351 mutex_unlock(&subdomain->lock); 1350 1352 mutex_unlock(&genpd->lock); 1351 - 1353 + if (ret) 1354 + kfree(link); 1352 1355 return ret; 1353 1356 } 1354 1357 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
+30 -14
drivers/base/power/opp/core.c
··· 11 11 * published by the Free Software Foundation. 12 12 */ 13 13 14 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 + 14 16 #include <linux/errno.h> 15 17 #include <linux/err.h> 16 18 #include <linux/slab.h> ··· 29 27 */ 30 28 static LIST_HEAD(dev_opp_list); 31 29 /* Lock to allow exclusive modification to the device and opp lists */ 32 - static DEFINE_MUTEX(dev_opp_list_lock); 30 + DEFINE_MUTEX(dev_opp_list_lock); 33 31 34 32 #define opp_rcu_lockdep_assert() \ 35 33 do { \ ··· 81 79 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or 82 80 * -EINVAL based on type of error. 83 81 * 84 - * Locking: This function must be called under rcu_read_lock(). device_opp 85 - * is a RCU protected pointer. This means that device_opp is valid as long 86 - * as we are under RCU lock. 82 + * Locking: For readers, this function must be called under rcu_read_lock(). 83 + * device_opp is a RCU protected pointer, which means that device_opp is valid 84 + * as long as we are under RCU lock. 85 + * 86 + * For Writers, this function must be called with dev_opp_list_lock held. 87 87 */ 88 88 struct device_opp *_find_device_opp(struct device *dev) 89 89 { 90 90 struct device_opp *dev_opp; 91 + 92 + opp_rcu_lockdep_assert(); 91 93 92 94 if (IS_ERR_OR_NULL(dev)) { 93 95 pr_err("%s: Invalid parameters\n", __func__); ··· 707 701 } 708 702 709 703 /** 710 - * _opp_add_dynamic() - Allocate a dynamic OPP. 704 + * _opp_add_v1() - Allocate a OPP based on v1 bindings. 
711 705 * @dev: device for which we do this operation 712 706 * @freq: Frequency in Hz for this OPP 713 707 * @u_volt: Voltage in uVolts for this OPP ··· 733 727 * Duplicate OPPs (both freq and volt are same) and !opp->available 734 728 * -ENOMEM Memory allocation failure 735 729 */ 736 - static int _opp_add_dynamic(struct device *dev, unsigned long freq, 737 - long u_volt, bool dynamic) 730 + static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, 731 + bool dynamic) 738 732 { 739 733 struct device_opp *dev_opp; 740 734 struct dev_pm_opp *new_opp; ··· 776 770 } 777 771 778 772 /* TODO: Support multiple regulators */ 779 - static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) 773 + static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev) 780 774 { 781 775 u32 microvolt[3] = {0}; 776 + u32 val; 782 777 int count, ret; 783 778 784 779 /* Missing property isn't a problem, but an invalid entry is */ ··· 811 804 opp->u_volt = microvolt[0]; 812 805 opp->u_volt_min = microvolt[1]; 813 806 opp->u_volt_max = microvolt[2]; 807 + 808 + if (!of_property_read_u32(opp->np, "opp-microamp", &val)) 809 + opp->u_amp = val; 814 810 815 811 return 0; 816 812 } ··· 879 869 if (!of_property_read_u32(np, "clock-latency-ns", &val)) 880 870 new_opp->clock_latency_ns = val; 881 871 882 - ret = opp_get_microvolt(new_opp, dev); 872 + ret = opp_parse_supplies(new_opp, dev); 883 873 if (ret) 884 874 goto free_opp; 885 - 886 - if (!of_property_read_u32(new_opp->np, "opp-microamp", &val)) 887 - new_opp->u_amp = val; 888 875 889 876 ret = _opp_add(dev, new_opp, dev_opp); 890 877 if (ret) ··· 946 939 */ 947 940 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) 948 941 { 949 - return _opp_add_dynamic(dev, freq, u_volt, true); 942 + return _opp_add_v1(dev, freq, u_volt, true); 950 943 } 951 944 EXPORT_SYMBOL_GPL(dev_pm_opp_add); 952 945 ··· 1179 1172 struct device_opp *dev_opp; 1180 1173 int ret = 0, count = 0; 
1181 1174 1175 + mutex_lock(&dev_opp_list_lock); 1176 + 1182 1177 dev_opp = _managed_opp(opp_np); 1183 1178 if (dev_opp) { 1184 1179 /* OPPs are already managed */ 1185 1180 if (!_add_list_dev(dev, dev_opp)) 1186 1181 ret = -ENOMEM; 1182 + mutex_unlock(&dev_opp_list_lock); 1187 1183 return ret; 1188 1184 } 1185 + mutex_unlock(&dev_opp_list_lock); 1189 1186 1190 1187 /* We have opp-list node now, iterate over it and add OPPs */ 1191 1188 for_each_available_child_of_node(opp_np, np) { ··· 1207 1196 if (WARN_ON(!count)) 1208 1197 return -ENOENT; 1209 1198 1199 + mutex_lock(&dev_opp_list_lock); 1200 + 1210 1201 dev_opp = _find_device_opp(dev); 1211 1202 if (WARN_ON(IS_ERR(dev_opp))) { 1212 1203 ret = PTR_ERR(dev_opp); 1204 + mutex_unlock(&dev_opp_list_lock); 1213 1205 goto free_table; 1214 1206 } 1215 1207 1216 1208 dev_opp->np = opp_np; 1217 1209 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared"); 1210 + 1211 + mutex_unlock(&dev_opp_list_lock); 1218 1212 1219 1213 return 0; 1220 1214 ··· 1257 1241 unsigned long freq = be32_to_cpup(val++) * 1000; 1258 1242 unsigned long volt = be32_to_cpup(val++); 1259 1243 1260 - if (_opp_add_dynamic(dev, freq, volt, false)) 1244 + if (_opp_add_v1(dev, freq, volt, false)) 1261 1245 dev_warn(dev, "%s: Failed to add OPP %ld\n", 1262 1246 __func__, freq); 1263 1247 nr -= 2;
+8 -5
drivers/base/power/opp/cpu.c
··· 10 10 * it under the terms of the GNU General Public License version 2 as 11 11 * published by the Free Software Foundation. 12 12 */ 13 + 14 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 + 13 16 #include <linux/cpu.h> 14 17 #include <linux/cpufreq.h> 15 18 #include <linux/err.h> ··· 127 124 struct device *dev; 128 125 int cpu, ret = 0; 129 126 130 - rcu_read_lock(); 127 + mutex_lock(&dev_opp_list_lock); 131 128 132 129 dev_opp = _find_device_opp(cpu_dev); 133 130 if (IS_ERR(dev_opp)) { 134 131 ret = -EINVAL; 135 - goto out_rcu_read_unlock; 132 + goto unlock; 136 133 } 137 134 138 135 for_each_cpu(cpu, cpumask) { ··· 153 150 continue; 154 151 } 155 152 } 156 - out_rcu_read_unlock: 157 - rcu_read_unlock(); 153 + unlock: 154 + mutex_unlock(&dev_opp_list_lock); 158 155 159 - return 0; 156 + return ret; 160 157 } 161 158 EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); 162 159
+3
drivers/base/power/opp/opp.h
··· 21 21 #include <linux/rculist.h> 22 22 #include <linux/rcupdate.h> 23 23 24 + /* Lock to allow exclusive modification to the device and opp lists */ 25 + extern struct mutex dev_opp_list_lock; 26 + 24 27 /* 25 28 * Internal data structure organization with the OPP layer library is as 26 29 * follows:
+24 -8
drivers/base/property.c
··· 598 598 } 599 599 EXPORT_SYMBOL_GPL(device_get_child_node_count); 600 600 601 - bool device_dma_is_coherent(struct device *dev) 601 + bool device_dma_supported(struct device *dev) 602 602 { 603 - bool coherent = false; 604 - 603 + /* For DT, this is always supported. 604 + * For ACPI, this depends on CCA, which 605 + * is determined by the acpi_dma_supported(). 606 + */ 605 607 if (IS_ENABLED(CONFIG_OF) && dev->of_node) 606 - coherent = of_dma_is_coherent(dev->of_node); 607 - else 608 - acpi_check_dma(ACPI_COMPANION(dev), &coherent); 608 + return true; 609 609 610 - return coherent; 610 + return acpi_dma_supported(ACPI_COMPANION(dev)); 611 611 } 612 - EXPORT_SYMBOL_GPL(device_dma_is_coherent); 612 + EXPORT_SYMBOL_GPL(device_dma_supported); 613 + 614 + enum dev_dma_attr device_get_dma_attr(struct device *dev) 615 + { 616 + enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED; 617 + 618 + if (IS_ENABLED(CONFIG_OF) && dev->of_node) { 619 + if (of_dma_is_coherent(dev->of_node)) 620 + attr = DEV_DMA_COHERENT; 621 + else 622 + attr = DEV_DMA_NON_COHERENT; 623 + } else 624 + attr = acpi_get_dma_attr(ACPI_COMPANION(dev)); 625 + 626 + return attr; 627 + } 628 + EXPORT_SYMBOL_GPL(device_get_dma_attr); 613 629 614 630 /** 615 631 * device_get_phy_mode - Get phy mode for given device
+13 -9
drivers/cpufreq/arm_big_little.c
··· 149 149 __func__, cpu, old_cluster, new_cluster, new_rate); 150 150 151 151 ret = clk_set_rate(clk[new_cluster], new_rate * 1000); 152 + if (!ret) { 153 + /* 154 + * FIXME: clk_set_rate hasn't returned an error here however it 155 + * may be that clk_change_rate failed due to hardware or 156 + * firmware issues and wasn't able to report that due to the 157 + * current design of the clk core layer. To work around this 158 + * problem we will read back the clock rate and check it is 159 + * correct. This needs to be removed once clk core is fixed. 160 + */ 161 + if (clk_get_rate(clk[new_cluster]) != new_rate * 1000) 162 + ret = -EIO; 163 + } 164 + 152 165 if (WARN_ON(ret)) { 153 166 pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret, 154 167 new_cluster); ··· 202 189 mutex_unlock(&cluster_lock[old_cluster]); 203 190 } 204 191 205 - /* 206 - * FIXME: clk_set_rate has to handle the case where clk_change_rate 207 - * can fail due to hardware or firmware issues. Until the clk core 208 - * layer is fixed, we can check here. In most of the cases we will 209 - * be reading only the cached value anyway. This needs to be removed 210 - * once clk core is fixed. 211 - */ 212 - if (bL_cpufreq_get_rate(cpu) != new_rate) 213 - return -EIO; 214 192 return 0; 215 193 } 216 194
+1 -2
drivers/cpufreq/cppc_cpufreq.c
··· 166 166 167 167 out: 168 168 for_each_possible_cpu(i) 169 - if (all_cpu_data[i]) 170 - kfree(all_cpu_data[i]); 169 + kfree(all_cpu_data[i]); 171 170 172 171 kfree(all_cpu_data); 173 172 return -ENODEV;
+23 -10
drivers/cpufreq/cpufreq_governor.c
··· 171 171 { 172 172 int i; 173 173 174 - mutex_lock(&cpufreq_governor_lock); 175 - if (!policy->governor_enabled) 176 - goto out_unlock; 177 - 178 174 if (!all_cpus) { 179 175 /* 180 176 * Use raw_smp_processor_id() to avoid preemptible warnings. ··· 184 188 for_each_cpu(i, policy->cpus) 185 189 __gov_queue_work(i, dbs_data, delay); 186 190 } 187 - 188 - out_unlock: 189 - mutex_unlock(&cpufreq_governor_lock); 190 191 } 191 192 EXPORT_SYMBOL_GPL(gov_queue_work); 192 193 ··· 222 229 struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info, 223 230 dwork.work); 224 231 struct cpu_common_dbs_info *shared = cdbs->shared; 225 - struct cpufreq_policy *policy = shared->policy; 226 - struct dbs_data *dbs_data = policy->governor_data; 232 + struct cpufreq_policy *policy; 233 + struct dbs_data *dbs_data; 227 234 unsigned int sampling_rate, delay; 228 235 bool modify_all = true; 229 236 230 237 mutex_lock(&shared->timer_mutex); 238 + 239 + policy = shared->policy; 240 + 241 + /* 242 + * Governor might already be disabled and there is no point continuing 243 + * with the work-handler. 244 + */ 245 + if (!policy) 246 + goto unlock; 247 + 248 + dbs_data = policy->governor_data; 231 249 232 250 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { 233 251 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; ··· 256 252 delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all); 257 253 gov_queue_work(dbs_data, policy, delay, modify_all); 258 254 255 + unlock: 259 256 mutex_unlock(&shared->timer_mutex); 260 257 } 261 258 ··· 483 478 if (!shared || !shared->policy) 484 479 return -EBUSY; 485 480 481 + /* 482 + * Work-handler must see this updated, as it should not proceed any 483 + * further after governor is disabled. And so timer_mutex is taken while 484 + * updating this value. 
485 + */ 486 + mutex_lock(&shared->timer_mutex); 487 + shared->policy = NULL; 488 + mutex_unlock(&shared->timer_mutex); 489 + 486 490 gov_cancel_work(dbs_data, policy); 487 491 488 - shared->policy = NULL; 489 492 mutex_destroy(&shared->timer_mutex); 490 493 return 0; 491 494 }
+6 -4
drivers/cpufreq/intel_pstate.c
··· 684 684 685 685 static void intel_pstate_hwp_enable(struct cpudata *cpudata) 686 686 { 687 - pr_info("intel_pstate: HWP enabled\n"); 688 - 689 687 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 690 688 } 691 689 ··· 1555 1557 if (!all_cpu_data) 1556 1558 return -ENOMEM; 1557 1559 1558 - if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) 1560 + if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) { 1561 + pr_info("intel_pstate: HWP enabled\n"); 1559 1562 hwp_active++; 1563 + } 1560 1564 1561 1565 if (!hwp_active && hwp_only) 1562 1566 goto out; ··· 1593 1593 1594 1594 if (!strcmp(str, "disable")) 1595 1595 no_load = 1; 1596 - if (!strcmp(str, "no_hwp")) 1596 + if (!strcmp(str, "no_hwp")) { 1597 + pr_info("intel_pstate: HWP disabled\n"); 1597 1598 no_hwp = 1; 1599 + } 1598 1600 if (!strcmp(str, "force")) 1599 1601 force_load = 1; 1600 1602 if (!strcmp(str, "hwp_only"))
+2 -2
drivers/cpufreq/s5pv210-cpufreq.c
··· 212 212 /* Find current DRAM frequency */ 213 213 tmp = s5pv210_dram_conf[ch].freq; 214 214 215 - do_div(tmp, freq); 215 + tmp /= freq; 216 216 217 217 tmp1 = s5pv210_dram_conf[ch].refresh; 218 218 219 - do_div(tmp1, tmp); 219 + tmp1 /= tmp; 220 220 221 221 __raw_writel(tmp1, reg); 222 222 }
+13 -6
drivers/crypto/ccp/ccp-platform.c
··· 94 94 struct ccp_device *ccp; 95 95 struct ccp_platform *ccp_platform; 96 96 struct device *dev = &pdev->dev; 97 + enum dev_dma_attr attr; 97 98 struct resource *ior; 98 99 int ret; 99 100 ··· 119 118 } 120 119 ccp->io_regs = ccp->io_map; 121 120 121 + attr = device_get_dma_attr(dev); 122 + if (attr == DEV_DMA_NOT_SUPPORTED) { 123 + dev_err(dev, "DMA is not supported"); 124 + goto e_err; 125 + } 126 + 127 + ccp_platform->coherent = (attr == DEV_DMA_COHERENT); 128 + if (ccp_platform->coherent) 129 + ccp->axcache = CACHE_WB_NO_ALLOC; 130 + else 131 + ccp->axcache = CACHE_NONE; 132 + 122 133 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); 123 134 if (ret) { 124 135 dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); 125 136 goto e_err; 126 137 } 127 - 128 - ccp_platform->coherent = device_dma_is_coherent(ccp->dev); 129 - if (ccp_platform->coherent) 130 - ccp->axcache = CACHE_WB_NO_ALLOC; 131 - else 132 - ccp->axcache = CACHE_NONE; 133 138 134 139 dev_set_drvdata(dev, ccp); 135 140
+7 -1
drivers/net/ethernet/amd/xgbe/xgbe-main.c
··· 342 342 struct resource *res; 343 343 const char *phy_mode; 344 344 unsigned int i, phy_memnum, phy_irqnum; 345 + enum dev_dma_attr attr; 345 346 int ret; 346 347 347 348 DBGPR("--> xgbe_probe\n"); ··· 610 609 goto err_io; 611 610 612 611 /* Set the DMA coherency values */ 613 - pdata->coherent = device_dma_is_coherent(pdata->dev); 612 + attr = device_get_dma_attr(dev); 613 + if (attr == DEV_DMA_NOT_SUPPORTED) { 614 + dev_err(dev, "DMA is not supported"); 615 + goto err_io; 616 + } 617 + pdata->coherent = (attr == DEV_DMA_COHERENT); 614 618 if (pdata->coherent) { 615 619 pdata->axdomain = XGBE_DMA_OS_AXDOMAIN; 616 620 pdata->arcache = XGBE_DMA_OS_ARCACHE;
-20
drivers/of/of_pci.c
··· 143 143 } 144 144 EXPORT_SYMBOL_GPL(of_pci_check_probe_only); 145 145 146 - /** 147 - * of_pci_dma_configure - Setup DMA configuration 148 - * @dev: ptr to pci_dev struct of the PCI device 149 - * 150 - * Function to update PCI devices's DMA configuration using the same 151 - * info from the OF node of host bridge's parent (if any). 152 - */ 153 - void of_pci_dma_configure(struct pci_dev *pci_dev) 154 - { 155 - struct device *dev = &pci_dev->dev; 156 - struct device *bridge = pci_get_host_bridge_device(pci_dev); 157 - 158 - if (!bridge->parent) 159 - return; 160 - 161 - of_dma_configure(dev, bridge->parent->of_node); 162 - pci_put_host_bridge_device(bridge); 163 - } 164 - EXPORT_SYMBOL_GPL(of_pci_dma_configure); 165 - 166 146 #if defined(CONFIG_OF_ADDRESS) 167 147 /** 168 148 * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
+31 -1
drivers/pci/probe.c
··· 6 6 #include <linux/delay.h> 7 7 #include <linux/init.h> 8 8 #include <linux/pci.h> 9 + #include <linux/of_device.h> 9 10 #include <linux/of_pci.h> 10 11 #include <linux/pci_hotplug.h> 11 12 #include <linux/slab.h> ··· 14 13 #include <linux/cpumask.h> 15 14 #include <linux/pci-aspm.h> 16 15 #include <linux/aer.h> 16 + #include <linux/acpi.h> 17 17 #include <asm-generic/pci-bridge.h> 18 18 #include "pci.h" 19 19 ··· 1674 1672 dev_set_msi_domain(&dev->dev, d); 1675 1673 } 1676 1674 1675 + /** 1676 + * pci_dma_configure - Setup DMA configuration 1677 + * @dev: ptr to pci_dev struct of the PCI device 1678 + * 1679 + * Function to update PCI devices's DMA configuration using the same 1680 + * info from the OF node or ACPI node of host bridge's parent (if any). 1681 + */ 1682 + static void pci_dma_configure(struct pci_dev *dev) 1683 + { 1684 + struct device *bridge = pci_get_host_bridge_device(dev); 1685 + 1686 + if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) { 1687 + if (bridge->parent) 1688 + of_dma_configure(&dev->dev, bridge->parent->of_node); 1689 + } else if (has_acpi_companion(bridge)) { 1690 + struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); 1691 + enum dev_dma_attr attr = acpi_get_dma_attr(adev); 1692 + 1693 + if (attr == DEV_DMA_NOT_SUPPORTED) 1694 + dev_warn(&dev->dev, "DMA not supported.\n"); 1695 + else 1696 + arch_setup_dma_ops(&dev->dev, 0, 0, NULL, 1697 + attr == DEV_DMA_COHERENT); 1698 + } 1699 + 1700 + pci_put_host_bridge_device(bridge); 1701 + } 1702 + 1677 1703 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) 1678 1704 { 1679 1705 int ret; ··· 1715 1685 dev->dev.dma_mask = &dev->dma_mask; 1716 1686 dev->dev.dma_parms = &dev->dma_parms; 1717 1687 dev->dev.coherent_dma_mask = 0xffffffffull; 1718 - of_pci_dma_configure(dev); 1688 + pci_dma_configure(dev); 1719 1689 1720 1690 pci_set_dma_max_seg_size(dev, 65536); 1721 1691 pci_set_dma_seg_boundary(dev, 0xffffffff);
+3 -33
include/acpi/acpi_bus.h
··· 390 390 struct completion kobj_done; 391 391 }; 392 392 393 - static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent) 394 - { 395 - bool ret = false; 396 - 397 - if (!adev) 398 - return ret; 399 - 400 - /** 401 - * Currently, we only support _CCA=1 (i.e. coherent_dma=1) 402 - * This should be equivalent to specifyig dma-coherent for 403 - * a device in OF. 404 - * 405 - * For the case when _CCA=0 (i.e. coherent_dma=0 && cca_seen=1), 406 - * There are two cases: 407 - * case 1. Do not support and disable DMA. 408 - * case 2. Support but rely on arch-specific cache maintenance for 409 - * non-coherence DMA operations. 410 - * Currently, we implement case 1 above. 411 - * 412 - * For the case when _CCA is missing (i.e. cca_seen=0) and 413 - * platform specifies ACPI_CCA_REQUIRED, we do not support DMA, 414 - * and fallback to arch-specific default handling. 415 - * 416 - * See acpi_init_coherency() for more info. 417 - */ 418 - if (adev->flags.coherent_dma) { 419 - ret = true; 420 - if (coherent) 421 - *coherent = adev->flags.coherent_dma; 422 - } 423 - return ret; 424 - } 425 - 426 393 static inline bool is_acpi_node(struct fwnode_handle *fwnode) 427 394 { 428 395 return fwnode && (fwnode->type == FWNODE_ACPI ··· 561 594 }; 562 595 563 596 /* helper */ 597 + 598 + bool acpi_dma_supported(struct acpi_device *adev); 599 + enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); 564 600 565 601 struct acpi_device *acpi_find_child_device(struct acpi_device *parent, 566 602 u64 address, bool check_children);
+6 -1
include/linux/acpi.h
··· 601 601 return -ENODEV; 602 602 } 603 603 604 - static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent) 604 + static inline bool acpi_dma_supported(struct acpi_device *adev) 605 605 { 606 606 return false; 607 + } 608 + 609 + static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) 610 + { 611 + return DEV_DMA_NOT_SUPPORTED; 607 612 } 608 613 609 614 #define ACPI_PTR(_ptr) (NULL)
-3
include/linux/of_pci.h
··· 16 16 int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); 17 17 int of_pci_parse_bus_range(struct device_node *node, struct resource *res); 18 18 int of_get_pci_domain_nr(struct device_node *node); 19 - void of_pci_dma_configure(struct pci_dev *pci_dev); 20 19 void of_pci_check_probe_only(void); 21 20 #else 22 21 static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) ··· 51 52 { 52 53 return -1; 53 54 } 54 - 55 - static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { } 56 55 57 56 static inline void of_pci_check_probe_only(void) { } 58 57 #endif
+9 -1
include/linux/property.h
··· 27 27 DEV_PROP_MAX, 28 28 }; 29 29 30 + enum dev_dma_attr { 31 + DEV_DMA_NOT_SUPPORTED, 32 + DEV_DMA_NON_COHERENT, 33 + DEV_DMA_COHERENT, 34 + }; 35 + 30 36 bool device_property_present(struct device *dev, const char *propname); 31 37 int device_property_read_u8_array(struct device *dev, const char *propname, 32 38 u8 *val, size_t nval); ··· 174 168 175 169 void device_add_property_set(struct device *dev, struct property_set *pset); 176 170 177 - bool device_dma_is_coherent(struct device *dev); 171 + bool device_dma_supported(struct device *dev); 172 + 173 + enum dev_dma_attr device_get_dma_attr(struct device *dev); 178 174 179 175 int device_get_phy_mode(struct device *dev); 180 176
+1 -1
tools/power/cpupower/debug/i386/dump_psb.c
··· 134 134 } 135 135 136 136 static struct option info_opts[] = { 137 - {.name = "numpst", .has_arg=no_argument, .flag=NULL, .val='n'}, 137 + {"numpst", no_argument, NULL, 'n'}, 138 138 }; 139 139 140 140 void print_help(void)
+3 -1
tools/power/cpupower/man/cpupower-idle-set.1
··· 20 20 Enable a specific processor sleep state. 21 21 .TP 22 22 \fB\-D\fR \fB\-\-disable-by-latency\fR <LATENCY> 23 - Disable all idle states with a equal or higher latency than <LATENCY> 23 + Disable all idle states with a equal or higher latency than <LATENCY>. 24 + 25 + Enable all idle states with a latency lower than <LATENCY>. 24 26 .TP 25 27 \fB\-E\fR \fB\-\-enable-all\fR 26 28 Enable all idle states if not enabled already.
+15 -15
tools/power/cpupower/utils/cpufreq-info.c
··· 536 536 } 537 537 538 538 static struct option info_opts[] = { 539 - { .name = "debug", .has_arg = no_argument, .flag = NULL, .val = 'e'}, 540 - { .name = "boost", .has_arg = no_argument, .flag = NULL, .val = 'b'}, 541 - { .name = "freq", .has_arg = no_argument, .flag = NULL, .val = 'f'}, 542 - { .name = "hwfreq", .has_arg = no_argument, .flag = NULL, .val = 'w'}, 543 - { .name = "hwlimits", .has_arg = no_argument, .flag = NULL, .val = 'l'}, 544 - { .name = "driver", .has_arg = no_argument, .flag = NULL, .val = 'd'}, 545 - { .name = "policy", .has_arg = no_argument, .flag = NULL, .val = 'p'}, 546 - { .name = "governors", .has_arg = no_argument, .flag = NULL, .val = 'g'}, 547 - { .name = "related-cpus", .has_arg = no_argument, .flag = NULL, .val = 'r'}, 548 - { .name = "affected-cpus",.has_arg = no_argument, .flag = NULL, .val = 'a'}, 549 - { .name = "stats", .has_arg = no_argument, .flag = NULL, .val = 's'}, 550 - { .name = "latency", .has_arg = no_argument, .flag = NULL, .val = 'y'}, 551 - { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, 552 - { .name = "human", .has_arg = no_argument, .flag = NULL, .val = 'm'}, 553 - { .name = "no-rounding", .has_arg = no_argument, .flag = NULL, .val = 'n'}, 539 + {"debug", no_argument, NULL, 'e'}, 540 + {"boost", no_argument, NULL, 'b'}, 541 + {"freq", no_argument, NULL, 'f'}, 542 + {"hwfreq", no_argument, NULL, 'w'}, 543 + {"hwlimits", no_argument, NULL, 'l'}, 544 + {"driver", no_argument, NULL, 'd'}, 545 + {"policy", no_argument, NULL, 'p'}, 546 + {"governors", no_argument, NULL, 'g'}, 547 + {"related-cpus", no_argument, NULL, 'r'}, 548 + {"affected-cpus", no_argument, NULL, 'a'}, 549 + {"stats", no_argument, NULL, 's'}, 550 + {"latency", no_argument, NULL, 'y'}, 551 + {"proc", no_argument, NULL, 'o'}, 552 + {"human", no_argument, NULL, 'm'}, 553 + {"no-rounding", no_argument, NULL, 'n'}, 554 554 { }, 555 555 }; 556 556
+5 -5
tools/power/cpupower/utils/cpufreq-set.c
··· 22 22 #define NORM_FREQ_LEN 32 23 23 24 24 static struct option set_opts[] = { 25 - { .name = "min", .has_arg = required_argument, .flag = NULL, .val = 'd'}, 26 - { .name = "max", .has_arg = required_argument, .flag = NULL, .val = 'u'}, 27 - { .name = "governor", .has_arg = required_argument, .flag = NULL, .val = 'g'}, 28 - { .name = "freq", .has_arg = required_argument, .flag = NULL, .val = 'f'}, 29 - { .name = "related", .has_arg = no_argument, .flag = NULL, .val='r'}, 25 + {"min", required_argument, NULL, 'd'}, 26 + {"max", required_argument, NULL, 'u'}, 27 + {"governor", required_argument, NULL, 'g'}, 28 + {"freq", required_argument, NULL, 'f'}, 29 + {"related", no_argument, NULL, 'r'}, 30 30 { }, 31 31 }; 32 32
+2 -2
tools/power/cpupower/utils/cpuidle-info.c
··· 126 126 } 127 127 128 128 static struct option info_opts[] = { 129 - { .name = "silent", .has_arg = no_argument, .flag = NULL, .val = 's'}, 130 - { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, 129 + {"silent", no_argument, NULL, 's'}, 130 + {"proc", no_argument, NULL, 'o'}, 131 131 { }, 132 132 }; 133 133
+18 -15
tools/power/cpupower/utils/cpuidle-set.c
··· 13 13 #include "helpers/sysfs.h" 14 14 15 15 static struct option info_opts[] = { 16 - { .name = "disable", 17 - .has_arg = required_argument, .flag = NULL, .val = 'd'}, 18 - { .name = "enable", 19 - .has_arg = required_argument, .flag = NULL, .val = 'e'}, 20 - { .name = "disable-by-latency", 21 - .has_arg = required_argument, .flag = NULL, .val = 'D'}, 22 - { .name = "enable-all", 23 - .has_arg = no_argument, .flag = NULL, .val = 'E'}, 24 - { }, 16 + {"disable", required_argument, NULL, 'd'}, 17 + {"enable", required_argument, NULL, 'e'}, 18 + {"disable-by-latency", required_argument, NULL, 'D'}, 19 + {"enable-all", no_argument, NULL, 'E'}, 20 + { }, 25 21 }; 26 22 27 23 ··· 144 148 (cpu, idlestate); 145 149 state_latency = sysfs_get_idlestate_latency 146 150 (cpu, idlestate); 147 - printf("CPU: %u - idlestate %u - state_latency: %llu - latency: %llu\n", 148 - cpu, idlestate, state_latency, latency); 149 - if (disabled == 1 || latency > state_latency) 151 + if (disabled == 1) { 152 + if (latency > state_latency){ 153 + ret = sysfs_idlestate_disable 154 + (cpu, idlestate, 0); 155 + if (ret == 0) 156 + printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu); 157 + } 150 158 continue; 151 - ret = sysfs_idlestate_disable 152 - (cpu, idlestate, 1); 153 - if (ret == 0) 159 + } 160 + if (latency <= state_latency){ 161 + ret = sysfs_idlestate_disable 162 + (cpu, idlestate, 1); 163 + if (ret == 0) 154 164 printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu); 165 + } 155 166 } 156 167 break; 157 168 case 'E':
+2 -2
tools/power/cpupower/utils/cpupower-info.c
··· 17 17 #include "helpers/sysfs.h" 18 18 19 19 static struct option set_opts[] = { 20 - { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, 21 - { }, 20 + {"perf-bias", optional_argument, NULL, 'b'}, 21 + { }, 22 22 }; 23 23 24 24 static void print_wrong_arg_exit(void)
+1 -1
tools/power/cpupower/utils/cpupower-set.c
··· 18 18 #include "helpers/bitmask.h" 19 19 20 20 static struct option set_opts[] = { 21 - { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'}, 21 + {"perf-bias", required_argument, NULL, 'b'}, 22 22 { }, 23 23 }; 24 24
+15 -8
tools/power/cpupower/utils/helpers/topology.c
··· 73 73 for (cpu = 0; cpu < cpus; cpu++) { 74 74 cpu_top->core_info[cpu].cpu = cpu; 75 75 cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); 76 - if (!cpu_top->core_info[cpu].is_online) 77 - continue; 78 76 if(sysfs_topology_read_file( 79 77 cpu, 80 78 "physical_package_id", 81 - &(cpu_top->core_info[cpu].pkg)) < 0) 82 - return -1; 79 + &(cpu_top->core_info[cpu].pkg)) < 0) { 80 + cpu_top->core_info[cpu].pkg = -1; 81 + cpu_top->core_info[cpu].core = -1; 82 + continue; 83 + } 83 84 if(sysfs_topology_read_file( 84 85 cpu, 85 86 "core_id", 86 - &(cpu_top->core_info[cpu].core)) < 0) 87 - return -1; 87 + &(cpu_top->core_info[cpu].core)) < 0) { 88 + cpu_top->core_info[cpu].pkg = -1; 89 + cpu_top->core_info[cpu].core = -1; 90 + continue; 91 + } 88 92 } 89 93 90 94 qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info), ··· 99 95 done by pkg value. */ 100 96 last_pkg = cpu_top->core_info[0].pkg; 101 97 for(cpu = 1; cpu < cpus; cpu++) { 102 - if(cpu_top->core_info[cpu].pkg != last_pkg) { 98 + if (cpu_top->core_info[cpu].pkg != last_pkg && 99 + cpu_top->core_info[cpu].pkg != -1) { 100 + 103 101 last_pkg = cpu_top->core_info[cpu].pkg; 104 102 cpu_top->pkgs++; 105 103 } 106 104 } 107 - cpu_top->pkgs++; 105 + if (!cpu_top->core_info[0].pkg == -1) 106 + cpu_top->pkgs++; 108 107 109 108 /* Intel's cores count is not consecutively numbered, there may 110 109 * be a core_id of 3, but none of 2. Assume there always is 0
+8 -1
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
··· 143 143 /* Be careful CPUs may got resorted for pkg value do not just use cpu */ 144 144 if (!bitmask_isbitset(cpus_chosen, cpu_top.core_info[cpu].cpu)) 145 145 return; 146 + if (!cpu_top.core_info[cpu].is_online && 147 + cpu_top.core_info[cpu].pkg == -1) 148 + return; 146 149 147 150 if (topology_depth > 2) 148 151 printf("%4d|", cpu_top.core_info[cpu].pkg); ··· 194 191 * It's up to the monitor plug-in to check .is_online, this one 195 192 * is just for additional info. 196 193 */ 197 - if (!cpu_top.core_info[cpu].is_online) { 194 + if (!cpu_top.core_info[cpu].is_online && 195 + cpu_top.core_info[cpu].pkg != -1) { 198 196 printf(_(" *is offline\n")); 199 197 return; 200 198 } else ··· 391 387 printf(_("Cannot read number of available processors\n")); 392 388 return EXIT_FAILURE; 393 389 } 390 + 391 + if (!cpu_top.core_info[0].is_online) 392 + printf("WARNING: at least one cpu is offline\n"); 394 393 395 394 /* Default is: monitor all CPUs */ 396 395 if (bitmask_isallclear(cpus_chosen))
+18 -12
tools/power/x86/turbostat/turbostat.c
··· 75 75 int do_smi; 76 76 double bclk; 77 77 double base_hz; 78 + unsigned int has_base_hz; 78 79 double tsc_tweak = 1.0; 79 80 unsigned int show_pkg; 80 81 unsigned int show_core; ··· 97 96 unsigned int crystal_hz; 98 97 unsigned long long tsc_hz; 99 98 int base_cpu; 99 + double discover_bclk(unsigned int family, unsigned int model); 100 100 101 101 #define RAPL_PKG (1 << 0) 102 102 /* 0x610 MSR_PKG_POWER_LIMIT */ ··· 513 511 } 514 512 515 513 /* Bzy_MHz */ 516 - if (has_aperf) 517 - outp += sprintf(outp, "%8.0f", 518 - 1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float); 514 + if (has_aperf) { 515 + if (has_base_hz) 516 + outp += sprintf(outp, "%8.0f", base_hz / units * t->aperf / t->mperf); 517 + else 518 + outp += sprintf(outp, "%8.0f", 519 + 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); 520 + } 519 521 520 522 /* TSC_MHz */ 521 523 outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); ··· 1164 1158 static void 1165 1159 calculate_tsc_tweak() 1166 1160 { 1167 - unsigned long long msr; 1168 - unsigned int base_ratio; 1169 - 1170 - get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); 1171 - base_ratio = (msr >> 8) & 0xFF; 1172 - base_hz = base_ratio * bclk * 1000000; 1173 1161 tsc_tweak = base_hz / tsc_hz; 1174 1162 } 1175 1163 ··· 1440 1440 1441 1441 get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr); 1442 1442 fprintf(stderr, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr); 1443 - fprintf(stderr, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xEF); 1443 + fprintf(stderr, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0x7F); 1444 1444 fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1); 1445 1445 fprintf(stderr, ")\n"); 1446 1446 } ··· 1821 1821 int probe_nhm_msrs(unsigned int family, unsigned int model) 1822 1822 { 1823 1823 unsigned long long msr; 1824 + unsigned int base_ratio; 1824 1825 int *pkg_cstate_limits; 1825 1826 1826 1827 if (!genuine_intel) ··· 1829 1828 1830 1829 if (family 
!= 6) 1831 1830 return 0; 1831 + 1832 + bclk = discover_bclk(family, model); 1832 1833 1833 1834 switch (model) { 1834 1835 case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ ··· 1874 1871 return 0; 1875 1872 } 1876 1873 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 1877 - 1878 1874 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; 1879 1875 1876 + get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); 1877 + base_ratio = (msr >> 8) & 0xFF; 1878 + 1879 + base_hz = base_ratio * bclk * 1000000; 1880 + has_base_hz = 1; 1880 1881 return 1; 1881 1882 } 1882 1883 int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model) ··· 2787 2780 do_skl_residency = has_skl_msrs(family, model); 2788 2781 do_slm_cstates = is_slm(family, model); 2789 2782 do_knl_cstates = is_knl(family, model); 2790 - bclk = discover_bclk(family, model); 2791 2783 2792 2784 rapl_probe(family, model); 2793 2785 perf_limit_reasons_probe(family, model);