Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'pnp', 'powercap', 'pm-runtime' and 'pm-opp'

* pnp:
MAINTAINERS: Remove Bjorn Helgaas as PNP maintainer
PNP / resources: remove positive test on unsigned values

* powercap:
powercap / RAPL: add new CPU IDs
powercap / RAPL: further relax energy counter checks

* pm-runtime:
PM / runtime: Update documentation to reflect the current code flow

* pm-opp:
PM / OPP: discard duplicate OPPs
PM / OPP: Make OPP invisible to users in Kconfig
PM / OPP: fix incorrect OPP count handling in of_init_opp_table

+51 -39
+10 -8
Documentation/power/runtime_pm.txt
··· 665 665 the runtime PM and system suspend/resume (and hibernation) callbacks by carrying 666 666 out the following operations: 667 667 668 - * During system suspend it calls pm_runtime_get_noresume() and 669 - pm_runtime_barrier() for every device right before executing the 670 - subsystem-level .suspend() callback for it. In addition to that it calls 671 - __pm_runtime_disable() with 'false' as the second argument for every device 672 - right before executing the subsystem-level .suspend_late() callback for it. 668 + * During system suspend pm_runtime_get_noresume() is called for every device 669 + right before executing the subsystem-level .prepare() callback for it and 670 + pm_runtime_barrier() is called for every device right before executing the 671 + subsystem-level .suspend() callback for it. In addition to that the PM core 672 + calls __pm_runtime_disable() with 'false' as the second argument for every 673 + device right before executing the subsystem-level .suspend_late() callback 674 + for it. 673 675 674 - * During system resume it calls pm_runtime_enable() and pm_runtime_put() 675 - for every device right after executing the subsystem-level .resume_early() 676 - callback and right after executing the subsystem-level .resume() callback 676 + * During system resume pm_runtime_enable() and pm_runtime_put() are called for 677 + every device right after executing the subsystem-level .resume_early() 678 + callback and right after executing the subsystem-level .complete() callback 677 679 for it, respectively. 678 680 679 681 7. Generic subsystem callbacks
-1
MAINTAINERS
··· 6938 6938 6939 6939 PNP SUPPORT 6940 6940 M: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 6941 - M: Bjorn Helgaas <bhelgaas@google.com> 6942 6941 S: Maintained 6943 6942 F: drivers/pnp/ 6944 6943
+26 -5
drivers/base/power/opp.c
··· 394 394 * to keep the integrity of the internal data structures. Callers should ensure 395 395 * that this function is *NOT* called under RCU protection or in contexts where 396 396 * mutex cannot be locked. 397 + * 398 + * Return: 399 + * 0: On success OR 400 + * Duplicate OPPs (both freq and volt are same) and opp->available 401 + * -EEXIST: Freq are same and volt are different OR 402 + * Duplicate OPPs (both freq and volt are same) and !opp->available 403 + * -ENOMEM: Memory allocation failure 397 404 */ 398 405 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) 399 406 { ··· 450 443 new_opp->u_volt = u_volt; 451 444 new_opp->available = true; 452 445 453 - /* Insert new OPP in order of increasing frequency */ 446 + /* 447 + * Insert new OPP in order of increasing frequency 448 + * and discard if already present 449 + */ 454 450 head = &dev_opp->opp_list; 455 451 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { 456 - if (new_opp->rate < opp->rate) 452 + if (new_opp->rate <= opp->rate) 457 453 break; 458 454 else 459 455 head = &opp->node; 456 + } 457 + 458 + /* Duplicate OPPs ? */ 459 + if (new_opp->rate == opp->rate) { 460 + int ret = opp->available && new_opp->u_volt == opp->u_volt ? 461 + 0 : -EEXIST; 462 + 463 + dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", 464 + __func__, opp->rate, opp->u_volt, opp->available, 465 + new_opp->rate, new_opp->u_volt, new_opp->available); 466 + mutex_unlock(&dev_opp_list_lock); 467 + kfree(new_opp); 468 + return ret; 460 469 } 461 470 462 471 list_add_rcu(&new_opp->node, head); ··· 757 734 unsigned long freq = be32_to_cpup(val++) * 1000; 758 735 unsigned long volt = be32_to_cpup(val++); 759 736 760 - if (dev_pm_opp_add(dev, freq, volt)) { 737 + if (dev_pm_opp_add(dev, freq, volt)) 761 738 dev_warn(dev, "%s: Failed to add OPP %ld\n", 762 739 __func__, freq); 763 - continue; 764 - } 765 740 nr -= 2; 766 741 } 767 742
+2 -2
drivers/pnp/resource.c
··· 360 360 return 1; 361 361 362 362 /* check if the resource is valid */ 363 - if (*irq < 0 || *irq > 15) 363 + if (*irq > 15) 364 364 return 0; 365 365 366 366 /* check if the resource is reserved */ ··· 424 424 return 1; 425 425 426 426 /* check if the resource is valid */ 427 - if (*dma < 0 || *dma == 4 || *dma > 7) 427 + if (*dma == 4 || *dma > 7) 428 428 return 0; 429 429 430 430 /* check if the resource is reserved */
+12 -21
drivers/powercap/intel_rapl.c
··· 951 951 { X86_VENDOR_INTEL, 6, 0x2d},/* Sandy Bridge EP */ 952 952 { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */ 953 953 { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */ 954 - { X86_VENDOR_INTEL, 6, 0x45},/* Haswell */ 954 + { X86_VENDOR_INTEL, 6, 0x3c},/* Haswell */ 955 + { X86_VENDOR_INTEL, 6, 0x3d},/* Broadwell */ 956 + { X86_VENDOR_INTEL, 6, 0x45},/* Haswell ULT */ 955 957 /* TODO: Add more CPU IDs after testing */ 956 958 {} 957 959 }; ··· 1126 1124 static int rapl_check_domain(int cpu, int domain) 1127 1125 { 1128 1126 unsigned msr; 1129 - u64 val1, val2 = 0; 1130 - int retry = 0; 1127 + u64 val = 0; 1131 1128 1132 1129 switch (domain) { 1133 1130 case RAPL_DOMAIN_PACKAGE: ··· 1145 1144 pr_err("invalid domain id %d\n", domain); 1146 1145 return -EINVAL; 1147 1146 } 1148 - if (rdmsrl_safe_on_cpu(cpu, msr, &val1)) 1147 + /* make sure domain counters are available and contains non-zero 1148 + * values, otherwise skip it. 1149 + */ 1150 + if (rdmsrl_safe_on_cpu(cpu, msr, &val) || !val) 1149 1151 return -ENODEV; 1150 1152 1151 - /* PP1/uncore/graphics domain may not be active at the time of 1152 - * driver loading. So skip further checks. 1153 - */ 1154 - if (domain == RAPL_DOMAIN_PP1) 1155 - return 0; 1156 - /* energy counters roll slowly on some domains */ 1157 - while (++retry < 10) { 1158 - usleep_range(10000, 15000); 1159 - rdmsrl_safe_on_cpu(cpu, msr, &val2); 1160 - if ((val1 & ENERGY_STATUS_MASK) != (val2 & ENERGY_STATUS_MASK)) 1161 - return 0; 1162 - } 1163 - /* if energy counter does not change, report as bad domain */ 1164 - pr_info("domain %s energy ctr %llu:%llu not working, skip\n", 1165 - rapl_domain_names[domain], val1, val2); 1166 - 1167 - return -ENODEV; 1153 + return 0; 1168 1154 } 1169 1155 1170 1156 /* Detect active and valid domains for the given CPU, caller must ··· 1168 1180 /* use physical package id to read counters */ 1169 1181 if (!rapl_check_domain(cpu, i)) 1170 1182 rp->domain_map |= 1 << i; 1183 + else 1184 + pr_warn("RAPL domain %s detection failed\n", 1185 + rapl_domain_names[i]); 1171 1186 } 1172 1187 rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX); 1173 1188 if (!rp->nr_domains) {
+1 -2
kernel/power/Kconfig
··· 257 257 bool 258 258 259 259 config PM_OPP 260 - bool "Operating Performance Point (OPP) Layer library" 261 - depends on ARCH_HAS_OPP 260 + bool 262 261 ---help--- 263 262 SOCs have a standard set of tuples consisting of frequency and 264 263 voltage pairs that the device will support per voltage domain. This