Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'pm-cpufreq'

* pm-cpufreq:
intel_pstate: Add Haswell CPU models
Revert "cpufreq: make sure frequency transitions are serialized"
cpufreq: Use signed type for 'ret' variable, to store negative error values
cpufreq: Remove temporary fix for race between CPU hotplug and sysfs-writes
cpufreq: Synchronize the cpufreq store_*() routines with CPU hotplug
cpufreq: Invoke __cpufreq_remove_dev_finish() after releasing cpu_hotplug.lock
cpufreq: Split __cpufreq_remove_dev() into two parts
cpufreq: Fix wrong time unit conversion
cpufreq: serialize calls to __cpufreq_governor()
cpufreq: don't allow governor limits to be changed when it is disabled

+76 -35
+70 -33
drivers/cpufreq/cpufreq.c
··· 280 280 switch (state) { 281 281 282 282 case CPUFREQ_PRECHANGE: 283 - if (WARN(policy->transition_ongoing == 284 - cpumask_weight(policy->cpus), 285 - "In middle of another frequency transition\n")) 286 - return; 287 - 288 - policy->transition_ongoing++; 289 - 290 283 /* detect if the driver reported a value as "old frequency" 291 284 * which is not equal to what the cpufreq core thinks is 292 285 * "old frequency". ··· 299 306 break; 300 307 301 308 case CPUFREQ_POSTCHANGE: 302 - if (WARN(!policy->transition_ongoing, 303 - "No frequency transition in progress\n")) 304 - return; 305 - 306 - policy->transition_ongoing--; 307 - 308 309 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 309 310 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, 310 311 (unsigned long)freqs->cpu); ··· 424 437 static ssize_t store_##file_name \ 425 438 (struct cpufreq_policy *policy, const char *buf, size_t count) \ 426 439 { \ 427 - unsigned int ret; \ 440 + int ret; \ 428 441 struct cpufreq_policy new_policy; \ 429 442 \ 430 443 ret = cpufreq_get_policy(&new_policy, policy->cpu); \ ··· 477 490 static ssize_t store_scaling_governor(struct cpufreq_policy *policy, 478 491 const char *buf, size_t count) 479 492 { 480 - unsigned int ret; 493 + int ret; 481 494 char str_governor[16]; 482 495 struct cpufreq_policy new_policy; 483 496 ··· 681 694 struct freq_attr *fattr = to_attr(attr); 682 695 ssize_t ret = -EINVAL; 683 696 697 + get_online_cpus(); 698 + 699 + if (!cpu_online(policy->cpu)) 700 + goto unlock; 701 + 684 702 if (!down_read_trylock(&cpufreq_rwsem)) 685 - goto exit; 703 + goto unlock; 686 704 687 705 if (lock_policy_rwsem_write(policy->cpu) < 0) 688 706 goto up_read; ··· 701 709 702 710 up_read: 703 711 up_read(&cpufreq_rwsem); 704 - exit: 712 + unlock: 713 + put_online_cpus(); 714 + 705 715 return ret; 706 716 } 707 717 ··· 1135 1141 return cpu_dev->id; 1136 1142 } 1137 1143 1138 - /** 1139 - * __cpufreq_remove_dev - remove a CPU device 1140 - * 1141 - * Removes the 
cpufreq interface for a CPU device. 1142 - * Caller should already have policy_rwsem in write mode for this CPU. 1143 - * This routine frees the rwsem before returning. 1144 - */ 1145 - static int __cpufreq_remove_dev(struct device *dev, 1146 - struct subsys_interface *sif, bool frozen) 1144 + static int __cpufreq_remove_dev_prepare(struct device *dev, 1145 + struct subsys_interface *sif, 1146 + bool frozen) 1147 1147 { 1148 1148 unsigned int cpu = dev->id, cpus; 1149 1149 int new_cpu, ret; 1150 1150 unsigned long flags; 1151 1151 struct cpufreq_policy *policy; 1152 - struct kobject *kobj; 1153 - struct completion *cmp; 1154 1152 1155 1153 pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 1156 1154 ··· 1198 1212 } 1199 1213 } 1200 1214 } 1215 + 1216 + return 0; 1217 + } 1218 + 1219 + static int __cpufreq_remove_dev_finish(struct device *dev, 1220 + struct subsys_interface *sif, 1221 + bool frozen) 1222 + { 1223 + unsigned int cpu = dev->id, cpus; 1224 + int ret; 1225 + unsigned long flags; 1226 + struct cpufreq_policy *policy; 1227 + struct kobject *kobj; 1228 + struct completion *cmp; 1229 + 1230 + read_lock_irqsave(&cpufreq_driver_lock, flags); 1231 + policy = per_cpu(cpufreq_cpu_data, cpu); 1232 + read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1233 + 1234 + if (!policy) { 1235 + pr_debug("%s: No cpu_data found\n", __func__); 1236 + return -EINVAL; 1237 + } 1238 + 1239 + lock_policy_rwsem_read(cpu); 1240 + cpus = cpumask_weight(policy->cpus); 1241 + unlock_policy_rwsem_read(cpu); 1201 1242 1202 1243 /* If cpu is last user of policy, free policy */ 1203 1244 if (cpus == 1) { ··· 1283 1270 1284 1271 per_cpu(cpufreq_cpu_data, cpu) = NULL; 1285 1272 return 0; 1273 + } 1274 + 1275 + /** 1276 + * __cpufreq_remove_dev - remove a CPU device 1277 + * 1278 + * Removes the cpufreq interface for a CPU device. 1279 + * Caller should already have policy_rwsem in write mode for this CPU. 1280 + * This routine frees the rwsem before returning. 
1281 + */ 1282 + static inline int __cpufreq_remove_dev(struct device *dev, 1283 + struct subsys_interface *sif, 1284 + bool frozen) 1285 + { 1286 + int ret; 1287 + 1288 + ret = __cpufreq_remove_dev_prepare(dev, sif, frozen); 1289 + 1290 + if (!ret) 1291 + ret = __cpufreq_remove_dev_finish(dev, sif, frozen); 1292 + 1293 + return ret; 1286 1294 } 1287 1295 1288 1296 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) ··· 1644 1610 1645 1611 if (cpufreq_disabled()) 1646 1612 return -ENODEV; 1647 - if (policy->transition_ongoing) 1648 - return -EBUSY; 1649 1613 1650 1614 /* Make sure that target_freq is within supported range */ 1651 1615 if (target_freq > policy->max) ··· 1724 1692 policy->cpu, event); 1725 1693 1726 1694 mutex_lock(&cpufreq_governor_lock); 1727 - if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) || 1728 - (policy->governor_enabled && (event == CPUFREQ_GOV_START))) { 1695 + if ((policy->governor_enabled && event == CPUFREQ_GOV_START) 1696 + || (!policy->governor_enabled 1697 + && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) { 1729 1698 mutex_unlock(&cpufreq_governor_lock); 1730 1699 return -EBUSY; 1731 1700 } ··· 2027 1994 break; 2028 1995 2029 1996 case CPU_DOWN_PREPARE: 2030 - __cpufreq_remove_dev(dev, NULL, frozen); 1997 + __cpufreq_remove_dev_prepare(dev, NULL, frozen); 1998 + break; 1999 + 2000 + case CPU_POST_DEAD: 2001 + __cpufreq_remove_dev_finish(dev, NULL, frozen); 2031 2002 break; 2032 2003 2033 2004 case CPU_DOWN_FAILED:
+1 -1
drivers/cpufreq/cpufreq_stats.c
··· 74 74 for (i = 0; i < stat->state_num; i++) { 75 75 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i], 76 76 (unsigned long long) 77 - cputime64_to_clock_t(stat->time_in_state[i])); 77 + jiffies_64_to_clock_t(stat->time_in_state[i])); 78 78 } 79 79 return len; 80 80 }
+5
drivers/cpufreq/intel_pstate.c
··· 522 522 ICPU(0x2a, default_policy), 523 523 ICPU(0x2d, default_policy), 524 524 ICPU(0x3a, default_policy), 525 + ICPU(0x3c, default_policy), 526 + ICPU(0x3e, default_policy), 527 + ICPU(0x3f, default_policy), 528 + ICPU(0x45, default_policy), 529 + ICPU(0x46, default_policy), 525 530 {} 526 531 }; 527 532 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
-1
include/linux/cpufreq.h
··· 85 85 struct list_head policy_list; 86 86 struct kobject kobj; 87 87 struct completion kobj_unregister; 88 - int transition_ongoing; /* Tracks transition status */ 89 88 }; 90 89 91 90 /* Only for ACPI */