Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] powernow-k8: Get transition latency from ACPI _PSS table
[CPUFREQ] Make ignore_nice_load setting of ondemand work as expected.

+47 -28
+22 -6
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
··· 939 939 free_cpumask_var(data->acpi_data.shared_cpu_map); 940 940 } 941 941 942 + static int get_transition_latency(struct powernow_k8_data *data) 943 + { 944 + int max_latency = 0; 945 + int i; 946 + for (i = 0; i < data->acpi_data.state_count; i++) { 947 + int cur_latency = data->acpi_data.states[i].transition_latency 948 + + data->acpi_data.states[i].bus_master_latency; 949 + if (cur_latency > max_latency) 950 + max_latency = cur_latency; 951 + } 952 + /* value in usecs, needs to be in nanoseconds */ 953 + return 1000 * max_latency; 954 + } 955 + 942 956 #else 943 957 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } 944 958 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } 945 959 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } 960 + static int get_transition_latency(struct powernow_k8_data *data) { return 0; } 946 961 #endif /* CONFIG_X86_POWERNOW_K8_ACPI */ 947 962 948 963 /* Take a frequency, and issue the fid/vid transition command */ ··· 1188 1173 if (rc) { 1189 1174 goto err_out; 1190 1175 } 1191 - } 1176 + /* Take a crude guess here. 1177 + * That guess was in microseconds, so multiply with 1000 */ 1178 + pol->cpuinfo.transition_latency = ( 1179 + ((data->rvo + 8) * data->vstable * VST_UNITS_20US) + 1180 + ((1 << data->irt) * 30)) * 1000; 1181 + } else /* ACPI _PSS objects available */ 1182 + pol->cpuinfo.transition_latency = get_transition_latency(data); 1192 1183 1193 1184 /* only run on specific CPU from here on */ 1194 1185 oldmask = current->cpus_allowed; ··· 1224 1203 else 1225 1204 cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu)); 1226 1205 data->available_cores = pol->cpus; 1227 - 1228 - /* Take a crude guess here. 
1229 - * That guess was in microseconds, so multiply with 1000 */ 1230 - pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US) 1231 - + (3 * (1 << data->irt) * 10)) * 1000; 1232 1206 1233 1207 if (cpu_family == CPU_HW_PSTATE) 1234 1208 pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
+25 -22
drivers/cpufreq/cpufreq_ondemand.c
··· 117 117 busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); 118 118 busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); 119 119 busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); 120 - 121 - if (!dbs_tuners_ins.ignore_nice) { 122 - busy_time = cputime64_add(busy_time, 123 - kstat_cpu(cpu).cpustat.nice); 124 - } 120 + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); 125 121 126 122 idle_time = cputime64_sub(cur_wall_time, busy_time); 127 123 if (wall) ··· 133 137 if (idle_time == -1ULL) 134 138 return get_cpu_idle_time_jiffy(cpu, wall); 135 139 136 - if (dbs_tuners_ins.ignore_nice) { 137 - cputime64_t cur_nice; 138 - unsigned long cur_nice_jiffies; 139 - struct cpu_dbs_info_s *dbs_info; 140 - 141 - dbs_info = &per_cpu(cpu_dbs_info, cpu); 142 - cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice, 143 - dbs_info->prev_cpu_nice); 144 - /* 145 - * Assumption: nice time between sampling periods will be 146 - * less than 2^32 jiffies for 32 bit sys 147 - */ 148 - cur_nice_jiffies = (unsigned long) 149 - cputime64_to_jiffies64(cur_nice); 150 - dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice; 151 - return idle_time + jiffies_to_usecs(cur_nice_jiffies); 152 - } 153 140 return idle_time; 154 141 } 155 142 ··· 298 319 dbs_info = &per_cpu(cpu_dbs_info, j); 299 320 dbs_info->prev_cpu_idle = get_cpu_idle_time(j, 300 321 &dbs_info->prev_cpu_wall); 322 + if (dbs_tuners_ins.ignore_nice) 323 + dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; 324 + 301 325 } 302 326 mutex_unlock(&dbs_mutex); 303 327 ··· 400 418 idle_time = (unsigned int) cputime64_sub(cur_idle_time, 401 419 j_dbs_info->prev_cpu_idle); 402 420 j_dbs_info->prev_cpu_idle = cur_idle_time; 421 + 422 + if (dbs_tuners_ins.ignore_nice) { 423 + cputime64_t cur_nice; 424 + unsigned long cur_nice_jiffies; 425 + 426 + cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, 427 + j_dbs_info->prev_cpu_nice); 428 + /* 429 + * Assumption: nice time between sampling periods will
430 + * be less than 2^32 jiffies for 32 bit sys 431 + */ 432 + cur_nice_jiffies = (unsigned long) 433 + cputime64_to_jiffies64(cur_nice); 434 + 435 + j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; 436 + idle_time += jiffies_to_usecs(cur_nice_jiffies); 437 + } 403 438 404 439 if (unlikely(!wall_time || wall_time < idle_time)) 405 440 continue; ··· 574 575 575 576 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, 576 577 &j_dbs_info->prev_cpu_wall); 578 + if (dbs_tuners_ins.ignore_nice) { 579 + j_dbs_info->prev_cpu_nice = 580 + kstat_cpu(j).cpustat.nice; 581 + } 577 582 } 578 583 this_dbs_info->cpu = cpu; 579 584 /*