Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge back schedutil governor updates for 4.12.

+54 -23
+1
include/linux/tick.h
··· 117 117 extern void tick_nohz_idle_exit(void); 118 118 extern void tick_nohz_irq_exit(void); 119 119 extern ktime_t tick_nohz_get_sleep_length(void); 120 + extern unsigned long tick_nohz_get_idle_calls(void); 120 121 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); 121 122 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); 122 123 #else /* !CONFIG_NO_HZ_COMMON */
+41 -23
kernel/sched/cpufreq_schedutil.c
··· 61 61 unsigned long util; 62 62 unsigned long max; 63 63 unsigned int flags; 64 + 65 + /* The field below is for single-CPU policies only. */ 66 + #ifdef CONFIG_NO_HZ_COMMON 67 + unsigned long saved_idle_calls; 68 + #endif 64 69 }; 65 70 66 71 static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu); ··· 98 93 { 99 94 struct cpufreq_policy *policy = sg_policy->policy; 100 95 96 + if (sg_policy->next_freq == next_freq) 97 + return; 98 + 99 + sg_policy->next_freq = next_freq; 101 100 sg_policy->last_freq_update_time = time; 102 101 103 102 if (policy->fast_switch_enabled) { 104 - if (sg_policy->next_freq == next_freq) { 105 - trace_cpu_frequency(policy->cur, smp_processor_id()); 106 - return; 107 - } 108 - sg_policy->next_freq = next_freq; 109 103 next_freq = cpufreq_driver_fast_switch(policy, next_freq); 110 104 if (next_freq == CPUFREQ_ENTRY_INVALID) 111 105 return; 112 106 113 107 policy->cur = next_freq; 114 108 trace_cpu_frequency(next_freq, smp_processor_id()); 115 - } else if (sg_policy->next_freq != next_freq) { 116 - sg_policy->next_freq = next_freq; 109 + } else { 117 110 sg_policy->work_in_progress = true; 118 111 irq_work_queue(&sg_policy->irq_work); 119 112 } ··· 195 192 sg_cpu->iowait_boost >>= 1; 196 193 } 197 194 195 + #ifdef CONFIG_NO_HZ_COMMON 196 + static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) 197 + { 198 + unsigned long idle_calls = tick_nohz_get_idle_calls(); 199 + bool ret = idle_calls == sg_cpu->saved_idle_calls; 200 + 201 + sg_cpu->saved_idle_calls = idle_calls; 202 + return ret; 203 + } 204 + #else 205 + static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } 206 + #endif /* CONFIG_NO_HZ_COMMON */ 207 + 198 208 static void sugov_update_single(struct update_util_data *hook, u64 time, 199 209 unsigned int flags) 200 210 { ··· 216 200 struct cpufreq_policy *policy = sg_policy->policy; 217 201 unsigned long util, max; 218 202 unsigned int next_f; 203 + bool busy; 219 204 220 205 sugov_set_iowait_boost(sg_cpu, 
time, flags); 221 206 sg_cpu->last_update = time; ··· 224 207 if (!sugov_should_update_freq(sg_policy, time)) 225 208 return; 226 209 210 + busy = sugov_cpu_is_busy(sg_cpu); 211 + 227 212 if (flags & SCHED_CPUFREQ_RT_DL) { 228 213 next_f = policy->cpuinfo.max_freq; 229 214 } else { 230 215 sugov_get_util(&util, &max); 231 216 sugov_iowait_boost(sg_cpu, &util, &max); 232 217 next_f = get_next_freq(sg_policy, util, max); 218 + /* 219 + * Do not reduce the frequency if the CPU has not been idle 220 + * recently, as the reduction is likely to be premature then. 221 + */ 222 + if (busy && next_f < sg_policy->next_freq) 223 + next_f = sg_policy->next_freq; 233 224 } 234 225 sugov_update_commit(sg_policy, time, next_f); 235 226 } 236 227 237 - static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, 238 - unsigned long util, unsigned long max, 239 - unsigned int flags) 228 + static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu) 240 229 { 241 230 struct sugov_policy *sg_policy = sg_cpu->sg_policy; 242 231 struct cpufreq_policy *policy = sg_policy->policy; 243 - unsigned int max_f = policy->cpuinfo.max_freq; 244 232 u64 last_freq_update_time = sg_policy->last_freq_update_time; 233 + unsigned long util = 0, max = 1; 245 234 unsigned int j; 246 235 247 - if (flags & SCHED_CPUFREQ_RT_DL) 248 - return max_f; 249 - 250 - sugov_iowait_boost(sg_cpu, &util, &max); 251 - 252 236 for_each_cpu(j, policy->cpus) { 253 - struct sugov_cpu *j_sg_cpu; 237 + struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j); 254 238 unsigned long j_util, j_max; 255 239 s64 delta_ns; 256 240 257 - if (j == smp_processor_id()) 258 - continue; 259 - 260 - j_sg_cpu = &per_cpu(sugov_cpu, j); 261 241 /* 262 242 * If the CPU utilization was last updated before the previous 263 243 * frequency update and the time elapsed between the last update ··· 268 254 continue; 269 255 } 270 256 if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL) 271 - return max_f; 257 + return policy->cpuinfo.max_freq; 272 
258 273 259 j_util = j_sg_cpu->util; 274 260 j_max = j_sg_cpu->max; ··· 303 289 sg_cpu->last_update = time; 304 290 305 291 if (sugov_should_update_freq(sg_policy, time)) { 306 - next_f = sugov_next_freq_shared(sg_cpu, util, max, flags); 292 + if (flags & SCHED_CPUFREQ_RT_DL) 293 + next_f = sg_policy->policy->cpuinfo.max_freq; 294 + else 295 + next_f = sugov_next_freq_shared(sg_cpu); 296 + 307 297 sugov_update_commit(sg_policy, time, next_f); 308 298 } 309 299
+12
kernel/time/tick-sched.c
··· 993 993 return ts->sleep_length; 994 994 } 995 995 996 + /** 997 + * tick_nohz_get_idle_calls - return the current idle calls counter value 998 + * 999 + * Called from the schedutil frequency scaling governor in scheduler context. 1000 + */ 1001 + unsigned long tick_nohz_get_idle_calls(void) 1002 + { 1003 + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); 1004 + 1005 + return ts->idle_calls; 1006 + } 1007 + 996 1008 static void tick_nohz_account_idle_ticks(struct tick_sched *ts) 997 1009 { 998 1010 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE