Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[CPUFREQ] ondemand,conservative governor idle_tick clean-up

[PATCH] [3/5] ondemand,conservative governor idle_tick clean-up

Clean-up of the ondemand and conservative governors: it factors out the
idle-ticks measurement.

Signed-off-by: Eric Piel <eric.piel@tremplin-utc.net>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>

+10 -42
+5 -21
drivers/cpufreq/cpufreq_conservative.c
··· 297 static void dbs_check_cpu(int cpu) 298 { 299 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; 300 - unsigned int total_idle_ticks; 301 unsigned int freq_step; 302 unsigned int freq_down_sampling_rate; 303 static int down_skip[NR_CPUS]; ··· 337 */ 338 339 /* Check for frequency increase */ 340 - total_idle_ticks = get_cpu_idle_time(cpu); 341 - idle_ticks = total_idle_ticks - 342 - this_dbs_info->prev_cpu_idle_up; 343 - this_dbs_info->prev_cpu_idle_up = total_idle_ticks; 344 - 345 346 for_each_cpu_mask(j, policy->cpus) { 347 - unsigned int tmp_idle_ticks; 348 struct cpu_dbs_info_s *j_dbs_info; 349 - 350 - if (j == cpu) 351 - continue; 352 353 j_dbs_info = &per_cpu(cpu_dbs_info, j); 354 /* Check for frequency increase */ ··· 392 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) 393 return; 394 395 - total_idle_ticks = this_dbs_info->prev_cpu_idle_up; 396 - idle_ticks = total_idle_ticks - 397 - this_dbs_info->prev_cpu_idle_down; 398 - this_dbs_info->prev_cpu_idle_down = total_idle_ticks; 399 - 400 for_each_cpu_mask(j, policy->cpus) { 401 - unsigned int tmp_idle_ticks; 402 struct cpu_dbs_info_s *j_dbs_info; 403 404 - if (j == cpu) 405 - continue; 406 - 407 j_dbs_info = &per_cpu(cpu_dbs_info, j); 408 - /* Check for frequency increase */ 409 total_idle_ticks = j_dbs_info->prev_cpu_idle_up; 410 tmp_idle_ticks = total_idle_ticks - 411 j_dbs_info->prev_cpu_idle_down; ··· 416 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * 417 usecs_to_jiffies(freq_down_sampling_rate); 418 419 - if (idle_ticks > down_idle_ticks ) { 420 /* if we are already at the lowest speed then break out early 421 * or if we 'cannot' reduce the speed as the user might want 422 * freq_step to be zero */
··· 297 static void dbs_check_cpu(int cpu) 298 { 299 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; 300 unsigned int freq_step; 301 unsigned int freq_down_sampling_rate; 302 static int down_skip[NR_CPUS]; ··· 338 */ 339 340 /* Check for frequency increase */ 341 342 + idle_ticks = UINT_MAX; 343 for_each_cpu_mask(j, policy->cpus) { 344 + unsigned int tmp_idle_ticks, total_idle_ticks; 345 struct cpu_dbs_info_s *j_dbs_info; 346 347 j_dbs_info = &per_cpu(cpu_dbs_info, j); 348 /* Check for frequency increase */ ··· 400 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) 401 return; 402 403 + idle_ticks = UINT_MAX; 404 for_each_cpu_mask(j, policy->cpus) { 405 + unsigned int tmp_idle_ticks, total_idle_ticks; 406 struct cpu_dbs_info_s *j_dbs_info; 407 408 j_dbs_info = &per_cpu(cpu_dbs_info, j); 409 total_idle_ticks = j_dbs_info->prev_cpu_idle_up; 410 tmp_idle_ticks = total_idle_ticks - 411 j_dbs_info->prev_cpu_idle_down; ··· 432 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * 433 usecs_to_jiffies(freq_down_sampling_rate); 434 435 + if (idle_ticks > down_idle_ticks) { 436 /* if we are already at the lowest speed then break out early 437 * or if we 'cannot' reduce the speed as the user might want 438 * freq_step to be zero */
+5 -21
drivers/cpufreq/cpufreq_ondemand.c
··· 296 static void dbs_check_cpu(int cpu) 297 { 298 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; 299 - unsigned int total_idle_ticks; 300 unsigned int freq_down_step; 301 unsigned int freq_down_sampling_rate; 302 static int down_skip[NR_CPUS]; ··· 324 */ 325 326 /* Check for frequency increase */ 327 - total_idle_ticks = get_cpu_idle_time(cpu); 328 - idle_ticks = total_idle_ticks - 329 - this_dbs_info->prev_cpu_idle_up; 330 - this_dbs_info->prev_cpu_idle_up = total_idle_ticks; 331 - 332 for_each_cpu_mask(j, policy->cpus) { 333 - unsigned int tmp_idle_ticks; 334 struct cpu_dbs_info_s *j_dbs_info; 335 336 - if (j == cpu) 337 - continue; 338 - 339 j_dbs_info = &per_cpu(cpu_dbs_info, j); 340 - /* Check for frequency increase */ 341 total_idle_ticks = get_cpu_idle_time(j); 342 tmp_idle_ticks = total_idle_ticks - 343 j_dbs_info->prev_cpu_idle_up; ··· 367 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) 368 return; 369 370 - total_idle_ticks = this_dbs_info->prev_cpu_idle_up; 371 - idle_ticks = total_idle_ticks - 372 - this_dbs_info->prev_cpu_idle_down; 373 - this_dbs_info->prev_cpu_idle_down = total_idle_ticks; 374 - 375 for_each_cpu_mask(j, policy->cpus) { 376 - unsigned int tmp_idle_ticks; 377 struct cpu_dbs_info_s *j_dbs_info; 378 - 379 - if (j == cpu) 380 - continue; 381 382 j_dbs_info = &per_cpu(cpu_dbs_info, j); 383 /* Check for frequency decrease */ ··· 392 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * 393 usecs_to_jiffies(freq_down_sampling_rate); 394 395 - if (idle_ticks > down_idle_ticks ) { 396 /* if we are already at the lowest speed then break out early 397 * or if we 'cannot' reduce the speed as the user might want 398 * freq_step to be zero */
··· 296 static void dbs_check_cpu(int cpu) 297 { 298 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; 299 unsigned int freq_down_step; 300 unsigned int freq_down_sampling_rate; 301 static int down_skip[NR_CPUS]; ··· 325 */ 326 327 /* Check for frequency increase */ 328 + idle_ticks = UINT_MAX; 329 for_each_cpu_mask(j, policy->cpus) { 330 + unsigned int tmp_idle_ticks, total_idle_ticks; 331 struct cpu_dbs_info_s *j_dbs_info; 332 333 j_dbs_info = &per_cpu(cpu_dbs_info, j); 334 total_idle_ticks = get_cpu_idle_time(j); 335 tmp_idle_ticks = total_idle_ticks - 336 j_dbs_info->prev_cpu_idle_up; ··· 376 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) 377 return; 378 379 + idle_ticks = UINT_MAX; 380 for_each_cpu_mask(j, policy->cpus) { 381 + unsigned int tmp_idle_ticks, total_idle_ticks; 382 struct cpu_dbs_info_s *j_dbs_info; 383 384 j_dbs_info = &per_cpu(cpu_dbs_info, j); 385 /* Check for frequency decrease */ ··· 408 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * 409 usecs_to_jiffies(freq_down_sampling_rate); 410 411 + if (idle_ticks > down_idle_ticks) { 412 /* if we are already at the lowest speed then break out early 413 * or if we 'cannot' reduce the speed as the user might want 414 * freq_step to be zero */