Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[CPUFREQ] ondemand,conservative governor idle_tick clean-up

[PATCH] [3/5] ondemand,conservative governor idle_tick clean-up

Ondemand and conservative governor clean-up: it factors out the idle-ticks
measurement into common per-CPU loop logic.

Signed-off-by: Eric Piel <eric.piel@tremplin-utc.net>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>

+10 -42
+5 -21
drivers/cpufreq/cpufreq_conservative.c
··· 297 297 static void dbs_check_cpu(int cpu) 298 298 { 299 299 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; 300 - unsigned int total_idle_ticks; 301 300 unsigned int freq_step; 302 301 unsigned int freq_down_sampling_rate; 303 302 static int down_skip[NR_CPUS]; ··· 337 338 */ 338 339 339 340 /* Check for frequency increase */ 340 - total_idle_ticks = get_cpu_idle_time(cpu); 341 - idle_ticks = total_idle_ticks - 342 - this_dbs_info->prev_cpu_idle_up; 343 - this_dbs_info->prev_cpu_idle_up = total_idle_ticks; 344 - 345 341 342 + idle_ticks = UINT_MAX; 346 343 for_each_cpu_mask(j, policy->cpus) { 347 - unsigned int tmp_idle_ticks; 344 + unsigned int tmp_idle_ticks, total_idle_ticks; 348 345 struct cpu_dbs_info_s *j_dbs_info; 349 - 350 - if (j == cpu) 351 - continue; 352 346 353 347 j_dbs_info = &per_cpu(cpu_dbs_info, j); 354 348 /* Check for frequency increase */ ··· 392 400 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) 393 401 return; 394 402 395 - total_idle_ticks = this_dbs_info->prev_cpu_idle_up; 396 - idle_ticks = total_idle_ticks - 397 - this_dbs_info->prev_cpu_idle_down; 398 - this_dbs_info->prev_cpu_idle_down = total_idle_ticks; 399 - 403 + idle_ticks = UINT_MAX; 400 404 for_each_cpu_mask(j, policy->cpus) { 401 - unsigned int tmp_idle_ticks; 405 + unsigned int tmp_idle_ticks, total_idle_ticks; 402 406 struct cpu_dbs_info_s *j_dbs_info; 403 407 404 - if (j == cpu) 405 - continue; 406 - 407 408 j_dbs_info = &per_cpu(cpu_dbs_info, j); 408 - /* Check for frequency increase */ 409 409 total_idle_ticks = j_dbs_info->prev_cpu_idle_up; 410 410 tmp_idle_ticks = total_idle_ticks - 411 411 j_dbs_info->prev_cpu_idle_down; ··· 416 432 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * 417 433 usecs_to_jiffies(freq_down_sampling_rate); 418 434 419 - if (idle_ticks > down_idle_ticks ) { 435 + if (idle_ticks > down_idle_ticks) { 420 436 /* if we are already at the lowest speed then break out early 421 437 * or if we 'cannot' reduce the speed as 
the user might want 422 438 * freq_step to be zero */
+5 -21
drivers/cpufreq/cpufreq_ondemand.c
··· 296 296 static void dbs_check_cpu(int cpu) 297 297 { 298 298 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; 299 - unsigned int total_idle_ticks; 300 299 unsigned int freq_down_step; 301 300 unsigned int freq_down_sampling_rate; 302 301 static int down_skip[NR_CPUS]; ··· 324 325 */ 325 326 326 327 /* Check for frequency increase */ 327 - total_idle_ticks = get_cpu_idle_time(cpu); 328 - idle_ticks = total_idle_ticks - 329 - this_dbs_info->prev_cpu_idle_up; 330 - this_dbs_info->prev_cpu_idle_up = total_idle_ticks; 331 - 328 + idle_ticks = UINT_MAX; 332 329 for_each_cpu_mask(j, policy->cpus) { 333 - unsigned int tmp_idle_ticks; 330 + unsigned int tmp_idle_ticks, total_idle_ticks; 334 331 struct cpu_dbs_info_s *j_dbs_info; 335 332 336 - if (j == cpu) 337 - continue; 338 - 339 333 j_dbs_info = &per_cpu(cpu_dbs_info, j); 340 - /* Check for frequency increase */ 341 334 total_idle_ticks = get_cpu_idle_time(j); 342 335 tmp_idle_ticks = total_idle_ticks - 343 336 j_dbs_info->prev_cpu_idle_up; ··· 367 376 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) 368 377 return; 369 378 370 - total_idle_ticks = this_dbs_info->prev_cpu_idle_up; 371 - idle_ticks = total_idle_ticks - 372 - this_dbs_info->prev_cpu_idle_down; 373 - this_dbs_info->prev_cpu_idle_down = total_idle_ticks; 374 - 379 + idle_ticks = UINT_MAX; 375 380 for_each_cpu_mask(j, policy->cpus) { 376 - unsigned int tmp_idle_ticks; 381 + unsigned int tmp_idle_ticks, total_idle_ticks; 377 382 struct cpu_dbs_info_s *j_dbs_info; 378 - 379 - if (j == cpu) 380 - continue; 381 383 382 384 j_dbs_info = &per_cpu(cpu_dbs_info, j); 383 385 /* Check for frequency decrease */ ··· 392 408 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * 393 409 usecs_to_jiffies(freq_down_sampling_rate); 394 410 395 - if (idle_ticks > down_idle_ticks ) { 411 + if (idle_ticks > down_idle_ticks) { 396 412 /* if we are already at the lowest speed then break out early 397 413 * or if we 'cannot' reduce the speed as the user 
might want 398 414 * freq_step to be zero */