[CPUFREQ] Make ondemand sampling per CPU and remove the mutex usage in sampling path.

Make ondemand sampling per CPU and remove the mutex usage in the sampling path: each CPU gets its own self-rearming work item on the new "kondemand" workqueue, so do_dbs_timer() no longer needs to take dbs_mutex or lock_cpu_hotplug() while sampling.

Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>

authored by Venkatesh Pallipadi and committed by Dave Jones 2f8a835c 7a6bc1cd

+32 -40
drivers/cpufreq/cpufreq_ondemand.c
···
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_wall;
 	struct cpufreq_policy *cur_policy;
+	struct work_struct work;
 	unsigned int enable;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
···
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);

-static struct workqueue_struct *dbs_workq;
+static struct workqueue_struct *kondemand_wq;

 struct dbs_tuners {
 	unsigned int sampling_rate;
···
 /************************** sysfs end ************************/

-static void dbs_check_cpu(int cpu)
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
 	unsigned int idle_ticks, total_ticks;
 	unsigned int load;
-	struct cpu_dbs_info_s *this_dbs_info;
 	cputime64_t cur_jiffies;

 	struct cpufreq_policy *policy;
 	unsigned int j;

-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	if (!this_dbs_info->enable)
 		return;
···
 static void do_dbs_timer(void *data)
 {
-	int i;
-	lock_cpu_hotplug();
-	mutex_lock(&dbs_mutex);
-	for_each_online_cpu(i)
-		dbs_check_cpu(i);
-	queue_delayed_work(dbs_workq, &dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	mutex_unlock(&dbs_mutex);
-	unlock_cpu_hotplug();
+	unsigned int cpu = smp_processor_id();
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+	dbs_check_cpu(dbs_info);
+	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 }

-static inline void dbs_timer_init(void)
+static inline void dbs_timer_init(unsigned int cpu)
 {
-	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
-	if (!dbs_workq)
-		dbs_workq = create_singlethread_workqueue("ondemand");
-	if (!dbs_workq) {
-		printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
-		return;
-	}
-	queue_delayed_work(dbs_workq, &dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+	INIT_WORK(&dbs_info->work, do_dbs_timer, 0);
+	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
 }

-static inline void dbs_timer_exit(void)
+static inline void dbs_timer_exit(unsigned int cpu)
 {
-	if (dbs_workq)
-		cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+	cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
 }

 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
···
 			break;

 		mutex_lock(&dbs_mutex);
+		dbs_enable++;
+		if (dbs_enable == 1) {
+			kondemand_wq = create_workqueue("kondemand");
+			if (!kondemand_wq) {
+				printk(KERN_ERR "Creation of kondemand failed\n");
+				dbs_enable--;
+				mutex_unlock(&dbs_mutex);
+				return -ENOSPC;
+			}
+		}
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
···
 		}
 		this_dbs_info->enable = 1;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
-		dbs_enable++;
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
···
 				def_sampling_rate = MIN_STAT_SAMPLING_RATE;

 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
-			dbs_timer_init();
 		}
+		dbs_timer_init(policy->cpu);

 		mutex_unlock(&dbs_mutex);
 		break;

 	case CPUFREQ_GOV_STOP:
 		mutex_lock(&dbs_mutex);
+		dbs_timer_exit(policy->cpu);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
-		/*
-		 * Stop the timerschedule work, when this governor
-		 * is used for first time
-		 */
 		if (dbs_enable == 0)
-			dbs_timer_exit();
+			destroy_workqueue(kondemand_wq);

 		mutex_unlock(&dbs_mutex);
···
 static void __exit cpufreq_gov_dbs_exit(void)
 {
-	/* Make sure that the scheduled work is indeed not running.
-	   Assumes the timer has been cancelled first. */
-	if (dbs_workq) {
-		flush_workqueue(dbs_workq);
-		destroy_workqueue(dbs_workq);
-	}
-
 	cpufreq_unregister_governor(&cpufreq_gov_dbs);
 }
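
For readers not familiar with the pre-2.6.20 workqueue API used above, here is a minimal, self-contained sketch of the per-CPU self-rearming work pattern this commit adopts. It is not the ondemand code itself; the names (sample_info, sample_fn, "ksample", SAMPLE_PERIOD_US) are illustrative, but every call it makes (DEFINE_PER_CPU, three-argument INIT_WORK, queue_delayed_work_on, create_workqueue, cancel_rearming_delayed_workqueue) also appears in the diff. Each CPU owns one work item that requeues itself on the same CPU, so the sampling path takes no global mutex; create_workqueue() (unlike create_singlethread_workqueue()) gives the queue one worker thread per CPU.

#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/errno.h>

#define SAMPLE_PERIOD_US 100000		/* illustrative 100 ms sampling period */

struct sample_info {
	struct work_struct work;	/* per-CPU, self-rearming work item */
	unsigned int enable;
};
static DEFINE_PER_CPU(struct sample_info, sample_info);
static struct workqueue_struct *sample_wq;

/* Runs on the CPU it was queued on; no global lock in this path. */
static void sample_fn(void *data)
{
	unsigned int cpu = smp_processor_id();
	struct sample_info *info = &per_cpu(sample_info, cpu);

	if (!info->enable)
		return;
	/* ... sample this CPU's idle/wall time and act on the load ... */
	queue_delayed_work_on(cpu, sample_wq, &info->work,
			      usecs_to_jiffies(SAMPLE_PERIOD_US));
}

static int sample_wq_create(void)
{
	/* One worker thread per CPU, unlike create_singlethread_workqueue(). */
	sample_wq = create_workqueue("ksample");
	return sample_wq ? 0 : -ENOMEM;
}

static void sample_start(unsigned int cpu)
{
	struct sample_info *info = &per_cpu(sample_info, cpu);

	info->enable = 1;
	INIT_WORK(&info->work, sample_fn, NULL);	/* pre-2.6.20: (work, fn, data) */
	queue_delayed_work_on(cpu, sample_wq, &info->work,
			      usecs_to_jiffies(SAMPLE_PERIOD_US));
}

static void sample_stop(unsigned int cpu)
{
	struct sample_info *info = &per_cpu(sample_info, cpu);

	info->enable = 0;
	/* Blocks until the work item stops rearming itself. */
	cancel_rearming_delayed_workqueue(sample_wq, &info->work);
}

static void sample_wq_destroy(void)
{
	destroy_workqueue(sample_wq);
}

This mirrors the commit's teardown ordering: GOV_STOP calls dbs_timer_exit(policy->cpu) to cancel the per-CPU rearming work before the last user destroys kondemand_wq, which is why the old flush_workqueue()/destroy_workqueue() in cpufreq_gov_dbs_exit() could be dropped.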