Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
sched: run_rebalance_domains: s/SCHED_IDLE/CPU_IDLE/
sched: fix sleeper bonus
sched: make global code static

+36 -38
-2
include/linux/cpu.h
··· 41 41 extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs); 42 42 extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs); 43 43 44 - extern struct sysdev_attribute attr_sched_mc_power_savings; 45 - extern struct sysdev_attribute attr_sched_smt_power_savings; 46 44 extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls); 47 45 48 46 #ifdef CONFIG_HOTPLUG_CPU
+30 -30
kernel/sched.c
··· 3106 3106 if (need_resched()) 3107 3107 break; 3108 3108 3109 - rebalance_domains(balance_cpu, SCHED_IDLE); 3109 + rebalance_domains(balance_cpu, CPU_IDLE); 3110 3110 3111 3111 rq = cpu_rq(balance_cpu); 3112 3112 if (time_after(this_rq->next_balance, rq->next_balance)) ··· 6328 6328 } 6329 6329 6330 6330 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 6331 - int arch_reinit_sched_domains(void) 6331 + static int arch_reinit_sched_domains(void) 6332 6332 { 6333 6333 int err; 6334 6334 ··· 6357 6357 return ret ? ret : count; 6358 6358 } 6359 6359 6360 + #ifdef CONFIG_SCHED_MC 6361 + static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) 6362 + { 6363 + return sprintf(page, "%u\n", sched_mc_power_savings); 6364 + } 6365 + static ssize_t sched_mc_power_savings_store(struct sys_device *dev, 6366 + const char *buf, size_t count) 6367 + { 6368 + return sched_power_savings_store(buf, count, 0); 6369 + } 6370 + static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, 6371 + sched_mc_power_savings_store); 6372 + #endif 6373 + 6374 + #ifdef CONFIG_SCHED_SMT 6375 + static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page) 6376 + { 6377 + return sprintf(page, "%u\n", sched_smt_power_savings); 6378 + } 6379 + static ssize_t sched_smt_power_savings_store(struct sys_device *dev, 6380 + const char *buf, size_t count) 6381 + { 6382 + return sched_power_savings_store(buf, count, 1); 6383 + } 6384 + static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, 6385 + sched_smt_power_savings_store); 6386 + #endif 6387 + 6360 6388 int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) 6361 6389 { 6362 6390 int err = 0; ··· 6401 6373 #endif 6402 6374 return err; 6403 6375 } 6404 - #endif 6405 - 6406 - #ifdef CONFIG_SCHED_MC 6407 - static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) 6408 - { 6409 - return sprintf(page, "%u\n", sched_mc_power_savings); 6410 - } 6411 - static ssize_t sched_mc_power_savings_store(struct sys_device *dev, 6412 - const char *buf, size_t count) 6413 - { 6414 - return sched_power_savings_store(buf, count, 0); 6415 - } 6416 - SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, 6417 - sched_mc_power_savings_store); 6418 - #endif 6419 - 6420 - #ifdef CONFIG_SCHED_SMT 6421 - static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page) 6422 - { 6423 - return sprintf(page, "%u\n", sched_smt_power_savings); 6424 - } 6425 - static ssize_t sched_smt_power_savings_store(struct sys_device *dev, 6426 - const char *buf, size_t count) 6427 - { 6428 - return sched_power_savings_store(buf, count, 1); 6429 - } 6430 - SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, 6431 - sched_smt_power_savings_store); 6432 6376 #endif 6433 6377 6434 6378 /*
+6 -6
kernel/sched_fair.c
··· 75 75 76 76 unsigned int sysctl_sched_features __read_mostly = 77 77 SCHED_FEAT_FAIR_SLEEPERS *1 | 78 - SCHED_FEAT_SLEEPER_AVG *1 | 78 + SCHED_FEAT_SLEEPER_AVG *0 | 79 79 SCHED_FEAT_SLEEPER_LOAD_AVG *1 | 80 80 SCHED_FEAT_PRECISE_CPU_LOAD *1 | 81 81 SCHED_FEAT_START_DEBIT *1 | ··· 304 304 delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw); 305 305 306 306 if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) { 307 - delta = calc_delta_mine(cfs_rq->sleeper_bonus, 308 - curr->load.weight, lw); 309 - if (unlikely(delta > cfs_rq->sleeper_bonus)) 310 - delta = cfs_rq->sleeper_bonus; 311 - 307 + delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec); 308 + delta = calc_delta_mine(delta, curr->load.weight, lw); 309 + delta = min((u64)delta, cfs_rq->sleeper_bonus); 312 310 cfs_rq->sleeper_bonus -= delta; 313 311 delta_mine -= delta; 314 312 } ··· 519 521 * Track the amount of bonus we've given to sleepers: 520 522 */ 521 523 cfs_rq->sleeper_bonus += delta_fair; 524 + if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit)) 525 + cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit; 522 526 523 527 schedstat_add(cfs_rq, wait_runtime, se->wait_runtime); 524 528 }