Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: Use rq->clock_task instead of rq->clock for correctly maintaining load averages
  sched: Fix/remove redundant cfs_rq checks
  sched: Fix sign under-flows in wake_affine

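A note on the first fix in the batch: the load-average bookkeeping now stamps cfs_rq->load_stamp with rq->clock_task rather than rq->clock. As I understand it, clock_task is the runqueue clock with time spent servicing interrupts (and, where applicable, paravirt steal time) factored out, so the decaying load history only accumulates over time the tasks could actually have run. Below is a rough standalone sketch of that relationship only, not the kernel's accounting code; the struct, the update_clock_task_sketch() helper and the numbers are all made up for illustration.

#include <stdio.h>

struct rq_sketch {
	unsigned long long clock;       /* raw per-runqueue clock, ns (made up) */
	unsigned long long irq_time;    /* time eaten by interrupt handling, ns (made up) */
	unsigned long long clock_task;  /* what the load stamps should be taken against */
};

/* Hypothetical helper: clock_task trails clock by the accumulated IRQ time. */
static void update_clock_task_sketch(struct rq_sketch *rq)
{
	rq->clock_task = rq->clock - rq->irq_time;
}

int main(void)
{
	struct rq_sketch rq = { .clock = 1000000ULL, .irq_time = 150000ULL };

	update_clock_task_sketch(&rq);
	/* A load_stamp delta taken against clock_task ignores the 150us of IRQ work. */
	printf("clock=%llu clock_task=%llu\n", rq.clock, rq.clock_task);
	return 0;
}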
+5 -8
kernel/sched_fair.c
···
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
 
-	if (!cfs_rq)
+	if (cfs_rq->tg == &root_task_group)
 		return;
 
-	now = rq_of(cfs_rq)->clock;
+	now = rq_of(cfs_rq)->clock_task;
 	delta = now - cfs_rq->load_stamp;
 
 	/* truncate load history at 4 idle periods */
···
 	struct task_group *tg;
 	struct sched_entity *se;
 	long shares;
-
-	if (!cfs_rq)
-		return;
 
 	tg = cfs_rq->tg;
 	se = tg->se[cpu_of(rq_of(cfs_rq))];
···
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
-	unsigned long this_load, load;
+	s64 this_load, load;
 	int idx, this_cpu, prev_cpu;
 	unsigned long tl_per_task;
 	struct task_group *tg;
···
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	if (this_load) {
-		unsigned long this_eff_load, prev_eff_load;
+	if (this_load > 0) {
+		s64 this_eff_load, prev_eff_load;
 
 		this_eff_load = 100;
 		this_eff_load *= power_of(prev_cpu);
···
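A note on the wake_affine() change: as far as I can tell from the diff, this_load can have a negative effective-load correction added to it for the waking task, and with unsigned long arithmetic the result wraps around to a huge positive value instead of going negative. That is the under-flow the third patch addresses by moving to s64 and testing this_load > 0 rather than this_load. A minimal standalone sketch of the wrap-around follows; it is not kernel code, the variable names merely echo the diff and the weights are invented.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned long this_load_ul = 512;   /* current load on this_cpu (made-up weight units) */
	int64_t this_load_s64 = 512;        /* same value, but signed 64-bit */
	unsigned long weight = 1024;        /* correction larger than the current load */

	this_load_ul -= weight;             /* wraps: roughly 2^64 - 512 */
	this_load_s64 -= weight;            /* goes negative: -512 */

	printf("unsigned: %lu\n", this_load_ul);
	printf("signed:   %lld\n", (long long)this_load_s64);

	/* The fix: treat "no load left" as <= 0 rather than merely == 0. */
	if (this_load_s64 > 0)
		printf("would still gate the wakeup on effective load\n");
	else
		printf("this_cpu treated as unloaded\n");

	return 0;
}

With the unsigned type, the old if (this_load) test still sees a non-zero (enormous) value, so the subsequent effective-load comparison is made against a wrapped number; the signed type lets the "load went negative" case be recognised and handled.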