Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel:
  sched: add arch_update_cpu_topology hook.
  sched: add exported arch_reinit_sched_domains() to header file.
  sched: remove double unlikely from schedule()
  sched: cleanup old and rarely used 'debug' features.

 include/linux/sched.h    |  1 +
 include/linux/topology.h |  2 ++
 kernel/sched.c           | 17 +++++++++--------
 kernel/sched_fair.c      | 14 --------------
 4 files changed, 12 insertions(+), 22 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -790,6 +790,7 @@
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern int arch_reinit_sched_domains(void);
 
 #endif /* CONFIG_SMP */
 
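The only change here is visibility: arch_reinit_sched_domains() already existed in kernel/sched.c and is merely exported through <linux/sched.h> (its definition stays under CONFIG_SCHED_MC/CONFIG_SCHED_SMT, see the kernel/sched.c hunk further down). A minimal sketch of how architecture code could now call it after a topology change; the file and function names are hypothetical and not part of this merge:

    /* arch/foo/kernel/topology.c -- hypothetical caller, illustrative only */
    #include <linux/kernel.h>
    #include <linux/sched.h>	/* now declares arch_reinit_sched_domains() */

    static void foo_topology_changed(void)
    {
    	/*
    	 * Rebuild the scheduler domains; arch_init_sched_domains() now
    	 * calls arch_update_cpu_topology() first, so the rebuild picks
    	 * up the fresh topology.
    	 */
    	if (arch_reinit_sched_domains())
    		pr_debug("sched domain rebuild failed\n");
    }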
diff --git a/include/linux/topology.h b/include/linux/topology.h
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -50,6 +50,8 @@
 	for_each_online_node(node) \
 		if (nr_cpus_node(node))
 
+void arch_update_cpu_topology(void);
+
 /* Conform to ACPI 2.0 SLIT distance definitions */
 #define LOCAL_DISTANCE 10
 #define REMOTE_DISTANCE 20
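arch_update_cpu_topology() gets a weak no-op default in kernel/sched.c (next hunk), so only architectures whose CPU topology can change at runtime need to override it. A rough sketch of such an override; foo_read_topology_from_firmware() is purely illustrative and not part of this merge:

    /* arch/foo/kernel/topology.c -- hypothetical strong definition that
     * replaces the __attribute__((weak)) no-op in kernel/sched.c */
    #include <linux/topology.h>

    void arch_update_cpu_topology(void)
    {
    	/* Refresh whatever per-cpu core/package maps the architecture
    	 * keeps; hypothetical helper, shown only for illustration. */
    	foo_read_topology_from_firmware();
    }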
diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -594,18 +594,14 @@
 	SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
 	SCHED_FEAT_WAKEUP_PREEMPT = 2,
 	SCHED_FEAT_START_DEBIT = 4,
-	SCHED_FEAT_TREE_AVG = 8,
-	SCHED_FEAT_APPROX_AVG = 16,
-	SCHED_FEAT_HRTICK = 32,
-	SCHED_FEAT_DOUBLE_TICK = 64,
+	SCHED_FEAT_HRTICK = 8,
+	SCHED_FEAT_DOUBLE_TICK = 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
 		SCHED_FEAT_WAKEUP_PREEMPT * 1 |
 		SCHED_FEAT_START_DEBIT * 1 |
-		SCHED_FEAT_TREE_AVG * 0 |
-		SCHED_FEAT_APPROX_AVG * 0 |
 		SCHED_FEAT_HRTICK * 1 |
 		SCHED_FEAT_DOUBLE_TICK * 0;
 
@@ -3882,7 +3886,7 @@
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				unlikely(signal_pending(prev)))) {
+				signal_pending(prev))) {
 			prev->state = TASK_RUNNING;
 		} else {
 			deactivate_task(rq, prev, 1);
@@ -6807,6 +6811,10 @@
  */
 static cpumask_t fallback_doms;
 
+void __attribute__((weak)) arch_update_cpu_topology(void)
+{
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
@@ -6820,6 +6820,7 @@
 {
 	int err;
 
+	arch_update_cpu_topology();
 	ndoms_cur = 1;
 	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms_cur)
@@ -6925,7 +6924,7 @@
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-static int arch_reinit_sched_domains(void)
+int arch_reinit_sched_domains(void)
 {
 	int err;
 
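On the feature-flag cleanup: the surviving flags are renumbered so the enum stays a dense set of power-of-two bits, and sysctl_sched_features is simply their OR. With the defaults above that is 1 | 2 | 4 | 8 = 0xf, with DOUBLE_TICK (16) left clear. Individual features are tested with a macro of this shape (reproduced from memory rather than from this diff, so treat it as illustrative):

    #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
    /* e.g. sched_feat(HRTICK) is nonzero with the default mask above,
     * while sched_feat(DOUBLE_TICK) evaluates to zero. */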
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -302,11 +302,6 @@
 	return vslice;
 }
 
-static u64 sched_vslice(struct cfs_rq *cfs_rq)
-{
-	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
-}
-
 static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
@@ -498,15 +503,6 @@
 			__pick_next_entity(cfs_rq)->vruntime);
 	} else
 		vruntime = cfs_rq->min_vruntime;
-
-	if (sched_feat(TREE_AVG)) {
-		struct sched_entity *last = __pick_last_entity(cfs_rq);
-		if (last) {
-			vruntime += last->vruntime;
-			vruntime >>= 1;
-		}
-	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-		vruntime += sched_vslice(cfs_rq)/2;
 
 	/*
 	 * The 'current' period is already promised to the current tasks,