Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
sched: reorder SCHED_FEAT_ bits
sched: make sched_nr_latency static
sched: remove activate_idle_task()
sched: fix __set_task_cpu() SMP race
sched: fix SCHED_FIFO tasks & FAIR_GROUP_SCHED
sched: fix accounting of interrupts during guest execution on s390

Total: +28 -36

kernel/sched.c: +27 -35
···
 }
 
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_cfs_rq(struct task_struct *p)
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
 {
-	p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
-	p->se.parent = task_group(p)->se[task_cpu(p)];
+	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+	p->se.parent = task_group(p)->se[cpu];
 }
 
 #else
 
-static inline void set_task_cfs_rq(struct task_struct *p) { }
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
···
  */
 enum {
 	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
-	SCHED_FEAT_START_DEBIT		= 2,
-	SCHED_FEAT_TREE_AVG		= 4,
-	SCHED_FEAT_APPROX_AVG		= 8,
-	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
+	SCHED_FEAT_WAKEUP_PREEMPT	= 2,
+	SCHED_FEAT_START_DEBIT		= 4,
+	SCHED_FEAT_TREE_AVG		= 8,
+	SCHED_FEAT_APPROX_AVG		= 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
+		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
 		SCHED_FEAT_START_DEBIT		* 1 |
 		SCHED_FEAT_TREE_AVG		* 0 |
-		SCHED_FEAT_APPROX_AVG		* 0 |
-		SCHED_FEAT_WAKEUP_PREEMPT	* 1;
+		SCHED_FEAT_APPROX_AVG		* 0;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
···
 
 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
+	set_task_cfs_rq(p, cpu);
 #ifdef CONFIG_SMP
+	/*
+	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+	 * successfuly executed on another CPU. We must ensure that updates of
+	 * per-task data have been completed by this moment.
+	 */
+	smp_wmb();
 	task_thread_info(p)->cpu = cpu;
 #endif
-	set_task_cfs_rq(p);
 }
 
 #ifdef CONFIG_SMP
···
 	struct rq *rq = this_rq();
 	cputime64_t tmp;
 
-	if (p->flags & PF_VCPU) {
-		account_guest_time(p, cputime);
-		return;
-	}
+	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
+		return account_guest_time(p, cputime);
 
 	p->stime = cputime_add(p->stime, cputime);
 
···
 }
 
 /*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
-	update_rq_clock(rq);
-
-	if (p->state == TASK_UNINTERRUPTIBLE)
-		rq->nr_uninterruptible--;
-
-	enqueue_task(rq, p, 0);
-	inc_nr_running(p, rq);
-}
-
-/*
  * Schedules idle task to be the next runnable task on current CPU.
- * It does so by boosting its priority to highest possible and adding it to
- * the _front_ of the runqueue. Used by CPU offline code.
+ * It does so by boosting its priority to highest possible.
+ * Used by CPU offline code.
  */
 void sched_idle_next(void)
 {
···
 
 	__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
-	/* Add idle task to the _front_ of its priority queue: */
-	activate_idle_task(p, rq);
+	update_rq_clock(rq);
+	activate_task(rq, p, 0);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
···
 
 	rq = task_rq_lock(tsk, &flags);
 
-	if (tsk->sched_class != &fair_sched_class)
+	if (tsk->sched_class != &fair_sched_class) {
+		set_task_cfs_rq(tsk, task_cpu(tsk));
 		goto done;
+	}
 
 	update_rq_clock(rq);
 
···
 		tsk->sched_class->put_prev_task(rq, tsk);
 	}
 
-	set_task_cfs_rq(tsk);
+	set_task_cfs_rq(tsk, task_cpu(tsk));
 
 	if (on_rq) {
 		if (unlikely(running))
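Note on the __set_task_cpu() hunk above (the "__set_task_cpu() SMP race" fix in this merge): the per-task group pointers are updated first, a write barrier is issued, and only then is the new ->cpu value stored, so a reader on another CPU that observes the new cpu value also observes the completed updates. What follows is a minimal userspace sketch of that publish ordering, not kernel code: C11 release/acquire stands in for smp_wmb(), and the names (task_sketch, publish_task_cpu, read_after_cpu) are made up for illustration.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the per-task data touched by __set_task_cpu(). */
struct task_sketch {
	int group_data;			/* updated before the new cpu is published */
	_Atomic unsigned int cpu;	/* readers key off this field */
};

/* Writer: mirrors "update per-task data, barrier, then set ->cpu". */
static void publish_task_cpu(struct task_sketch *p, int group_data, unsigned int cpu)
{
	p->group_data = group_data;
	/* Release ordering plays the role of smp_wmb() in the hunk above. */
	atomic_store_explicit(&p->cpu, cpu, memory_order_release);
}

/* Reader: if the acquire load returns the new cpu, group_data is visible too. */
static int read_after_cpu(struct task_sketch *p, unsigned int *cpu_out)
{
	*cpu_out = atomic_load_explicit(&p->cpu, memory_order_acquire);
	return p->group_data;
}

int main(void)
{
	struct task_sketch t = { .group_data = 0, .cpu = 0 };
	unsigned int cpu;
	int data;

	publish_task_cpu(&t, 42, 3);
	data = read_after_cpu(&t, &cpu);
	printf("cpu=%u data=%d\n", cpu, data);
	return 0;
}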
kernel/sched_fair.c: +1 -1
···
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-unsigned int sched_nr_latency = 20;
+static unsigned int sched_nr_latency = 20;
 
 /*
  * After fork, child runs first. (default) If set to 0 then
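For the sched_fair.c hunk, the only change is giving sched_nr_latency internal linkage now that nothing outside this file references it. A tiny two-file sketch of what static at file scope does; the file and symbol names here (tunables.c, user.c, nr_latency, get_nr_latency) are hypothetical, not the kernel's:

/* tunables.c (hypothetical) */
static unsigned int nr_latency = 20;	/* internal linkage: invisible to other translation units */

unsigned int get_nr_latency(void)	/* external linkage: the only way other files read it */
{
	return nr_latency;
}

/* user.c (hypothetical) */
unsigned int get_nr_latency(void);

/*
 * "extern unsigned int nr_latency;" would compile here but fail to link,
 * because the only definition is static in tunables.c.
 */
int twice_nr_latency(void)
{
	return 2 * (int)get_nr_latency();
}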