Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel:
sched: don't allow rt_runtime_us to be zero for groups having rt tasks
sched: rt-group: fixup schedulability constraints calculation
sched: fix the wrong time slice value for SCHED_FIFO tasks
sched: export task_nice
sched: balance RT task resched only on runqueue
sched: retain vruntime

4 files changed, 49 insertions(+), 11 deletions(-)
 include/linux/sched.h | 4 ++++

@@ -899,6 +899,10 @@
 			     int running);
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
 			     int oldprio, int running);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	void (*moved_group) (struct task_struct *p);
+#endif
 };
 
 struct load_weight {
 kernel/sched.c | 36 +++++++++++++++++++++++++++---------

@@ -4422,7 +4422,7 @@
 {
 	return TASK_NICE(p);
 }
-EXPORT_SYMBOL_GPL(task_nice);
+EXPORT_SYMBOL(task_nice);
 
 /**
  * idle_cpu - is a given cpu idle currently?
@@ -5100,7 +5100,7 @@
 	time_slice = 0;
 	if (p->policy == SCHED_RR) {
 		time_slice = DEF_TIMESLICE;
-	} else {
+	} else if (p->policy != SCHED_FIFO) {
 		struct sched_entity *se = &p->se;
 		unsigned long flags;
 		struct rq *rq;
@@ -7625,6 +7625,11 @@
 
 	set_task_rq(tsk, task_cpu(tsk));
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	if (tsk->sched_class->moved_group)
+		tsk->sched_class->moved_group(tsk);
+#endif
+
 	if (on_rq) {
 		if (unlikely(running))
 			tsk->sched_class->set_curr_task(rq);
@@ -7726,9 +7721,7 @@
 	if (runtime == RUNTIME_INF)
 		return 1ULL << 16;
 
-	runtime *= (1ULL << 16);
-	div64_64(runtime, period);
-	return runtime;
+	return div64_64(runtime << 16, period);
 }
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
@@ -7750,25 +7747,40 @@
 	return total + to_ratio(period, runtime) < global_ratio;
 }
 
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+	struct task_struct *g, *p;
+	do_each_thread(g, p) {
+		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+			return 1;
+	} while_each_thread(g, p);
+	return 0;
+}
+
 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 {
 	u64 rt_runtime, rt_period;
 	int err = 0;
 
-	rt_period = sysctl_sched_rt_period * NSEC_PER_USEC;
+	rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
 	if (rt_runtime_us == -1)
-		rt_runtime = rt_period;
+		rt_runtime = RUNTIME_INF;
 
 	mutex_lock(&rt_constraints_mutex);
+	read_lock(&tasklist_lock);
+	if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+		err = -EBUSY;
+		goto unlock;
+	}
 	if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
 		err = -EINVAL;
 		goto unlock;
 	}
-	if (rt_runtime_us == -1)
-		rt_runtime = RUNTIME_INF;
 	tg->rt_runtime = rt_runtime;
 unlock:
+	read_unlock(&tasklist_lock);
 	mutex_unlock(&rt_constraints_mutex);
 
 	return err;
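
Note: a minimal userspace sketch (not kernel code; plain 64-bit division stands in for div64_64(), and the example period/runtime values are made up) of the 16.16 fixed-point ratio the corrected to_ratio() computes above. Shifting the runtime left by 16 before dividing preserves the fractional part, and 0x10000 (65536) corresponds to consuming 100% of the period.

#include <stdio.h>
#include <stdint.h>

/* Illustration only: mirrors the fixed to_ratio() arithmetic, not the kernel function. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	/* Shift before dividing so the fractional part survives integer division. */
	return (runtime << 16) / period;
}

int main(void)
{
	uint64_t period  = 1000000000ULL;	/* 1s of RT period, in nanoseconds */
	uint64_t runtime =  950000000ULL;	/* 950ms of RT runtime per period */

	/* Prints 62259, roughly 0.95 * 65536: the group may use 95% of the period. */
	printf("ratio = %llu\n", (unsigned long long)to_ratio(period, runtime));
	return 0;
}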
 kernel/sched_fair.c | 14 ++++++++++++++

@@ -1353,6 +1353,16 @@
 	set_next_entity(cfs_rq_of(se), se);
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void moved_group_fair(struct task_struct *p)
+{
+	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+
+	update_curr(cfs_rq);
+	place_entity(cfs_rq, &p->se, 1);
+}
+#endif
+
 /*
  * All the scheduling class methods:
  */
@@ -1391,6 +1381,10 @@
 
 	.prio_changed		= prio_changed_fair,
 	.switched_to		= switched_to_fair,
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	.moved_group		= moved_group_fair,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
 kernel/sched_rt.c | 6 ++++--

@@ -1107,9 +1107,11 @@
 		pull_rt_task(rq);
 		/*
 		 * If there's a higher priority task waiting to run
-		 * then reschedule.
+		 * then reschedule. Note, the above pull_rt_task
+		 * can release the rq lock and p could migrate.
+		 * Only reschedule if p is still on the same runqueue.
 		 */
-		if (p->prio > rq->rt.highest_prio)
+		if (p->prio > rq->rt.highest_prio && rq->curr == p)
 			resched_task(p);
 #else
 		/* For UP simply resched on drop of prio */
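
Note: the comment added above describes a classic drop-the-lock race. Below is a generic userspace sketch of the same pattern (pthreads; struct queue, helper_may_drop_lock and act_on_item are made-up names, nothing here is scheduler code): state observed before calling a helper that may release the lock has to be re-validated afterwards, just as the RT path now re-checks rq->curr == p before resched_task(p).

#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
	void *current;			/* the item this queue currently owns */
};

/* May temporarily release q->lock, like pull_rt_task() can release rq->lock. */
static void helper_may_drop_lock(struct queue *q)
{
	pthread_mutex_unlock(&q->lock);
	/* ...another thread may move q->current to a different queue here... */
	pthread_mutex_lock(&q->lock);
}

/* Called with q->lock held and item == q->current on entry. */
static void act_on_item(struct queue *q, void *item)
{
	helper_may_drop_lock(q);

	/*
	 * The lock was dropped above, so "item" may no longer belong to this
	 * queue. Re-validate before acting on it.
	 */
	if (q->current == item)
		printf("item still on this queue, safe to act on it\n");
	else
		printf("item migrated, skip\n");
}

int main(void)
{
	int item;
	struct queue q;

	pthread_mutex_init(&q.lock, NULL);
	q.current = &item;

	pthread_mutex_lock(&q.lock);
	act_on_item(&q, &item);
	pthread_mutex_unlock(&q.lock);
	pthread_mutex_destroy(&q.lock);
	return 0;
}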