Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel:
  sched: don't allow rt_runtime_us to be zero for groups having rt tasks
  sched: rt-group: fixup schedulability constraints calculation
  sched: fix the wrong time slice value for SCHED_FIFO tasks
  sched: export task_nice
  sched: balance RT task resched only on runqueue
  sched: retain vruntime

4 files changed, 49 insertions(+), 11 deletions(-)
include/linux/sched.h  +4
···
                              int running);
         void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
                              int oldprio, int running);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+        void (*moved_group) (struct task_struct *p);
+#endif
 };

 struct load_weight {
kernel/sched.c  +27 -9
···
 {
         return TASK_NICE(p);
 }
-EXPORT_SYMBOL_GPL(task_nice);
+EXPORT_SYMBOL(task_nice);

 /**
  * idle_cpu - is a given cpu idle currently?
···
         time_slice = 0;
         if (p->policy == SCHED_RR) {
                 time_slice = DEF_TIMESLICE;
-        } else {
+        } else if (p->policy != SCHED_FIFO) {
                 struct sched_entity *se = &p->se;
                 unsigned long flags;
                 struct rq *rq;
···

         set_task_rq(tsk, task_cpu(tsk));

+#ifdef CONFIG_FAIR_GROUP_SCHED
+        if (tsk->sched_class->moved_group)
+                tsk->sched_class->moved_group(tsk);
+#endif
+
         if (on_rq) {
                 if (unlikely(running))
                         tsk->sched_class->set_curr_task(rq);
···
         if (runtime == RUNTIME_INF)
                 return 1ULL << 16;

-        runtime *= (1ULL << 16);
-        div64_64(runtime, period);
-        return runtime;
+        return div64_64(runtime << 16, period);
 }

 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
···
         return total + to_ratio(period, runtime) < global_ratio;
 }

+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+        struct task_struct *g, *p;
+        do_each_thread(g, p) {
+                if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+                        return 1;
+        } while_each_thread(g, p);
+        return 0;
+}
+
 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 {
         u64 rt_runtime, rt_period;
         int err = 0;

-        rt_period = sysctl_sched_rt_period * NSEC_PER_USEC;
+        rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
         rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
         if (rt_runtime_us == -1)
-                rt_runtime = rt_period;
+                rt_runtime = RUNTIME_INF;

         mutex_lock(&rt_constraints_mutex);
+        read_lock(&tasklist_lock);
+        if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+                err = -EBUSY;
+                goto unlock;
+        }
         if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
                 err = -EINVAL;
                 goto unlock;
         }
-        if (rt_runtime_us == -1)
-                rt_runtime = RUNTIME_INF;
         tg->rt_runtime = rt_runtime;
 unlock:
+        read_unlock(&tasklist_lock);
         mutex_unlock(&rt_constraints_mutex);

         return err;
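The to_ratio() hunk above fixes the schedulability calculation: as the removed lines show, the old sequence scaled runtime into 16.16 fixed point but ignored the quotient returned by div64_64() and handed back the scaled value instead, while the replacement returns the quotient directly. The standalone sketch below models only the arithmetic, using illustrative runtime/period values that are not taken from this merge:

#include <stdio.h>
#include <stdint.h>

/*
 * Standalone model of the fixed-point helper in the diff above: the
 * return value is a 16.16 fraction, so consuming the whole period
 * maps to 1 << 16 == 65536.
 */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        return (runtime << 16) / period;        /* stand-in for div64_64() */
}

int main(void)
{
        /* Illustrative values only: 950 ms of RT runtime per 1 s period. */
        uint64_t period  = 1000000000ULL;       /* ns */
        uint64_t runtime =  950000000ULL;       /* ns */

        /*
         * Prints 62259, i.e. roughly 95% of 65536.  __rt_schedulable()
         * sums ratios like this across groups and rejects a setting that
         * would reach the global ratio.
         */
        printf("ratio = %llu / 65536\n",
               (unsigned long long)to_ratio(period, runtime));
        return 0;
}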
kernel/sched_fair.c  +14
···
                 set_next_entity(cfs_rq_of(se), se);
 }

+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void moved_group_fair(struct task_struct *p)
+{
+        struct cfs_rq *cfs_rq = task_cfs_rq(p);
+
+        update_curr(cfs_rq);
+        place_entity(cfs_rq, &p->se, 1);
+}
+#endif
+
 /*
  * All the scheduling class methods:
  */
···

         .prio_changed = prio_changed_fair,
         .switched_to = switched_to_fair,
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+        .moved_group = moved_group_fair,
+#endif
 };

 #ifdef CONFIG_SCHED_DEBUG
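moved_group_fair() backs the "sched: retain vruntime" commit in the shortlog: a task's vruntime only has meaning relative to the cfs_rq it is queued on, so it must be re-normalized when the group-move path in kernel/sched.c (the hunk around set_task_rq() above) drops the task onto another group's queue. The kernel does this via update_curr() and place_entity(); the toy model below is not kernel code (all toy_* names are invented here) and only illustrates why carrying an absolute vruntime across queues would distort fairness:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* Toy model only: each group runqueue tracks its own min_vruntime,
 * and a task's vruntime is meaningful only relative to that queue. */
struct toy_cfs_rq { u64 min_vruntime; };
struct toy_se     { u64 vruntime; };

/*
 * Hypothetical helper (not the kernel's): keep the task's lag relative
 * to the queue it leaves, but express it against the destination queue.
 */
static void toy_moved_group(struct toy_se *se,
                            const struct toy_cfs_rq *from,
                            const struct toy_cfs_rq *to)
{
        u64 lag = se->vruntime - from->min_vruntime;

        se->vruntime = to->min_vruntime + lag;
}

int main(void)
{
        struct toy_cfs_rq a  = { .min_vruntime = 1000000 };
        struct toy_cfs_rq b  = { .min_vruntime = 50 };
        struct toy_se     se = { .vruntime = 1000200 };

        /*
         * Carried over unchanged, a vruntime of 1000200 on queue b would
         * look like an enormous backlog of consumed CPU time; rebased, the
         * task is simply 200 units past b's minimum (prints 250).
         */
        toy_moved_group(&se, &a, &b);
        printf("vruntime on new queue: %llu\n",
               (unsigned long long)se.vruntime);
        return 0;
}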
kernel/sched_rt.c  +4 -2
···
                         pull_rt_task(rq);
                 /*
                  * If there's a higher priority task waiting to run
-                 * then reschedule.
+                 * then reschedule. Note, the above pull_rt_task
+                 * can release the rq lock and p could migrate.
+                 * Only reschedule if p is still on the same runqueue.
                  */
-                if (p->prio > rq->rt.highest_prio)
+                if (p->prio > rq->rt.highest_prio && rq->curr == p)
                         resched_task(p);
 #else
                 /* For UP simply resched on drop of prio */