Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched: redundant reschedule when set_user_nice() boosts a prio of a task from the "expired" array

- Make TASK_PREEMPTS_CURR(task, rq) return "true" only if the task's prio
is higher than the current task's and the task is in the "active" array.
This ensures we don't make redundant resched_task() calls when the task
is in the "expired" array (as may happen now in set_user_nice(),
rt_mutex_setprio() and pull_task());

- generalise conditions for a call to resched_task() in set_user_nice(),
rt_mutex_setprio() and sched_setscheduler()

Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Cc: Con Kolivas <kernel@kolivas.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Dmitry Adamushko and committed by
Linus Torvalds
bd53f96c 4953198b

+16 -18
+16 -18
kernel/sched.c
··· 169 169 (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1)) 170 170 171 171 #define TASK_PREEMPTS_CURR(p, rq) \ 172 - ((p)->prio < (rq)->curr->prio) 172 + (((p)->prio < (rq)->curr->prio) && ((p)->array == (rq)->active)) 173 173 174 174 #define SCALE_PRIO(x, prio) \ 175 175 max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE) ··· 4076 4076 struct prio_array *array; 4077 4077 unsigned long flags; 4078 4078 struct rq *rq; 4079 - int oldprio; 4079 + int delta; 4080 4080 4081 4081 BUG_ON(prio < 0 || prio > MAX_PRIO); 4082 4082 4083 4083 rq = task_rq_lock(p, &flags); 4084 4084 4085 - oldprio = p->prio; 4085 + delta = prio - p->prio; 4086 4086 array = p->array; 4087 4087 if (array) 4088 4088 dequeue_task(p, array); ··· 4098 4098 enqueue_task(p, array); 4099 4099 /* 4100 4100 * Reschedule if we are currently running on this runqueue and 4101 - * our priority decreased, or if we are not currently running on 4102 - * this runqueue and our priority is higher than the current's 4101 + * our priority decreased, or if our priority became higher 4102 + * than the current's. 4103 4103 */ 4104 - if (task_running(rq, p)) { 4105 - if (p->prio > oldprio) 4106 - resched_task(rq->curr); 4107 - } else if (TASK_PREEMPTS_CURR(p, rq)) 4104 + if (TASK_PREEMPTS_CURR(p, rq) || 4105 + (delta > 0 && task_running(rq, p))) 4108 4106 resched_task(rq->curr); 4109 4107 } 4110 4108 task_rq_unlock(rq, &flags); ··· 4150 4152 enqueue_task(p, array); 4151 4153 inc_raw_weighted_load(rq, p); 4152 4154 /* 4153 - * If the task increased its priority or is running and 4154 - * lowered its priority, then reschedule its CPU: 4155 + * Reschedule if we are currently running on this runqueue and 4156 + * our priority decreased, or if our priority became higher 4157 + * than the current's. 
4155 4158 */ 4156 - if (delta < 0 || (delta > 0 && task_running(rq, p))) 4159 + if (TASK_PREEMPTS_CURR(p, rq) || 4160 + (delta > 0 && task_running(rq, p))) 4157 4161 resched_task(rq->curr); 4158 4162 } 4159 4163 out_unlock: ··· 4382 4382 __activate_task(p, rq); 4383 4383 /* 4384 4384 * Reschedule if we are currently running on this runqueue and 4385 - * our priority decreased, or if we are not currently running on 4386 - * this runqueue and our priority is higher than the current's 4385 + * our priority decreased, or our priority became higher 4386 + * than the current's. 4387 4387 */ 4388 - if (task_running(rq, p)) { 4389 - if (p->prio > oldprio) 4390 - resched_task(rq->curr); 4391 - } else if (TASK_PREEMPTS_CURR(p, rq)) 4388 + if (TASK_PREEMPTS_CURR(p, rq) || 4389 + (task_running(rq, p) && p->prio > oldprio)) 4392 4390 resched_task(rq->curr); 4393 4391 } 4394 4392 __task_rq_unlock(rq);