sched/dlserver: Fix dlserver double enqueue

dlserver can get dequeued during a dlserver pick_task due to the delayed
dequeue feature and this can lead to issues with dlserver logic as it
still thinks that dlserver is on the runqueue. The dlserver throttling
and replenish logic gets confused and can lead to double enqueue of
dlserver.

Double enqueue of dlserver could happen due to a couple of reasons:

Case 1
------

Delayed dequeue feature[1] can cause dlserver being stopped during a
pick initiated by dlserver:
__pick_next_task
pick_task_dl -> server_pick_task
pick_task_fair
pick_next_entity (if (sched_delayed))
dequeue_entities
dl_server_stop

server_pick_task goes ahead with update_curr_dl_se without knowing that
dlserver is dequeued and this confuses the logic and may lead to
unintended enqueue while the server is stopped.

Case 2
------
A race condition between a task dequeue on one cpu and same task's enqueue
on this cpu by a remote cpu while the lock is released causing dlserver
double enqueue.

One cpu would be in the schedule() and releasing RQ-lock:

current->state = TASK_INTERRUPTIBLE();
schedule();
deactivate_task()
dl_stop_server();
pick_next_task()
pick_next_task_fair()
sched_balance_newidle()
rq_unlock(this_rq)

at which point another CPU can take our RQ-lock and do:

try_to_wake_up()
ttwu_queue()
rq_lock()
...
activate_task()
dl_server_start() --> first enqueue
wakeup_preempt() := check_preempt_wakeup_fair()
update_curr()
update_curr_task()
if (current->dl_server)
dl_server_update()
enqueue_dl_entity() --> second enqueue

This bug was not apparent as the enqueue in dl_server_start doesn't
usually happen because of the defer logic. But as a side effect of the
first case (dequeue during dlserver pick), dl_throttled and dl_yield will
be set and this causes the time accounting of dlserver to mess up,
leading to an enqueue in dl_server_start.

Have an explicit flag representing the status of dlserver to avoid the
confusion. This is set in dl_server_start and reset in dl_server_stop.

Fixes: 63ba8422f876 ("sched/deadline: Introduce deadline servers")
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: "Vineeth Pillai (Google)" <vineeth@bitbyteword.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Marcel Ziswiler <marcel.ziswiler@codethink.co.uk> # ROCK 5B
Link: https://lkml.kernel.org/r/20241213032244.877029-1-vineeth@bitbyteword.org

authored by Vineeth Pillai (Google) and committed by Peter Zijlstra b53127db 76f2f783

+18 -2
+7
include/linux/sched.h
··· 656 * @dl_defer_armed tells if the deferrable server is waiting 657 * for the replenishment timer to activate it. 658 * 659 * @dl_defer_running tells if the deferrable server is actually 660 * running, skipping the defer phase. 661 */ ··· 670 unsigned int dl_non_contending : 1; 671 unsigned int dl_overrun : 1; 672 unsigned int dl_server : 1; 673 unsigned int dl_defer : 1; 674 unsigned int dl_defer_armed : 1; 675 unsigned int dl_defer_running : 1;
··· 656 * @dl_defer_armed tells if the deferrable server is waiting 657 * for the replenishment timer to activate it. 658 * 659 + * @dl_server_active tells if the dlserver is active(started). 660 + * dlserver is started on first cfs enqueue on an idle runqueue 661 + * and is stopped when a dequeue results in 0 cfs tasks on the 662 + * runqueue. In other words, dlserver is active only when cpu's 663 + * runqueue has atleast one cfs task. 664 + * 665 * @dl_defer_running tells if the deferrable server is actually 666 * running, skipping the defer phase. 667 */ ··· 664 unsigned int dl_non_contending : 1; 665 unsigned int dl_overrun : 1; 666 unsigned int dl_server : 1; 667 + unsigned int dl_server_active : 1; 668 unsigned int dl_defer : 1; 669 unsigned int dl_defer_armed : 1; 670 unsigned int dl_defer_running : 1;
+6 -2
kernel/sched/deadline.c
··· 1647 if (!dl_se->dl_runtime) 1648 return; 1649 1650 enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP); 1651 if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl)) 1652 resched_curr(dl_se->rq); ··· 1662 hrtimer_try_to_cancel(&dl_se->dl_timer); 1663 dl_se->dl_defer_armed = 0; 1664 dl_se->dl_throttled = 0; 1665 } 1666 1667 void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, ··· 2423 if (dl_server(dl_se)) { 2424 p = dl_se->server_pick_task(dl_se); 2425 if (!p) { 2426 - dl_se->dl_yielded = 1; 2427 - update_curr_dl_se(rq, dl_se, 0); 2428 goto again; 2429 } 2430 rq->dl_server = dl_se;
··· 1647 if (!dl_se->dl_runtime) 1648 return; 1649 1650 + dl_se->dl_server_active = 1; 1651 enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP); 1652 if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl)) 1653 resched_curr(dl_se->rq); ··· 1661 hrtimer_try_to_cancel(&dl_se->dl_timer); 1662 dl_se->dl_defer_armed = 0; 1663 dl_se->dl_throttled = 0; 1664 + dl_se->dl_server_active = 0; 1665 } 1666 1667 void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, ··· 2421 if (dl_server(dl_se)) { 2422 p = dl_se->server_pick_task(dl_se); 2423 if (!p) { 2424 + if (dl_server_active(dl_se)) { 2425 + dl_se->dl_yielded = 1; 2426 + update_curr_dl_se(rq, dl_se, 0); 2427 + } 2428 goto again; 2429 } 2430 rq->dl_server = dl_se;
+5
kernel/sched/sched.h
··· 398 extern int dl_server_apply_params(struct sched_dl_entity *dl_se, 399 u64 runtime, u64 period, bool init); 400 401 #ifdef CONFIG_CGROUP_SCHED 402 403 extern struct list_head task_groups;
··· 398 extern int dl_server_apply_params(struct sched_dl_entity *dl_se, 399 u64 runtime, u64 period, bool init); 400 401 + static inline bool dl_server_active(struct sched_dl_entity *dl_se) 402 + { 403 + return dl_se->dl_server_active; 404 + } 405 + 406 #ifdef CONFIG_CGROUP_SCHED 407 408 extern struct list_head task_groups;