Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1 fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'sched-urgent-2025-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Ingo Molnar:
"Revert a scheduler performance optimization that regressed other
workloads"

* tag 'sched-urgent-2025-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
Revert "sched/core: Reduce cost of sched_move_task when config autogroup"

+3 -18 (3 additions, 18 deletions)
kernel/sched/core.c
Before (removed lines marked with `-`; `···` marks elided context):

···
9016		spin_unlock_irqrestore(&task_group_lock, flags);
9017	}
9018
9019	- static struct task_group *sched_get_task_group(struct task_struct *tsk)
9020	{
9021		struct task_group *tg;
···
9028		tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
9029				  struct task_group, css);
9030		tg = autogroup_task_group(tsk, tg);
9031	-
9032	-	return tg;
9033	- }
9034	-
9035	- static void sched_change_group(struct task_struct *tsk, struct task_group *group)
9036	- {
9037	-	tsk->sched_task_group = group;
9038
9039	#ifdef CONFIG_FAIR_GROUP_SCHED
9040		if (tsk->sched_class->task_change_group)
···
9049	{
9050		int queued, running, queue_flags =
9051			DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
9052	-	struct task_group *group;
9053		struct rq *rq;
9054
9055		CLASS(task_rq_lock, rq_guard)(tsk);
9056		rq = rq_guard.rq;
9057	-
9058	-	/*
9059	-	 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
9060	-	 * group changes.
9061	-	 */
9062	-	group = sched_get_task_group(tsk);
9063	-	if (group == tsk->sched_task_group)
9064	-		return;
9065
9066		update_rq_clock(rq);
9067
···
9064		if (running)
9065			put_prev_task(rq, tsk);
9066
9067	-	sched_change_group(tsk, group);
9068		if (!for_autogroup)
9069			scx_cgroup_move_task(tsk);
9070
After (added lines marked with `+`; `···` marks elided context):

···
9016		spin_unlock_irqrestore(&task_group_lock, flags);
9017	}
9018
9019	+ static void sched_change_group(struct task_struct *tsk)
9020	{
9021		struct task_group *tg;
···
9028		tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
9029				  struct task_group, css);
9030		tg = autogroup_task_group(tsk, tg);
9031	+	tsk->sched_task_group = tg;
9032
9033	#ifdef CONFIG_FAIR_GROUP_SCHED
9034		if (tsk->sched_class->task_change_group)
···
9055	{
9056		int queued, running, queue_flags =
9057			DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
9058		struct rq *rq;
9059
9060		CLASS(task_rq_lock, rq_guard)(tsk);
9061		rq = rq_guard.rq;
9062
9063		update_rq_clock(rq);
9064
···
9079		if (running)
9080			put_prev_task(rq, tsk);
9081
9082	+	sched_change_group(tsk);
9083		if (!for_autogroup)
9084			scx_cgroup_move_task(tsk);
9085