kernel/sched/core.c  (+3, -18)
···
 	spin_unlock_irqrestore(&task_group_lock, flags);
 }
 
-static struct task_group *sched_get_task_group(struct task_struct *tsk)
+static void sched_change_group(struct task_struct *tsk)
 {
 	struct task_group *tg;
 
···
 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
-
-	return tg;
-}
-
-static void sched_change_group(struct task_struct *tsk, struct task_group *group)
-{
-	tsk->sched_task_group = group;
+	tsk->sched_task_group = tg;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_change_group)
···
 {
 	int queued, running, queue_flags =
 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
-	struct task_group *group;
 	struct rq *rq;
 
 	CLASS(task_rq_lock, rq_guard)(tsk);
 	rq = rq_guard.rq;
-
-	/*
-	 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
-	 * group changes.
-	 */
-	group = sched_get_task_group(tsk);
-	if (group == tsk->sched_task_group)
-		return;
 
 	update_rq_clock(rq);
 
···
 	if (running)
 		put_prev_task(rq, tsk);
 
-	sched_change_group(tsk, group);
+	sched_change_group(tsk);
 	if (!for_autogroup)
 		scx_cgroup_move_task(tsk);
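
Read together, the hunks fold the old sched_get_task_group() helper into sched_change_group(): the task-group lookup now happens inside sched_change_group(), sched_move_task() no longer computes a local group pointer, and the "superfluous group changes" early return is dropped from sched_move_task() by this diff. As a reading aid, here is a rough reconstruction of how the rewritten function looks, assembled only from the visible hunk lines; the collapsed "···" regions are not reproduced, so treat this as a sketch rather than verbatim kernel source:

	/* Sketch assembled from the hunks above; not verbatim kernel source. */
	static void sched_change_group(struct task_struct *tsk)
	{
		struct task_group *tg;

		/* comment block collapsed ("···") in the diff view */
		tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
				  struct task_group, css);
		tg = autogroup_task_group(tsk, tg);
		tsk->sched_task_group = tg;

		/* the CONFIG_FAIR_GROUP_SCHED tail of the function is unchanged */
	}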