kernel/sched/core.c: +3, -2
···
 	 * affecting a valid clamp bucket, the next time it's enqueued,
 	 * it will already see the updated clamp bucket value.
 	 */
-	if (!p->uclamp[clamp_id].active) {
+	if (p->uclamp[clamp_id].active) {
 		uclamp_rq_dec_id(rq, p, clamp_id);
 		uclamp_rq_inc_id(rq, p, clamp_id);
 	}
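
The uclamp hunk above flips the guard around the clamp-bucket refresh in uclamp_update_active(): the uclamp_rq_dec_id()/uclamp_rq_inc_id() pair re-propagates a changed clamp value into the per-rq bucket, which only makes sense for a task whose clamp is currently accounted on the rq, i.e. when p->uclamp[clamp_id].active is set. The inverted test skipped exactly the enqueued tasks that needed the refresh and touched dequeued ones instead. A minimal userspace model of the intended behaviour follows; the struct layout and helper names are illustrative stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct task {
	bool active;	/* currently accounted on the runqueue */
	int  clamp;	/* requested clamp value */
};

static int rq_clamp;	/* stand-in for the rq-side clamp bucket value */

static void rq_inc(struct task *p) { rq_clamp = p->clamp; p->active = true;  }
static void rq_dec(struct task *p) { rq_clamp = 0;        p->active = false; }

/* Propagate a changed clamp value into the rq accounting. */
static void update_active(struct task *p, bool inverted_check)
{
	bool cond = inverted_check ? !p->active : p->active;

	if (cond) {
		rq_dec(p);
		rq_inc(p);
	}
}

int main(void)
{
	struct task p = { .active = false, .clamp = 50 };

	rq_inc(&p);			/* task gets enqueued: rq sees clamp 50 */

	p.clamp = 80;			/* clamp raised while the task stays enqueued */
	update_active(&p, true);	/* old, inverted test: active tasks are skipped */
	printf("inverted: rq_clamp = %d\n", rq_clamp);	/* still 50 */

	update_active(&p, false);	/* fixed test: the rq picks up the new value */
	printf("fixed:    rq_clamp = %d\n", rq_clamp);	/* 80 */
	return 0;
}

With the old test, a runnable task kept its stale clamp until the next dequeue/enqueue cycle, while a dequeued task was decremented and re-incremented in a bucket it is not accounted in.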
···
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	__sched_fork(0, idle);
+
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_lock(&rq->lock);
 
-	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 	idle->flags |= PF_IDLE;
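
The init_idle() hunk only changes ordering: __sched_fork(0, idle) now runs before idle->pi_lock and rq->lock are taken, so the generic task setup happens outside the IRQs-off, raw-spinlocked region; the likely motivation is that __sched_fork() initialises timers and similar state that is better not done while holding the runqueue lock. A small userspace sketch of the same pattern (all names here are made up for illustration):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct idle_stub {
	char *name;	/* set up outside the lock: may allocate */
	int   state;	/* published under the lock */
};

static pthread_spinlock_t rq_lock;

/* Plays the role of __sched_fork(): heavier setup, done lock-free. */
static void setup_idle(struct idle_stub *idle, int cpu)
{
	char buf[32];

	snprintf(buf, sizeof(buf), "swapper/%d", cpu);
	idle->name = strdup(buf);
}

/* Plays the role of the rest of init_idle(): only cheap stores under the lock. */
static void publish_idle(struct idle_stub *idle)
{
	pthread_spin_lock(&rq_lock);
	idle->state = 1;
	pthread_spin_unlock(&rq_lock);
}

int main(void)
{
	struct idle_stub idle = { 0 };

	pthread_spin_init(&rq_lock, PTHREAD_PROCESS_PRIVATE);
	setup_idle(&idle, 0);	/* before the lock, like __sched_fork() */
	publish_idle(&idle);	/* under the lock, like the rest of init_idle() */
	printf("%s: state=%d\n", idle.name, idle.state);
	free(idle.name);
	pthread_spin_destroy(&rq_lock);
	return 0;
}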

kernel/sched/fair.c: +20, -9
···
 	update_rq_clock(rq);
 
 	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
+	update_irq_load_avg(rq, 0);
+
+	/* Don't need periodic decay once load/util_avg are null */
+	if (others_have_blocked(rq))
+		done = false;
+
+	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
 	 */
···
 		if (cfs_rq_has_blocked(cfs_rq))
 			done = false;
 	}
-
-	curr_class = rq->curr->sched_class;
-	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
-	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
-	update_irq_load_avg(rq, 0);
-	/* Don't need periodic decay once load/util_avg are null */
-	if (others_have_blocked(rq))
-		done = false;
 
 	update_blocked_load_status(rq, !done);
 	rq_unlock_irqrestore(rq, &rf);
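
The fair.c change is purely an ordering fix, and the new comment states the reason: update_cfs_rq_load_avg() can end up in cpufreq_update_util(), so the RT, DL and IRQ PELT signals must already be fresh when that hook fires, otherwise the frequency request is computed from stale non-CFS utilisation. The block that used to run after the leaf cfs_rq walk now runs before it. A toy, self-contained illustration of the ordering concern; the struct, the hook and the numbers below are invented for the sketch:

#include <stdio.h>

struct rq_sig { int cfs, rt, dl, irq; };

/* Stand-in for cpufreq_update_util(): picks a "request" from the sum. */
static int freq_request;
static void cpufreq_hook(const struct rq_sig *s)
{
	freq_request = s->cfs + s->rt + s->dl + s->irq;
}

static void update_cfs(struct rq_sig *s, int v) { s->cfs = v; cpufreq_hook(s); }
static void update_rt (struct rq_sig *s, int v) { s->rt  = v; }

int main(void)
{
	struct rq_sig s = { 0 };

	/* Old ordering: CFS first, RT refreshed too late for the hook. */
	update_cfs(&s, 300);
	update_rt(&s, 200);
	printf("cfs first: request = %d\n", freq_request);	/* 300: RT missed */

	/* New ordering: RT (and DL/IRQ) first, then CFS fires the hook. */
	s = (struct rq_sig){ 0 };
	update_rt(&s, 200);
	update_cfs(&s, 300);
	printf("rt first:  request = %d\n", freq_request);	/* 500 */
	return 0;
}

With the old order the hook sums in whatever non-CFS values were left over from the previous update; with the new order it sees the refreshed ones.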
···
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 
+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
 	curr_class = rq->curr->sched_class;
 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
+
+	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+
 	update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
 	rq_unlock_irqrestore(rq, &rf);
 }
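
The last hunk mirrors the same reordering in the second copy of the update path, the one that updates a single cfs_rq: update_cfs_rq_load_avg() is moved below the RT, DL and IRQ updates, so both versions now feed cpufreq_update_util() the same, freshly updated view of the non-CFS signals.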