Merge tag 'sched_urgent_for_v5.13_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Borislav Petkov:
"A single fix to restore fairness between control groups with equal
priority"

* tag 'sched_urgent_for_v5.13_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/fair: Correctly insert cfs_rq's to list on unthrottle
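
Why this matters: a cfs_rq can come out of throttling with all of its tasks blocked, so cfs_rq->nr_running is 0 even though the group still carries undecayed PELT load. The old test then left the cfs_rq off the leaf cfs_rq list, the stale load was never decayed, and control groups of equal weight slowly drifted apart. Below is a minimal userspace sketch of the old versus new re-insertion test; the struct is a toy stand-in (its field and helper names only mirror those in the diff), not the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the fields consulted by cfs_rq_is_decayed() in the diff. */
struct cfs_rq {
	unsigned long load_weight;   /* models cfs_rq->load.weight */
	unsigned long load_sum;      /* models cfs_rq->avg.load_sum */
	unsigned long util_sum;      /* models cfs_rq->avg.util_sum */
	unsigned long runnable_sum;  /* models cfs_rq->avg.runnable_sum */
	unsigned int nr_running;
};

static bool cfs_rq_is_decayed(const struct cfs_rq *cfs_rq)
{
	return !cfs_rq->load_weight && !cfs_rq->load_sum &&
	       !cfs_rq->util_sum && !cfs_rq->runnable_sum;
}

int main(void)
{
	/* A group whose tasks all blocked while it was throttled:
	 * nothing is runnable, but its PELT sums are not yet zero. */
	struct cfs_rq cfs_rq = { .load_sum = 1024 };

	/* Old test: 0, so the cfs_rq stayed off the leaf list and its
	 * stale blocked load was never decayed. */
	printf("old condition re-inserts: %d\n", cfs_rq.nr_running >= 1);

	/* New test: 1, the cfs_rq is re-inserted until it fully decays. */
	printf("new condition re-inserts: %d\n",
	       !cfs_rq_is_decayed(&cfs_rq) || cfs_rq.nr_running > 0);

	return 0;
}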

Changed files

1 file changed, 25 insertions(+), 19 deletions(-)

kernel/sched/fair.c
···
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_FAIR_GROUP_SCHED
+
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load.weight)
+		return false;
+
+	if (cfs_rq->avg.load_sum)
+		return false;
+
+	if (cfs_rq->avg.util_sum)
+		return false;
+
+	if (cfs_rq->avg.runnable_sum)
+		return false;
+
+	return true;
+}
+
 /**
  * update_tg_load_avg - update the tg's load avg
  * @cfs_rq: the cfs_rq whose avg changed
···
 
 #else /* CONFIG_SMP */
 
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+	return true;
+}
+
 #define UPDATE_TG	0x0
 #define SKIP_AGE_LOAD	0x0
 #define DO_ATTACH	0x0
···
 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
 					     cfs_rq->throttled_clock_task;
 
-		/* Add cfs_rq with already running entity in the list */
-		if (cfs_rq->nr_running >= 1)
+		/* Add cfs_rq with load or one or more already running entities to the list */
+		if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
 			list_add_leaf_cfs_rq(cfs_rq);
 	}
 
···
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-
-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load.weight)
-		return false;
-
-	if (cfs_rq->avg.load_sum)
-		return false;
-
-	if (cfs_rq->avg.util_sum)
-		return false;
-
-	if (cfs_rq->avg.runnable_sum)
-		return false;
-
-	return true;
-}
 
 static bool __update_blocked_fair(struct rq *rq, bool *done)
 {
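
Two details of the mechanics are worth noting. The leaf cfs_rq list is what the blocked-load pass walks to decay per-group PELT sums, so a cfs_rq missing from that list keeps its stale load indefinitely; cfs_rq_is_decayed() itself is moved up the file unchanged so it is in scope at the unthrottle site. A simplified sketch of the relevant loop in __update_blocked_fair() (whose opening appears in the last hunk's context; this is not the verbatim kernel code):

	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
		/* decay this group's blocked PELT sums */
		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq))
			update_tg_load_avg(cfs_rq);

		/* fully decayed groups are dropped from the list */
		if (cfs_rq_is_decayed(cfs_rq))
			list_del_leaf_cfs_rq(cfs_rq);
	}

On !CONFIG_SMP builds there is no blocked-load tracking, so the new stub returning true makes !cfs_rq_is_decayed() always false, and the unthrottle test reduces back to the old nr_running check.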