kernel/sched/fair.c: +25, -19
···
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_FAIR_GROUP_SCHED
+
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load.weight)
+		return false;
+
+	if (cfs_rq->avg.load_sum)
+		return false;
+
+	if (cfs_rq->avg.util_sum)
+		return false;
+
+	if (cfs_rq->avg.runnable_sum)
+		return false;
+
+	return true;
+}
+
 /**
  * update_tg_load_avg - update the tg's load avg
  * @cfs_rq: the cfs_rq whose avg changed
···
 
 #else /* CONFIG_SMP */
 
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+	return true;
+}
+
 #define UPDATE_TG	0x0
 #define SKIP_AGE_LOAD	0x0
 #define DO_ATTACH	0x0
···
 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
 					     cfs_rq->throttled_clock_task;
 
-		/* Add cfs_rq with already running entity in the list */
-		if (cfs_rq->nr_running >= 1)
+		/* Add cfs_rq with load or one or more already running entities to the list */
+		if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
 			list_add_leaf_cfs_rq(cfs_rq);
 	}
 
···
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-
-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load.weight)
-		return false;
-
-	if (cfs_rq->avg.load_sum)
-		return false;
-
-	if (cfs_rq->avg.util_sum)
-		return false;
-
-	if (cfs_rq->avg.runnable_sum)
-		return false;
-
-	return true;
-}
 
 static bool __update_blocked_fair(struct rq *rq, bool *done)
 {
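
Note: since the helper and its new call site sit in far-apart hunks, here is a condensed sketch of how the two pieces fit together once the patch is applied. The cfs_rq_is_decayed() definition is the one moved to the top of the CONFIG_SMP/CONFIG_FAIR_GROUP_SCHED section in the first hunk; the enclosing function for the changed check is assumed to be tg_unthrottle_up(), whose surrounding lines are abridged here.

/* True only when the cfs_rq carries no weight and all PELT sums have decayed to zero. */
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->load.weight)
		return false;

	if (cfs_rq->avg.load_sum)
		return false;

	if (cfs_rq->avg.util_sum)
		return false;

	if (cfs_rq->avg.runnable_sum)
		return false;

	return true;
}

static int tg_unthrottle_up(struct task_group *tg, void *data)
{
	/* ... abridged ... */

	/* Add cfs_rq with load or one or more already running entities to the list */
	if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
		list_add_leaf_cfs_rq(cfs_rq);

	/* ... abridged ... */
}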