Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/fair: Remove SIS_PROP

SIS_UTIL seems to work well, let's remove the old thing.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20231020134337.GD33965@noisy.programming.kicks-ass.net

-59
-2
include/linux/sched/topology.h
··· 109 109 u64 max_newidle_lb_cost; 110 110 unsigned long last_decay_max_lb_cost; 111 111 112 - u64 avg_scan_cost; /* select_idle_sibling */ 113 - 114 112 #ifdef CONFIG_SCHEDSTATS 115 113 /* load_balance() stats */ 116 114 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
-5
kernel/sched/core.c
··· 3792 3792 if (rq->avg_idle > max) 3793 3793 rq->avg_idle = max; 3794 3794 3795 - rq->wake_stamp = jiffies; 3796 - rq->wake_avg_idle = rq->avg_idle / 2; 3797 - 3798 3795 rq->idle_stamp = 0; 3799 3796 } 3800 3797 #endif ··· 9950 9953 rq->online = 0; 9951 9954 rq->idle_stamp = 0; 9952 9955 rq->avg_idle = 2*sysctl_sched_migration_cost; 9953 - rq->wake_stamp = jiffies; 9954 - rq->wake_avg_idle = rq->avg_idle; 9955 9956 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 9956 9957 9957 9958 INIT_LIST_HEAD(&rq->cfs_tasks);
-48
kernel/sched/fair.c
··· 7209 7209 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask); 7210 7210 int i, cpu, idle_cpu = -1, nr = INT_MAX; 7211 7211 struct sched_domain_shared *sd_share; 7212 - struct rq *this_rq = this_rq(); 7213 - int this = smp_processor_id(); 7214 - struct sched_domain *this_sd = NULL; 7215 - u64 time = 0; 7216 7212 7217 7213 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); 7218 - 7219 - if (sched_feat(SIS_PROP) && !has_idle_core) { 7220 - u64 avg_cost, avg_idle, span_avg; 7221 - unsigned long now = jiffies; 7222 - 7223 - this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); 7224 - if (!this_sd) 7225 - return -1; 7226 - 7227 - /* 7228 - * If we're busy, the assumption that the last idle period 7229 - * predicts the future is flawed; age away the remaining 7230 - * predicted idle time. 7231 - */ 7232 - if (unlikely(this_rq->wake_stamp < now)) { 7233 - while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) { 7234 - this_rq->wake_stamp++; 7235 - this_rq->wake_avg_idle >>= 1; 7236 - } 7237 - } 7238 - 7239 - avg_idle = this_rq->wake_avg_idle; 7240 - avg_cost = this_sd->avg_scan_cost + 1; 7241 - 7242 - span_avg = sd->span_weight * avg_idle; 7243 - if (span_avg > 4*avg_cost) 7244 - nr = div_u64(span_avg, avg_cost); 7245 - else 7246 - nr = 4; 7247 - 7248 - time = cpu_clock(this); 7249 - } 7250 7214 7251 7215 if (sched_feat(SIS_UTIL)) { 7252 7216 sd_share = rcu_dereference(per_cpu(sd_llc_shared, target)); ··· 7264 7300 7265 7301 if (has_idle_core) 7266 7302 set_idle_cores(target, false); 7267 - 7268 - if (sched_feat(SIS_PROP) && this_sd && !has_idle_core) { 7269 - time = cpu_clock(this) - time; 7270 - 7271 - /* 7272 - * Account for the scan cost of wakeups against the average 7273 - * idle time. 7274 - */ 7275 - this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time); 7276 - 7277 - update_avg(&this_sd->avg_scan_cost, time); 7278 - } 7279 7303 7280 7304 return idle_cpu; 7281 7305 }
-1
kernel/sched/features.h
··· 49 49 /* 50 50 * When doing wakeups, attempt to limit superfluous scans of the LLC domain. 51 51 */ 52 - SCHED_FEAT(SIS_PROP, false) 53 52 SCHED_FEAT(SIS_UTIL, true) 54 53 55 54 /*
-3
kernel/sched/sched.h
··· 1059 1059 u64 idle_stamp; 1060 1060 u64 avg_idle; 1061 1061 1062 - unsigned long wake_stamp; 1063 - u64 wake_avg_idle; 1064 - 1065 1062 /* This is used to determine avg_idle's max value */ 1066 1063 u64 max_idle_balance_cost; 1067 1064