Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/topology: Remove sched_domain_topology_level::flags

Support for overlapping domains added in commit e3589f6c81e4 ("sched:
Allow for overlapping sched_domain spans") also allowed forcefully
setting SD_OVERLAP for !NUMA domains via FORCE_SD_OVERLAP sched_feat().

Since NUMA domains had to be presumed overlapping to ensure correct
behavior, "sched_domain_topology_level::flags" was introduced. NUMA
domains added the SDTL_OVERLAP flag, which would ensure SD_OVERLAP was
always added during build_sched_domains() for these domains, even when
FORCE_SD_OVERLAP was off.

Condition for adding the SD_OVERLAP flag at the aforementioned commit
was as follows:

	if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
		sd->flags |= SD_OVERLAP;

The FORCE_SD_OVERLAP debug feature was removed in commit af85596c74de
("sched/topology: Remove FORCE_SD_OVERLAP") which left the NUMA domains
as the exclusive users of SDTL_OVERLAP, SD_OVERLAP, and SD_NUMA flags.

Get rid of SDTL_OVERLAP and SD_OVERLAP as they have become redundant
and instead rely on SD_NUMA to detect the only overlapping domain
currently supported. Since SDTL_OVERLAP was the only user of
"tl->flags", get rid of "sched_domain_topology_level::flags" too.

Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/ba4dbdf8-bc37-493d-b2e0-2efb00ea3e19@amd.com

authored by

K Prateek Nayak and committed by
Peter Zijlstra
1eec89a6 f79c9aa4

+13 -23
-8
include/linux/sched/sd_flags.h
··· 154 154 SD_FLAG(SD_PREFER_SIBLING, SDF_NEEDS_GROUPS) 155 155 156 156 /* 157 - * sched_groups of this level overlap 158 - * 159 - * SHARED_PARENT: Set for all NUMA levels above NODE. 160 - * NEEDS_GROUPS: Overlaps can only exist with more than one group. 161 - */ 162 - SD_FLAG(SD_OVERLAP, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) 163 - 164 - /* 165 157 * Cross-node balancing 166 158 * 167 159 * SHARED_PARENT: Set for all NUMA levels above NODE.
-3
include/linux/sched/topology.h
··· 175 175 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); 176 176 typedef int (*sched_domain_flags_f)(void); 177 177 178 - #define SDTL_OVERLAP 0x01 179 - 180 178 struct sd_data { 181 179 struct sched_domain *__percpu *sd; 182 180 struct sched_domain_shared *__percpu *sds; ··· 185 187 struct sched_domain_topology_level { 186 188 sched_domain_mask_f mask; 187 189 sched_domain_flags_f sd_flags; 188 - int flags; 189 190 int numa_level; 190 191 struct sd_data data; 191 192 char *name;
+3 -3
kernel/sched/fair.c
··· 9926 9926 min_capacity = ULONG_MAX; 9927 9927 max_capacity = 0; 9928 9928 9929 - if (child->flags & SD_OVERLAP) { 9929 + if (child->flags & SD_NUMA) { 9930 9930 /* 9931 - * SD_OVERLAP domains cannot assume that child groups 9931 + * SD_NUMA domains cannot assume that child groups 9932 9932 * span the current group. 9933 9933 */ 9934 9934 ··· 9941 9941 } 9942 9942 } else { 9943 9943 /* 9944 - * !SD_OVERLAP domains can assume that child groups 9944 + * !SD_NUMA domains can assume that child groups 9945 9945 * span the current group. 9946 9946 */ 9947 9947
+10 -9
kernel/sched/topology.c
··· 89 89 break; 90 90 } 91 91 92 - if (!(sd->flags & SD_OVERLAP) && 92 + if (!(sd->flags & SD_NUMA) && 93 93 cpumask_intersects(groupmask, sched_group_span(group))) { 94 94 printk(KERN_CONT "\n"); 95 95 printk(KERN_ERR "ERROR: repeated CPUs\n"); ··· 102 102 group->sgc->id, 103 103 cpumask_pr_args(sched_group_span(group))); 104 104 105 - if ((sd->flags & SD_OVERLAP) && 105 + if ((sd->flags & SD_NUMA) && 106 106 !cpumask_equal(group_balance_mask(group), sched_group_span(group))) { 107 107 printk(KERN_CONT " mask=%*pbl", 108 108 cpumask_pr_args(group_balance_mask(group))); ··· 1344 1344 * "sg->asym_prefer_cpu" to "sg->sgc->asym_prefer_cpu" 1345 1345 * which is shared by all the overlapping groups. 1346 1346 */ 1347 - WARN_ON_ONCE(sd->flags & SD_OVERLAP); 1347 + WARN_ON_ONCE(sd->flags & SD_NUMA); 1348 1348 1349 1349 sg = sd->groups; 1350 1350 if (cpu != sg->asym_prefer_cpu) { ··· 2016 2016 for (j = 1; j < nr_levels; i++, j++) { 2017 2017 tl[i] = SDTL_INIT(sd_numa_mask, cpu_numa_flags, NUMA); 2018 2018 tl[i].numa_level = j; 2019 - tl[i].flags = SDTL_OVERLAP; 2020 2019 } 2021 2020 2022 2021 sched_domain_topology_saved = sched_domain_topology; ··· 2326 2327 2327 2328 if (sdd->sd) { 2328 2329 sd = *per_cpu_ptr(sdd->sd, j); 2329 - if (sd && (sd->flags & SD_OVERLAP)) 2330 + if (sd && (sd->flags & SD_NUMA)) 2330 2331 free_sched_groups(sd->groups, 0); 2331 2332 kfree(*per_cpu_ptr(sdd->sd, j)); 2332 2333 } ··· 2392 2393 id_seen = sched_domains_tmpmask2; 2393 2394 2394 2395 for_each_sd_topology(tl) { 2396 + int tl_common_flags = 0; 2397 + 2398 + if (tl->sd_flags) 2399 + tl_common_flags = (*tl->sd_flags)(); 2395 2400 2396 2401 /* NUMA levels are allowed to overlap */ 2397 - if (tl->flags & SDTL_OVERLAP) 2402 + if (tl_common_flags & SD_NUMA) 2398 2403 continue; 2399 2404 2400 2405 cpumask_clear(covered); ··· 2469 2466 2470 2467 if (tl == sched_domain_topology) 2471 2468 *per_cpu_ptr(d.sd, i) = sd; 2472 - if (tl->flags & SDTL_OVERLAP) 2473 - sd->flags |= SD_OVERLAP; 2474 2469 if 
(cpumask_equal(cpu_map, sched_domain_span(sd))) 2475 2470 break; 2476 2471 } ··· 2481 2480 for_each_cpu(i, cpu_map) { 2482 2481 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 2483 2482 sd->span_weight = cpumask_weight(sched_domain_span(sd)); 2484 - if (sd->flags & SD_OVERLAP) { 2483 + if (sd->flags & SD_NUMA) { 2485 2484 if (build_overlap_sched_groups(sd, i)) 2486 2485 goto error; 2487 2486 } else {