Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] sched: introduce child field in sched_domain

Introduce the child field in sched_domain struct and use it in
sched_balance_self().

We will also use this field when cleaning up the sched group cpu_power
setup code (done in a different patch).

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by: Siddha, Suresh B
Committed by: Linus Torvalds
Commit: 1a848870 (parent: 74732646)

+40 -10
+1
include/asm-i386/topology.h
··· 74 74 #define SD_NODE_INIT (struct sched_domain) { \ 75 75 .span = CPU_MASK_NONE, \ 76 76 .parent = NULL, \ 77 + .child = NULL, \ 77 78 .groups = NULL, \ 78 79 .min_interval = 8, \ 79 80 .max_interval = 32, \
+2
include/asm-ia64/topology.h
··· 59 59 #define SD_CPU_INIT (struct sched_domain) { \ 60 60 .span = CPU_MASK_NONE, \ 61 61 .parent = NULL, \ 62 + .child = NULL, \ 62 63 .groups = NULL, \ 63 64 .min_interval = 1, \ 64 65 .max_interval = 4, \ ··· 85 84 #define SD_NODE_INIT (struct sched_domain) { \ 86 85 .span = CPU_MASK_NONE, \ 87 86 .parent = NULL, \ 87 + .child = NULL, \ 88 88 .groups = NULL, \ 89 89 .min_interval = 8, \ 90 90 .max_interval = 8*(min(num_online_cpus(), 32)), \
+1
include/asm-mips/mach-ip27/topology.h
··· 22 22 #define SD_NODE_INIT (struct sched_domain) { \ 23 23 .span = CPU_MASK_NONE, \ 24 24 .parent = NULL, \ 25 + .child = NULL, \ 25 26 .groups = NULL, \ 26 27 .min_interval = 8, \ 27 28 .max_interval = 32, \
+1
include/asm-powerpc/topology.h
··· 43 43 #define SD_NODE_INIT (struct sched_domain) { \ 44 44 .span = CPU_MASK_NONE, \ 45 45 .parent = NULL, \ 46 + .child = NULL, \ 46 47 .groups = NULL, \ 47 48 .min_interval = 8, \ 48 49 .max_interval = 32, \
+1
include/asm-x86_64/topology.h
··· 31 31 #define SD_NODE_INIT (struct sched_domain) { \ 32 32 .span = CPU_MASK_NONE, \ 33 33 .parent = NULL, \ 34 + .child = NULL, \ 34 35 .groups = NULL, \ 35 36 .min_interval = 8, \ 36 37 .max_interval = 32, \
+1
include/linux/sched.h
··· 644 644 struct sched_domain { 645 645 /* These fields must be setup */ 646 646 struct sched_domain *parent; /* top domain must be null terminated */ 647 + struct sched_domain *child; /* bottom domain must be null terminated */ 647 648 struct sched_group *groups; /* the balancing groups of the domain */ 648 649 cpumask_t span; /* span of all CPUs in this domain */ 649 650 unsigned long min_interval; /* Minimum balance interval ms */
+3
include/linux/topology.h
··· 89 89 #define SD_SIBLING_INIT (struct sched_domain) { \ 90 90 .span = CPU_MASK_NONE, \ 91 91 .parent = NULL, \ 92 + .child = NULL, \ 92 93 .groups = NULL, \ 93 94 .min_interval = 1, \ 94 95 .max_interval = 2, \ ··· 120 119 #define SD_CPU_INIT (struct sched_domain) { \ 121 120 .span = CPU_MASK_NONE, \ 122 121 .parent = NULL, \ 122 + .child = NULL, \ 123 123 .groups = NULL, \ 124 124 .min_interval = 1, \ 125 125 .max_interval = 4, \ ··· 148 146 #define SD_ALLNODES_INIT (struct sched_domain) { \ 149 147 .span = CPU_MASK_NONE, \ 150 148 .parent = NULL, \ 149 + .child = NULL, \ 151 150 .groups = NULL, \ 152 151 .min_interval = 64, \ 153 152 .max_interval = 64*num_online_cpus(), \
+30 -10
kernel/sched.c
··· 1286 1286 while (sd) { 1287 1287 cpumask_t span; 1288 1288 struct sched_group *group; 1289 - int new_cpu; 1290 - int weight; 1289 + int new_cpu, weight; 1290 + 1291 + if (!(sd->flags & flag)) { 1292 + sd = sd->child; 1293 + continue; 1294 + } 1291 1295 1292 1296 span = sd->span; 1293 1297 group = find_idlest_group(sd, t, cpu); 1294 - if (!group) 1295 - goto nextlevel; 1298 + if (!group) { 1299 + sd = sd->child; 1300 + continue; 1301 + } 1296 1302 1297 1303 new_cpu = find_idlest_cpu(group, t, cpu); 1298 - if (new_cpu == -1 || new_cpu == cpu) 1299 - goto nextlevel; 1304 + if (new_cpu == -1 || new_cpu == cpu) { 1305 + /* Now try balancing at a lower domain level of cpu */ 1306 + sd = sd->child; 1307 + continue; 1308 + } 1300 1309 1301 - /* Now try balancing at a lower domain level */ 1310 + /* Now try balancing at a lower domain level of new_cpu */ 1302 1311 cpu = new_cpu; 1303 - nextlevel: 1304 1312 sd = NULL; 1305 1313 weight = cpus_weight(span); 1306 1314 for_each_domain(cpu, tmp) { ··· 5456 5448 struct sched_domain *parent = tmp->parent; 5457 5449 if (!parent) 5458 5450 break; 5459 - if (sd_parent_degenerate(tmp, parent)) 5451 + if (sd_parent_degenerate(tmp, parent)) { 5460 5452 tmp->parent = parent->parent; 5453 + if (parent->parent) 5454 + parent->parent->child = tmp; 5455 + } 5461 5456 } 5462 5457 5463 - if (sd && sd_degenerate(sd)) 5458 + if (sd && sd_degenerate(sd)) { 5464 5459 sd = sd->parent; 5460 + if (sd) 5461 + sd->child = NULL; 5462 + } 5465 5463 5466 5464 sched_domain_debug(sd, cpu); 5467 5465 ··· 6302 6288 *sd = SD_NODE_INIT; 6303 6289 sd->span = sched_domain_node_span(cpu_to_node(i)); 6304 6290 sd->parent = p; 6291 + if (p) 6292 + p->child = sd; 6305 6293 cpus_and(sd->span, sd->span, *cpu_map); 6306 6294 #endif 6307 6295 ··· 6313 6297 *sd = SD_CPU_INIT; 6314 6298 sd->span = nodemask; 6315 6299 sd->parent = p; 6300 + if (p) 6301 + p->child = sd; 6316 6302 sd->groups = &sched_group_phys[group]; 6317 6303 6318 6304 #ifdef CONFIG_SCHED_MC ··· 6325 
6307 sd->span = cpu_coregroup_map(i); 6326 6308 cpus_and(sd->span, sd->span, *cpu_map); 6327 6309 sd->parent = p; 6310 + p->child = sd; 6328 6311 sd->groups = &sched_group_core[group]; 6329 6312 #endif 6330 6313 ··· 6337 6318 sd->span = cpu_sibling_map[i]; 6338 6319 cpus_and(sd->span, sd->span, *cpu_map); 6339 6320 sd->parent = p; 6321 + p->child = sd; 6340 6322 sd->groups = &sched_group_cpus[group]; 6341 6323 #endif 6342 6324 }