x86: cleanup remaining cpumask_t ops in smpboot code

Impact: use new cpumask API to reduce memory and stack usage

Allocate the following local cpumasks based on the number of cpus that
are present. References to them use the new cpumask API. (Currently this
is only done for x86_64; x86_32 continues to use the *_map variants.) A
short sketch of the allocation pattern follows the list below.

cpu_callin_mask
cpu_callout_mask
cpu_initialized_mask
cpu_sibling_setup_mask

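For reference, a minimal sketch (not part of the patch) of the old fixed-size
mask versus the new nr_cpu_ids-sized allocation; the example_* names are
illustrative, the cpumask calls are the real API. With CONFIG_CPUMASK_OFFSTACK
disabled, cpumask_var_t degenerates to a one-element array and the allocation
is a no-op.

  #include <linux/cpumask.h>
  #include <linux/init.h>

  /* old style: a full NR_CPUS-bit mask lives in .bss (or on the stack) */
  static cpumask_t example_map_old;

  /* new style: bits are allocated at boot time, sized to nr_cpu_ids */
  static cpumask_var_t example_mask_new;

  static void __init example_setup_mask(void)
  {
          /* bootmem variant is for use before the slab allocator is up */
          alloc_bootmem_cpumask_var(&example_mask_new);

          cpumask_set_cpu(0, example_mask_new);           /* was cpu_set()   */
          if (cpumask_test_cpu(0, example_mask_new))      /* was cpu_isset() */
                  cpumask_clear_cpu(0, example_mask_new);
  }
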
Provide the following accessor functions:

struct cpumask *cpu_sibling_mask(int cpu)
struct cpumask *cpu_core_mask(int cpu)

Other changes: when setting or clearing the cpu online, possible or
present maps, use the set_cpu_*() accessor functions, as shown in the
usage sketch below.
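
As a usage illustration (the helper below is hypothetical and assumes the
usual smpboot includes; the accessors and set_cpu_*() calls are the ones
this patch relies on):

  /* iterate a CPU's HT siblings and update the global maps */
  static void example_show_topology(int cpu)
  {
          int sibling;

          /* was: for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu)) */
          for_each_cpu(sibling, cpu_sibling_mask(cpu))
                  pr_debug("CPU%d: sibling %d\n", cpu, sibling);

          /* was: cpu_set(cpu, cpu_online_map), cpu_set(cpu, cpu_present_map) */
          set_cpu_online(cpu, true);
          set_cpu_present(cpu, true);
  }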

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Mike Travis, committed by Ingo Molnar
c2d1cec1 588235bb

+152 -76
arch/x86/include/asm/smp.h | +29 -3

···
 #include <asm/pda.h>
 #include <asm/thread_info.h>
 
+#ifdef CONFIG_X86_64
+
+extern cpumask_var_t cpu_callin_mask;
+extern cpumask_var_t cpu_callout_mask;
+extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_sibling_setup_mask;
+
+#else /* CONFIG_X86_32 */
+
+extern cpumask_t cpu_callin_map;
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_callin_map;
+extern cpumask_t cpu_sibling_setup_map;
+
+#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
+#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
+#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
+#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
+
+#endif /* CONFIG_X86_32 */
 
 extern void (*mtrr_hook)(void);
 extern void zap_low_mappings(void);
···
 
 extern int smp_num_siblings;
 extern unsigned int num_processors;
-extern cpumask_t cpu_initialized;
 
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
···
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(int, cpu_number);
 #endif
+
+static inline struct cpumask *cpu_sibling_mask(int cpu)
+{
+        return &per_cpu(cpu_sibling_map, cpu);
+}
+
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+        return &per_cpu(cpu_core_map, cpu);
+}
 
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
···
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
 {
-        return cpus_weight(cpu_callout_map);
+        return cpumask_weight(cpu_callout_mask);
 }
 #else
 static inline void prefill_possible_map(void)
arch/x86/kernel/cpu/common.c | +22 -4

···
 
 #include "cpu.h"
 
+#ifdef CONFIG_X86_64
+
+/* all of these masks are initialized in setup_cpu_local_masks() */
+cpumask_var_t cpu_callin_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_initialized_mask;
+
+/* representing cpus for which sibling maps can be computed */
+cpumask_var_t cpu_sibling_setup_mask;
+
+#else /* CONFIG_X86_32 */
+
+cpumask_t cpu_callin_map;
+cpumask_t cpu_callout_map;
+cpumask_t cpu_initialized;
+cpumask_t cpu_sibling_setup_map;
+
+#endif /* CONFIG_X86_32 */
+
+
 static struct cpu_dev *this_cpu __cpuinitdata;
 
 #ifdef CONFIG_X86_64
···
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
-cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-
 #ifdef CONFIG_X86_64
 struct x8664_pda **_cpu_pda __read_mostly;
 EXPORT_SYMBOL(_cpu_pda);
···
 
         me = current;
 
-        if (cpu_test_and_set(cpu, cpu_initialized))
+        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
                 panic("CPU#%d already initialized!\n", cpu);
 
         printk(KERN_INFO "Initializing CPU#%d\n", cpu);
···
         struct tss_struct *t = &per_cpu(init_tss, cpu);
         struct thread_struct *thread = &curr->thread;
 
-        if (cpu_test_and_set(cpu, cpu_initialized)) {
+        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
                 printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                 for (;;) local_irq_enable();
         }
arch/x86/kernel/setup_percpu.c | +24 -1

···
         /* point to new pointer table */
         _cpu_pda = new_cpu_pda;
 }
-#endif
+
+#endif /* CONFIG_SMP && CONFIG_X86_64 */
+
+#ifdef CONFIG_X86_64
+
+/* correctly size the local cpu masks */
+static void setup_cpu_local_masks(void)
+{
+        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+        alloc_bootmem_cpumask_var(&cpu_callin_mask);
+        alloc_bootmem_cpumask_var(&cpu_callout_mask);
+        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
+#else /* CONFIG_X86_32 */
+
+static inline void setup_cpu_local_masks(void)
+{
+}
+
+#endif /* CONFIG_X86_32 */
 
 /*
  * Great future plan:
···
 
         /* Setup node to cpumask map */
         setup_node_to_cpumask_map();
+
+        /* Setup cpu initialized, callin, callout masks */
+        setup_cpu_local_masks();
 }
 
 #endif
arch/x86/kernel/smp.c | +12 -5

···
 
 void native_send_call_func_ipi(const struct cpumask *mask)
 {
-        cpumask_t allbutself;
+        cpumask_var_t allbutself;
 
-        allbutself = cpu_online_map;
-        cpu_clear(smp_processor_id(), allbutself);
+        if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
+                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+                return;
+        }
 
-        if (cpus_equal(*mask, allbutself) &&
-            cpus_equal(cpu_online_map, cpu_callout_map))
+        cpumask_copy(allbutself, cpu_online_mask);
+        cpumask_clear_cpu(smp_processor_id(), allbutself);
+
+        if (cpumask_equal(mask, allbutself) &&
+            cpumask_equal(cpu_online_mask, cpu_callout_mask))
                 send_IPI_allbutself(CALL_FUNCTION_VECTOR);
         else
                 send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+
+        free_cpumask_var(allbutself);
 }
 
 /*
arch/x86/kernel/smpboot.c | +65 -63

···
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
···
 
 static atomic_t init_deasserted;
 
-
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
 
 /* Set if we find a B stepping CPU */
 static int __cpuinitdata smp_b_stepping;
···
 static void map_cpu_to_node(int cpu, int node)
 {
         printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-        cpu_set(cpu, node_to_cpumask_map[node]);
+        cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
         cpu_to_node_map[cpu] = node;
 }
···
 
         printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
         for (node = 0; node < MAX_NUMNODES; node++)
-                cpu_clear(cpu, node_to_cpumask_map[node]);
+                cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
         cpu_to_node_map[cpu] = 0;
 }
 #else /* !(CONFIG_NUMA && CONFIG_X86_32) */
···
          */
         phys_id = read_apic_id();
         cpuid = smp_processor_id();
-        if (cpu_isset(cpuid, cpu_callin_map)) {
+        if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
                 panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
                                         phys_id, cpuid);
         }
···
                 /*
                  * Has the boot CPU finished it's STARTUP sequence?
                  */
-                if (cpu_isset(cpuid, cpu_callout_map))
+                if (cpumask_test_cpu(cpuid, cpu_callout_mask))
                         break;
                 cpu_relax();
         }
···
         /*
          * Allow the master to continue.
          */
-        cpu_set(cpuid, cpu_callin_map);
+        cpumask_set_cpu(cpuid, cpu_callin_mask);
 }
 
 static int __cpuinitdata unsafe_smp;
···
         ipi_call_lock();
         lock_vector_lock();
         __setup_vector_irq(smp_processor_id());
-        cpu_set(smp_processor_id(), cpu_online_map);
+        set_cpu_online(smp_processor_id(), true);
         unlock_vector_lock();
         ipi_call_unlock();
         per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
···
         int i;
         struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-        cpu_set(cpu, cpu_sibling_setup_map);
+        cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
         if (smp_num_siblings > 1) {
-                for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
-                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
-                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-                                cpu_set(i, per_cpu(cpu_core_map, cpu));
-                                cpu_set(cpu, per_cpu(cpu_core_map, i));
-                                cpu_set(i, c->llc_shared_map);
-                                cpu_set(cpu, cpu_data(i).llc_shared_map);
+                for_each_cpu(i, cpu_sibling_setup_mask) {
+                        struct cpuinfo_x86 *o = &cpu_data(i);
+
+                        if (c->phys_proc_id == o->phys_proc_id &&
+                            c->cpu_core_id == o->cpu_core_id) {
+                                cpumask_set_cpu(i, cpu_sibling_mask(cpu));
+                                cpumask_set_cpu(cpu, cpu_sibling_mask(i));
+                                cpumask_set_cpu(i, cpu_core_mask(cpu));
+                                cpumask_set_cpu(cpu, cpu_core_mask(i));
+                                cpumask_set_cpu(i, &c->llc_shared_map);
+                                cpumask_set_cpu(cpu, &o->llc_shared_map);
                         }
                 }
         } else {
-                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+                cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
         }
 
-        cpu_set(cpu, c->llc_shared_map);
+        cpumask_set_cpu(cpu, &c->llc_shared_map);
 
         if (current_cpu_data.x86_max_cores == 1) {
-                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+                cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
                 c->booted_cores = 1;
                 return;
         }
 
-        for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
+        for_each_cpu(i, cpu_sibling_setup_mask) {
                 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                     per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                        cpu_set(i, c->llc_shared_map);
-                        cpu_set(cpu, cpu_data(i).llc_shared_map);
+                        cpumask_set_cpu(i, &c->llc_shared_map);
+                        cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
                 }
                 if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-                        cpu_set(i, per_cpu(cpu_core_map, cpu));
-                        cpu_set(cpu, per_cpu(cpu_core_map, i));
+                        cpumask_set_cpu(i, cpu_core_mask(cpu));
+                        cpumask_set_cpu(cpu, cpu_core_mask(i));
                         /*
                          * Does this new cpu bringup a new core?
                          */
-                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
+                        if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
                                 /*
                                  * for each core in package, increment
                                  * the booted_cores for this new cpu
                                  */
-                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
+                                if (cpumask_first(cpu_sibling_mask(i)) == i)
                                         c->booted_cores++;
                                 /*
                                  * increment the core count for all
···
          * And for power savings, we return cpu_core_map
          */
         if (sched_mc_power_savings || sched_smt_power_savings)
-                return &per_cpu(cpu_core_map, cpu);
+                return cpu_core_mask(cpu);
         else
                 return &c->llc_shared_map;
 }
···
          */
         pr_debug("Before bogomips.\n");
         for_each_possible_cpu(cpu)
-                if (cpu_isset(cpu, cpu_callout_map))
+                if (cpumask_test_cpu(cpu, cpu_callout_mask))
                         bogosum += cpu_data(cpu).loops_per_jiffy;
         printk(KERN_INFO
                 "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
···
                  * allow APs to start initializing.
                  */
                 pr_debug("Before Callout %d.\n", cpu);
-                cpu_set(cpu, cpu_callout_map);
+                cpumask_set_cpu(cpu, cpu_callout_mask);
                 pr_debug("After Callout %d.\n", cpu);
 
                 /*
                  * Wait 5s total for a response
                  */
                 for (timeout = 0; timeout < 50000; timeout++) {
-                        if (cpu_isset(cpu, cpu_callin_map))
+                        if (cpumask_test_cpu(cpu, cpu_callin_mask))
                                 break;  /* It has booted */
                         udelay(100);
                 }
 
-                if (cpu_isset(cpu, cpu_callin_map)) {
+                if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
                         /* number CPUs logically, starting from 1 (BSP is 0) */
                         pr_debug("OK.\n");
                         printk(KERN_INFO "CPU%d: ", cpu);
···
         if (boot_error) {
                 /* Try to put things back the way they were before ... */
                 numa_remove_cpu(cpu); /* was set by numa_add_cpu */
-                cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
-                cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
-                cpu_clear(cpu, cpu_present_map);
+
+                /* was set by do_boot_cpu() */
+                cpumask_clear_cpu(cpu, cpu_callout_mask);
+
+                /* was set by cpu_init() */
+                cpumask_clear_cpu(cpu, cpu_initialized_mask);
+
+                set_cpu_present(cpu, false);
                 per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
         }
···
         /*
          * Already booted CPU?
          */
-        if (cpu_isset(cpu, cpu_callin_map)) {
+        if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
                 pr_debug("do_boot_cpu %d Already started\n", cpu);
                 return -ENOSYS;
         }
···
  */
 static __init void disable_smp(void)
 {
-        cpu_present_map = cpumask_of_cpu(0);
-        cpu_possible_map = cpumask_of_cpu(0);
+        /* use the read/write pointers to the present and possible maps */
+        cpumask_copy(&cpu_present_map, cpumask_of(0));
+        cpumask_copy(&cpu_possible_map, cpumask_of(0));
         smpboot_clear_io_apic_irqs();
 
         if (smp_found_config)
···
         else
                 physid_set_mask_of_physid(0, &phys_cpu_present_map);
         map_cpu_to_logical_apicid();
-        cpu_set(0, per_cpu(cpu_sibling_map, 0));
-        cpu_set(0, per_cpu(cpu_core_map, 0));
+        cpumask_set_cpu(0, cpu_sibling_mask(0));
+        cpumask_set_cpu(0, cpu_core_mask(0));
 }
 
 /*
···
         nr = 0;
         for_each_present_cpu(cpu) {
                 if (nr >= 8)
-                        cpu_clear(cpu, cpu_present_map);
+                        set_cpu_present(cpu, false);
                 nr++;
         }
 
         nr = 0;
         for_each_possible_cpu(cpu) {
                 if (nr >= 8)
-                        cpu_clear(cpu, cpu_possible_map);
+                        set_cpu_possible(cpu, false);
                 nr++;
         }
 
···
         preempt_disable();
         smp_cpu_index_default();
         current_cpu_data = boot_cpu_data;
-        cpu_callin_map = cpumask_of_cpu(0);
+        cpumask_copy(cpu_callin_mask, cpumask_of(0));
         mb();
         /*
          * Setup boot CPU information
···
         init_gdt(me);
 #endif
         switch_to_new_gdt();
-        /* already set me in cpu_online_map in boot_cpu_init() */
-        cpu_set(me, cpu_callout_map);
+        /* already set me in cpu_online_mask in boot_cpu_init() */
+        cpumask_set_cpu(me, cpu_callout_mask);
         per_cpu(cpu_state, me) = CPU_ONLINE;
 }
···
                 possible, max_t(int, possible - num_processors, 0));
 
         for (i = 0; i < possible; i++)
-                cpu_set(i, cpu_possible_map);
+                set_cpu_possible(i, true);
 
         nr_cpu_ids = possible;
 }
···
         int sibling;
         struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-        for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
-                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+        for_each_cpu(sibling, cpu_core_mask(cpu)) {
+                cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
                 /*/
                  * last thread sibling in this cpu core going down
                  */
-                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+                if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
                         cpu_data(sibling).booted_cores--;
         }
 
-        for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
-                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-        cpus_clear(per_cpu(cpu_sibling_map, cpu));
-        cpus_clear(per_cpu(cpu_core_map, cpu));
+        for_each_cpu(sibling, cpu_sibling_mask(cpu))
+                cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
+        cpumask_clear(cpu_sibling_mask(cpu));
+        cpumask_clear(cpu_core_mask(cpu));
         c->phys_proc_id = 0;
         c->cpu_core_id = 0;
-        cpu_clear(cpu, cpu_sibling_setup_map);
+        cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 }
 
 static void __ref remove_cpu_from_maps(int cpu)
 {
-        cpu_clear(cpu, cpu_online_map);
-        cpu_clear(cpu, cpu_callout_map);
-        cpu_clear(cpu, cpu_callin_map);
+        set_cpu_online(cpu, false);
+        cpumask_clear_cpu(cpu, cpu_callout_mask);
+        cpumask_clear_cpu(cpu, cpu_callin_mask);
         /* was set by cpu_init() */
-        cpu_clear(cpu, cpu_initialized);
+        cpumask_clear_cpu(cpu, cpu_initialized_mask);
         numa_remove_cpu(cpu);
 }