Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Convert cpu_sibling_map to be a per cpu variable

Convert cpu_sibling_map from a static array sized by NR_CPUS to a per_cpu
variable. This saves sizeof(cpumask_t) for each unused cpu. Access is mostly
from startup and CPU HOTPLUG functions.

Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mike Travis and committed by Linus Torvalds
d5a7430d 08357611

+107 -82
+8 -4
arch/ia64/kernel/setup.c
··· 528 528 529 529 #ifdef CONFIG_SMP 530 530 cpu_physical_id(0) = hard_smp_processor_id(); 531 - 532 - cpu_set(0, cpu_sibling_map[0]); 533 - cpu_set(0, cpu_core_map[0]); 534 - 535 531 check_for_logical_procs(); 536 532 if (smp_num_cpucores > 1) 537 533 printk(KERN_INFO ··· 869 873 void *cpu_data; 870 874 871 875 cpu_data = per_cpu_init(); 876 + /* 877 + * insert boot cpu into sibling and core mapes 878 + * (must be done after per_cpu area is setup) 879 + */ 880 + if (smp_processor_id() == 0) { 881 + cpu_set(0, per_cpu(cpu_sibling_map, 0)); 882 + cpu_set(0, cpu_core_map[0]); 883 + } 872 884 873 885 /* 874 886 * We set ar.k3 so that assembly code in MCA handler can compute
+10 -8
arch/ia64/kernel/smpboot.c
··· 138 138 EXPORT_SYMBOL(cpu_possible_map); 139 139 140 140 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; 141 - cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned; 141 + DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map); 142 + EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 143 + 142 144 int smp_num_siblings = 1; 143 145 int smp_num_cpucores = 1; 144 146 ··· 652 650 { 653 651 int i; 654 652 655 - for_each_cpu_mask(i, cpu_sibling_map[cpu]) 656 - cpu_clear(cpu, cpu_sibling_map[i]); 653 + for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) 654 + cpu_clear(cpu, per_cpu(cpu_sibling_map, i)); 657 655 for_each_cpu_mask(i, cpu_core_map[cpu]) 658 656 cpu_clear(cpu, cpu_core_map[i]); 659 657 660 - cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE; 658 + per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE; 661 659 } 662 660 663 661 static void ··· 668 666 if (cpu_data(cpu)->threads_per_core == 1 && 669 667 cpu_data(cpu)->cores_per_socket == 1) { 670 668 cpu_clear(cpu, cpu_core_map[cpu]); 671 - cpu_clear(cpu, cpu_sibling_map[cpu]); 669 + cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu)); 672 670 return; 673 671 } 674 672 ··· 809 807 cpu_set(i, cpu_core_map[cpu]); 810 808 cpu_set(cpu, cpu_core_map[i]); 811 809 if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) { 812 - cpu_set(i, cpu_sibling_map[cpu]); 813 - cpu_set(cpu, cpu_sibling_map[i]); 810 + cpu_set(i, per_cpu(cpu_sibling_map, cpu)); 811 + cpu_set(cpu, per_cpu(cpu_sibling_map, i)); 814 812 } 815 813 } 816 814 } ··· 841 839 842 840 if (cpu_data(cpu)->threads_per_core == 1 && 843 841 cpu_data(cpu)->cores_per_socket == 1) { 844 - cpu_set(cpu, cpu_sibling_map[cpu]); 842 + cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); 845 843 cpu_set(cpu, cpu_core_map[cpu]); 846 844 return 0; 847 845 }
+16 -4
arch/powerpc/kernel/setup-common.c
··· 413 413 of_node_put(dn); 414 414 } 415 415 416 + vdso_data->processorCount = num_present_cpus(); 417 + #endif /* CONFIG_PPC64 */ 418 + } 419 + 420 + /* 421 + * Being that cpu_sibling_map is now a per_cpu array, then it cannot 422 + * be initialized until the per_cpu areas have been created. This 423 + * function is now called from setup_per_cpu_areas(). 424 + */ 425 + void __init smp_setup_cpu_sibling_map(void) 426 + { 427 + #if defined(CONFIG_PPC64) 428 + int cpu; 429 + 416 430 /* 417 431 * Do the sibling map; assume only two threads per processor. 418 432 */ 419 433 for_each_possible_cpu(cpu) { 420 - cpu_set(cpu, cpu_sibling_map[cpu]); 434 + cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); 421 435 if (cpu_has_feature(CPU_FTR_SMT)) 422 - cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]); 436 + cpu_set(cpu ^ 0x1, per_cpu(cpu_sibling_map, cpu)); 423 437 } 424 - 425 - vdso_data->processorCount = num_present_cpus(); 426 438 #endif /* CONFIG_PPC64 */ 427 439 } 428 440 #endif /* CONFIG_SMP */
+3
arch/powerpc/kernel/setup_64.c
··· 597 597 paca[i].data_offset = ptr - __per_cpu_start; 598 598 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 599 599 } 600 + 601 + /* Now that per_cpu is setup, initialize cpu_sibling_map */ 602 + smp_setup_cpu_sibling_map(); 600 603 } 601 604 #endif 602 605
+2 -2
arch/powerpc/kernel/smp.c
··· 61 61 62 62 cpumask_t cpu_possible_map = CPU_MASK_NONE; 63 63 cpumask_t cpu_online_map = CPU_MASK_NONE; 64 - cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 64 + DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 65 65 66 66 EXPORT_SYMBOL(cpu_online_map); 67 67 EXPORT_SYMBOL(cpu_possible_map); 68 - EXPORT_SYMBOL(cpu_sibling_map); 68 + EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 69 69 70 70 /* SMP operations for this machine */ 71 71 struct smp_ops_t *smp_ops;
+1 -1
arch/powerpc/platforms/cell/cbe_cpufreq.c
··· 117 117 policy->cur = cbe_freqs[cur_pmode].frequency; 118 118 119 119 #ifdef CONFIG_SMP 120 - policy->cpus = cpu_sibling_map[policy->cpu]; 120 + policy->cpus = per_cpu(cpu_sibling_map, policy->cpu); 121 121 #endif 122 122 123 123 cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
+8 -9
arch/sparc64/kernel/smp.c
··· 52 52 53 53 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE; 54 54 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; 55 - cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly = 56 - { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 55 + DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 57 56 cpumask_t cpu_core_map[NR_CPUS] __read_mostly = 58 57 { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 59 58 60 59 EXPORT_SYMBOL(cpu_possible_map); 61 60 EXPORT_SYMBOL(cpu_online_map); 62 - EXPORT_SYMBOL(cpu_sibling_map); 61 + EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 63 62 EXPORT_SYMBOL(cpu_core_map); 64 63 65 64 static cpumask_t smp_commenced_mask; ··· 1260 1261 for_each_present_cpu(i) { 1261 1262 unsigned int j; 1262 1263 1263 - cpus_clear(cpu_sibling_map[i]); 1264 + cpus_clear(per_cpu(cpu_sibling_map, i)); 1264 1265 if (cpu_data(i).proc_id == -1) { 1265 - cpu_set(i, cpu_sibling_map[i]); 1266 + cpu_set(i, per_cpu(cpu_sibling_map, i)); 1266 1267 continue; 1267 1268 } 1268 1269 1269 1270 for_each_present_cpu(j) { 1270 1271 if (cpu_data(i).proc_id == 1271 1272 cpu_data(j).proc_id) 1272 - cpu_set(j, cpu_sibling_map[i]); 1273 + cpu_set(j, per_cpu(cpu_sibling_map, i)); 1273 1274 } 1274 1275 } 1275 1276 } ··· 1341 1342 cpu_clear(cpu, cpu_core_map[i]); 1342 1343 cpus_clear(cpu_core_map[cpu]); 1343 1344 1344 - for_each_cpu_mask(i, cpu_sibling_map[cpu]) 1345 - cpu_clear(cpu, cpu_sibling_map[i]); 1346 - cpus_clear(cpu_sibling_map[cpu]); 1345 + for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) 1346 + cpu_clear(cpu, per_cpu(cpu_sibling_map, i)); 1347 + cpus_clear(per_cpu(cpu_sibling_map, cpu)); 1347 1348 1348 1349 c = &cpu_data(cpu); 1349 1350
+1 -1
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
··· 200 200 unsigned int i; 201 201 202 202 #ifdef CONFIG_SMP 203 - policy->cpus = cpu_sibling_map[policy->cpu]; 203 + policy->cpus = per_cpu(cpu_sibling_map, policy->cpu); 204 204 #endif 205 205 206 206 /* Errata workaround */
+1 -1
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
··· 322 322 323 323 /* only run on CPU to be set, or on its sibling */ 324 324 #ifdef CONFIG_SMP 325 - policy->cpus = cpu_sibling_map[policy->cpu]; 325 + policy->cpus = per_cpu(cpu_sibling_map, policy->cpu); 326 326 #endif 327 327 328 328 cpus_allowed = current->cpus_allowed;
+2 -2
arch/x86/kernel/io_apic_32.c
··· 378 378 379 379 #define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask) 380 380 381 - #define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i])) 381 + #define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i))) 382 382 383 383 static cpumask_t balance_irq_affinity[NR_IRQS] = { 384 384 [0 ... NR_IRQS-1] = CPU_MASK_ALL ··· 598 598 * (A+B)/2 vs B 599 599 */ 600 600 load = CPU_IRQ(min_loaded) >> 1; 601 - for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) { 601 + for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) { 602 602 if (load > CPU_IRQ(j)) { 603 603 /* This won't change cpu_sibling_map[min_loaded] */ 604 604 load = CPU_IRQ(j);
+18 -18
arch/x86/kernel/smpboot_32.c
··· 70 70 int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; 71 71 72 72 /* representing HT siblings of each logical CPU */ 73 - cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; 74 - EXPORT_SYMBOL(cpu_sibling_map); 73 + DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); 74 + EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 75 75 76 76 /* representing HT and core siblings of each logical CPU */ 77 77 DEFINE_PER_CPU(cpumask_t, cpu_core_map); ··· 319 319 for_each_cpu_mask(i, cpu_sibling_setup_map) { 320 320 if (c[cpu].phys_proc_id == c[i].phys_proc_id && 321 321 c[cpu].cpu_core_id == c[i].cpu_core_id) { 322 - cpu_set(i, cpu_sibling_map[cpu]); 323 - cpu_set(cpu, cpu_sibling_map[i]); 322 + cpu_set(i, per_cpu(cpu_sibling_map, cpu)); 323 + cpu_set(cpu, per_cpu(cpu_sibling_map, i)); 324 324 cpu_set(i, per_cpu(cpu_core_map, cpu)); 325 325 cpu_set(cpu, per_cpu(cpu_core_map, i)); 326 326 cpu_set(i, c[cpu].llc_shared_map); ··· 328 328 } 329 329 } 330 330 } else { 331 - cpu_set(cpu, cpu_sibling_map[cpu]); 331 + cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); 332 332 } 333 333 334 334 cpu_set(cpu, c[cpu].llc_shared_map); 335 335 336 336 if (current_cpu_data.x86_max_cores == 1) { 337 - per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu]; 337 + per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); 338 338 c[cpu].booted_cores = 1; 339 339 return; 340 340 } ··· 351 351 /* 352 352 * Does this new cpu bringup a new core? 353 353 */ 354 - if (cpus_weight(cpu_sibling_map[cpu]) == 1) { 354 + if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { 355 355 /* 356 356 * for each core in package, increment 357 357 * the booted_cores for this new cpu 358 358 */ 359 - if (first_cpu(cpu_sibling_map[i]) == i) 359 + if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) 360 360 c[cpu].booted_cores++; 361 361 /* 362 362 * increment the core count for all ··· 983 983 printk(KERN_NOTICE "Local APIC not detected." 
984 984 " Using dummy APIC emulation.\n"); 985 985 map_cpu_to_logical_apicid(); 986 - cpu_set(0, cpu_sibling_map[0]); 986 + cpu_set(0, per_cpu(cpu_sibling_map, 0)); 987 987 cpu_set(0, per_cpu(cpu_core_map, 0)); 988 988 return; 989 989 } ··· 1008 1008 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); 1009 1009 smpboot_clear_io_apic_irqs(); 1010 1010 phys_cpu_present_map = physid_mask_of_physid(0); 1011 - cpu_set(0, cpu_sibling_map[0]); 1011 + cpu_set(0, per_cpu(cpu_sibling_map, 0)); 1012 1012 cpu_set(0, per_cpu(cpu_core_map, 0)); 1013 1013 return; 1014 1014 } ··· 1023 1023 printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); 1024 1024 smpboot_clear_io_apic_irqs(); 1025 1025 phys_cpu_present_map = physid_mask_of_physid(0); 1026 - cpu_set(0, cpu_sibling_map[0]); 1026 + cpu_set(0, per_cpu(cpu_sibling_map, 0)); 1027 1027 cpu_set(0, per_cpu(cpu_core_map, 0)); 1028 1028 return; 1029 1029 } ··· 1102 1102 Dprintk("Boot done.\n"); 1103 1103 1104 1104 /* 1105 - * construct cpu_sibling_map[], so that we can tell sibling CPUs 1105 + * construct cpu_sibling_map, so that we can tell sibling CPUs 1106 1106 * efficiently. 
1107 1107 */ 1108 1108 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1109 - cpus_clear(cpu_sibling_map[cpu]); 1109 + cpus_clear(per_cpu(cpu_sibling_map, cpu)); 1110 1110 cpus_clear(per_cpu(cpu_core_map, cpu)); 1111 1111 } 1112 1112 1113 - cpu_set(0, cpu_sibling_map[0]); 1113 + cpu_set(0, per_cpu(cpu_sibling_map, 0)); 1114 1114 cpu_set(0, per_cpu(cpu_core_map, 0)); 1115 1115 1116 1116 smpboot_setup_io_apic(); ··· 1153 1153 /*/ 1154 1154 * last thread sibling in this cpu core going down 1155 1155 */ 1156 - if (cpus_weight(cpu_sibling_map[cpu]) == 1) 1156 + if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) 1157 1157 c[sibling].booted_cores--; 1158 1158 } 1159 1159 1160 - for_each_cpu_mask(sibling, cpu_sibling_map[cpu]) 1161 - cpu_clear(cpu, cpu_sibling_map[sibling]); 1162 - cpus_clear(cpu_sibling_map[cpu]); 1160 + for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) 1161 + cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); 1162 + cpus_clear(per_cpu(cpu_sibling_map, cpu)); 1163 1163 cpus_clear(per_cpu(cpu_core_map, cpu)); 1164 1164 c[cpu].phys_proc_id = 0; 1165 1165 c[cpu].cpu_core_id = 0;
+13 -13
arch/x86/kernel/smpboot_64.c
··· 91 91 int smp_threads_ready; 92 92 93 93 /* representing HT siblings of each logical CPU */ 94 - cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; 95 - EXPORT_SYMBOL(cpu_sibling_map); 94 + DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); 95 + EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 96 96 97 97 /* representing HT and core siblings of each logical CPU */ 98 98 DEFINE_PER_CPU(cpumask_t, cpu_core_map); ··· 262 262 for_each_cpu_mask(i, cpu_sibling_setup_map) { 263 263 if (c[cpu].phys_proc_id == c[i].phys_proc_id && 264 264 c[cpu].cpu_core_id == c[i].cpu_core_id) { 265 - cpu_set(i, cpu_sibling_map[cpu]); 266 - cpu_set(cpu, cpu_sibling_map[i]); 265 + cpu_set(i, per_cpu(cpu_sibling_map, cpu)); 266 + cpu_set(cpu, per_cpu(cpu_sibling_map, i)); 267 267 cpu_set(i, per_cpu(cpu_core_map, cpu)); 268 268 cpu_set(cpu, per_cpu(cpu_core_map, i)); 269 269 cpu_set(i, c[cpu].llc_shared_map); ··· 271 271 } 272 272 } 273 273 } else { 274 - cpu_set(cpu, cpu_sibling_map[cpu]); 274 + cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); 275 275 } 276 276 277 277 cpu_set(cpu, c[cpu].llc_shared_map); 278 278 279 279 if (current_cpu_data.x86_max_cores == 1) { 280 - per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu]; 280 + per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); 281 281 c[cpu].booted_cores = 1; 282 282 return; 283 283 } ··· 294 294 /* 295 295 * Does this new cpu bringup a new core? 
296 296 */ 297 - if (cpus_weight(cpu_sibling_map[cpu]) == 1) { 297 + if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { 298 298 /* 299 299 * for each core in package, increment 300 300 * the booted_cores for this new cpu 301 301 */ 302 - if (first_cpu(cpu_sibling_map[i]) == i) 302 + if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) 303 303 c[cpu].booted_cores++; 304 304 /* 305 305 * increment the core count for all ··· 735 735 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id); 736 736 else 737 737 phys_cpu_present_map = physid_mask_of_physid(0); 738 - cpu_set(0, cpu_sibling_map[0]); 738 + cpu_set(0, per_cpu(cpu_sibling_map, 0)); 739 739 cpu_set(0, per_cpu(cpu_core_map, 0)); 740 740 } 741 741 ··· 976 976 /* 977 977 * last thread sibling in this cpu core going down 978 978 */ 979 - if (cpus_weight(cpu_sibling_map[cpu]) == 1) 979 + if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) 980 980 c[sibling].booted_cores--; 981 981 } 982 982 983 - for_each_cpu_mask(sibling, cpu_sibling_map[cpu]) 984 - cpu_clear(cpu, cpu_sibling_map[sibling]); 985 - cpus_clear(cpu_sibling_map[cpu]); 983 + for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) 984 + cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); 985 + cpus_clear(per_cpu(cpu_sibling_map, cpu)); 986 986 cpus_clear(per_cpu(cpu_core_map, cpu)); 987 987 c[cpu].phys_proc_id = 0; 988 988 c[cpu].cpu_core_id = 0;
+1 -1
arch/x86/oprofile/op_model_p4.c
··· 379 379 { 380 380 #ifdef CONFIG_SMP 381 381 int cpu = smp_processor_id(); 382 - return (cpu != first_cpu(cpu_sibling_map[cpu])); 382 + return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu))); 383 383 #endif 384 384 return 0; 385 385 }
+2 -2
arch/x86/xen/smp.c
··· 147 147 make_lowmem_page_readwrite(&per_cpu__gdt_page); 148 148 149 149 for (cpu = 0; cpu < NR_CPUS; cpu++) { 150 - cpus_clear(cpu_sibling_map[cpu]); 150 + cpus_clear(per_cpu(cpu_sibling_map, cpu)); 151 151 /* 152 152 * cpu_core_map lives in a per cpu area that is cleared 153 153 * when the per cpu array is allocated. ··· 164 164 unsigned cpu; 165 165 166 166 for (cpu = 0; cpu < NR_CPUS; cpu++) { 167 - cpus_clear(cpu_sibling_map[cpu]); 167 + cpus_clear(per_cpu(cpu_sibling_map, cpu)); 168 168 /* 169 169 * cpu_core_ map will be zeroed when the per 170 170 * cpu area is allocated.
+1 -1
block/blktrace.c
··· 550 550 for_each_online_cpu(cpu) { 551 551 unsigned long long *cpu_off, *sibling_off; 552 552 553 - for_each_cpu_mask(i, cpu_sibling_map[cpu]) { 553 + for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) { 554 554 if (i == cpu) 555 555 continue; 556 556
+1 -1
include/asm-ia64/smp.h
··· 58 58 59 59 extern cpumask_t cpu_online_map; 60 60 extern cpumask_t cpu_core_map[NR_CPUS]; 61 - extern cpumask_t cpu_sibling_map[NR_CPUS]; 61 + DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 62 62 extern int smp_num_siblings; 63 63 extern int smp_num_cpucores; 64 64 extern void __iomem *ipi_base_addr;
+1 -1
include/asm-ia64/topology.h
··· 112 112 #define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id) 113 113 #define topology_core_id(cpu) (cpu_data(cpu)->core_id) 114 114 #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 115 - #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) 115 + #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 116 116 #define smt_capable() (smp_num_siblings > 1) 117 117 #endif 118 118
+3 -1
include/asm-powerpc/smp.h
··· 26 26 #ifdef CONFIG_PPC64 27 27 #include <asm/paca.h> 28 28 #endif 29 + #include <asm/percpu.h> 29 30 30 31 extern int boot_cpuid; 31 32 ··· 59 58 (smp_hw_index[(cpu)] = (phys)) 60 59 #endif 61 60 62 - extern cpumask_t cpu_sibling_map[NR_CPUS]; 61 + DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 63 62 64 63 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. 65 64 * ··· 78 77 void smp_init_cell(void); 79 78 void smp_init_celleb(void); 80 79 void smp_setup_cpu_maps(void); 80 + void smp_setup_cpu_sibling_map(void); 81 81 82 82 extern int __cpu_disable(void); 83 83 extern void __cpu_die(unsigned int cpu);
+1 -1
include/asm-powerpc/topology.h
··· 108 108 #ifdef CONFIG_PPC64 109 109 #include <asm/smp.h> 110 110 111 - #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) 111 + #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 112 112 #endif 113 113 #endif 114 114
+2 -1
include/asm-sparc64/smp.h
··· 28 28 29 29 #include <asm/bitops.h> 30 30 #include <asm/atomic.h> 31 + #include <asm/percpu.h> 31 32 32 - extern cpumask_t cpu_sibling_map[NR_CPUS]; 33 + DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 33 34 extern cpumask_t cpu_core_map[NR_CPUS]; 34 35 extern int sparc64_multi_core; 35 36
+1 -1
include/asm-sparc64/topology.h
··· 5 5 #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) 6 6 #define topology_core_id(cpu) (cpu_data(cpu).core_id) 7 7 #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 8 - #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) 8 + #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 9 9 #define mc_capable() (sparc64_multi_core) 10 10 #define smt_capable() (sparc64_multi_core) 11 11 #endif /* CONFIG_SMP */
+1 -1
include/asm-x86/smp_32.h
··· 30 30 extern void smp_alloc_memory(void); 31 31 extern int pic_mode; 32 32 extern int smp_num_siblings; 33 - extern cpumask_t cpu_sibling_map[]; 33 + DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 34 34 DECLARE_PER_CPU(cpumask_t, cpu_core_map); 35 35 36 36 extern void (*mtrr_hook) (void);
+4 -2
include/asm-x86/smp_64.h
··· 38 38 extern int smp_num_siblings; 39 39 extern void smp_send_reschedule(int cpu); 40 40 41 - extern cpumask_t cpu_sibling_map[NR_CPUS]; 42 41 /* 43 - * cpu_core_map lives in a per cpu area 42 + * cpu_sibling_map and cpu_core_map now live 43 + * in the per cpu area 44 44 * 45 + * extern cpumask_t cpu_sibling_map[NR_CPUS]; 45 46 * extern cpumask_t cpu_core_map[NR_CPUS]; 46 47 */ 48 + DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 47 49 DECLARE_PER_CPU(cpumask_t, cpu_core_map); 48 50 extern u8 cpu_llc_id[NR_CPUS]; 49 51
+1 -1
include/asm-x86/topology_32.h
··· 31 31 #define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id) 32 32 #define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) 33 33 #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) 34 - #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) 34 + #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 35 35 #endif 36 36 37 37 #ifdef CONFIG_NUMA
+1 -1
include/asm-x86/topology_64.h
··· 59 59 #define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id) 60 60 #define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) 61 61 #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) 62 - #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) 62 + #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 63 63 #define mc_capable() (boot_cpu_data.x86_max_cores > 1) 64 64 #define smt_capable() (smp_num_siblings > 1) 65 65 #endif
+4 -4
kernel/sched.c
··· 5869 5869 struct sched_group **sg) 5870 5870 { 5871 5871 int group; 5872 - cpumask_t mask = cpu_sibling_map[cpu]; 5872 + cpumask_t mask = per_cpu(cpu_sibling_map, cpu); 5873 5873 cpus_and(mask, mask, *cpu_map); 5874 5874 group = first_cpu(mask); 5875 5875 if (sg) ··· 5898 5898 cpus_and(mask, mask, *cpu_map); 5899 5899 group = first_cpu(mask); 5900 5900 #elif defined(CONFIG_SCHED_SMT) 5901 - cpumask_t mask = cpu_sibling_map[cpu]; 5901 + cpumask_t mask = per_cpu(cpu_sibling_map, cpu); 5902 5902 cpus_and(mask, mask, *cpu_map); 5903 5903 group = first_cpu(mask); 5904 5904 #else ··· 6132 6132 p = sd; 6133 6133 sd = &per_cpu(cpu_domains, i); 6134 6134 *sd = SD_SIBLING_INIT; 6135 - sd->span = cpu_sibling_map[i]; 6135 + sd->span = per_cpu(cpu_sibling_map, i); 6136 6136 cpus_and(sd->span, sd->span, *cpu_map); 6137 6137 sd->parent = p; 6138 6138 p->child = sd; ··· 6143 6143 #ifdef CONFIG_SCHED_SMT 6144 6144 /* Set up CPU (sibling) groups */ 6145 6145 for_each_cpu_mask(i, *cpu_map) { 6146 - cpumask_t this_sibling_map = cpu_sibling_map[i]; 6146 + cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i); 6147 6147 cpus_and(this_sibling_map, this_sibling_map, *cpu_map); 6148 6148 if (i != first_cpu(this_sibling_map)) 6149 6149 continue;