Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

powerpc/cpumask: Dynamically allocate cpu_sibling_map and cpu_core_map cpumasks

Dynamically allocate cpu_sibling_map and cpu_core_map cpumasks.

We don't need to set_cpu_online() the boot cpu in smp_prepare_boot_cpu();
init/main.c does it for us.

We also postpone setting the boot cpu in cpu_sibling_map and cpu_core_map
until the memory allocator is available (smp_prepare_cpus), similar to x86.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
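
A note on why the allocation has to wait: cpumask_t embeds a full NR_CPUS-sized
bitmap in every per-cpu variable, while cpumask_var_t becomes a bare pointer when
CONFIG_CPUMASK_OFFSTACK is enabled and therefore needs a runtime allocation via
the slab allocator, which is not yet up when smp_prepare_boot_cpu() runs. The
sketch below condenses the pattern the patch adds to smp_prepare_cpus(); it is
illustrative only, and the helper name setup_cpu_maps_early() is invented for
the example.

#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/topology.h>

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

/* Runs from smp_prepare_cpus(), i.e. after the slab allocator is up. */
static void __init setup_cpu_maps_early(int boot_cpuid)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/*
		 * A real allocation only happens with CONFIG_CPUMASK_OFFSTACK;
		 * otherwise the zalloc variant simply clears the mask.
		 */
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	/* The boot CPU is always a sibling and core-mate of itself. */
	cpumask_set_cpu(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
	cpumask_set_cpu(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
}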

Authored by Anton Blanchard, committed by Benjamin Herrenschmidt
cc1ba8ea e6532c63

4 files changed, +40 -24
arch/powerpc/include/asm/smp.h (+13 -3)
@@ -68,8 +68,19 @@
 }
 #endif
 
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+
+static inline struct cpumask *cpu_sibling_mask(int cpu)
+{
+	return per_cpu(cpu_sibling_map, cpu);
+}
+
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+	return per_cpu(cpu_core_map, cpu);
+}
+
 extern int cpu_to_core_id(int cpu);
 
 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
@@ -104,7 +93,6 @@
 void smp_init_cell(void);
 void smp_init_celleb(void);
 void smp_setup_cpu_maps(void);
-void smp_setup_cpu_sibling_map(void);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
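
The new cpu_sibling_mask()/cpu_core_mask() helpers hand callers a struct cpumask *
directly, so call sites no longer take the address of a per-cpu cpumask_t. A small
usage sketch, assuming these accessors; the counting helper itself is hypothetical
and not part of the patch:

#include <linux/cpumask.h>
#include <asm/smp.h>

/* Count how many hardware threads sharing a core with 'cpu' are online. */
static int online_siblings_of(int cpu)
{
	int i, n = 0;

	for_each_cpu(i, cpu_sibling_mask(cpu))
		if (cpu_online(i))
			n++;

	return n;
}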
arch/powerpc/include/asm/topology.h (+2 -2)
@@ -112,8 +112,8 @@
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
 
-#define topology_thread_cpumask(cpu)	(&per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu)	(&per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)	(cpu_to_core_id(cpu))
 #endif
 #endif
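
Dropping the '&' in these macros follows from the type change: per_cpu() on a
cpumask_var_t already yields something usable as a struct cpumask * (a pointer
under CONFIG_CPUMASK_OFFSTACK, a one-element array that decays to a pointer
otherwise), so taking its address would be wrong. Roughly, simplified from
include/linux/cpumask.h:

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Off-stack: just a pointer, filled in by alloc_cpumask_var() and friends. */
typedef struct cpumask *cpumask_var_t;
#else
/* On-stack: a one-element array, so it decays to struct cpumask * in use. */
typedef struct cpumask cpumask_var_t[1];
#endif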
arch/powerpc/kernel/smp.c (+24 -18)
@@ -59,8 +59,8 @@
 
 struct thread_info *secondary_ti;
 
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
-DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
@@ -271,6 +271,16 @@
 	smp_store_cpu_info(boot_cpuid);
 	cpu_callin_map[boot_cpuid] = 1;
 
+	for_each_possible_cpu(cpu) {
+		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
+					GFP_KERNEL, cpu_to_node(cpu));
+		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
+					GFP_KERNEL, cpu_to_node(cpu));
+	}
+
+	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
+	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+
 	if (smp_ops)
 		if (smp_ops->probe)
 			max_cpus = smp_ops->probe();
@@ -299,10 +289,6 @@
 void __devinit smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != boot_cpuid);
-
-	set_cpu_online(boot_cpuid, true);
-	cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
-	cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
 #ifdef CONFIG_PPC64
 	paca[boot_cpuid].__current = current;
 #endif
@@ -531,15 +525,15 @@
 	for (i = 0; i < threads_per_core; i++) {
 		if (cpu_is_offline(base + i))
 			continue;
-		cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
-		cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
+		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
 
 		/* cpu_core_map should be a superset of
 		 * cpu_sibling_map even if we don't have cache
 		 * information, so update the former here, too.
 		 */
-		cpu_set(cpu, per_cpu(cpu_core_map, base +i));
-		cpu_set(base + i, per_cpu(cpu_core_map, cpu));
+		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
+		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
 	}
 	l2_cache = cpu_to_l2cache(cpu);
 	for_each_online_cpu(i) {
@@ -547,8 +541,8 @@
 		if (!np)
 			continue;
 		if (np == l2_cache) {
-			cpu_set(cpu, per_cpu(cpu_core_map, i));
-			cpu_set(i, per_cpu(cpu_core_map, cpu));
+			cpumask_set_cpu(cpu, cpu_core_mask(i));
+			cpumask_set_cpu(i, cpu_core_mask(cpu));
 		}
 		of_node_put(np);
 	}
@@ -608,10 +602,10 @@
 	/* Update sibling maps */
 	base = cpu_first_thread_in_core(cpu);
 	for (i = 0; i < threads_per_core; i++) {
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
-		cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
-		cpu_clear(cpu, per_cpu(cpu_core_map, base +i));
-		cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
+		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
+		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
+		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
+		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
 	}
 
 	l2_cache = cpu_to_l2cache(cpu);
@@ -620,8 +614,8 @@
 		if (!np)
 			continue;
 		if (np == l2_cache) {
-			cpu_clear(cpu, per_cpu(cpu_core_map, i));
-			cpu_clear(i, per_cpu(cpu_core_map, cpu));
+			cpumask_clear_cpu(cpu, cpu_core_mask(i));
+			cpumask_clear_cpu(i, cpu_core_mask(cpu));
 		}
 		of_node_put(np);
 	}
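
The smp.c call sites all follow the same mechanical conversion: the old
cpu_set()/cpu_clear() macros operated on a cpumask_t lvalue, while
cpumask_set_cpu()/cpumask_clear_cpu() take the struct cpumask * that the new
accessors return. A condensed sketch of the sibling-map linking done when a
secondary CPU comes up; the helper name link_threads() is invented for
illustration:

#include <linux/cpumask.h>
#include <asm/smp.h>

/* Mark two hardware threads of the same core as siblings of each other. */
static void link_threads(int cpu, int other)
{
	cpumask_set_cpu(cpu, cpu_sibling_mask(other));
	cpumask_set_cpu(other, cpu_sibling_mask(cpu));

	/* cpu_core_map is kept as a superset of cpu_sibling_map. */
	cpumask_set_cpu(cpu, cpu_core_mask(other));
	cpumask_set_cpu(other, cpu_core_mask(cpu));
}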
arch/powerpc/platforms/cell/cbe_cpufreq.c (+1 -1)
@@ -118,7 +118,7 @@
 	policy->cur = cbe_freqs[cur_pmode].frequency;
 
 #ifdef CONFIG_SMP
-	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
 	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);