Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: unify 32 and 64-bit node_to_cpumask_map

Impact: cleanup

We take the 64-bit code and use it on 32-bit as well. The new file
is called mm/numa.c.

In a minor cleanup, we use cpu_none_mask instead of declaring a local
cpu_mask_none.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>

+83 -94
+11 -19
arch/x86/include/asm/topology.h
··· 44 44 45 45 #ifdef CONFIG_X86_32 46 46 47 - /* Mappings between node number and cpus on that node. */ 48 - extern cpumask_t node_to_cpumask_map[]; 49 - 50 47 /* Mappings between logical cpu number and node number */ 51 48 extern int cpu_to_node_map[]; 52 49 ··· 54 57 } 55 58 #define early_cpu_to_node(cpu) cpu_to_node(cpu) 56 59 57 - /* Returns a bitmask of CPUs on Node 'node'. */ 58 - static inline const struct cpumask *cpumask_of_node(int node) 59 - { 60 - return &node_to_cpumask_map[node]; 61 - } 62 - 63 - static inline void setup_node_to_cpumask_map(void) { } 64 - 65 60 #else /* CONFIG_X86_64 */ 66 - 67 - /* Mappings between node number and cpus on that node. */ 68 - extern cpumask_t *node_to_cpumask_map; 69 61 70 62 /* Mappings between logical cpu number and node number */ 71 63 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); ··· 66 80 #ifdef CONFIG_DEBUG_PER_CPU_MAPS 67 81 extern int cpu_to_node(int cpu); 68 82 extern int early_cpu_to_node(int cpu); 69 - extern const cpumask_t *cpumask_of_node(int node); 70 83 71 84 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ 72 85 ··· 81 96 return early_per_cpu(x86_cpu_to_node_map, cpu); 82 97 } 83 98 99 + #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ 100 + 101 + #endif /* CONFIG_X86_64 */ 102 + 103 + /* Mappings between node number and cpus on that node. */ 104 + extern cpumask_t *node_to_cpumask_map; 105 + 106 + #ifdef CONFIG_DEBUG_PER_CPU_MAPS 107 + extern const cpumask_t *cpumask_of_node(int node); 108 + #else 84 109 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ 85 110 static inline const cpumask_t *cpumask_of_node(int node) 86 111 { 87 112 return &node_to_cpumask_map[node]; 88 113 } 89 - 90 - #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ 114 + #endif 91 115 92 116 extern void setup_node_to_cpumask_map(void); 93 - 94 - #endif /* CONFIG_X86_64 */ 95 117 96 118 /* 97 119 * Returns the number of the node containing Node 'node'. This
-5
arch/x86/kernel/smpboot.c
··· 115 115 atomic_t init_deasserted; 116 116 117 117 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) 118 - 119 - /* which logical CPUs are on which nodes */ 120 - cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly = 121 - { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE }; 122 - EXPORT_SYMBOL(node_to_cpumask_map); 123 118 /* which node each logical CPU is on */ 124 119 int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; 125 120 EXPORT_SYMBOL(cpu_to_node_map);
+1 -1
arch/x86/mm/Makefile
··· 14 14 mmiotrace-y := kmmio.o pf_in.o mmio-mod.o 15 15 obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o 16 16 17 - obj-$(CONFIG_NUMA) += numa_$(BITS).o 17 + obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o 18 18 obj-$(CONFIG_K8_NUMA) += k8topology_64.o 19 19 obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o 20 20
+71
arch/x86/mm/numa.c
··· 1 + /* Common code for 32 and 64-bit NUMA */ 2 + #include <linux/topology.h> 3 + #include <linux/module.h> 4 + #include <linux/bootmem.h> 5 + 6 + #ifdef CONFIG_DEBUG_PER_CPU_MAPS 7 + # define DBG(x...) printk(KERN_DEBUG x) 8 + #else 9 + # define DBG(x...) 10 + #endif 11 + 12 + /* 13 + * Which logical CPUs are on which nodes 14 + */ 15 + cpumask_t *node_to_cpumask_map; 16 + EXPORT_SYMBOL(node_to_cpumask_map); 17 + 18 + /* 19 + * Allocate node_to_cpumask_map based on number of available nodes 20 + * Requires node_possible_map to be valid. 21 + * 22 + * Note: node_to_cpumask() is not valid until after this is done. 23 + * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) 24 + */ 25 + void __init setup_node_to_cpumask_map(void) 26 + { 27 + unsigned int node, num = 0; 28 + cpumask_t *map; 29 + 30 + /* setup nr_node_ids if not done yet */ 31 + if (nr_node_ids == MAX_NUMNODES) { 32 + for_each_node_mask(node, node_possible_map) 33 + num = node; 34 + nr_node_ids = num + 1; 35 + } 36 + 37 + /* allocate the map */ 38 + map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t)); 39 + DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids); 40 + 41 + pr_debug("Node to cpumask map at %p for %d nodes\n", 42 + map, nr_node_ids); 43 + 44 + /* node_to_cpumask() will now work */ 45 + node_to_cpumask_map = map; 46 + } 47 + 48 + #ifdef CONFIG_DEBUG_PER_CPU_MAPS 49 + /* 50 + * Returns a pointer to the bitmask of CPUs on Node 'node'. 51 + */ 52 + const cpumask_t *cpumask_of_node(int node) 53 + { 54 + if (node_to_cpumask_map == NULL) { 55 + printk(KERN_WARNING 56 + "cpumask_of_node(%d): no node_to_cpumask_map!\n", 57 + node); 58 + dump_stack(); 59 + return cpu_online_mask; 60 + } 61 + if (node >= nr_node_ids) { 62 + printk(KERN_WARNING 63 + "cpumask_of_node(%d): node > nr_node_ids(%d)\n", 64 + node, nr_node_ids); 65 + dump_stack(); 66 + return cpu_none_mask; 67 + } 68 + return &node_to_cpumask_map[node]; 69 + } 70 + EXPORT_SYMBOL(cpumask_of_node); 71 + #endif
-69
arch/x86/mm/numa_64.c
··· 20 20 #include <asm/acpi.h> 21 21 #include <asm/k8.h> 22 22 23 - #ifdef CONFIG_DEBUG_PER_CPU_MAPS 24 - # define DBG(x...) printk(KERN_DEBUG x) 25 - #else 26 - # define DBG(x...) 27 - #endif 28 - 29 23 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; 30 24 EXPORT_SYMBOL(node_data); 31 25 ··· 41 47 */ 42 48 DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); 43 49 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); 44 - 45 - /* 46 - * Which logical CPUs are on which nodes 47 - */ 48 - cpumask_t *node_to_cpumask_map; 49 - EXPORT_SYMBOL(node_to_cpumask_map); 50 50 51 51 /* 52 52 * Given a shift value, try to populate memnodemap[] ··· 649 661 #endif 650 662 651 663 652 - /* 653 - * Allocate node_to_cpumask_map based on number of available nodes 654 - * Requires node_possible_map to be valid. 655 - * 656 - * Note: node_to_cpumask() is not valid until after this is done. 657 - * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) 658 - */ 659 - void __init setup_node_to_cpumask_map(void) 660 - { 661 - unsigned int node, num = 0; 662 - cpumask_t *map; 663 - 664 - /* setup nr_node_ids if not done yet */ 665 - if (nr_node_ids == MAX_NUMNODES) { 666 - for_each_node_mask(node, node_possible_map) 667 - num = node; 668 - nr_node_ids = num + 1; 669 - } 670 - 671 - /* allocate the map */ 672 - map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t)); 673 - DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids); 674 - 675 - pr_debug("Node to cpumask map at %p for %d nodes\n", 676 - map, nr_node_ids); 677 - 678 - /* node_to_cpumask() will now work */ 679 - node_to_cpumask_map = map; 680 - } 681 - 682 664 void __cpuinit numa_set_node(int cpu, int node) 683 665 { 684 666 int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); ··· 756 798 } 757 799 return per_cpu(x86_cpu_to_node_map, cpu); 758 800 } 759 - 760 - 761 - /* empty cpumask */ 762 - static const cpumask_t cpu_mask_none; 763 - 764 - /* 765 - * Returns a pointer to the bitmask of CPUs on Node 'node'. 766 - */ 767 - const cpumask_t *cpumask_of_node(int node) 768 - { 769 - if (node_to_cpumask_map == NULL) { 770 - printk(KERN_WARNING 771 - "cpumask_of_node(%d): no node_to_cpumask_map!\n", 772 - node); 773 - dump_stack(); 774 - return (const cpumask_t *)&cpu_online_map; 775 - } 776 - if (node >= nr_node_ids) { 777 - printk(KERN_WARNING 778 - "cpumask_of_node(%d): node > nr_node_ids(%d)\n", 779 - node, nr_node_ids); 780 - dump_stack(); 781 - return &cpu_mask_none; 782 - } 783 - return &node_to_cpumask_map[node]; 784 - } 785 - EXPORT_SYMBOL(cpumask_of_node); 786 801 787 802 /* 788 803 * --------- end of debug versions of the numa functions ---------