x86, numa: Fix CONFIG_DEBUG_PER_CPU_MAPS without NUMA emulation

"x86, numa: Fake node-to-cpumask for NUMA emulation" broke the
build when CONFIG_DEBUG_PER_CPU_MAPS is set and CONFIG_NUMA_EMU
is not. This is because it is possible to map a cpu to multiple
nodes when NUMA emulation is used; the patch required a physical
node address table to find those nodes that was only available
when CONFIG_NUMA_EMU was enabled.
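
For reference, this is roughly the shape that broke (a condensed,
paraphrased sketch of the pre-patch code shown in the hunks below;
the physnodes[] declaration is only approximate):

    /* Illustrative sketch only, not the literal pre-patch source. */
    #ifdef CONFIG_NUMA_EMU
    /* physical node address table: exists only for NUMA emulation */
    static struct bootnode physnodes[MAX_NUMNODES];
    #endif

    #ifdef CONFIG_DEBUG_PER_CPU_MAPS
    static void __cpuinit numa_set_cpumask(int cpu, int enable)
    {
            int node = early_cpu_to_node(cpu);
            int i;

            for_each_online_node(i) {
                    unsigned long addr = node_start_pfn(i) << PAGE_SHIFT;

                    /*
                     * Build failure when CONFIG_NUMA_EMU is not set:
                     * physnodes[] is not defined in that configuration.
                     */
                    if (addr < physnodes[node].start ||
                        addr >= physnodes[node].end)
                            continue;
                    /* ... set or clear cpu in node_to_cpumask_map[node] ... */
            }
    }
    #endif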

This extracts the common CONFIG_DEBUG_PER_CPU_MAPS debug
functionality into its own helper, debug_cpumask_set_cpu(), and
uses it whether or not CONFIG_NUMA_EMU is set.

With NUMA emulation enabled, numa_set_cpumask() now iterates over
the set of possible nodes for each cpu and calls the new debug
helper for each of them; without NUMA emulation, only the cpu's
own node is used.
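
The resulting call chain, condensed (the numa_add_cpu() and
numa_remove_cpu() bodies sit outside the hunks below and are
paraphrased here as a sketch):

    /* Sketch of the CONFIG_DEBUG_PER_CPU_MAPS entry points. */
    void __cpuinit numa_add_cpu(int cpu)
    {
            numa_set_cpumask(cpu, 1);       /* cpu brought online */
    }

    void __cpuinit numa_remove_cpu(int cpu)
    {
            numa_set_cpumask(cpu, 0);       /* cpu taken offline */
    }

    /*
     * Without CONFIG_NUMA_EMU, numa_set_cpumask() calls
     * debug_cpumask_set_cpu() once, for the cpu's node.  With
     * CONFIG_NUMA_EMU it walks the online nodes and calls the helper
     * for each node the cpu can map to.  Either way the NULL check,
     * cpulist_scnprintf() and the KERN_DEBUG printk now live in one
     * place: debug_cpumask_set_cpu().
     */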

Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <alpine.DEB.2.00.1012301053590.12995@chino.kir.corp.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

authored by David Rientjes, committed by Ingo Molnar (d906f0eb, d50e8fc7)

+41 -15
arch/x86/mm/numa_64.c
···
 #endif /* !CONFIG_NUMA_EMU */

 #else /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-/*
- * --------- debug versions of the numa functions ---------
- */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
 {
         int node = early_cpu_to_node(cpu);
         struct cpumask *mask;
         char buf[64];
+
+        mask = node_to_cpumask_map[node];
+        if (!mask) {
+                pr_err("node_to_cpumask_map[%i] NULL\n", node);
+                dump_stack();
+                return NULL;
+        }
+
+        cpulist_scnprintf(buf, sizeof(buf), mask);
+        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+                enable ? "numa_add_cpu" : "numa_remove_cpu",
+                cpu, node, buf);
+        return mask;
+}
+
+/*
+ * --------- debug versions of the numa functions ---------
+ */
+#ifndef CONFIG_NUMA_EMU
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+        struct cpumask *mask;
+
+        mask = debug_cpumask_set_cpu(cpu, enable);
+        if (!mask)
+                return;
+
+        if (enable)
+                cpumask_set_cpu(cpu, mask);
+        else
+                cpumask_clear_cpu(cpu, mask);
+}
+#else
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+        int node = early_cpu_to_node(cpu);
+        struct cpumask *mask;
         int i;

         for_each_online_node(i) {
···
                 if (addr < physnodes[node].start ||
                     addr >= physnodes[node].end)
                         continue;
-                mask = node_to_cpumask_map[node];
-                if (mask == NULL) {
-                        pr_err("node_to_cpumask_map[%i] NULL\n", i);
-                        dump_stack();
+                mask = debug_cpumask_set_cpu(cpu, enable);
+                if (!mask)
                         return;
-                }

                 if (enable)
                         cpumask_set_cpu(cpu, mask);
                 else
                         cpumask_clear_cpu(cpu, mask);
-
-                cpulist_scnprintf(buf, sizeof(buf), mask);
-                printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-                        enable ? "numa_add_cpu" : "numa_remove_cpu",
-                        cpu, node, buf);
         }
 }
+#endif /* CONFIG_NUMA_EMU */

 void __cpuinit numa_add_cpu(int cpu)
 {