[S390] smp: __smp_call_function_map vs cpu_online_map fix.

Both smp_call_function() and __smp_call_function_map() access
cpu_online_map. Both functions run with preemption disabled, which
protects against cpus going offline. However, new cpus can still come
online, so cpu_online_map can change unexpectedly.
Therefore take call_lock in start_secondary() and in all smp_call_*
functions to protect against concurrent changes to cpu_online_map.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Heiko Carstens, committed by Martin Schwidefsky (85cb185d, f455adcf)

+8 -8
arch/s390/kernel/smp.c
···
 	if (wait)
 		data.finished = CPU_MASK_NONE;
 
-	spin_lock(&call_lock);
 	call_data = &data;
 
 	for_each_cpu_mask(cpu, map)
···
 	if (wait)
 		while (!cpus_equal(map, data.finished))
 			cpu_relax();
-	spin_unlock(&call_lock);
 out:
 	if (local) {
 		local_irq_disable();
···
 {
 	cpumask_t map;
 
-	preempt_disable();
+	spin_lock(&call_lock);
 	map = cpu_online_map;
 	cpu_clear(smp_processor_id(), map);
 	__smp_call_function_map(func, info, nonatomic, wait, map);
-	preempt_enable();
+	spin_unlock(&call_lock);
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
···
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 			     int nonatomic, int wait)
 {
-	preempt_disable();
+	spin_lock(&call_lock);
 	__smp_call_function_map(func, info, nonatomic, wait,
 				cpumask_of_cpu(cpu));
-	preempt_enable();
+	spin_unlock(&call_lock);
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function_single);
···
 int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 			   int wait)
 {
-	preempt_disable();
+	spin_lock(&call_lock);
 	cpu_clear(smp_processor_id(), mask);
 	__smp_call_function_map(func, info, 0, wait, mask);
-	preempt_enable();
+	spin_unlock(&call_lock);
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function_mask);
···
 	pfault_init();
 
 	/* Mark this cpu as online */
+	spin_lock(&call_lock);
 	cpu_set(smp_processor_id(), cpu_online_map);
+	spin_unlock(&call_lock);
 	/* Switch on interrupts */
 	local_irq_enable();
 	/* Print info about this processor */
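
The pattern the patch establishes is that cpu_online_map only gains bits under
call_lock, and every smp_call_* path snapshots and dispatches over the map while
holding that same lock. Below is a minimal, self-contained userspace sketch of
that locking pattern, using a pthread mutex in place of the kernel spinlock; the
names fake_online_mask, bring_cpu_online() and call_on_online_cpus() are
hypothetical and only illustrate the idea, they are not the kernel code.

/*
 * Sketch of the call_lock pattern: one lock serializes readers of the
 * online mask against the bring-up path that sets new bits in it.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t fake_online_mask = 0x1;		/* boot cpu is online */

/* Analogue of start_secondary(): mark a new cpu online under call_lock. */
static void bring_cpu_online(int cpu)
{
	pthread_mutex_lock(&call_lock);
	fake_online_mask |= 1ULL << cpu;
	pthread_mutex_unlock(&call_lock);
}

/*
 * Analogue of smp_call_function(): snapshot the online mask and run func
 * once per online cpu.  Holding call_lock means the mask cannot gain new
 * bits while we dispatch, so no cpu is targeted before it is ready.
 */
static void call_on_online_cpus(void (*func)(int cpu, void *info), void *info)
{
	uint64_t map;
	int cpu;

	pthread_mutex_lock(&call_lock);
	map = fake_online_mask;
	for (cpu = 0; cpu < 64; cpu++)
		if (map & (1ULL << cpu))
			func(cpu, info);
	pthread_mutex_unlock(&call_lock);
}

static void say_hello(int cpu, void *info)
{
	printf("%s on cpu %d\n", (const char *)info, cpu);
}

int main(void)
{
	call_on_online_cpus(say_hello, "before hotplug");
	bring_cpu_online(1);		/* simulate a cpu coming online */
	call_on_online_cpus(say_hello, "after hotplug");
	return 0;
}

The detail mirrored from the patch is that the snapshot and the dispatch happen
under the same lock the bring-up path takes before setting a new bit, so a
caller can never observe a cpu that is only partially onlined.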