[S390] cpu topology: fix locking

On s390, cpu_coregroup_map() used to take a mutex, since it was only
called from process context.
Since commit c7c22e4d5c1fdebfac4dba76de7d0338c2b0d832 ("block: add
support for IO CPU affinity") this is no longer true: it now also gets
called from softirq context.

To prevent possible deadlocks, change the architecture code to use a
spinlock instead of a mutex.
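
As a minimal sketch of the hazard and the pattern the patch adopts (this
is not the s390 code; demo_lock, demo_data, demo_update and demo_read are
made-up names): a mutex may sleep, which is forbidden in softirq context,
and even a spinlock shared with softirq context can deadlock if a softirq
interrupts the lock holder on the same CPU and then spins on the lock it
already holds. Taking the lock with interrupts disabled closes that
window:

  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(demo_lock);
  static int demo_data;

  /*
   * Writer, called only from process context with interrupts enabled:
   * disabling interrupts while holding the lock prevents a softirq on
   * this CPU from interrupting us and spinning on demo_lock forever.
   */
  static void demo_update(int val)
  {
          spin_lock_irq(&demo_lock);
          demo_data = val;
          spin_unlock_irq(&demo_lock);
  }

  /*
   * Reader, callable from any context including softirq: save and
   * restore the interrupt state instead of assuming interrupts were
   * enabled on entry.
   */
  static int demo_read(void)
  {
          unsigned long flags;
          int val;

          spin_lock_irqsave(&demo_lock, flags);
          val = demo_data;
          spin_unlock_irqrestore(&demo_lock, flags);
          return val;
  }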

Cc: stable@kernel.org
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Heiko Carstens, committed by Martin Schwidefsky · 74af2831 85acc407

+7 -4
arch/s390/kernel/topology.c
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -65,18 +65,21 @@
 static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
+/* topology_lock protects the core linked list */
+static DEFINE_SPINLOCK(topology_lock);
 
 cpumask_t cpu_core_map[NR_CPUS];
 
 cpumask_t cpu_coregroup_map(unsigned int cpu)
 {
         struct core_info *core = &core_info;
+        unsigned long flags;
         cpumask_t mask;
 
         cpus_clear(mask);
         if (!machine_has_topology)
                 return cpu_present_map;
-        mutex_lock(&smp_cpu_state_mutex);
+        spin_lock_irqsave(&topology_lock, flags);
         while (core) {
                 if (cpu_isset(cpu, core->mask)) {
                         mask = core->mask;
@@ -84,7 +87,7 @@
                 }
                 core = core->next;
         }
-        mutex_unlock(&smp_cpu_state_mutex);
+        spin_unlock_irqrestore(&topology_lock, flags);
         if (cpus_empty(mask))
                 mask = cpumask_of_cpu(cpu);
         return mask;
@@ -133,7 +136,7 @@
         union tl_entry *tle, *end;
         struct core_info *core = &core_info;
 
-        mutex_lock(&smp_cpu_state_mutex);
+        spin_lock_irq(&topology_lock);
         clear_cores();
         tle = info->tle;
         end = (union tl_entry *)((unsigned long)info + info->length);
@@ -157,7 +160,7 @@
                 }
                 tle = next_tle(tle);
         }
-        mutex_unlock(&smp_cpu_state_mutex);
+        spin_unlock_irq(&topology_lock);
 }
 
 static void topology_update_polarization_simple(void)
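
Note the two lock-acquisition variants in the result: tl_to_cores() uses
the plain spin_lock_irq()/spin_unlock_irq() pair, which is safe only
because that path runs in process context with interrupts known to be
enabled, whereas cpu_coregroup_map() can now be entered from softirq
context (possibly with interrupts already disabled), so it has to save
and restore the interrupt state with spin_lock_irqsave()/
spin_unlock_irqrestore().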