Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/spinlock: use atomic primitives for spinlocks

Add a couple more __atomic_xxx functions to atomic_ops.h and use them
to replace the compare-and-swap inlines in the spinlock code. This
changes the type of the lock value from unsigned int to int.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

+70 -76
+12 -10
arch/s390/include/asm/atomic_ops.h
··· 111 111 112 112 static inline int __atomic_cmpxchg(int *ptr, int old, int new) 113 113 { 114 - asm volatile( 115 - " cs %[old],%[new],%[ptr]" 116 - : [old] "+d" (old), [ptr] "+Q" (*ptr) 117 - : [new] "d" (new) : "cc", "memory"); 118 - return old; 114 + return __sync_val_compare_and_swap(ptr, old, new); 115 + } 116 + 117 + static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new) 118 + { 119 + return __sync_bool_compare_and_swap(ptr, old, new); 119 120 } 120 121 121 122 static inline long __atomic64_cmpxchg(long *ptr, long old, long new) 122 123 { 123 - asm volatile( 124 - " csg %[old],%[new],%[ptr]" 125 - : [old] "+d" (old), [ptr] "+Q" (*ptr) 126 - : [new] "d" (new) : "cc", "memory"); 127 - return old; 124 + return __sync_val_compare_and_swap(ptr, old, new); 125 + } 126 + 127 + static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new) 128 + { 129 + return __sync_bool_compare_and_swap(ptr, old, new); 128 130 } 129 131 130 132 #endif /* __ARCH_S390_ATOMIC_OPS__ */
+20 -25
arch/s390/include/asm/spinlock.h
··· 10 10 #define __ASM_SPINLOCK_H 11 11 12 12 #include <linux/smp.h> 13 + #include <asm/atomic_ops.h> 13 14 #include <asm/barrier.h> 14 15 #include <asm/processor.h> 15 16 16 17 #define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval) 17 18 18 19 extern int spin_retry; 19 - 20 - static inline int 21 - _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new) 22 - { 23 - return __sync_bool_compare_and_swap(lock, old, new); 24 - } 25 20 26 21 #ifndef CONFIG_SMP 27 22 static inline bool arch_vcpu_is_preempted(int cpu) { return false; } ··· 35 40 * (the type definitions are in asm/spinlock_types.h) 36 41 */ 37 42 38 - void arch_lock_relax(unsigned int cpu); 43 + void arch_lock_relax(int cpu); 39 44 40 45 void arch_spin_lock_wait(arch_spinlock_t *); 41 46 int arch_spin_trylock_retry(arch_spinlock_t *); ··· 65 70 { 66 71 barrier(); 67 72 return likely(arch_spin_value_unlocked(*lp) && 68 - _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL)); 73 + __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL)); 69 74 } 70 75 71 76 static inline void arch_spin_lock(arch_spinlock_t *lp) ··· 90 95 91 96 static inline void arch_spin_unlock(arch_spinlock_t *lp) 92 97 { 93 - typecheck(unsigned int, lp->lock); 98 + typecheck(int, lp->lock); 94 99 asm volatile( 95 100 "st %1,%0\n" 96 101 : "+Q" (lp->lock) ··· 136 141 137 142 static inline int arch_read_trylock_once(arch_rwlock_t *rw) 138 143 { 139 - unsigned int old = ACCESS_ONCE(rw->lock); 140 - return likely((int) old >= 0 && 141 - _raw_compare_and_swap(&rw->lock, old, old + 1)); 144 + int old = ACCESS_ONCE(rw->lock); 145 + return likely(old >= 0 && 146 + __atomic_cmpxchg_bool(&rw->lock, old, old + 1)); 142 147 } 143 148 144 149 static inline int arch_write_trylock_once(arch_rwlock_t *rw) 145 150 { 146 - unsigned int old = ACCESS_ONCE(rw->lock); 151 + int old = ACCESS_ONCE(rw->lock); 147 152 return likely(old == 0 && 148 - _raw_compare_and_swap(&rw->lock, 0, 0x80000000)); 153 + __atomic_cmpxchg_bool(&rw->lock, 0, 
0x80000000)); 149 154 } 150 155 151 156 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES ··· 156 161 157 162 #define __RAW_LOCK(ptr, op_val, op_string) \ 158 163 ({ \ 159 - unsigned int old_val; \ 164 + int old_val; \ 160 165 \ 161 - typecheck(unsigned int *, ptr); \ 166 + typecheck(int *, ptr); \ 162 167 asm volatile( \ 163 168 op_string " %0,%2,%1\n" \ 164 169 "bcr 14,0\n" \ ··· 170 175 171 176 #define __RAW_UNLOCK(ptr, op_val, op_string) \ 172 177 ({ \ 173 - unsigned int old_val; \ 178 + int old_val; \ 174 179 \ 175 - typecheck(unsigned int *, ptr); \ 180 + typecheck(int *, ptr); \ 176 181 asm volatile( \ 177 182 op_string " %0,%2,%1\n" \ 178 183 : "=d" (old_val), "+Q" (*ptr) \ ··· 182 187 }) 183 188 184 189 extern void _raw_read_lock_wait(arch_rwlock_t *lp); 185 - extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev); 190 + extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev); 186 191 187 192 static inline void arch_read_lock(arch_rwlock_t *rw) 188 193 { 189 - unsigned int old; 194 + int old; 190 195 191 196 old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD); 192 - if ((int) old < 0) 197 + if (old < 0) 193 198 _raw_read_lock_wait(rw); 194 199 } 195 200 ··· 200 205 201 206 static inline void arch_write_lock(arch_rwlock_t *rw) 202 207 { 203 - unsigned int old; 208 + int old; 204 209 205 210 old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR); 206 211 if (old != 0) ··· 227 232 228 233 static inline void arch_read_unlock(arch_rwlock_t *rw) 229 234 { 230 - unsigned int old; 235 + int old; 231 236 232 237 do { 233 238 old = ACCESS_ONCE(rw->lock); 234 - } while (!_raw_compare_and_swap(&rw->lock, old, old - 1)); 239 + } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1)); 235 240 } 236 241 237 242 static inline void arch_write_lock(arch_rwlock_t *rw) ··· 243 248 244 249 static inline void arch_write_unlock(arch_rwlock_t *rw) 245 250 { 246 - typecheck(unsigned int, rw->lock); 251 + typecheck(int, rw->lock); 247 252 248 253 rw->owner = 0; 249 254 asm 
volatile(
+3 -3
arch/s390/include/asm/spinlock_types.h
··· 6 6 #endif 7 7 8 8 typedef struct { 9 - unsigned int lock; 9 + int lock; 10 10 } __attribute__ ((aligned (4))) arch_spinlock_t; 11 11 12 12 #define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, } 13 13 14 14 typedef struct { 15 - unsigned int lock; 16 - unsigned int owner; 15 + int lock; 16 + int owner; 17 17 } arch_rwlock_t; 18 18 19 19 #define __ARCH_RW_LOCK_UNLOCKED { 0 }
+35 -38
arch/s390/lib/spinlock.c
··· 32 32 } 33 33 __setup("spin_retry=", spin_retry_setup); 34 34 35 - static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old) 35 + static inline void compare_and_delay(int *lock, int old) 36 36 { 37 37 asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock)); 38 38 } 39 39 40 40 void arch_spin_lock_wait(arch_spinlock_t *lp) 41 41 { 42 - unsigned int cpu = SPINLOCK_LOCKVAL; 43 - unsigned int owner; 44 - int count, first_diag; 42 + int cpu = SPINLOCK_LOCKVAL; 43 + int owner, count, first_diag; 45 44 46 45 first_diag = 1; 47 46 while (1) { 48 47 owner = ACCESS_ONCE(lp->lock); 49 48 /* Try to get the lock if it is free. */ 50 49 if (!owner) { 51 - if (_raw_compare_and_swap(&lp->lock, 0, cpu)) 50 + if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu)) 52 51 return; 53 52 continue; 54 53 } ··· 61 62 count = spin_retry; 62 63 do { 63 64 if (MACHINE_HAS_CAD) 64 - _raw_compare_and_delay(&lp->lock, owner); 65 + compare_and_delay(&lp->lock, owner); 65 66 owner = ACCESS_ONCE(lp->lock); 66 67 } while (owner && count-- > 0); 67 68 if (!owner) ··· 81 82 82 83 void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) 83 84 { 84 - unsigned int cpu = SPINLOCK_LOCKVAL; 85 - unsigned int owner; 86 - int count, first_diag; 85 + int cpu = SPINLOCK_LOCKVAL; 86 + int owner, count, first_diag; 87 87 88 88 local_irq_restore(flags); 89 89 first_diag = 1; ··· 91 93 /* Try to get the lock if it is free. 
*/ 92 94 if (!owner) { 93 95 local_irq_disable(); 94 - if (_raw_compare_and_swap(&lp->lock, 0, cpu)) 96 + if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu)) 95 97 return; 96 98 local_irq_restore(flags); 97 99 continue; ··· 106 108 count = spin_retry; 107 109 do { 108 110 if (MACHINE_HAS_CAD) 109 - _raw_compare_and_delay(&lp->lock, owner); 111 + compare_and_delay(&lp->lock, owner); 110 112 owner = ACCESS_ONCE(lp->lock); 111 113 } while (owner && count-- > 0); 112 114 if (!owner) ··· 126 128 127 129 int arch_spin_trylock_retry(arch_spinlock_t *lp) 128 130 { 129 - unsigned int cpu = SPINLOCK_LOCKVAL; 130 - unsigned int owner; 131 - int count; 131 + int cpu = SPINLOCK_LOCKVAL; 132 + int owner, count; 132 133 133 134 for (count = spin_retry; count > 0; count--) { 134 135 owner = READ_ONCE(lp->lock); 135 136 /* Try to get the lock if it is free. */ 136 137 if (!owner) { 137 - if (_raw_compare_and_swap(&lp->lock, 0, cpu)) 138 + if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu)) 138 139 return 1; 139 140 } else if (MACHINE_HAS_CAD) 140 - _raw_compare_and_delay(&lp->lock, owner); 141 + compare_and_delay(&lp->lock, owner); 141 142 } 142 143 return 0; 143 144 } ··· 144 147 145 148 void _raw_read_lock_wait(arch_rwlock_t *rw) 146 149 { 147 - unsigned int owner, old; 148 150 int count = spin_retry; 151 + int owner, old; 149 152 150 153 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 151 154 __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD); ··· 159 162 } 160 163 old = ACCESS_ONCE(rw->lock); 161 164 owner = ACCESS_ONCE(rw->owner); 162 - if ((int) old < 0) { 165 + if (old < 0) { 163 166 if (MACHINE_HAS_CAD) 164 - _raw_compare_and_delay(&rw->lock, old); 167 + compare_and_delay(&rw->lock, old); 165 168 continue; 166 169 } 167 - if (_raw_compare_and_swap(&rw->lock, old, old + 1)) 170 + if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1)) 168 171 return; 169 172 } 170 173 } ··· 172 175 173 176 int _raw_read_trylock_retry(arch_rwlock_t *rw) 174 177 { 175 - unsigned int old; 176 178 int count = spin_retry; 179 + int 
old; 177 180 178 181 while (count-- > 0) { 179 182 old = ACCESS_ONCE(rw->lock); 180 - if ((int) old < 0) { 183 + if (old < 0) { 181 184 if (MACHINE_HAS_CAD) 182 - _raw_compare_and_delay(&rw->lock, old); 185 + compare_and_delay(&rw->lock, old); 183 186 continue; 184 187 } 185 - if (_raw_compare_and_swap(&rw->lock, old, old + 1)) 188 + if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1)) 186 189 return 1; 187 190 } 188 191 return 0; ··· 191 194 192 195 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 193 196 194 - void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev) 197 + void _raw_write_lock_wait(arch_rwlock_t *rw, int prev) 195 198 { 196 - unsigned int owner, old; 197 199 int count = spin_retry; 200 + int owner, old; 198 201 199 202 owner = 0; 200 203 while (1) { ··· 206 209 old = ACCESS_ONCE(rw->lock); 207 210 owner = ACCESS_ONCE(rw->owner); 208 211 smp_mb(); 209 - if ((int) old >= 0) { 212 + if (old >= 0) { 210 213 prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR); 211 214 old = prev; 212 215 } 213 - if ((old & 0x7fffffff) == 0 && (int) prev >= 0) 216 + if ((old & 0x7fffffff) == 0 && prev >= 0) 214 217 break; 215 218 if (MACHINE_HAS_CAD) 216 - _raw_compare_and_delay(&rw->lock, old); 219 + compare_and_delay(&rw->lock, old); 217 220 } 218 221 } 219 222 EXPORT_SYMBOL(_raw_write_lock_wait); ··· 222 225 223 226 void _raw_write_lock_wait(arch_rwlock_t *rw) 224 227 { 225 - unsigned int owner, old, prev; 226 228 int count = spin_retry; 229 + int owner, old, prev; 227 230 228 231 prev = 0x80000000; 229 232 owner = 0; ··· 235 238 } 236 239 old = ACCESS_ONCE(rw->lock); 237 240 owner = ACCESS_ONCE(rw->owner); 238 - if ((int) old >= 0 && 239 - _raw_compare_and_swap(&rw->lock, old, old | 0x80000000)) 241 + if (old >= 0 && 242 + __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000)) 240 243 prev = old; 241 244 else 242 245 smp_mb(); 243 - if ((old & 0x7fffffff) == 0 && (int) prev >= 0) 246 + if ((old & 0x7fffffff) == 0 && prev >= 0) 244 247 break; 245 248 if 
(MACHINE_HAS_CAD) 246 - _raw_compare_and_delay(&rw->lock, old); 249 + compare_and_delay(&rw->lock, old); 247 250 } 248 251 } 249 252 EXPORT_SYMBOL(_raw_write_lock_wait); ··· 252 255 253 256 int _raw_write_trylock_retry(arch_rwlock_t *rw) 254 257 { 255 - unsigned int old; 256 258 int count = spin_retry; 259 + int old; 257 260 258 261 while (count-- > 0) { 259 262 old = ACCESS_ONCE(rw->lock); 260 263 if (old) { 261 264 if (MACHINE_HAS_CAD) 262 - _raw_compare_and_delay(&rw->lock, old); 265 + compare_and_delay(&rw->lock, old); 263 266 continue; 264 267 } 265 - if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) 268 + if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000)) 266 269 return 1; 267 270 } 268 271 return 0; 269 272 } 270 273 EXPORT_SYMBOL(_raw_write_trylock_retry); 271 274 272 - void arch_lock_relax(unsigned int cpu) 275 + void arch_lock_relax(int cpu) 273 276 { 274 277 if (!cpu) 275 278 return;