Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/rwlock: use the interlocked-access facility 1 instructions

Make use of the load-and-add, load-and-or and load-and-and instructions
to atomically update the read-write lock without a compare-and-swap loop.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

+108 -2
+74 -2
arch/s390/include/asm/spinlock.h
··· 130 130 */ 131 131 #define arch_write_can_lock(x) ((x)->lock == 0) 132 132 133 - extern void _raw_read_lock_wait(arch_rwlock_t *lp); 134 - extern void _raw_write_lock_wait(arch_rwlock_t *lp); 135 133 extern int _raw_read_trylock_retry(arch_rwlock_t *lp); 136 134 extern int _raw_write_trylock_retry(arch_rwlock_t *lp); 137 135 ··· 149 151 return likely(old == 0 && 150 152 _raw_compare_and_swap(&rw->lock, 0, 0x80000000)); 151 153 } 154 + 155 + #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 156 + 157 + #define __RAW_OP_OR "lao" 158 + #define __RAW_OP_AND "lan" 159 + #define __RAW_OP_ADD "laa" 160 + 161 + #define __RAW_LOCK(ptr, op_val, op_string) \ 162 + ({ \ 163 + unsigned int old_val; \ 164 + \ 165 + typecheck(unsigned int *, ptr); \ 166 + asm volatile( \ 167 + op_string " %0,%2,%1\n" \ 168 + "bcr 14,0\n" \ 169 + : "=d" (old_val), "+Q" (*ptr) \ 170 + : "d" (op_val) \ 171 + : "cc", "memory"); \ 172 + old_val; \ 173 + }) 174 + 175 + #define __RAW_UNLOCK(ptr, op_val, op_string) \ 176 + ({ \ 177 + unsigned int old_val; \ 178 + \ 179 + typecheck(unsigned int *, ptr); \ 180 + asm volatile( \ 181 + "bcr 14,0\n" \ 182 + op_string " %0,%2,%1\n" \ 183 + : "=d" (old_val), "+Q" (*ptr) \ 184 + : "d" (op_val) \ 185 + : "cc", "memory"); \ 186 + old_val; \ 187 + }) 188 + 189 + extern void _raw_read_lock_wait(arch_rwlock_t *lp); 190 + extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev); 191 + 192 + static inline void arch_read_lock(arch_rwlock_t *rw) 193 + { 194 + unsigned int old; 195 + 196 + old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD); 197 + if ((int) old < 0) 198 + _raw_read_lock_wait(rw); 199 + } 200 + 201 + static inline void arch_read_unlock(arch_rwlock_t *rw) 202 + { 203 + __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD); 204 + } 205 + 206 + static inline void arch_write_lock(arch_rwlock_t *rw) 207 + { 208 + unsigned int old; 209 + 210 + old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR); 211 + if (old != 0) 212 + _raw_write_lock_wait(rw, old); 213 + rw->owner = SPINLOCK_LOCKVAL; 214 + } 215 + 216 + static inline void arch_write_unlock(arch_rwlock_t *rw) 217 + { 218 + rw->owner = 0; 219 + __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND); 220 + } 221 + 222 + #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 223 + 224 + extern void _raw_read_lock_wait(arch_rwlock_t *lp); 225 + extern void _raw_write_lock_wait(arch_rwlock_t *lp); 152 226 153 227 static inline void arch_read_lock(arch_rwlock_t *rw) 154 228 { ··· 256 186 : "d" (0) 257 187 : "cc", "memory"); 258 188 } 189 + 190 + #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 259 191 260 192 static inline int arch_read_trylock(arch_rwlock_t *rw) 261 193 {
+34
arch/s390/lib/spinlock.c
··· 114 114 unsigned int owner, old; 115 115 int count = spin_retry; 116 116 117 + #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 118 + __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD); 119 + #endif 117 120 owner = 0; 118 121 while (1) { 119 122 if (count-- <= 0) { ··· 150 147 } 151 148 EXPORT_SYMBOL(_raw_read_trylock_retry); 152 149 150 + #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 151 + 152 + void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev) 153 + { 154 + unsigned int owner, old; 155 + int count = spin_retry; 156 + 157 + owner = 0; 158 + while (1) { 159 + if (count-- <= 0) { 160 + if (owner && !smp_vcpu_scheduled(~owner)) 161 + smp_yield_cpu(~owner); 162 + count = spin_retry; 163 + } 164 + old = ACCESS_ONCE(rw->lock); 165 + owner = ACCESS_ONCE(rw->owner); 166 + smp_rmb(); 167 + if ((int) old >= 0) { 168 + prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR); 169 + old = prev; 170 + } 171 + if ((old & 0x7fffffff) == 0 && (int) prev >= 0) 172 + break; 173 + } 174 + } 175 + EXPORT_SYMBOL(_raw_write_lock_wait); 176 + 177 + #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 178 + 153 179 void _raw_write_lock_wait(arch_rwlock_t *rw) 154 180 { 155 181 unsigned int owner, old, prev; ··· 204 172 } 205 173 } 206 174 EXPORT_SYMBOL(_raw_write_lock_wait); 175 + 176 + #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 207 177 208 178 int _raw_write_trylock_retry(arch_rwlock_t *rw) 209 179 {