/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
	smp_acquire__after_ctrl_dep();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */
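
/*
 * Editor's illustrative sketch, not part of the original header: kernel code
 * does not call the arch_spin_*, arch_read_* and arch_write_* primitives
 * above directly.  It takes locks through the generic wrappers in
 * <linux/spinlock.h>, which eventually reach the arch_* operations defined
 * in this file.  The names example_lock, example_rwlock and example_usage
 * below are made up for illustration and would live in a driver .c file.
 */
#if 0	/* example only - kept out of the build */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static DEFINE_RWLOCK(example_rwlock);

static void example_usage(void)
{
	unsigned long flags;

	/* spin_lock_irqsave() eventually calls arch_spin_lock_flags() */
	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);	/* arch_spin_unlock() */

	/* read_lock() maps to arch_read_lock(); readers may run concurrently */
	read_lock(&example_rwlock);
	/* ... read-side critical section ... */
	read_unlock(&example_rwlock);
}
#endif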