/*
 * include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

extern int spin_retry;

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory" );
	return old;
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) \
		 arch_spin_relax(lock); } while (0)

extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return 1;
	return arch_spin_trylock_retry(lp);
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait_flags(rw, flags);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old, cmp;

	old = rw->lock;
	do {
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}

#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */
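/*
 * Illustrative sketch (not part of the original header): the lock word
 * conventions above are 0 = unlocked / ~cpu = owner tag for spinlocks,
 * and for rwlocks the most-significant bit marks a writer while the low
 * 31 bits count readers.  The minimal userspace analogue below
 * demonstrates the compare-and-swap locking scheme, assuming GCC/Clang
 * __sync builtins in place of the s390 "cs" instruction.  All names
 * here (demo_lock, demo_spin_lock, worker, ...) are hypothetical, and
 * the block is guarded by #if 0 so the header is unchanged if included.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static volatile unsigned int demo_lock;		/* 0 = unlocked */
static unsigned long counter;			/* protected by demo_lock */

static void demo_spin_lock(unsigned int tag)
{
	/* Spin until the CAS from 0 to our non-zero owner tag succeeds,
	 * mirroring arch_spin_lock() above. */
	while (__sync_val_compare_and_swap(&demo_lock, 0, tag) != 0)
		;	/* a real implementation would relax/backoff here */
}

static void demo_spin_unlock(void)
{
	/* Release by swapping the owner tag back to 0, as in
	 * arch_spin_unlock() above. */
	__sync_val_compare_and_swap(&demo_lock, demo_lock, 0);
}

static void *worker(void *arg)
{
	unsigned int tag = (unsigned int)(unsigned long)arg;
	int i;

	for (i = 0; i < 100000; i++) {
		demo_spin_lock(tag);
		counter++;
		demo_spin_unlock();
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, (void *)1UL);
	pthread_create(&t2, NULL, worker, (void *)2UL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("counter = %lu (expect 200000)\n", counter);
	return 0;
}
#endif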