/* scraped from Linux v2.6.21, 168 lines, 3.3 kB ("view raw" residue) */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>

/*
 * Simple spin lock operations (32-bit PowerPC inline asm).
 *
 * (the type definitions are in asm/raw_spinlock_types.h)
 *
 * Conventions visible in this file:
 *   lock->slock : 0 = free, non-zero = held.
 *   rw->lock    : 0 = free, > 0 = reader count, -1 = write-held.
 * Acquire paths end in isync; release paths start with eieio, so the
 * critical section is ordered inside the lock/unlock pair.
 * PPC405_ERR77() is emitted before each stwcx.; it is defined elsewhere
 * (presumably the PPC405 erratum #77 workaround -- not visible here).
 */

/* Non-zero slock word means the lock is currently held. */
#define __raw_spin_is_locked(x)	((x)->slock != 0)
/* Busy-wait (without acquiring) until the lock is observed free. */
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
/* 'flags' is ignored: the flags variant is a plain __raw_spin_lock. */
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

/*
 * Acquire the spinlock.
 *
 * Spins with plain loads (lwzx) while the word is non-zero, then tries
 * the atomic 0 -> 1 transition with lwarx/stwcx., retrying from the
 * plain-load loop on any failure.  The trailing isync keeps the
 * critical section from being executed ahead of the acquisition.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
	"b	1f		# __raw_spin_lock\n\
2:	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne+	2b\n\
1:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne-	2b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&lock->slock), "r"(1)
	: "cr0", "memory");
}

/*
 * Release the spinlock: eieio orders all prior stores before the plain
 * store that clears the lock word (the holder owns it exclusively, so
 * no atomic sequence is needed).
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("eieio	# __raw_spin_unlock": : :"memory");
	lock->slock = 0;
}

/* Try-lock: atomically set bit 0 of slock; success iff it was clear. */
#define __raw_spin_trylock(l) (!test_and_set_bit(0,(volatile unsigned long *)(&(l)->slock)))

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/* lock >= 0: no writer (a writer holds it at -1), so readers may enter. */
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)
/* lock == 0: neither readers nor a writer hold it. */
#define __raw_write_can_lock(rw)	(!(rw)->lock)

/*
 * One-shot read-lock attempt: atomically increment the counter unless
 * the incremented value would be <= 0 (write-held or overflowed).
 * Returns non-zero on success.
 */
static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
"2:	lwarx	%0,0,%1		# read_trylock\n\
	addic.	%0,%0,1\n\
	ble-	1f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	2b\n\
	isync\n\
1:"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");

	/* tmp > 0: our increment landed and no writer was present. */
	return tmp > 0;
}

/*
 * Acquire a read lock: spin with plain loads while a writer holds the
 * lock (negative value), then atomically increment the reader count;
 * isync orders the critical section after the acquisition.
 */
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
	"b	2f		# read_lock\n\
1:	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	blt+	1b\n\
2:	lwarx	%0,0,%1\n\
	addic.	%0,%0,1\n\
	ble-	1b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

/*
 * Drop a read lock: eieio orders prior accesses, then the reader count
 * is atomically decremented (other readers may be racing, so the
 * lwarx/stwcx. loop is required here, unlike write_unlock).
 */
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
	"eieio			# read_unlock\n\
1:	lwarx	%0,0,%1\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

/*
 * One-shot write-lock attempt: succeed only if the lock word is 0,
 * storing -1 to mark it write-held.  Returns non-zero on success.
 */
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
"2:	lwarx	%0,0,%1		# write_trylock\n\
	cmpwi	0,%0,0\n\
	bne-	1f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync\n\
1:"
	: "=&r"(tmp)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");

	/* tmp == 0: the lock was free and we installed -1. */
	return tmp == 0;
}

/*
 * Acquire the write lock: spin with plain loads while anyone holds the
 * lock (non-zero), then atomically perform the 0 -> -1 transition.
 */
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
	"b	2f		# write_lock\n\
1:	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne+	1b\n\
2:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne-	1b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");
}

/*
 * Drop the write lock: eieio orders prior stores, then a plain store
 * clears the word -- the single writer owns it exclusively, so no
 * atomic sequence is needed.
 */
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("eieio	# write_unlock": : :"memory");
	rw->lock = 0;
}

/* Back-off hooks used by the generic lock layer while spinning. */
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */