include/asm-arm/spinlock.h at v2.6.17-rc2
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value. If it is zero, we may have
 * won the lock, so we try exclusively storing it. A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev"
#endif
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}

/*
 * RWLOCKS
 *
 *
 * Write locks are easy - we just set bit 31. When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev\n"
#endif
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");
}
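
/*
 * Editorial sketch (not part of the original header): the exclusive
 * load / conditional store pattern used by __raw_spin_lock() and
 * __raw_write_lock() above is easier to follow in C. The block below
 * is only a rough analogue: the helper name and the GCC __sync
 * compare-and-swap builtin are assumptions for illustration, not the
 * kernel's implementation, and it is kept under #if 0 so it is never
 * compiled.
 */
#if 0
static inline void example_spin_lock_sketch(volatile unsigned long *lock)
{
	for (;;) {
		/* "ldrex; teq %0, #0": only attempt the store if we saw 0. */
		if (*lock == 0 &&
		    __sync_bool_compare_and_swap(lock, 0UL, 1UL))
			break;	/* the "strexeq" stuck: lock acquired */
		cpu_relax();	/* otherwise loop back, like "bne 1b" */
	}
	smp_mb();		/* barrier after taking the lock */
}
#endif
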
/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy. We may have multiple read locks
 * currently active. However, we know we won't have any write
 * locks.
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
"	wfemi\n"
#endif
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
#ifdef CONFIG_CPU_32v6K
"\n	cmp	%0, #0\n"
"	mcreq	p15, 0, %0, c7, c10, 4\n"
"	seveq"
#endif
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");
}

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */
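
/*
 * Editorial sketch (not part of the original header, placed after the
 * include guard on purpose): a rough C analogue of the reader-count
 * scheme described above. Readers increment the lock word; a writer
 * sets bit 31, which makes the value negative, so readers spin while
 * a write lock is held. The helper names and the GCC __sync builtins
 * are assumptions for illustration only, kept under #if 0 so they are
 * never compiled.
 */
#if 0
static inline void example_read_lock_sketch(volatile long *lock)
{
	long old;

	for (;;) {
		old = *lock;
		/* Negative means bit 31 (write lock) is set: keep spinning. */
		if (old >= 0 &&
		    __sync_bool_compare_and_swap(lock, old, old + 1))
			break;	/* like a successful "strexpl": read lock taken */
		cpu_relax();
	}
	smp_mb();
}

static inline void example_read_unlock_sketch(volatile long *lock)
{
	smp_mb();
	/* Drop one reader; in the real code the last reader issues "seveq". */
	__sync_fetch_and_sub(lock, 1L);
}
#endif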