/* at v2.6.12-rc2 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the lock value.  If it is zero (unlocked), we
 * try exclusively storing the locked value; if the lock was already
 * held, or the exclusive store fails, we retry.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 }

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x)       ((x)->lock != 0)
#define spin_unlock_wait(x)     do { barrier(); } while (spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

static inline void _raw_spin_lock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]\n"
"       teqeq   %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "cc", "memory");
}

static inline int _raw_spin_trylock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"       ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "cc", "memory");

        return tmp == 0;
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
        __asm__ __volatile__(
"       str     %1, [%0]"
        :
        : "r" (&lock->lock), "r" (0)
        : "cc", "memory");
}

/*
 * RWLOCKS
 */
typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED        (rwlock_t) { 0 }
#define rwlock_init(x)          do { *(x) = RW_LOCK_UNLOCKED; } while (0)

/*
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
static inline void _raw_write_lock(rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc", "memory");
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
        __asm__ __volatile__(
        "str    %1, [%0]"
        :
        : "r" (&rw->lock), "r" (0)
        : "cc", "memory");
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store the new lock value if it is positive, and we still own
 *    this location.  If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
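/*
 * Illustrative sketch only (added for clarity, not part of the
 * original header): the read-lock loop below in C-like pseudocode.
 * strex() here is a hypothetical stand-in for the store-exclusive
 * instruction, which succeeds only if no other CPU has touched the
 * location since our exclusive load; it has no direct C equivalent.
 *
 *      do {
 *              tmp = rw->lock;                   // ldrex
 *              tmp = tmp + 1;                    // adds: negative => writer holds bit 31
 *              if (tmp >= 0)
 *                      ok = strex(&rw->lock, tmp);   // strexpl: attempt the store
 *      } while (tmp < 0 || !ok);                 // rsbpls/bmi: retry on writer or lost exclusivity
 */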
static inline void _raw_read_lock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"1:     ldrex   %0, [%2]\n"
"       adds    %0, %0, #1\n"
"       strexpl %1, %0, [%2]\n"
"       rsbpls  %0, %1, #0\n"
"       bmi     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc", "memory");
}

static inline void _raw_read_unlock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, #1\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc", "memory");
}

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

static inline int _raw_write_trylock(rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc", "memory");

        return tmp == 0;
}

#endif /* __ASM_SPINLOCK_H */
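#if 0
/*
 * Illustrative sketch only, added for clarity -- not part of the
 * original header and never compiled.  It restates the lock/unlock
 * protocol described at the top of the file using the GCC __sync
 * builtins as a stand-in for the hand-written ldrex/strex loops; on
 * ARMv6 the compare-and-swap builtin itself expands to a very similar
 * ldrex/strex retry loop.  The example_* names are hypothetical.
 */
static inline void example_spin_lock(spinlock_t *lock)
{
        /* Spin until we observe 0 (unlocked) and atomically store 1. */
        while (!__sync_bool_compare_and_swap(&lock->lock, 0, 1))
                /* busy-wait */;
}

static inline void example_spin_unlock(spinlock_t *lock)
{
        /* Only the lock holder gets here, so a plain store of the
         * unlocked value is enough, as in _raw_spin_unlock() above. */
        lock->lock = 0;
}
#endif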