Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * Make any previous store to the lock word visible (dsb), then wake
 * up any CPUs sleeping in wfe (sev).  On pre-v6K CPUs this compiles
 * to nothing: the lock loops below only use wfe when CONFIG_CPU_32v6K
 * is set, so there is never anyone to wake.
 */
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		"sev"
	);
#elif defined(CONFIG_CPU_32v6K)
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c7, c10, 4\n"
		"sev"
		: : "r" (0)
	);
#endif
}
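
/*
 * Illustrative only: how dsb_sev() pairs with the wfe/wfene
 * instructions in the lock loops below (a sketch of the intended
 * ordering, not additional code):
 *
 *	CPU0 (releasing a lock)		CPU1 (spinning in wfe)
 *	store 0 to the lock word
 *	dsb   - make the store visible
 *	sev   - signal an event  ---->	wakes from wfe,
 *					ldrex now sees 0 and retries strex
 */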

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
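
/*
 * Illustrative only, not part of this header: a rough portable
 * analogue of the protocol described above, using C11 atomics in
 * place of ldrex/strex and with acquire/release ordering standing in
 * for the explicit smp_mb() calls.  The kernel does not use
 * <stdatomic.h>; the names below are made up for the sketch.
 *
 *	#include <stdatomic.h>
 *
 *	static atomic_uint demo_lock;	// 0 = unlocked, 1 = locked
 *
 *	static void demo_spin_lock(void)
 *	{
 *		unsigned int expected;
 *		do {
 *			expected = 0;	// only claim it if it reads 0
 *		} while (!atomic_compare_exchange_weak_explicit(&demo_lock,
 *				&expected, 1,
 *				memory_order_acquire, memory_order_relaxed));
 *	}
 *
 *	static void demo_spin_unlock(void)
 *	{
 *		atomic_store_explicit(&demo_lock, 0, memory_order_release);
 *	}
 */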

#define arch_spin_is_locked(x)	((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/*
	 * Spin until we atomically change the lock word from 0 to 1.
	 * On v6K, sleep in wfe while the lock is held and rely on the
	 * unlocker's sev to wake us.
	 */
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/* One attempt only: store 1 if the lock word currently reads 0. */
	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	/* We own the lock, so a plain store of 0 releases it. */
	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}
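
/*
 * Illustrative only, not part of this header: the functions above are
 * the architecture hooks behind the generic spinlock API; normal
 * kernel code never calls them directly.  A minimal usage sketch
 * (my_dev and its members are made-up names):
 *
 *	spinlock_t lock;		// inside struct my_dev
 *	unsigned long flags;
 *
 *	spin_lock_init(&my_dev->lock);
 *
 *	spin_lock_irqsave(&my_dev->lock, flags);
 *	my_dev->count++;		// critical section
 *	spin_unlock_irqrestore(&my_dev->lock, flags);
 */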

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
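
/*
 * Illustrative only: the 32-bit lock word encoding implied by the
 * code below (values taken from this file, summarised here):
 *
 *	0x00000000	unlocked
 *	0x80000000	write-locked (bit 31 set)
 *	0x00000001..	N readers hold the lock (reader count)
 *
 * A rough portable analogue of arch_write_lock() in C11 atomics,
 * reusing the same encoding (sketch only, made-up names):
 *
 *	static atomic_uint demo_rwlock;
 *
 *	static void demo_write_lock(void)
 *	{
 *		unsigned int expected;
 *		do {
 *			expected = 0;	// no readers, no writer
 *		} while (!atomic_compare_exchange_weak_explicit(&demo_rwlock,
 *				&expected, 0x80000000u,
 *				memory_order_acquire, memory_order_relaxed));
 *	}
 */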

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/* Spin until we atomically change the lock word from 0 to 0x80000000. */
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/* One attempt: claim the write lock only if no readers or writer hold it. */
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)	((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this
 *    location.  If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
"	wfemi\n"
#endif
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}
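
/*
 * Illustrative only: a rough portable analogue of the read-lock
 * protocol above in C11 atomics (sketch, same made-up demo_rwlock as
 * in the write-lock sketch; bit 31 set means a writer holds it):
 *
 *	static void demo_read_lock(void)
 *	{
 *		unsigned int old;
 *		do {
 *			old = atomic_load_explicit(&demo_rwlock,
 *						   memory_order_relaxed);
 *			while (old & 0x80000000u)	// writer present
 *				old = atomic_load_explicit(&demo_rwlock,
 *						   memory_order_relaxed);
 *		} while (!atomic_compare_exchange_weak_explicit(&demo_rwlock,
 *				&old, old + 1,	// bump the reader count
 *				memory_order_acquire, memory_order_relaxed));
 *	}
 */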

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	/* Only the last reader needs to wake up a waiting writer. */
	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	/* tmp2 stays 1 (failure) if strexpl is skipped or loses the race. */
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)	((x)->lock < 0x80000000)
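
/*
 * Illustrative only: as with the spinlock hooks, the rwlock functions
 * above sit behind the generic rwlock_t API.  A minimal usage sketch
 * (my_table, lookup() and insert() are made-up names):
 *
 *	read_lock(&my_table->lock);
 *	entry = lookup(my_table, key);		// many readers may run here
 *	read_unlock(&my_table->lock);
 *
 *	write_lock(&my_table->lock);
 *	insert(my_table, new_entry);		// exclusive access
 *	write_unlock(&my_table->lock);
 */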

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */