/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
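
/*
 * Illustrative sketch, not part of the original header: basic use of the
 * primitives above.  Guarded out; the names are hypothetical.
 */
#if 0
static atomic_t example_hits = ATOMIC_INIT(0);

static inline int example_read_and_reset(void)
{
	int val = atomic_read(&example_hits);	/* volatile load of ->counter */

	atomic_set(&example_hits, 0);	/* plain store; safe per the comment above */
	return val;			/* note: the read+set pair is not atomic as a whole */
}
#endif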

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
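
/*
 * Illustrative sketch, not part of the original header: the C-level shape
 * of the ldrex/strex retry loop used by the ARMv6+ ops above.  strex only
 * succeeds if nothing disturbed the exclusive monitor since the matching
 * ldrex; on failure the whole read-modify-write is retried.  Guarded out;
 * ldrex()/strex() are hypothetical intrinsics, not real kernel APIs.
 */
#if 0
static inline int example_add_return_shape(int i, atomic_t *v)
{
	int old;

	smp_mb();				/* barrier before the update */
	do {
		old = ldrex(&v->counter);	/* load-exclusive */
	} while (strex(&v->counter, old + i));	/* store-exclusive, 0 on success */
	smp_mb();				/* barrier after the update */

	return old + i;
}
#endif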

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "Ir" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}
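
/*
 * Illustrative sketch, not part of the original header: the canonical
 * lock-free update loop built on atomic_cmpxchg().  Guarded out; the
 * function name is hypothetical.
 */
#if 0
static inline void example_saturating_inc(atomic_t *v, int max)
{
	int old, new, seen;

	old = atomic_read(v);
	while (old < max) {
		new = old + 1;
		seen = atomic_cmpxchg(v, old, new);
		if (seen == old)
			break;		/* our update won the race */
		old = seen;		/* lost a race; retry with the fresh value */
	}
}
#endif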

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
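
/*
 * Illustrative sketch, not part of the original header: the semantics of
 * __atomic_add_unless() written as plain, non-atomic C.  It returns the
 * old value and performs the addition only when that value is not @u.
 * Guarded out.
 */
#if 0
static inline int example_add_unless_semantics(atomic_t *v, int a, int u)
{
	int old = v->counter;	/* the real version reads this atomically */

	if (old != u)
		v->counter = old + a;
	return old;
}
#endif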

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
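
/*
 * Illustrative sketch, not part of the original header: the classic
 * reference-count "put" built on atomic_dec_and_test(), which returns
 * true only for the caller that drops the count to zero.  Guarded out;
 * the names, including example_free(), are hypothetical.
 */
#if 0
struct example_obj {
	atomic_t refcnt;
	/* ... payload ... */
};

static inline void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcnt))
		example_free(obj);	/* hypothetical destructor */
}
#endif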

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
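
/*
 * Illustrative note, not part of the original header: the point of the
 * doubleword-atomic accessors above is to avoid torn 64-bit values.  A
 * plain read like the one below could be compiled as two 32-bit loads,
 * letting a concurrent atomic64_set() interleave between the halves.
 * Guarded out; the function name is hypothetical.
 */
#if 0
static inline long long example_torn_read(const atomic64_t *v)
{
	/* BROKEN on 32-bit ARM without single-copy-atomic 64-bit loads */
	return *(const volatile long long *)&v->counter;
}
#endif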

static inline void atomic64_add(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%Q0, %Q0, %Q4\n"
"	adc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%Q0, %Q0, %Q4\n"
"	adc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, %Q4\n"
"	sbc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, %Q4\n"
"	sbc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
					 long long new)
{
	long long oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd	%1, %H1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"teqeq	%H1, %H4\n"
		"strexdeq %0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
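
/*
 * Illustrative sketch, not part of the original header: a trylock-style
 * user of atomic64_dec_if_positive(), which returns the decremented value
 * and only stores it when it did not go negative.  Guarded out; the names
 * are hypothetical.
 */
#if 0
static inline int example_take_credit(atomic64_t *budget)
{
	/* claims one unit and returns 1, or returns 0 if exhausted */
	return atomic64_dec_if_positive(budget) >= 0;
}
#endif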

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
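
/*
 * Illustrative sketch, not part of the original header: the lookup-side
 * "take a reference unless the object is already dead" pattern, using
 * atomic64_inc_not_zero() defined just above.  Guarded out; the function
 * name is hypothetical.
 */
#if 0
static inline int example_tryget(atomic64_t *refcnt)
{
	/* returns 0, without incrementing, once the count has hit zero */
	return atomic64_inc_not_zero(refcnt);
}
#endif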

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */