/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))
/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

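/*
 * Note that atomic_add() above returns no value, so under the rule
 * stated above it needs no barriers.  In atomic_add_return() below,
 * smp_mb() provides the barrier before the update, and (as its own
 * comments note) the "+ i" data dependency plus the compiler barrier()
 * make the caller wait for the memory operation to complete.
 */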
static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb(); /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier(); /* the "+ i" above will wait on memory */
	return val;
}

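/*
 * __atomic_add_unless() adds @a to @v unless @v was @u, and returns
 * the value @v held beforehand; the cmpxchg() loop retries until the
 * counter is seen to be stable.  A hypothetical refcount-style caller
 * (illustrative only, not part of this header):
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	(object was already released)
 */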
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}

static inline void atomic_and(int i, atomic_t *v)
{
	__insn_fetchand4((void *)&v->counter, i);
}

static inline void atomic_or(int i, atomic_t *v)
{
	__insn_fetchor4((void *)&v->counter, i);
}

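/*
 * The add/and/or ops above map directly onto the hardware's
 * fetchadd4/fetchand4/fetchor4 instructions, but there is no
 * corresponding fetch-xor, so atomic_xor() below is synthesized from a
 * compare-exchange loop: the expected old value goes into
 * SPR_CMPEXCH_VALUE, and cmpexch4 installs "guess ^ i" only if the
 * word in memory still matches that expected value.
 */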
static inline void atomic_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v) ((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i))

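/*
 * Plain loads and stores are presumably sufficient for atomic64_read()
 * and atomic64_set() here: on this 64-bit platform a naturally aligned
 * 64-bit access completes in a single instruction, just as the 32-bit
 * ops above are "real" rather than emulated.
 */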
static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb(); /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier(); /* the "+ i" above will wait on memory */
	return val;
}

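/*
 * Note the asymmetry with __atomic_add_unless() above: that routine
 * returns the old value, while atomic64_add_unless() returns a
 * boolean, nonzero if the add was performed (i.e. the old value was
 * not @u) and zero if it was skipped.
 */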
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

static inline void atomic64_and(long i, atomic64_t *v)
{
	__insn_fetchand((void *)&v->counter, i);
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	__insn_fetchor((void *)&v->counter, i);
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
}

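/*
 * The remaining 64-bit operations are derived from the primitives
 * above: subtraction is addition of the negation, and the inc/dec and
 * test variants are thin wrappers.  For example, atomic64_dec_and_test(v)
 * expands to (atomic64_add_return(-1, (v)) == 0) and therefore inherits
 * the full-barrier semantics of atomic64_add_return().
 */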
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))

#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)

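/*
 * atomic64_inc_not_zero() is the classic "take a reference only if the
 * object is still live" primitive: it increments @v unless @v is zero,
 * returning nonzero if the increment happened.
 */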
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */