#ifndef _ASM_X86_ATOMIC_32_H
#define _ASM_X86_ATOMIC_32_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i)	(((v)->counter) = (i))

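/*
 * A minimal usage sketch (not part of this header): a global event
 * counter.  The name "nr_events" is illustrative only.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	static void reset_events(void)
 *	{
 *		atomic_set(&nr_events, 0);
 *	}
 *
 *	static int count_events(void)
 *	{
 *		return atomic_read(&nr_events);
 *	}
 */
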
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

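/*
 * Sketch of add/sub in use for batched accounting; "in_flight" and the
 * callers are hypothetical, not kernel API.
 *
 *	static atomic_t in_flight = ATOMIC_INIT(0);
 *
 *	static void submit_batch(int nr)
 *	{
 *		atomic_add(nr, &in_flight);
 *	}
 *
 *	static void complete_batch(int nr)
 *	{
 *		atomic_sub(nr, &in_flight);
 *	}
 */
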
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}

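/*
 * Sketch: dropping several references at once and freeing on the last
 * one.  "struct foo" and free_foo() are hypothetical.
 *
 *	static void foo_put_many(struct foo *f, int nr)
 *	{
 *		if (atomic_sub_and_test(nr, &f->refcount))
 *			free_foo(f);
 *	}
 */
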
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static inline void atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static inline void atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "decl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

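/*
 * Sketch of the classic refcounting pattern built on this primitive
 * (the kernel's kref works the same way); the names are hypothetical.
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcount))
 *			kfree(f);
 *	}
 */
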
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "incl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
		     : "+m" (v->counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}

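/*
 * Sketch: spending from a shared budget and reacting when the balance
 * drops below zero; "budget", "cost" and throttle() are hypothetical.
 *
 *	if (atomic_add_negative(-cost, &budget))
 *		throttle();
 */
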
/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if (unlikely(boot_cpu_data.x86 <= 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	__i = i;
	asm volatile(LOCK_PREFIX "xaddl %0, %1"
		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
	/* xadd left the old value of v->counter in i, so i + __i is the new value */
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	local_irq_save(flags);
	__i = atomic_read(v);
	atomic_set(v, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}

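/*
 * Sketch: handing out increasing sequence numbers; "next_seq" is
 * hypothetical.  atomic_inc_return(), defined below, is shorthand for
 * exactly this add-of-one.
 *
 *	static atomic_t next_seq = ATOMIC_INIT(0);
 *
 *	static int new_seq(void)
 *	{
 *		return atomic_add_return(1, &next_seq);
 *	}
 */
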
/**
 * atomic_sub_return - subtract integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to subtract
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))

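/*
 * Sketch: a test-and-set style flag built on atomic_xchg(); the old
 * value tells the caller whether it won the claim.  "busy" and
 * try_claim() are hypothetical.  atomic_add_unless() below shows the
 * matching atomic_cmpxchg() retry-loop pattern.
 *
 *	static atomic_t busy = ATOMIC_INIT(0);
 *
 *	static int try_claim(void)
 *	{
 *		return atomic_xchg(&busy, 1) == 0;
 *	}
 */
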
/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

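/*
 * Sketch: taking a reference during a lookup only if the refcount has
 * not already dropped to zero; find_object() is hypothetical.
 *
 *	obj = find_object(table, key);
 *	if (obj && !atomic_inc_not_zero(&obj->refcount))
 *		obj = NULL;
 */
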
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
	asm volatile(LOCK_PREFIX "andl %0,%1" \
		     : : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr) \
	asm volatile(LOCK_PREFIX "orl %0,%1" \
		     : : "r" (mask), "m" (*(addr)) : "memory")

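/*
 * Sketch: flipping status bits in a shared flags word; "dev_flags" and
 * HW_ENABLED are hypothetical.
 *
 *	atomic_set_mask(HW_ENABLED, &dev_flags);
 *	atomic_clear_mask(HW_ENABLED, &dev_flags);
 */
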
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

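/*
 * Sketch: portable code still writes the barrier even though it costs
 * nothing beyond a compiler barrier here, because the LOCK prefix
 * already orders memory on x86:
 *
 *	obj->done = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */
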
#include <asm-generic/atomic.h>
#endif /* _ASM_X86_ATOMIC_32_H */