Linux kernel mirror (for testing): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.20-rc7 257 lines 5.9 kB view raw
/*
 * i386 atomic operations (include/asm-i386/atomic.h, v2.6.20-rc7).
 *
 * Provides the arch implementation of the kernel's atomic_t API using
 * lock-prefixed x86 instructions.  LOCK_PREFIX is defined elsewhere
 * (presumably asm/alternative.h) -- it expands to the "lock" prefix on
 * SMP builds and to nothing on UP builds; verify against that header.
 */
#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { int counter; } atomic_t;

/* Static initializer: atomic_t a = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 *
 * NOTE(review): this is a plain C load with no volatile qualifier, so
 * the compiler may cache or combine reads across iterations; later
 * kernels read through a volatile-qualified pointer here.  Confirm
 * callers do not rely on it as a compiler barrier.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 * (A naturally aligned 32-bit store is atomic on x86; no lock needed.)
 */
#define atomic_set(v,i)		(((v)->counter) = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  No return value, so a plain locked
 * "addl" suffices.  "+m" makes the counter an in/out memory operand
 * (read-modify-write in place); "ir" lets @i be an immediate or register.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "addl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Mirror of atomic_add() using "subl".
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "subl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 *
 * The "sete" captures ZF from the locked "subl" into @c, so value and
 * flag are obtained in one atomic RMW.  NOTE(review): "=qm" as the
 * output constraint for a setcc destination was later narrowed to "=q"
 * upstream (setcc cannot take a memory operand on pre-386-byte-reg
 * encodings); confirm against current gcc behavior if backporting.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 *
 * NOTE(review): returns "c != 0" while atomic_sub_and_test() returns
 * the raw "c".  Behaviorally equivalent (c is 0 or 1 from sete), but
 * stylistically inconsistent within this file.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "decl %0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		: : "memory");
	return c != 0;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "incl %0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		: : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 *
 * Same pattern as atomic_sub_and_test() but captures the sign flag
 * ("sets") instead of the zero flag.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "addl %2,%0; sets %1"
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 *
 * Uses "xaddl", which exchanges the addend with the old counter value,
 * so after the asm @i holds the pre-add value and __i the original
 * addend; their sum is the post-add value.  XADD does not exist on the
 * 80386, hence the CONFIG_M386 runtime check and irq-disable fallback.
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if(unlikely(boot_cpu_data.x86==3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	__i = i;
	__asm__ __volatile__(
		LOCK_PREFIX "xaddl %0, %1"
		:"+r" (i), "+m" (v->counter)
		: : "memory");
	/* i now holds the old counter value; __i the original addend */
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	/*
	 * Non-locked read-modify-write under local_irq_save().  Safe
	 * only because a kernel built for the 386 is UP -- presumably
	 * no SMP i386 config selects CONFIG_M386; confirm in Kconfig.
	 */
	local_irq_save(flags);
	__i = atomic_read(v);
	atomic_set(v, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}

/* Subtract via add of the negation; shares the xadd/386 logic above. */
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i,v);
}

/*
 * Compare-and-swap / exchange on the counter word; cmpxchg()/xchg()
 * are the generic x86 primitives defined elsewhere (asm/system.h).
 * atomic_cmpxchg() returns the value read, which equals @old on success.
 */
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 *
 * Classic cmpxchg retry loop as a gcc statement expression: re-read on
 * contention (old != c), bail out if the forbidden value @u is seen.
 * The final "c != (u)" is the expression's value.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = atomic_cmpxchg((v), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
/* Increment only if the counter is non-zero (refcount resurrection guard). */
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc_return(v)  (atomic_add_return(1,v))
#define atomic_dec_return(v)  (atomic_sub_return(1,v))

/* These are x86-specific, used by some header files */
/*
 * NOTE(review): atomic_clear_mask() dereferences *addr without
 * parentheses while atomic_set_mask() uses *(addr) -- an expression
 * argument such as "p + 1" would mis-expand in the former.  Kept as-is;
 * callers in-tree pass simple lvalues.
 */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask),"m" (*(addr)) : "memory")

/* Atomic operations are already serializing on x86 */
/* Only a compiler barrier is needed around the locked RMWs above. */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

/* Pulls in the generic atomic_long_t wrappers built on atomic_t. */
#include <asm-generic/atomic.h>
#endif