Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

atomic.h: atomic_add_unless as inline. Remove system.h atomic.h circular dependency

atomic_add_unless as inline. Remove system.h atomic.h circular dependency.
I agree (with Andi Kleen) that this typeof is not needed and is more error
prone. All the original atomic.h code that uses cmpxchg (which includes
the atomic_add_unless) uses defines instead of inline functions,
probably to circumvent a circular dependency between system.h and
atomic.h on powerpc (which my patch addresses). Therefore, it makes
sense to use inline functions that will provide type checking.

atomic_add_unless as inline. Remove system.h atomic.h circular dependency.
Digging into the FRV architecture shows me that it is also affected by
such a circular dependency. Here is the diff applying this against the
rest of my atomic.h patches.

It applies over the atomic.h standardization patches.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mathieu Desnoyers and committed by Linus Torvalds
2856f5e3 79d365a3

+360 -282
+31 -28
include/asm-alpha/atomic.h
··· 2 2 #define _ALPHA_ATOMIC_H 3 3 4 4 #include <asm/barrier.h> 5 + #include <asm/system.h> 5 6 6 7 /* 7 8 * Atomic operations that C can't guarantee us. Useful for ··· 191 190 * Atomically adds @a to @v, so long as it was not @u. 192 191 * Returns non-zero if @v was not @u, and zero otherwise. 193 192 */ 194 - #define atomic_add_unless(v, a, u) \ 195 - ({ \ 196 - __typeof__((v)->counter) c, old; \ 197 - c = atomic_read(v); \ 198 - for (;;) { \ 199 - if (unlikely(c == (u))) \ 200 - break; \ 201 - old = atomic_cmpxchg((v), c, c + (a)); \ 202 - if (likely(old == c)) \ 203 - break; \ 204 - c = old; \ 205 - } \ 206 - c != (u); \ 207 - }) 193 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 194 + { 195 + int c, old; 196 + c = atomic_read(v); 197 + for (;;) { 198 + if (unlikely(c == (u))) 199 + break; 200 + old = atomic_cmpxchg((v), c, c + (a)); 201 + if (likely(old == c)) 202 + break; 203 + c = old; 204 + } 205 + return c != (u); 206 + } 207 + 208 208 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 209 209 210 210 /** ··· 217 215 * Atomically adds @a to @v, so long as it was not @u. 218 216 * Returns non-zero if @v was not @u, and zero otherwise. 
219 217 */ 220 - #define atomic64_add_unless(v, a, u) \ 221 - ({ \ 222 - __typeof__((v)->counter) c, old; \ 223 - c = atomic64_read(v); \ 224 - for (;;) { \ 225 - if (unlikely(c == (u))) \ 226 - break; \ 227 - old = atomic64_cmpxchg((v), c, c + (a)); \ 228 - if (likely(old == c)) \ 229 - break; \ 230 - c = old; \ 231 - } \ 232 - c != (u); \ 233 - }) 218 + static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) 219 + { 220 + long c, old; 221 + c = atomic64_read(v); 222 + for (;;) { 223 + if (unlikely(c == (u))) 224 + break; 225 + old = atomic64_cmpxchg((v), c, c + (a)); 226 + if (likely(old == c)) 227 + break; 228 + c = old; 229 + } 230 + return c != (u); 231 + } 232 + 234 233 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 235 234 236 235 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+1
include/asm-arm/atomic.h
··· 12 12 #define __ASM_ARM_ATOMIC_H 13 13 14 14 #include <linux/compiler.h> 15 + #include <asm/system.h> 15 16 16 17 typedef struct { volatile int counter; } atomic_t; 17 18
-1
include/asm-arm26/atomic.h
··· 20 20 #ifndef __ASM_ARM_ATOMIC_H 21 21 #define __ASM_ARM_ATOMIC_H 22 22 23 - 24 23 #ifdef CONFIG_SMP 25 24 #error SMP is NOT supported 26 25 #endif
+15 -76
include/asm-frv/atomic.h
··· 16 16 17 17 #include <linux/types.h> 18 18 #include <asm/spr-regs.h> 19 + #include <asm/system.h> 19 20 20 21 #ifdef CONFIG_SMP 21 22 #error not SMP safe ··· 259 258 260 259 #define tas(ptr) (xchg((ptr), 1)) 261 260 262 - /*****************************************************************************/ 263 - /* 264 - * compare and conditionally exchange value with memory 265 - * - if (*ptr == test) then orig = *ptr; *ptr = test; 266 - * - if (*ptr != test) then orig = *ptr; 267 - */ 268 - #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 269 - 270 - #define cmpxchg(ptr, test, new) \ 271 - ({ \ 272 - __typeof__(ptr) __xg_ptr = (ptr); \ 273 - __typeof__(*(ptr)) __xg_orig, __xg_tmp; \ 274 - __typeof__(*(ptr)) __xg_test = (test); \ 275 - __typeof__(*(ptr)) __xg_new = (new); \ 276 - \ 277 - switch (sizeof(__xg_orig)) { \ 278 - case 4: \ 279 - asm volatile( \ 280 - "0: \n" \ 281 - " orcc gr0,gr0,gr0,icc3 \n" \ 282 - " ckeq icc3,cc7 \n" \ 283 - " ld.p %M0,%1 \n" \ 284 - " orcr cc7,cc7,cc3 \n" \ 285 - " sub%I4cc %1,%4,%2,icc0 \n" \ 286 - " bne icc0,#0,1f \n" \ 287 - " cst.p %3,%M0 ,cc3,#1 \n" \ 288 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ 289 - " beq icc3,#0,0b \n" \ 290 - "1: \n" \ 291 - : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \ 292 - : "r"(__xg_new), "NPr"(__xg_test) \ 293 - : "memory", "cc7", "cc3", "icc3", "icc0" \ 294 - ); \ 295 - break; \ 296 - \ 297 - default: \ 298 - __xg_orig = 0; \ 299 - asm volatile("break"); \ 300 - break; \ 301 - } \ 302 - \ 303 - __xg_orig; \ 304 - }) 305 - 306 - #else 307 - 308 - extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new); 309 - 310 - #define cmpxchg(ptr, test, new) \ 311 - ({ \ 312 - __typeof__(ptr) __xg_ptr = (ptr); \ 313 - __typeof__(*(ptr)) __xg_orig; \ 314 - __typeof__(*(ptr)) __xg_test = (test); \ 315 - __typeof__(*(ptr)) __xg_new = (new); \ 316 - \ 317 - switch (sizeof(__xg_orig)) { \ 318 - case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \ 319 - default: \ 320 - __xg_orig = 0; \ 
321 - asm volatile("break"); \ 322 - break; \ 323 - } \ 324 - \ 325 - __xg_orig; \ 326 - }) 327 - 328 - #endif 329 - 330 261 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) 331 262 #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 332 263 333 - #define atomic_add_unless(v, a, u) \ 334 - ({ \ 335 - int c, old; \ 336 - c = atomic_read(v); \ 337 - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 338 - c = old; \ 339 - c != (u); \ 340 - }) 264 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 265 + { 266 + int c, old; 267 + c = atomic_read(v); 268 + for (;;) { 269 + if (unlikely(c == (u))) 270 + break; 271 + old = atomic_cmpxchg((v), c, c + (a)); 272 + if (likely(old == c)) 273 + break; 274 + c = old; 275 + } 276 + return c != (u); 277 + } 341 278 342 279 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 343 280
+69 -1
include/asm-frv/system.h
··· 13 13 #define _ASM_SYSTEM_H 14 14 15 15 #include <linux/linkage.h> 16 - #include <asm/atomic.h> 17 16 18 17 struct thread_struct; 19 18 ··· 195 196 extern void free_initmem(void); 196 197 197 198 #define arch_align_stack(x) (x) 199 + 200 + /*****************************************************************************/ 201 + /* 202 + * compare and conditionally exchange value with memory 203 + * - if (*ptr == test) then orig = *ptr; *ptr = test; 204 + * - if (*ptr != test) then orig = *ptr; 205 + */ 206 + #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 207 + 208 + #define cmpxchg(ptr, test, new) \ 209 + ({ \ 210 + __typeof__(ptr) __xg_ptr = (ptr); \ 211 + __typeof__(*(ptr)) __xg_orig, __xg_tmp; \ 212 + __typeof__(*(ptr)) __xg_test = (test); \ 213 + __typeof__(*(ptr)) __xg_new = (new); \ 214 + \ 215 + switch (sizeof(__xg_orig)) { \ 216 + case 4: \ 217 + asm volatile( \ 218 + "0: \n" \ 219 + " orcc gr0,gr0,gr0,icc3 \n" \ 220 + " ckeq icc3,cc7 \n" \ 221 + " ld.p %M0,%1 \n" \ 222 + " orcr cc7,cc7,cc3 \n" \ 223 + " sub%I4cc %1,%4,%2,icc0 \n" \ 224 + " bne icc0,#0,1f \n" \ 225 + " cst.p %3,%M0 ,cc3,#1 \n" \ 226 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ 227 + " beq icc3,#0,0b \n" \ 228 + "1: \n" \ 229 + : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \ 230 + : "r"(__xg_new), "NPr"(__xg_test) \ 231 + : "memory", "cc7", "cc3", "icc3", "icc0" \ 232 + ); \ 233 + break; \ 234 + \ 235 + default: \ 236 + __xg_orig = 0; \ 237 + asm volatile("break"); \ 238 + break; \ 239 + } \ 240 + \ 241 + __xg_orig; \ 242 + }) 243 + 244 + #else 245 + 246 + extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new); 247 + 248 + #define cmpxchg(ptr, test, new) \ 249 + ({ \ 250 + __typeof__(ptr) __xg_ptr = (ptr); \ 251 + __typeof__(*(ptr)) __xg_orig; \ 252 + __typeof__(*(ptr)) __xg_test = (test); \ 253 + __typeof__(*(ptr)) __xg_new = (new); \ 254 + \ 255 + switch (sizeof(__xg_orig)) { \ 256 + case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \ 257 + default: \ 258 + 
__xg_orig = 0; \ 259 + asm volatile("break"); \ 260 + break; \ 261 + } \ 262 + \ 263 + __xg_orig; \ 264 + }) 265 + 266 + #endif 267 + 198 268 199 269 #endif /* _ASM_SYSTEM_H */
+12 -5
include/asm-generic/atomic.h
··· 9 9 */ 10 10 11 11 #include <asm/types.h> 12 - #include <asm/system.h> 13 12 14 13 /* 15 14 * Suppport for atomic_long_t ··· 122 123 return (long)atomic64_dec_return(v); 123 124 } 124 125 125 - #define atomic_long_add_unless(l, a, u) \ 126 - atomic64_add_unless((atomic64_t *)(l), (a), (u)) 126 + static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) 127 + { 128 + atomic64_t *v = (atomic64_t *)l; 129 + 130 + return (long)atomic64_add_unless(v, a, u); 131 + } 127 132 128 133 #define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) 129 134 ··· 239 236 return (long)atomic_dec_return(v); 240 237 } 241 238 242 - #define atomic_long_add_unless(l, a, u) \ 243 - atomic_add_unless((atomic_t *)(l), (a), (u)) 239 + static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) 240 + { 241 + atomic_t *v = (atomic_t *)l; 242 + 243 + return (long)atomic_add_unless(v, a, u); 244 + } 244 245 245 246 #define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) 246 247
+15 -14
include/asm-i386/atomic.h
··· 219 219 * Atomically adds @a to @v, so long as @v was not already @u. 220 220 * Returns non-zero if @v was not @u, and zero otherwise. 221 221 */ 222 - #define atomic_add_unless(v, a, u) \ 223 - ({ \ 224 - __typeof__((v)->counter) c, old; \ 225 - c = atomic_read(v); \ 226 - for (;;) { \ 227 - if (unlikely(c == (u))) \ 228 - break; \ 229 - old = atomic_cmpxchg((v), c, c + (a)); \ 230 - if (likely(old == c)) \ 231 - break; \ 232 - c = old; \ 233 - } \ 234 - c != (u); \ 235 - }) 222 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 223 + { 224 + int c, old; 225 + c = atomic_read(v); 226 + for (;;) { 227 + if (unlikely(c == (u))) 228 + break; 229 + old = atomic_cmpxchg((v), c, c + (a)); 230 + if (likely(old == c)) 231 + break; 232 + c = old; 233 + } 234 + return c != (u); 235 + } 236 + 236 237 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 237 238 238 239 #define atomic_inc_return(v) (atomic_add_return(1,v))
+31 -28
include/asm-ia64/atomic.h
··· 15 15 #include <linux/types.h> 16 16 17 17 #include <asm/intrinsics.h> 18 + #include <asm/system.h> 18 19 19 20 /* 20 21 * On IA-64, counter must always be volatile to ensure that that the ··· 96 95 (cmpxchg(&((v)->counter), old, new)) 97 96 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 98 97 99 - #define atomic_add_unless(v, a, u) \ 100 - ({ \ 101 - __typeof__(v->counter) c, old; \ 102 - c = atomic_read(v); \ 103 - for (;;) { \ 104 - if (unlikely(c == (u))) \ 105 - break; \ 106 - old = atomic_cmpxchg((v), c, c + (a)); \ 107 - if (likely(old == c)) \ 108 - break; \ 109 - c = old; \ 110 - } \ 111 - c != (u); \ 112 - }) 98 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 99 + { 100 + int c, old; 101 + c = atomic_read(v); 102 + for (;;) { 103 + if (unlikely(c == (u))) 104 + break; 105 + old = atomic_cmpxchg((v), c, c + (a)); 106 + if (likely(old == c)) 107 + break; 108 + c = old; 109 + } 110 + return c != (u); 111 + } 112 + 113 113 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 114 114 115 - #define atomic64_add_unless(v, a, u) \ 116 - ({ \ 117 - __typeof__(v->counter) c, old; \ 118 - c = atomic64_read(v); \ 119 - for (;;) { \ 120 - if (unlikely(c == (u))) \ 121 - break; \ 122 - old = atomic64_cmpxchg((v), c, c + (a)); \ 123 - if (likely(old == c)) \ 124 - break; \ 125 - c = old; \ 126 - } \ 127 - c != (u); \ 128 - }) 115 + static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) 116 + { 117 + long c, old; 118 + c = atomic64_read(v); 119 + for (;;) { 120 + if (unlikely(c == (u))) 121 + break; 122 + old = atomic64_cmpxchg((v), c, c + (a)); 123 + if (likely(old == c)) 124 + break; 125 + c = old; 126 + } 127 + return c != (u); 128 + } 129 + 129 130 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 130 131 131 132 #define atomic_add_return(i,v) \
+15 -8
include/asm-m32r/atomic.h
··· 253 253 * Atomically adds @a to @v, so long as it was not @u. 254 254 * Returns non-zero if @v was not @u, and zero otherwise. 255 255 */ 256 - #define atomic_add_unless(v, a, u) \ 257 - ({ \ 258 - int c, old; \ 259 - c = atomic_read(v); \ 260 - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 261 - c = old; \ 262 - c != (u); \ 263 - }) 256 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 257 + { 258 + int c, old; 259 + c = atomic_read(v); 260 + for (;;) { 261 + if (unlikely(c == (u))) 262 + break; 263 + old = atomic_cmpxchg((v), c, c + (a)); 264 + if (likely(old == c)) 265 + break; 266 + c = old; 267 + } 268 + return c != (u); 269 + } 270 + 264 271 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 265 272 266 273 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
+16 -15
include/asm-m68k/atomic.h
··· 2 2 #define __ARCH_M68K_ATOMIC__ 3 3 4 4 5 - #include <asm/system.h> /* local_irq_XXX() */ 5 + #include <asm/system.h> 6 6 7 7 /* 8 8 * Atomic operations that C can't guarantee us. Useful for ··· 170 170 __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask)); 171 171 } 172 172 173 - #define atomic_add_unless(v, a, u) \ 174 - ({ \ 175 - int c, old; \ 176 - c = atomic_read(v); \ 177 - for (;;) { \ 178 - if (unlikely(c == (u))) \ 179 - break; \ 180 - old = atomic_cmpxchg((v), c, c + (a)); \ 181 - if (likely(old == c)) \ 182 - break; \ 183 - c = old; \ 184 - } \ 185 - c != (u); \ 186 - }) 173 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 174 + { 175 + int c, old; 176 + c = atomic_read(v); 177 + for (;;) { 178 + if (unlikely(c == (u))) 179 + break; 180 + old = atomic_cmpxchg((v), c, c + (a)); 181 + if (likely(old == c)) 182 + break; 183 + c = old; 184 + } 185 + return c != (u); 186 + } 187 + 187 188 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 188 189 189 190 /* Atomic operations are already serializing */
+16 -9
include/asm-m68knommu/atomic.h
··· 1 1 #ifndef __ARCH_M68KNOMMU_ATOMIC__ 2 2 #define __ARCH_M68KNOMMU_ATOMIC__ 3 3 4 - #include <asm/system.h> /* local_irq_XXX() */ 4 + #include <asm/system.h> 5 5 6 6 /* 7 7 * Atomic operations that C can't guarantee us. Useful for ··· 131 131 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) 132 132 #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 133 133 134 - #define atomic_add_unless(v, a, u) \ 135 - ({ \ 136 - int c, old; \ 137 - c = atomic_read(v); \ 138 - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 139 - c = old; \ 140 - c != (u); \ 141 - }) 134 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 135 + { 136 + int c, old; 137 + c = atomic_read(v); 138 + for (;;) { 139 + if (unlikely(c == (u))) 140 + break; 141 + old = atomic_cmpxchg((v), c, c + (a)); 142 + if (likely(old == c)) 143 + break; 144 + c = old; 145 + } 146 + return c != (u); 147 + } 148 + 142 149 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 143 150 144 151 #define atomic_dec_return(v) atomic_sub_return(1,(v))
+30 -16
include/asm-mips/atomic.h
··· 18 18 #include <asm/barrier.h> 19 19 #include <asm/cpu-features.h> 20 20 #include <asm/war.h> 21 + #include <asm/system.h> 21 22 22 23 typedef struct { volatile int counter; } atomic_t; 23 24 ··· 319 318 * Atomically adds @a to @v, so long as it was not @u. 320 319 * Returns non-zero if @v was not @u, and zero otherwise. 321 320 */ 322 - #define atomic_add_unless(v, a, u) \ 323 - ({ \ 324 - __typeof__((v)->counter) c, old; \ 325 - c = atomic_read(v); \ 326 - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 327 - c = old; \ 328 - c != (u); \ 329 - }) 321 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 322 + { 323 + int c, old; 324 + c = atomic_read(v); 325 + for (;;) { 326 + if (unlikely(c == (u))) 327 + break; 328 + old = atomic_cmpxchg((v), c, c + (a)); 329 + if (likely(old == c)) 330 + break; 331 + c = old; 332 + } 333 + return c != (u); 334 + } 330 335 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 331 336 332 337 #define atomic_dec_return(v) atomic_sub_return(1,(v)) ··· 701 694 * Atomically adds @a to @v, so long as it was not @u. 702 695 * Returns non-zero if @v was not @u, and zero otherwise. 703 696 */ 704 - #define atomic64_add_unless(v, a, u) \ 705 - ({ \ 706 - __typeof__((v)->counter) c, old; \ 707 - c = atomic_read(v); \ 708 - while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \ 709 - c = old; \ 710 - c != (u); \ 711 - }) 697 + static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) 698 + { 699 + long c, old; 700 + c = atomic64_read(v); 701 + for (;;) { 702 + if (unlikely(c == (u))) 703 + break; 704 + old = atomic64_cmpxchg((v), c, c + (a)); 705 + if (likely(old == c)) 706 + break; 707 + c = old; 708 + } 709 + return c != (u); 710 + } 711 + 712 712 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 713 713 714 714 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
+31 -16
include/asm-parisc/atomic.h
··· 6 6 #define _ASM_PARISC_ATOMIC_H_ 7 7 8 8 #include <linux/types.h> 9 + #include <asm/system.h> 9 10 10 11 /* 11 12 * Atomic operations that C can't guarantee us. Useful for ··· 175 174 * Atomically adds @a to @v, so long as it was not @u. 176 175 * Returns non-zero if @v was not @u, and zero otherwise. 177 176 */ 178 - #define atomic_add_unless(v, a, u) \ 179 - ({ \ 180 - __typeof__((v)->counter) c, old; \ 181 - c = atomic_read(v); \ 182 - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 183 - c = old; \ 184 - c != (u); \ 185 - }) 177 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 178 + { 179 + int c, old; 180 + c = atomic_read(v); 181 + for (;;) { 182 + if (unlikely(c == (u))) 183 + break; 184 + old = atomic_cmpxchg((v), c, c + (a)); 185 + if (likely(old == c)) 186 + break; 187 + c = old; 188 + } 189 + return c != (u); 190 + } 191 + 186 192 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 187 193 188 194 #define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v)))) ··· 291 283 * Atomically adds @a to @v, so long as it was not @u. 292 284 * Returns non-zero if @v was not @u, and zero otherwise. 293 285 */ 294 - #define atomic64_add_unless(v, a, u) \ 295 - ({ \ 296 - __typeof__((v)->counter) c, old; \ 297 - c = atomic64_read(v); \ 298 - while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \ 299 - c = old; \ 300 - c != (u); \ 301 - }) 286 + static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) 287 + { 288 + long c, old; 289 + c = atomic64_read(v); 290 + for (;;) { 291 + if (unlikely(c == (u))) 292 + break; 293 + old = atomic64_cmpxchg((v), c, c + (a)); 294 + if (likely(old == c)) 295 + break; 296 + c = old; 297 + } 298 + return c != (u); 299 + } 300 + 302 301 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 303 302 304 303 #endif /* CONFIG_64BIT */
+1
include/asm-powerpc/atomic.h
··· 11 11 #include <linux/compiler.h> 12 12 #include <asm/synch.h> 13 13 #include <asm/asm-compat.h> 14 + #include <asm/system.h> 14 15 15 16 #define ATOMIC_INIT(i) { (i) } 16 17
-1
include/asm-ppc/system.h
··· 6 6 7 7 #include <linux/kernel.h> 8 8 9 - #include <asm/atomic.h> 10 9 #include <asm/hw_irq.h> 11 10 12 11 /*
+31 -28
include/asm-sparc64/atomic.h
··· 9 9 #define __ARCH_SPARC64_ATOMIC__ 10 10 11 11 #include <linux/types.h> 12 + #include <asm/system.h> 12 13 13 14 typedef struct { volatile int counter; } atomic_t; 14 15 typedef struct { volatile __s64 counter; } atomic64_t; ··· 74 73 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) 75 74 #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 76 75 77 - #define atomic_add_unless(v, a, u) \ 78 - ({ \ 79 - __typeof__((v)->counter) c, old; \ 80 - c = atomic_read(v); \ 81 - for (;;) { \ 82 - if (unlikely(c == (u))) \ 83 - break; \ 84 - old = atomic_cmpxchg((v), c, c + (a)); \ 85 - if (likely(old == c)) \ 86 - break; \ 87 - c = old; \ 88 - } \ 89 - likely(c != (u)); \ 90 - }) 76 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 77 + { 78 + int c, old; 79 + c = atomic_read(v); 80 + for (;;) { 81 + if (unlikely(c == (u))) 82 + break; 83 + old = atomic_cmpxchg((v), c, c + (a)); 84 + if (likely(old == c)) 85 + break; 86 + c = old; 87 + } 88 + return c != (u); 89 + } 90 + 91 91 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 92 92 93 93 #define atomic64_cmpxchg(v, o, n) \ 94 94 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) 95 95 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 96 96 97 - #define atomic64_add_unless(v, a, u) \ 98 - ({ \ 99 - __typeof__((v)->counter) c, old; \ 100 - c = atomic64_read(v); \ 101 - for (;;) { \ 102 - if (unlikely(c == (u))) \ 103 - break; \ 104 - old = atomic64_cmpxchg((v), c, c + (a)); \ 105 - if (likely(old == c)) \ 106 - break; \ 107 - c = old; \ 108 - } \ 109 - likely(c != (u)); \ 110 - }) 97 + static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) 98 + { 99 + long c, old; 100 + c = atomic64_read(v); 101 + for (;;) { 102 + if (unlikely(c == (u))) 103 + break; 104 + old = atomic64_cmpxchg((v), c, c + (a)); 105 + if (likely(old == c)) 106 + break; 107 + c = old; 108 + } 109 + return c != (u); 110 + } 111 + 111 112 #define 
atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 112 113 113 114 /* Atomic operations are already serializing */
+31 -28
include/asm-x86_64/atomic.h
··· 2 2 #define __ARCH_X86_64_ATOMIC__ 3 3 4 4 #include <asm/alternative.h> 5 + #include <asm/system.h> 5 6 6 7 /* atomic_t should be 32 bit signed type */ 7 8 ··· 404 403 * Atomically adds @a to @v, so long as it was not @u. 405 404 * Returns non-zero if @v was not @u, and zero otherwise. 406 405 */ 407 - #define atomic_add_unless(v, a, u) \ 408 - ({ \ 409 - __typeof__((v)->counter) c, old; \ 410 - c = atomic_read(v); \ 411 - for (;;) { \ 412 - if (unlikely(c == (u))) \ 413 - break; \ 414 - old = atomic_cmpxchg((v), c, c + (a)); \ 415 - if (likely(old == c)) \ 416 - break; \ 417 - c = old; \ 418 - } \ 419 - c != (u); \ 420 - }) 406 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 407 + { 408 + int c, old; 409 + c = atomic_read(v); 410 + for (;;) { 411 + if (unlikely(c == (u))) 412 + break; 413 + old = atomic_cmpxchg((v), c, c + (a)); 414 + if (likely(old == c)) 415 + break; 416 + c = old; 417 + } 418 + return c != (u); 419 + } 420 + 421 421 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 422 422 423 423 /** ··· 430 428 * Atomically adds @a to @v, so long as it was not @u. 431 429 * Returns non-zero if @v was not @u, and zero otherwise. 
432 430 */ 433 - #define atomic64_add_unless(v, a, u) \ 434 - ({ \ 435 - __typeof__((v)->counter) c, old; \ 436 - c = atomic64_read(v); \ 437 - for (;;) { \ 438 - if (unlikely(c == (u))) \ 439 - break; \ 440 - old = atomic64_cmpxchg((v), c, c + (a)); \ 441 - if (likely(old == c)) \ 442 - break; \ 443 - c = old; \ 444 - } \ 445 - c != (u); \ 446 - }) 431 + static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) 432 + { 433 + long c, old; 434 + c = atomic64_read(v); 435 + for (;;) { 436 + if (unlikely(c == (u))) 437 + break; 438 + old = atomic64_cmpxchg((v), c, c + (a)); 439 + if (likely(old == c)) 440 + break; 441 + c = old; 442 + } 443 + return c != (u); 444 + } 445 + 447 446 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 448 447 449 448 /* These are x86-specific, used by some header files */
+15 -8
include/asm-xtensa/atomic.h
··· 234 234 * Atomically adds @a to @v, so long as it was not @u. 235 235 * Returns non-zero if @v was not @u, and zero otherwise. 236 236 */ 237 - #define atomic_add_unless(v, a, u) \ 238 - ({ \ 239 - int c, old; \ 240 - c = atomic_read(v); \ 241 - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 242 - c = old; \ 243 - c != (u); \ 244 - }) 237 + static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 238 + { 239 + int c, old; 240 + c = atomic_read(v); 241 + for (;;) { 242 + if (unlikely(c == (u))) 243 + break; 244 + old = atomic_cmpxchg((v), c, c + (a)); 245 + if (likely(old == c)) 246 + break; 247 + c = old; 248 + } 249 + return c != (u); 250 + } 251 + 245 252 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 246 253 247 254 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)