Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

atomic: move atomic_add_unless to generic code

This is in preparation for more generic atomic primitives based on
__atomic_add_unless.

Signed-off-by: Arun Sharma <asharma@fb.com>
Signed-off-by: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
Reviewed-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: David Miller <davem@davemloft.net>
Acked-by: Mike Frysinger <vapier@gentoo.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
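
The change in calling convention, in brief: each architecture's primitive is renamed to __atomic_add_unless() and now returns the counter's old value instead of a truth value, while a single generic wrapper in include/linux/atomic.h recovers the historical boolean semantics. A minimal sketch of the two contracts, distilled from the asm-generic and include/linux/atomic.h hunks below:

/* Per-arch primitive (the cmpxchg-based asm-generic version shown here):
 * returns the value @v held before the operation, not a boolean, so
 * future callers can get more than success/failure out of one primitive. */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c;
}

/* Generic wrapper: preserves the old atomic_add_unless() contract,
 * non-zero iff the add happened (i.e. @v was not @u). */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}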

Authored by Arun Sharma, committed by Linus Torvalds
f24219b4 60063497

+109 -102
+5 -5
arch/alpha/include/asm/atomic.h
···
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
···
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
+2 -2
arch/arm/include/asm/atomic.h
···
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 
 	c = atomic_read(v);
 	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
 		c = old;
-	return c != u;
+	return c;
 }
 
 #define atomic_inc(v)		atomic_add(1, v)
+25 -32
arch/avr32/include/asm/atomic.h
···
 /*
  * atomic_sub_unless - sub unless the number is a given value
  * @v: pointer of type atomic_t
- * @a: the amount to add to v...
+ * @a: the amount to subtract from v...
  * @u: ...unless v is equal to u.
  *
- * If the atomic value v is not equal to u, this function subtracts a
- * from v, and returns non zero. If v is equal to u then it returns
- * zero. This is done as an atomic operation.
+ * Atomically subtract @a from @v, so long as it was not @u.
+ * Returns the old value of @v.
  */
-static inline int atomic_sub_unless(atomic_t *v, int a, int u)
+static inline void atomic_sub_unless(atomic_t *v, int a, int u)
 {
-	int tmp, result = 0;
+	int tmp;
 
 	asm volatile(
 		"/* atomic_sub_unless */\n"
 		"1:	ssrf	5\n"
-		"	ld.w	%0, %3\n"
-		"	cp.w	%0, %5\n"
+		"	ld.w	%0, %2\n"
+		"	cp.w	%0, %4\n"
 		"	breq	1f\n"
-		"	sub	%0, %4\n"
-		"	stcond	%2, %0\n"
+		"	sub	%0, %3\n"
+		"	stcond	%1, %0\n"
 		"	brne	1b\n"
-		"	mov	%1, 1\n"
 		"1:"
-		: "=&r"(tmp), "=&r"(result), "=o"(v->counter)
-		: "m"(v->counter), "rKs21"(a), "rKs21"(u), "1"(result)
+		: "=&r"(tmp), "=o"(v->counter)
+		: "m"(v->counter), "rKs21"(a), "rKs21"(u)
 		: "cc", "memory");
-
-	return result;
 }
 
 /*
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * If the atomic value v is not equal to u, this function adds a to v,
- * and returns non zero. If v is equal to u then it returns zero. This
- * is done as an atomic operation.
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int tmp, result;
+	int tmp, old = atomic_read(v);
 
 	if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576))
-		result = atomic_sub_unless(v, -a, u);
+		atomic_sub_unless(v, -a, u);
 	else {
-		result = 0;
 		asm volatile(
-			"/* atomic_add_unless */\n"
+			"/* __atomic_add_unless */\n"
 			"1:	ssrf	5\n"
-			"	ld.w	%0, %3\n"
-			"	cp.w	%0, %5\n"
+			"	ld.w	%0, %2\n"
+			"	cp.w	%0, %4\n"
 			"	breq	1f\n"
-			"	add	%0, %4\n"
-			"	stcond	%2, %0\n"
+			"	add	%0, %3\n"
+			"	stcond	%1, %0\n"
 			"	brne	1b\n"
-			"	mov	%1, 1\n"
 			"1:"
-			: "=&r"(tmp), "=&r"(result), "=o"(v->counter)
-			: "m"(v->counter), "r"(a), "ir"(u), "1"(result)
+			: "=&r"(tmp), "=o"(v->counter)
+			: "m"(v->counter), "r"(a), "ir"(u)
 			: "cc", "memory");
 	}
 
-	return result;
+	return old;
 }
 
 /*
+2 -2
arch/blackfin/include/asm/atomic.h
···
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u)				\
+#define __atomic_add_unless(v, a, u)				\
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
 	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
 		c = old;					\
-	c != (u);						\
+	c;							\
 })
 
 /*
+2 -2
arch/cris/include/asm/atomic.h
···
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
 	unsigned long flags;
···
 	if (ret != u)
 		v->counter += a;
 	cris_atomic_restore(v, flags);
-	return ret != u;
+	return ret;
 }
 
 /* Atomic operations are already serializing */
+2 -2
arch/frv/include/asm/atomic.h
···
 #define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
 #define atomic64_xchg(v, new)		(__xchg_64(new, &(v)->counter))
 
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
+2 -2
arch/h8300/include/asm/atomic.h
···
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
 	unsigned long flags;
···
 	if (ret != u)
 		v->counter += a;
 	local_irq_restore(flags);
-	return ret != u;
+	return ret;
 }
 
 static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
+2 -2
arch/ia64/include/asm/atomic.h
···
 	(cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
+4 -4
arch/m32r/include/asm/atomic.h
···
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
+2 -2
arch/m68k/include/asm/atomic.h
···
 	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
 }
 
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
+5 -5
arch/mips/include/asm/atomic.h
···
 #define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
 #define atomic_dec_return(v) atomic_sub_return(1, (v))
···
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
+2 -2
arch/mn10300/include/asm/atomic.h
···
 #define atomic_dec_and_test(v)	(atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v)	(atomic_add_return(1, (v)) == 0)
 
-#define atomic_add_unless(v, a, u)				\
+#define __atomic_add_unless(v, a, u)				\
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
 	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
 		c = old;					\
-	c != (u);						\
+	c;							\
 })
 
+5 -5
arch/parisc/include/asm/atomic.h
···
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
···
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
+7 -7
arch/powerpc/include/asm/atomic.h
···
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int t;
 
 	__asm__ __volatile__ (
 	PPC_RELEASE_BARRIER
-"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
+"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
 	cmpw	0,%0,%3 \n\
 	beq-	2f \n\
 	add	%0,%2,%0 \n"
···
 	: "r" (&v->counter), "r" (a), "r" (u)
 	: "cc", "memory");
 
-	return t != u;
+	return t;
 }
 
···
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
···
 
 	__asm__ __volatile__ (
 	PPC_RELEASE_BARRIER
-"1:	ldarx	%0,0,%1		# atomic_add_unless\n\
+"1:	ldarx	%0,0,%1		# __atomic_add_unless\n\
 	cmpd	0,%0,%3 \n\
 	beq-	2f \n\
 	add	%0,%2,%0 \n"
+2 -2
arch/s390/include/asm/atomic.h
···
 	return old;
 }
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 			break;
 		c = old;
 	}
-	return c != u;
+	return c;
}
 
+4 -4
arch/sh/include/asm/atomic.h
···
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 		c = old;
 	}
 
-	return c != (u);
+	return c;
 }
 
 #define smp_mb__before_atomic_dec()	smp_mb()
+1 -1
arch/sparc/include/asm/atomic_32.h
···
 extern int __atomic_add_return(int, atomic_t *);
 extern int atomic_cmpxchg(atomic_t *, int, int);
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-extern int atomic_add_unless(atomic_t *, int, int);
+extern int __atomic_add_unless(atomic_t *, int, int);
 extern void atomic_set(atomic_t *, int);
 
 #define atomic_read(v)          (*(volatile int *)&(v)->counter)
+2 -2
arch/sparc/include/asm/atomic_64.h
···
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
+5 -5
arch/tile/include/asm/atomic_32.h
···
 }
 
 /**
- * atomic_add_unless - add unless the number is already a given value
+ * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	smp_mb();  /* barrier for proper semantics */
-	return _atomic_xchg_add_unless(v, a, u) != u;
+	return _atomic_xchg_add_unless(v, a, u);
 }
 
 /**
···
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 {
+2 -2
arch/tile/include/asm/atomic_64.h
···
 	return val;
 }
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int guess, oldval = v->counter;
 	do {
···
 		guess = oldval;
 		oldval = atomic_cmpxchg(v, guess, guess + a);
 	} while (guess != oldval);
-	return oldval != u;
+	return oldval;
 }
 
 /* Now the true 64-bit operations. */
+4 -4
arch/x86/include/asm/atomic.h
···
 }
 
 /**
- * atomic_add_unless - add unless the number is already a given value
+ * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
+1 -1
arch/x86/include/asm/atomic64_32.h
···
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
+1 -1
arch/x86/include/asm/atomic64_64.h
···
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
+4 -4
arch/xtensa/include/asm/atomic.h
···
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
···
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
+2 -2
include/asm-generic/atomic.h
···
 
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
 	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
 		c = old;
-	return c != u;
+	return c;
 }
 
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+14
···
 #include <asm/atomic.h>
 
 /**
+ * atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	return __atomic_add_unless(v, a, u) != u;
+}
+
+/**
  * atomic_inc_not_zero - increment unless the number is zero
  * @v: pointer of type atomic_t
  *
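
With the wrapper in place, existing call sites keep working unchanged. A hypothetical caller (for illustration only, not part of this patch) that takes a reference only while the count is non-zero:

/* Hypothetical example: atomic_add_unless() still returns non-zero on
 * success, exactly as it did before this patch, so the classic
 * inc-not-zero idiom is untouched. */
static inline int get_ref_if_live(atomic_t *refcount)
{
	return atomic_add_unless(refcount, 1, 0);	/* atomic_inc_not_zero() idiom */
}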
··· 3 3 #include <asm/atomic.h> 4 4 5 5 /** 6 + * atomic_add_unless - add unless the number is already a given value 7 + * @v: pointer of type atomic_t 8 + * @a: the amount to add to v... 9 + * @u: ...unless v is equal to u. 10 + * 11 + * Atomically adds @a to @v, so long as @v was not already @u. 12 + * Returns non-zero if @v was not @u, and zero otherwise. 13 + */ 14 + static inline int atomic_add_unless(atomic_t *v, int a, int u) 15 + { 16 + return __atomic_add_unless(v, a, u) != u; 17 + } 18 + 19 + /** 6 20 * atomic_inc_not_zero - increment unless the number is zero 7 21 * @v: pointer of type atomic_t 8 22 *