[PATCH] mm: fill arch atomic64 gaps

alpha, sparc64 and x86_64 are each missing some primitives from their atomic64
support: fill in the gaps I've noticed by extrapolating from the existing asm,
following the groupings in each file.  But powerpc and parisc still lack
atomic64 support.
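
Illustration only, not part of the patch: with these gaps filled, 64-bit
counters can use the same derived helpers callers already rely on for
atomic_t.  A hypothetical user (charge(), credit() and total are made-up
names) exercising two of the newly added primitives:

	#include <linux/kernel.h>
	#include <asm/atomic.h>

	static atomic64_t total = ATOMIC64_INIT(0);

	static void charge(long nr)
	{
		/* atomic64_add_negative(): true if the sum dropped below zero */
		if (atomic64_add_negative(-nr, &total))
			printk(KERN_DEBUG "total went negative\n");
	}

	static void credit(void)
	{
		/* atomic64_inc_and_test(): true if the increment brought it to zero */
		if (atomic64_inc_and_test(&total))
			printk(KERN_DEBUG "total back to zero\n");
	}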

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Andi Kleen <ak@muc.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Hugh Dickins and committed by Linus Torvalds (7c72aaf2, 7ce774b4)

+44 -15
+5 -2
include/asm-alpha/atomic.h
···
 	return result;
 }
 
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long temp, result;
···
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
 
···
 #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
 
 #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
+
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
 
+1
include/asm-sparc64/atomic.h
···
  * other cases.
  */
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 
 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
 #define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
+38 -13
include/asm-x86_64/atomic.h
···
 
 /**
  * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
  * @i: integer value to add
+ * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
···
 	:"ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
+
+/**
+ * atomic_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+	int __i = i;
+	__asm__ __volatile__(
+		LOCK "xaddl %0, %1;"
+		:"=r"(i)
+		:"m"(v->counter), "0"(i));
+	return i + __i;
+}
+
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+	return atomic_add_return(-i,v);
+}
+
+#define atomic_inc_return(v) (atomic_add_return(1,v))
+#define atomic_dec_return(v) (atomic_sub_return(1,v))
 
 /* An 64bit atomic type */
 
···
 
 /**
  * atomic64_add_negative - add and test if negative
- * @v: pointer to atomic64_t
  * @i: integer value to add
+ * @v: pointer to type atomic64_t
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static __inline__ long atomic64_add_negative(long i, atomic64_t *v)
+static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
 {
 	unsigned char c;
 
···
 }
 
 /**
- * atomic_add_return - add and return
- * @v: pointer of type atomic_t
+ * atomic64_add_return - add and return
  * @i: integer value to add
+ * @v: pointer to type atomic64_t
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
 {
-	int __i = i;
+	long __i = i;
 	__asm__ __volatile__(
-		LOCK "xaddl %0, %1;"
+		LOCK "xaddq %0, %1;"
 		:"=r"(i)
 		:"m"(v->counter), "0"(i));
 	return i + __i;
 }
 
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 {
-	return atomic_add_return(-i,v);
+	return atomic64_add_return(-i,v);
 }
+
+#define atomic64_inc_return(v) (atomic64_add_return(1,v))
+#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
 
···
 	c != (u); \
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc_return(v) (atomic_add_return(1,v))
-#define atomic_dec_return(v) (atomic_sub_return(1,v))
 
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
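
Semantic reference only, not kernel code and not part of the patch: the
xadd-based add_return above leaves the old counter value in %0, so the new
value is reconstructed as old + increment.  A userspace C11 sketch of the
same value contract (names are made up):

	#include <stdatomic.h>

	/* What LOCK xaddq gives atomic64_add_return(): fetch_add returns the
	 * old value, and add_return reports old + i, i.e. the new value. */
	static inline long atomic64_add_return_sketch(long i, _Atomic long *counter)
	{
		long old = atomic_fetch_add(counter, i);	/* like xaddq: old value */
		return old + i;					/* value after the add */
	}

	static inline long atomic64_sub_return_sketch(long i, _Atomic long *counter)
	{
		return atomic64_add_return_sketch(-i, counter);
	}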