Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] mutex subsystem, add atomic_xchg() to all arches

add atomic_xchg() to all the architectures. Needed by the new mutex code.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>

Authored and committed by Ingo Molnar.
ffbf670f f17578de

+30
+1
include/asm-alpha/atomic.h
@@ -176,6 +176,7 @@
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
+2
include/asm-arm/atomic.h
@@ -175,6 +175,8 @@
 
 #endif /* __LINUX_ARM_ARCH__ */
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
+2
include/asm-arm26/atomic.h
@@ -76,6 +76,8 @@
 	return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
+2
include/asm-cris/atomic.h
@@ -136,6 +136,8 @@
 	return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
+1
include/asm-frv/atomic.h
@@ -328,6 +328,7 @@
 #endif
 
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
+2
include/asm-h8300/atomic.h
@@ -95,6 +95,8 @@
 	return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
+1
include/asm-i386/atomic.h
@@ -216,6 +216,7 @@
 }
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
+1
include/asm-ia64/atomic.h
@@ -89,6 +89,7 @@
 }
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
+1
include/asm-m32r/atomic.h
@@ -243,6 +243,7 @@
 #define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
+1
include/asm-m68k/atomic.h
@@ -140,6 +140,7 @@
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
+1
include/asm-m68knommu/atomic.h
@@ -129,6 +129,7 @@
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
+1
include/asm-mips/atomic.h
@@ -289,6 +289,7 @@
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
+1
include/asm-parisc/atomic.h
@@ -165,6 +165,7 @@
 
 /* exported interface */
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
+1
include/asm-powerpc/atomic.h
@@ -165,6 +165,7 @@
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
+2
include/asm-s390/atomic.h
@@ -75,6 +75,8 @@
 	__CS_LOOP(v, mask, "or");
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	__asm__ __volatile__(" cs %0,%3,0(%2)\n"
+2
include/asm-sh/atomic.h
@@ -101,6 +101,8 @@
 	return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
+2
include/asm-sh64/atomic.h
@@ -113,6 +113,8 @@
 	return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
+1
include/asm-sparc/atomic.h
@@ -20,6 +20,7 @@
 
 extern int __atomic_add_return(int, atomic_t *);
 extern int atomic_cmpxchg(atomic_t *, int, int);
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 extern int atomic_add_unless(atomic_t *, int, int);
 extern void atomic_set(atomic_t *, int);
 
+1
include/asm-sparc64/atomic.h
@@ -72,6 +72,7 @@
 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
+2
include/asm-v850/atomic.h
@@ -104,6 +104,8 @@
 	return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
+1
include/asm-x86_64/atomic.h
@@ -389,6 +389,7 @@
 #define atomic64_dec_return(v) (atomic64_sub_return(1,v))
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
+1
include/asm-xtensa/atomic.h
@@ -224,6 +224,7 @@
 #define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value