[PATCH] m32r: Introduce atomic_cmpxchg and atomic_inc_not_zero operations

Introduce atomic_cmpxchg and atomic_inc_not_zero operations for m32r.

Signed-off-by: Hayato Fujiwara <fujiwara@linux-m32r.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Hirokazu Takata, committed by Linus Torvalds
0332db5a 91f4ab05

 include/asm-m32r/atomic.h |   21 ++++++++++++++
 include/asm-m32r/system.h |   64 ++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 83 insertions(+), 2 deletions(-)

diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -242,6 +242,27 @@
  */
 #define atomic_add_negative(i,v)	(atomic_add_return((i), (v)) < 0)
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
 {
 	unsigned long flags;
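For context (not part of the patch itself): atomic_inc_not_zero() is the
usual consumer of atomic_add_unless(), e.g. taking a reference only while a
refcount is still non-zero. Below is a minimal userspace C11 model of the
atomic_add_unless() loop added above; the names add_unless and refcnt are
hypothetical, for illustration only, and C11 atomics stand in for the
kernel's primitives.

#include <stdatomic.h>
#include <stdio.h>

/* Model of atomic_add_unless(v, a, u): add 'a' to '*v' unless it is 'u';
 * return non-zero iff the value seen was not 'u'. */
static int add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	/* Retry the CAS until it lands; a failed CAS reloads 'c'. */
	while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
		;
	return c != u;
}

int main(void)
{
	atomic_int refcnt = 1;

	printf("%d\n", add_unless(&refcnt, 1, 0));	/* 1: refcnt 1 -> 2 */
	atomic_store(&refcnt, 0);
	printf("%d\n", add_unless(&refcnt, 1, 0));	/* 0: refcnt stays 0 */
	return 0;
}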
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -11,6 +11,7 @@
  */
 
 #include <linux/config.h>
+#include <asm/assembler.h>
 
 #ifdef __KERNEL__
 
@@ -133,8 +134,6 @@
 	!(flags & 0x40);	\
 })
 
-#endif	/* __KERNEL__ */
-
 #define nop()	__asm__ __volatile__ ("nop" : : )
 
 #define xchg(ptr,x) \
@@ -211,6 +210,67 @@
 
 	return (tmp);
 }
+
+#define __HAVE_ARCH_CMPXCHG	1
+
+static __inline__ unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
+{
+	unsigned long flags;
+	unsigned int retval;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+			DCACHE_CLEAR("%0", "r4", "%1")
+			M32R_LOCK" %0, @%1;	\n"
+		"	bne	%0, %2, 1f;	\n"
+			M32R_UNLOCK" %3, @%1;	\n"
+		"	bra	2f;		\n"
+		"	.fillinsn		\n"
+		"1:"
+			M32R_UNLOCK" %2, @%1;	\n"
+		"	.fillinsn		\n"
+		"2:"
+		: "=&r" (retval)
+		: "r" (p), "r" (old), "r" (new)
+		: "cbit", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+#if 0	/* we don't have __cmpxchg_u64 */
+	case 8:
+		return __cmpxchg_u64(ptr, old, new);
+#endif /* 0 */
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)						\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
+			(unsigned long)_n_, sizeof(*(ptr)));		\
+})
+
+#endif	/* __KERNEL__ */
 
 /*
  * Memory barrier.
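For context (not part of the patch itself): cmpxchg() returns the value that
was in *ptr before the operation, so callers detect success by comparing the
return value against 'o'. On m32r the atomicity comes from the
M32R_LOCK/M32R_UNLOCK instruction pair with interrupts disabled; the
userspace C11 sketch below (with a hypothetical name, cmpxchg_u32) models
only that calling contract, not the locking mechanism.

#include <stdatomic.h>
#include <stdio.h>

/* Model of __cmpxchg_u32(): store 'new' only if '*p' equals 'old';
 * always return the previous value. */
static unsigned int cmpxchg_u32(atomic_uint *p, unsigned int old,
				unsigned int new)
{
	unsigned int prev = old;

	/* On mismatch, 'prev' is overwritten with the current value. */
	atomic_compare_exchange_strong(p, &prev, new);
	return prev;
}

int main(void)
{
	atomic_uint v = 5;

	printf("%u\n", cmpxchg_u32(&v, 5, 9));	/* 5: match, v becomes 9 */
	printf("%u\n", cmpxchg_u32(&v, 5, 7));	/* 9: mismatch, v stays 9 */
	return 0;
}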