Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

sh: Fix sh4a llsc-based cmpxchg()

This fixes up a typo in the ll/sc based cmpxchg() code, which apparently
wasn't getting a lot of testing due to the swapped old/new pair. With
that fixed up, the ll/sc code also starts using cmpxchg() for its
atomic_cmpxchg() and provides its own atomic_add_unless().

Signed-off-by: Aoi Shinkai <shinkoi2005@gmail.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>

authored by Aoi Shinkai and committed by Paul Mundt
4c7c9978 f168dd00

+31 -4

+27 -0
arch/sh/include/asm/atomic-llsc.h
@@ -104,4 +104,31 @@
 		: "t");
 }
 
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return c != (u);
+}
+
 #endif /* __ASM_SH_ATOMIC_LLSC_H */
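For readers outside the kernel tree, the loop added above is the usual cmpxchg-based add-unless pattern: re-read the value, bail out if it already equals u, otherwise try to install c + a and retry if someone else changed it in between. A hedged userspace sketch of the same idea with C11 stdatomic follows; add_unless_model and the refcount scenario are my own illustration, not part of the patch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace model of atomic_add_unless(): add 'a' unless the value is 'u'.
 * Returns true if the add happened (old value != u). */
static bool add_unless_model(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u) {
		/* compare_exchange_weak refreshes c with the current value on
		 * failure, so the loop naturally retries with the new value. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int refs = 1;

	printf("%d\n", add_unless_model(&refs, 1, 0));	/* 1: bumped 1 -> 2 */
	atomic_store(&refs, 0);
	printf("%d\n", add_unless_model(&refs, 1, 0));	/* 0: already zero, untouched */
	return 0;
}

atomic_inc_not_zero(v), the familiar take-a-reference-only-if-still-live idiom, is simply atomic_add_unless((v), 1, 0), as the atomic.h hunk below shows in context.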
+2 -2
arch/sh/include/asm/atomic.h
@@ -45,7 +45,7 @@
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
-#ifndef CONFIG_GUSA_RB
+#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
@@ -73,7 +73,7 @@
 
 	return ret != u;
 }
-#endif
+#endif /* !CONFIG_GUSA_RB && !CONFIG_CPU_SH4A */
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
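One way to read the #if change: once the SH-4A LL/SC header supplies atomic_cmpxchg() and atomic_add_unless() itself, the generic IRQ-disabling fallbacks in atomic.h have to drop out of the build, otherwise the two sets of definitions would collide. A toy, hedged illustration of that selection; the hard-coded CONFIG macro is only for the demo, real kernels get it from Kconfig.

#include <stdio.h>

/* Pretend Kconfig selected an SH-4A build; remove this to get the fallback. */
#define CONFIG_CPU_SH4A 1

#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
#define ATOMIC_IMPL "generic irq-disable fallback in atomic.h"
#else
#define ATOMIC_IMPL "arch-provided implementation (gUSA-RB or SH-4A LL/SC)"
#endif

int main(void)
{
	puts(ATOMIC_IMPL);	/* prints the arch-provided variant for this toy config */
	return 0;
}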
+1 -1
arch/sh/include/asm/cmpxchg-llsc.h
@@ -55,7 +55,7 @@
 		"mov %0, %1 \n\t"
 		"cmp/eq %1, %3 \n\t"
 		"bf 2f \n\t"
-		"mov %3, %0 \n\t"
+		"mov %4, %0 \n\t"
 		"2: \n\t"
 		"movco.l %0, @%2 \n\t"
 		"bf 1b \n\t"
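Reading the operands: %3 is the expected old value (it is what cmp/eq tests against) and %4 is the new value to install, so the pre-fix "mov %3, %0" made a successful compare store back the value that was already in memory. The caller saw a matching return value and assumed the exchange had happened, but the new value was never written. Below is a hedged userspace model of the contract and of the pre-fix behaviour; this is plain C, not the SH LL/SC sequence, and the function names are mine.

#include <stdio.h>

/* The cmpxchg() contract: store 'new' only when *p == old, and always
 * return the prior contents so the caller can tell if the swap happened. */
static unsigned long good_cmpxchg(unsigned long *p, unsigned long old,
				  unsigned long new)
{
	unsigned long prev = *p;

	if (prev == old)
		*p = new;
	return prev;
}

/* Pre-fix behaviour: the compare succeeds, but 'old' is written back,
 * mirroring "mov %3, %0" picking the wrong operand. */
static unsigned long broken_cmpxchg(unsigned long *p, unsigned long old,
				    unsigned long new)
{
	unsigned long prev = *p;

	if (prev == old)
		*p = old;	/* should have been 'new' */
	return prev;
}

int main(void)
{
	unsigned long a = 5, b = 5;

	good_cmpxchg(&a, 5, 7);
	broken_cmpxchg(&b, 5, 7);
	printf("good=%lu broken=%lu\n", a, b);	/* good=7 broken=5 */
	return 0;
}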
+1 -1
arch/sh/include/asm/spinlock.h
@@ -26,7 +26,7 @@
 #define __raw_spin_is_locked(x) ((x)->lock <= 0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_spin_unlock_wait(x) \
-	do { cpu_relax(); } while ((x)->lock)
+	do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
 
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
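The context line above gives the lock encoding: __raw_spin_is_locked(x) is ((x)->lock <= 0), so a positive lock word means the lock is free. The old wait loop spun while the word was non-zero rather than while the is_locked predicate held, so on a free lock it would busy-wait instead of returning, the opposite of what unlock_wait is for. A hedged userspace model of the corrected behaviour; the struct and function names are made up for the sketch.

#include <stdio.h>

/* Userspace model of the SH lock word: positive = free, <= 0 = held,
 * mirroring __raw_spin_is_locked(x) being ((x)->lock <= 0). */
struct lock_model { volatile int lock; };

static int model_is_locked(struct lock_model *x)
{
	return x->lock <= 0;
}

/* Fixed semantics: spin only while the lock is held, then return. */
static void unlock_wait_model(struct lock_model *x)
{
	while (model_is_locked(x))
		;	/* cpu_relax() would sit here in the kernel */
}

int main(void)
{
	struct lock_model l = { .lock = 1 };	/* currently free */

	unlock_wait_model(&l);	/* returns immediately, as it should */
	/* The old "do { cpu_relax(); } while ((x)->lock)" form would spin
	 * here, because it tested the raw word against zero instead of
	 * using the is_locked predicate. */
	printf("unlock_wait returned with lock word %d\n", l.lock);
	return 0;
}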