Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: Use xadd helper more widely

This covers the trivial cases from open-coded xadd to the xadd macros.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

Authored by Jeremy Fitzhardinge; committed by H. Peter Anvin.
8b8bc2f7 433b3520

+5 -23
+2 -6
arch/x86/include/asm/atomic.h
@@ -172,18 +172,14 @@
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	int __i;
 #ifdef CONFIG_M386
+	int __i;
 	unsigned long flags;
 	if (unlikely(boot_cpu_data.x86 <= 3))
 		goto no_xadd;
 #endif
 	/* Modern 486+ processor */
-	__i = i;
-	asm volatile(LOCK_PREFIX "xaddl %0, %1"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 
 #ifdef CONFIG_M386
 no_xadd:	/* Legacy 386 processor */
+1 -5
arch/x86/include/asm/atomic64_64.h
@@ -170,11 +170,7 @@
  */
 static inline long atomic64_add_return(long i, atomic64_t *v)
 {
-	long __i = i;
-	asm volatile(LOCK_PREFIX "xaddq %0, %1;"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
+1 -7
arch/x86/include/asm/rwsem.h
@@ -204,13 +204,7 @@
  */
 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	long tmp = delta;
-
-	asm volatile(LOCK_PREFIX "xadd %0,%1"
-		     : "+r" (tmp), "+m" (sem->count)
-		     : : "memory");
-
-	return tmp + delta;
+	return delta + xadd(&sem->count, delta);
 }
 
 #endif /* __KERNEL__ */
+1 -5
arch/x86/include/asm/uv/uv_bau.h
@@ -656,11 +656,7 @@
  */
 static inline int atom_asr(short i, struct atomic_short *v)
 {
-	short __i = i;
-	asm volatile(LOCK_PREFIX "xaddw %0, %1"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 }
 
 /*