Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Fixup asm/cmpxchg.h with correct ordering barrier

Optimize the performance of cmpxchg by using more fine-grained
acquire/release barriers.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Paul E. McKenney <paulmck@kernel.org>

Guo Ren c38425df d6c5cb9f

+17 -10
+17 -10
arch/csky/include/asm/cmpxchg.h
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -3,12 +3,12 @@
 #ifndef __ASM_CSKY_CMPXCHG_H
 #define __ASM_CSKY_CMPXCHG_H
 
-#ifdef CONFIG_CPU_HAS_LDSTEX
+#ifdef CONFIG_SMP
 #include <asm/barrier.h>
 
 extern void __bad_xchg(void);
 
-#define __xchg(new, ptr, size)					\
+#define __xchg_relaxed(new, ptr, size)				\
 ({								\
 	__typeof__(ptr) __ptr = (ptr);				\
 	__typeof__(new) __new = (new);				\
@@ -16,14 +16,12 @@
 	unsigned long tmp;					\
 	switch (size) {						\
 	case 4:							\
-		smp_mb();					\
 		asm volatile (					\
 		"1:	ldex.w		%0, (%3) \n"		\
 		"	mov		%1, %2   \n"		\
@@ -24,7 +25,6 @@
 			: "=&r" (__ret), "=&r" (tmp)		\
 			: "r" (__new), "r"(__ptr)		\
 			:);					\
-		smp_mb();					\
 		break;						\
 	default:						\
 		__bad_xchg();					\
@@ -31,9 +33,10 @@
 	__ret;							\
 })
 
-#define xchg(ptr, x)	(__xchg((x), (ptr), sizeof(*(ptr))))
+#define xchg_relaxed(ptr, x) \
+		(__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
 
-#define __cmpxchg(ptr, old, new, size)				\
+#define __cmpxchg_relaxed(ptr, old, new, size)			\
 ({								\
 	__typeof__(ptr) __ptr = (ptr);				\
 	__typeof__(new) __new = (new);				\
@@ -43,7 +44,6 @@
 	__typeof__(*(ptr)) __ret;				\
 	switch (size) {						\
 	case 4:							\
-		smp_mb();					\
 		asm volatile (					\
 		"1:	ldex.w		%0, (%3) \n"		\
 		"	cmpne		%0, %4   \n"		\
@@ -54,7 +56,6 @@
 			: "=&r" (__ret), "=&r" (__tmp)		\
 			: "r" (__new), "r"(__ptr), "r"(__old)	\
 			:);					\
-		smp_mb();					\
 		break;						\
 	default:						\
 		__bad_xchg();					\
@@ -61,8 +64,18 @@
 	__ret;							\
 })
 
-#define cmpxchg(ptr, o, n)					\
-	(__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
+#define cmpxchg_relaxed(ptr, o, n)				\
+	(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+
+#define cmpxchg(ptr, o, n)					\
+({								\
+	__typeof__(*(ptr)) __ret;				\
+	__smp_release_fence();					\
+	__ret = cmpxchg_relaxed(ptr, o, n);			\
+	__smp_acquire_fence();					\
+	__ret;							\
+})
+
 #else
 #include <asm-generic/cmpxchg.h>
 #endif