Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/atomic: openrisc: move to ARCH_ATOMIC

We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates openrisc to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Stafford Horne <shorne@gmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-26-mark.rutland@arm.com

Authored by Mark Rutland; committed by Peter Zijlstra.
3f1e931d 7e517b4c

+26 -21
+1
arch/openrisc/Kconfig
··· 7 7 config OPENRISC 8 8 def_bool y 9 9 select ARCH_32BIT_OFF_T 10 + select ARCH_ATOMIC 10 11 select ARCH_HAS_DMA_SET_UNCACHED 11 12 select ARCH_HAS_DMA_CLEAR_UNCACHED 12 13 select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+23 -19
arch/openrisc/include/asm/atomic.h
··· 13 13 14 14 /* Atomically perform op with v->counter and i */ 15 15 #define ATOMIC_OP(op) \ 16 - static inline void atomic_##op(int i, atomic_t *v) \ 16 + static inline void arch_atomic_##op(int i, atomic_t *v) \ 17 17 { \ 18 18 int tmp; \ 19 19 \ ··· 30 30 31 31 /* Atomically perform op with v->counter and i, return the result */ 32 32 #define ATOMIC_OP_RETURN(op) \ 33 - static inline int atomic_##op##_return(int i, atomic_t *v) \ 33 + static inline int arch_atomic_##op##_return(int i, atomic_t *v) \ 34 34 { \ 35 35 int tmp; \ 36 36 \ ··· 49 49 50 50 /* Atomically perform op with v->counter and i, return orig v->counter */ 51 51 #define ATOMIC_FETCH_OP(op) \ 52 - static inline int atomic_fetch_##op(int i, atomic_t *v) \ 52 + static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \ 53 53 { \ 54 54 int tmp, old; \ 55 55 \ ··· 75 75 ATOMIC_FETCH_OP(or) 76 76 ATOMIC_FETCH_OP(xor) 77 77 78 + ATOMIC_OP(add) 79 + ATOMIC_OP(sub) 78 80 ATOMIC_OP(and) 79 81 ATOMIC_OP(or) 80 82 ATOMIC_OP(xor) ··· 85 83 #undef ATOMIC_OP_RETURN 86 84 #undef ATOMIC_OP 87 85 88 - #define atomic_add_return atomic_add_return 89 - #define atomic_sub_return atomic_sub_return 90 - #define atomic_fetch_add atomic_fetch_add 91 - #define atomic_fetch_sub atomic_fetch_sub 92 - #define atomic_fetch_and atomic_fetch_and 93 - #define atomic_fetch_or atomic_fetch_or 94 - #define atomic_fetch_xor atomic_fetch_xor 95 - #define atomic_and atomic_and 96 - #define atomic_or atomic_or 97 - #define atomic_xor atomic_xor 86 + #define arch_atomic_add_return arch_atomic_add_return 87 + #define arch_atomic_sub_return arch_atomic_sub_return 88 + #define arch_atomic_fetch_add arch_atomic_fetch_add 89 + #define arch_atomic_fetch_sub arch_atomic_fetch_sub 90 + #define arch_atomic_fetch_and arch_atomic_fetch_and 91 + #define arch_atomic_fetch_or arch_atomic_fetch_or 92 + #define arch_atomic_fetch_xor arch_atomic_fetch_xor 93 + #define arch_atomic_add arch_atomic_add 94 + #define arch_atomic_sub arch_atomic_sub 95 + #define arch_atomic_and arch_atomic_and 96 + #define arch_atomic_or arch_atomic_or 97 + #define arch_atomic_xor arch_atomic_xor 98 98 99 99 /* 100 100 * Atomically add a to v->counter as long as v is not already u. ··· 104 100 * 105 101 * This is often used through atomic_inc_not_zero() 106 102 */ 107 - static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 103 + static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) 108 104 { 109 105 int old, tmp; 110 106 ··· 123 119 124 120 return old; 125 121 } 126 - #define atomic_fetch_add_unless atomic_fetch_add_unless 122 + #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless 127 123 128 - #define atomic_read(v) READ_ONCE((v)->counter) 129 - #define atomic_set(v,i) WRITE_ONCE((v)->counter, (i)) 124 + #define arch_atomic_read(v) READ_ONCE((v)->counter) 125 + #define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i)) 130 126 131 127 #include <asm/cmpxchg.h> 132 128 133 - #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) 134 - #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) 129 + #define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v))) 130 + #define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new))) 135 131 136 132 #endif /* __ASM_OPENRISC_ATOMIC_H */
+2 -2
arch/openrisc/include/asm/cmpxchg.h
··· 132 132 } 133 133 } 134 134 135 - #define cmpxchg(ptr, o, n) \ 135 + #define arch_cmpxchg(ptr, o, n) \ 136 136 ({ \ 137 137 (__typeof__(*(ptr))) __cmpxchg((ptr), \ 138 138 (unsigned long)(o), \ ··· 161 161 } 162 162 } 163 163 164 - #define xchg(ptr, with) \ 164 + #define arch_xchg(ptr, with) \ 165 165 ({ \ 166 166 (__typeof__(*(ptr))) __xchg((ptr), \ 167 167 (unsigned long)(with), \