Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/atomic: parisc: move to ARCH_ATOMIC

We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates parisc to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-27-mark.rutland@arm.com

Authored by Mark Rutland, committed by Peter Zijlstra.
329c161b 3f1e931d

+24 -23
+1
arch/parisc/Kconfig
··· 2 2 config PARISC 3 3 def_bool y 4 4 select ARCH_32BIT_OFF_T if !64BIT 5 + select ARCH_ATOMIC 5 6 select ARCH_MIGHT_HAVE_PC_PARPORT 6 7 select HAVE_IDE 7 8 select HAVE_FUNCTION_TRACER
+17 -17
arch/parisc/include/asm/atomic.h
··· 56 56 * are atomic, so a reader never sees inconsistent values. 57 57 */ 58 58 59 - static __inline__ void atomic_set(atomic_t *v, int i) 59 + static __inline__ void arch_atomic_set(atomic_t *v, int i) 60 60 { 61 61 unsigned long flags; 62 62 _atomic_spin_lock_irqsave(v, flags); ··· 66 66 _atomic_spin_unlock_irqrestore(v, flags); 67 67 } 68 68 69 - #define atomic_set_release(v, i) atomic_set((v), (i)) 69 + #define arch_atomic_set_release(v, i) arch_atomic_set((v), (i)) 70 70 71 - static __inline__ int atomic_read(const atomic_t *v) 71 + static __inline__ int arch_atomic_read(const atomic_t *v) 72 72 { 73 73 return READ_ONCE((v)->counter); 74 74 } 75 75 76 76 /* exported interface */ 77 - #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) 78 - #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 77 + #define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n))) 78 + #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) 79 79 80 80 #define ATOMIC_OP(op, c_op) \ 81 - static __inline__ void atomic_##op(int i, atomic_t *v) \ 81 + static __inline__ void arch_atomic_##op(int i, atomic_t *v) \ 82 82 { \ 83 83 unsigned long flags; \ 84 84 \ ··· 88 88 } 89 89 90 90 #define ATOMIC_OP_RETURN(op, c_op) \ 91 - static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ 91 + static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \ 92 92 { \ 93 93 unsigned long flags; \ 94 94 int ret; \ ··· 101 101 } 102 102 103 103 #define ATOMIC_FETCH_OP(op, c_op) \ 104 - static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \ 104 + static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \ 105 105 { \ 106 106 unsigned long flags; \ 107 107 int ret; \ ··· 141 141 #define ATOMIC64_INIT(i) { (i) } 142 142 143 143 #define ATOMIC64_OP(op, c_op) \ 144 - static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \ 144 + static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \ 145 145 { \ 146 146 unsigned long flags; \ 147 147 \ ··· 151 151 } 152 152 153 153 #define ATOMIC64_OP_RETURN(op, c_op) \ 154 - static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \ 154 + static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \ 155 155 { \ 156 156 unsigned long flags; \ 157 157 s64 ret; \ ··· 164 164 } 165 165 166 166 #define ATOMIC64_FETCH_OP(op, c_op) \ 167 - static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \ 167 + static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \ 168 168 { \ 169 169 unsigned long flags; \ 170 170 s64 ret; \ ··· 200 200 #undef ATOMIC64_OP 201 201 202 202 static __inline__ void 203 - atomic64_set(atomic64_t *v, s64 i) 203 + arch_atomic64_set(atomic64_t *v, s64 i) 204 204 { 205 205 unsigned long flags; 206 206 _atomic_spin_lock_irqsave(v, flags); ··· 210 210 _atomic_spin_unlock_irqrestore(v, flags); 211 211 } 212 212 213 - #define atomic64_set_release(v, i) atomic64_set((v), (i)) 213 + #define arch_atomic64_set_release(v, i) arch_atomic64_set((v), (i)) 214 214 215 215 static __inline__ s64 216 - atomic64_read(const atomic64_t *v) 216 + arch_atomic64_read(const atomic64_t *v) 217 217 { 218 218 return READ_ONCE((v)->counter); 219 219 } 220 220 221 221 /* exported interface */ 222 - #define atomic64_cmpxchg(v, o, n) \ 223 - ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) 224 - #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 222 + #define arch_atomic64_cmpxchg(v, o, n) \ 223 + ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n))) 224 + #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new)) 225 225 226 226 #endif /* !CONFIG_64BIT */ 227 227
+6 -6
arch/parisc/include/asm/cmpxchg.h
··· 44 44 ** if (((unsigned long)p & 0xf) == 0) 45 45 ** return __ldcw(p); 46 46 */ 47 - #define xchg(ptr, x) \ 47 + #define arch_xchg(ptr, x) \ 48 48 ({ \ 49 49 __typeof__(*(ptr)) __ret; \ 50 50 __typeof__(*(ptr)) _x_ = (x); \ ··· 78 78 return old; 79 79 } 80 80 81 - #define cmpxchg(ptr, o, n) \ 81 + #define arch_cmpxchg(ptr, o, n) \ 82 82 ({ \ 83 83 __typeof__(*(ptr)) _o_ = (o); \ 84 84 __typeof__(*(ptr)) _n_ = (n); \ ··· 106 106 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make 107 107 * them available. 108 108 */ 109 - #define cmpxchg_local(ptr, o, n) \ 109 + #define arch_cmpxchg_local(ptr, o, n) \ 110 110 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ 111 111 (unsigned long)(n), sizeof(*(ptr)))) 112 112 #ifdef CONFIG_64BIT 113 - #define cmpxchg64_local(ptr, o, n) \ 113 + #define arch_cmpxchg64_local(ptr, o, n) \ 114 114 ({ \ 115 115 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 116 116 cmpxchg_local((ptr), (o), (n)); \ 117 117 }) 118 118 #else 119 - #define cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n)) 119 + #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n)) 120 120 #endif 121 121 122 - #define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n) 122 + #define arch_cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n) 123 123 124 124 #endif /* _ASM_PARISC_CMPXCHG_H_ */