Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Cleanup asm/spinlock.h

There are two implementations of spinlock in arch/csky:
- simple one (NR_CPU = 1,2)
- ticket-based one (NR_CPU = 3,4)
Remove the simple one.

There is already smp_mb in spinlock, so remove the definition of
smp_mb__after_spinlock.

Link: https://lore.kernel.org/linux-csky/20200807081253.GD2674@hirez.programming.kicks-ass.net/#t
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>

Guo Ren 8e35ac73 c38425df

+1 -178
+1 -1
arch/csky/Kconfig
··· 7 7 select ARCH_HAS_SYNC_DMA_FOR_CPU 8 8 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 9 9 select ARCH_USE_BUILTIN_BSWAP 10 - select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2 10 + select ARCH_USE_QUEUED_RWLOCKS 11 11 select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 12 12 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT 13 13 select COMMON_CLK
-167
arch/csky/include/asm/spinlock.h
··· 6 6 #include <linux/spinlock_types.h> 7 7 #include <asm/barrier.h> 8 8 9 - #ifdef CONFIG_QUEUED_RWLOCKS 10 - 11 9 /* 12 10 * Ticket-based spin-locking. 13 11 */ ··· 86 88 87 89 #include <asm/qrwlock.h> 88 90 89 - /* See include/linux/spinlock.h */ 90 - #define smp_mb__after_spinlock() smp_mb() 91 - 92 - #else /* CONFIG_QUEUED_RWLOCKS */ 93 - 94 - /* 95 - * Test-and-set spin-locking. 96 - */ 97 - static inline void arch_spin_lock(arch_spinlock_t *lock) 98 - { 99 - u32 *p = &lock->lock; 100 - u32 tmp; 101 - 102 - asm volatile ( 103 - "1: ldex.w %0, (%1) \n" 104 - " bnez %0, 1b \n" 105 - " movi %0, 1 \n" 106 - " stex.w %0, (%1) \n" 107 - " bez %0, 1b \n" 108 - : "=&r" (tmp) 109 - : "r"(p) 110 - : "cc"); 111 - smp_mb(); 112 - } 113 - 114 - static inline void arch_spin_unlock(arch_spinlock_t *lock) 115 - { 116 - smp_mb(); 117 - WRITE_ONCE(lock->lock, 0); 118 - } 119 - 120 - static inline int arch_spin_trylock(arch_spinlock_t *lock) 121 - { 122 - u32 *p = &lock->lock; 123 - u32 tmp; 124 - 125 - asm volatile ( 126 - "1: ldex.w %0, (%1) \n" 127 - " bnez %0, 2f \n" 128 - " movi %0, 1 \n" 129 - " stex.w %0, (%1) \n" 130 - " bez %0, 1b \n" 131 - " movi %0, 0 \n" 132 - "2: \n" 133 - : "=&r" (tmp) 134 - : "r"(p) 135 - : "cc"); 136 - 137 - if (!tmp) 138 - smp_mb(); 139 - 140 - return !tmp; 141 - } 142 - 143 - #define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0) 144 - 145 - /* 146 - * read lock/unlock/trylock 147 - */ 148 - static inline void arch_read_lock(arch_rwlock_t *lock) 149 - { 150 - u32 *p = &lock->lock; 151 - u32 tmp; 152 - 153 - asm volatile ( 154 - "1: ldex.w %0, (%1) \n" 155 - " blz %0, 1b \n" 156 - " addi %0, 1 \n" 157 - " stex.w %0, (%1) \n" 158 - " bez %0, 1b \n" 159 - : "=&r" (tmp) 160 - : "r"(p) 161 - : "cc"); 162 - smp_mb(); 163 - } 164 - 165 - static inline void arch_read_unlock(arch_rwlock_t *lock) 166 - { 167 - u32 *p = &lock->lock; 168 - u32 tmp; 169 - 170 - smp_mb(); 171 - asm volatile ( 172 - "1: ldex.w %0, (%1) \n" 173 - " subi %0, 1 \n" 174 - 
" stex.w %0, (%1) \n" 175 - " bez %0, 1b \n" 176 - : "=&r" (tmp) 177 - : "r"(p) 178 - : "cc"); 179 - } 180 - 181 - static inline int arch_read_trylock(arch_rwlock_t *lock) 182 - { 183 - u32 *p = &lock->lock; 184 - u32 tmp; 185 - 186 - asm volatile ( 187 - "1: ldex.w %0, (%1) \n" 188 - " blz %0, 2f \n" 189 - " addi %0, 1 \n" 190 - " stex.w %0, (%1) \n" 191 - " bez %0, 1b \n" 192 - " movi %0, 0 \n" 193 - "2: \n" 194 - : "=&r" (tmp) 195 - : "r"(p) 196 - : "cc"); 197 - 198 - if (!tmp) 199 - smp_mb(); 200 - 201 - return !tmp; 202 - } 203 - 204 - /* 205 - * write lock/unlock/trylock 206 - */ 207 - static inline void arch_write_lock(arch_rwlock_t *lock) 208 - { 209 - u32 *p = &lock->lock; 210 - u32 tmp; 211 - 212 - asm volatile ( 213 - "1: ldex.w %0, (%1) \n" 214 - " bnez %0, 1b \n" 215 - " subi %0, 1 \n" 216 - " stex.w %0, (%1) \n" 217 - " bez %0, 1b \n" 218 - : "=&r" (tmp) 219 - : "r"(p) 220 - : "cc"); 221 - smp_mb(); 222 - } 223 - 224 - static inline void arch_write_unlock(arch_rwlock_t *lock) 225 - { 226 - smp_mb(); 227 - WRITE_ONCE(lock->lock, 0); 228 - } 229 - 230 - static inline int arch_write_trylock(arch_rwlock_t *lock) 231 - { 232 - u32 *p = &lock->lock; 233 - u32 tmp; 234 - 235 - asm volatile ( 236 - "1: ldex.w %0, (%1) \n" 237 - " bnez %0, 2f \n" 238 - " subi %0, 1 \n" 239 - " stex.w %0, (%1) \n" 240 - " bez %0, 1b \n" 241 - " movi %0, 0 \n" 242 - "2: \n" 243 - : "=&r" (tmp) 244 - : "r"(p) 245 - : "cc"); 246 - 247 - if (!tmp) 248 - smp_mb(); 249 - 250 - return !tmp; 251 - } 252 - 253 - #endif /* CONFIG_QUEUED_RWLOCKS */ 254 91 #endif /* __ASM_CSKY_SPINLOCK_H */
-10
arch/csky/include/asm/spinlock_types.h
··· 22 22 23 23 #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } 24 24 25 - #ifdef CONFIG_QUEUED_RWLOCKS 26 25 #include <asm-generic/qrwlock_types.h> 27 26 28 - #else /* CONFIG_NR_CPUS > 2 */ 29 - 30 - typedef struct { 31 - u32 lock; 32 - } arch_rwlock_t; 33 - 34 - #define __ARCH_RW_LOCK_UNLOCKED { 0 } 35 - 36 - #endif /* CONFIG_QUEUED_RWLOCKS */ 37 27 #endif /* __ASM_CSKY_SPINLOCK_TYPES_H */