Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Add qspinlock support

Enable qspinlock by the requirements mentioned in a8ad07e5240c9
("asm-generic: qspinlock: Indicate the use of mixed-size atomics").

C-SKY only has "ldex/stex" for all atomic operations, so csky gives a
strong forward-progress guarantee for "ldex/stex": when ldex grabs the
cache line into L1, it blocks other cores from snooping that address
for several cycles. atomic_fetch_add & xchg16 therefore have the same
forward-guarantee level on C-SKY.

Qspinlock has better code size and performance in the fast path.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>

Guo Ren 45e15c1a 4e8bb4ba

+44 -2
+1
arch/csky/Kconfig
··· 8 8 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 9 9 select ARCH_USE_BUILTIN_BSWAP 10 10 select ARCH_USE_QUEUED_RWLOCKS 11 + select ARCH_USE_QUEUED_SPINLOCKS 11 12 select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace) 12 13 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT 13 14 select COMMON_CLK
+2 -2
arch/csky/include/asm/Kbuild
··· 3 3 generic-y += extable.h 4 4 generic-y += gpio.h 5 5 generic-y += kvm_para.h 6 - generic-y += spinlock.h 7 - generic-y += spinlock_types.h 6 + generic-y += mcs_spinlock.h 8 7 generic-y += qrwlock.h 9 8 generic-y += qrwlock_types.h 9 + generic-y += qspinlock.h 10 10 generic-y += parport.h 11 11 generic-y += user.h 12 12 generic-y += vmlinux.lds.h
+20
arch/csky/include/asm/cmpxchg.h
··· 15 15 __typeof__(*(ptr)) __ret; \ 16 16 unsigned long tmp; \ 17 17 switch (size) { \ 18 + case 2: { \ 19 + u32 ret; \ 20 + u32 shif = ((ulong)__ptr & 2) ? 16 : 0; \ 21 + u32 mask = 0xffff << shif; \ 22 + __ptr = (__typeof__(ptr))((ulong)__ptr & ~2); \ 23 + __asm__ __volatile__ ( \ 24 + "1: ldex.w %0, (%4)\n" \ 25 + " and %1, %0, %2\n" \ 26 + " or %1, %1, %3\n" \ 27 + " stex.w %1, (%4)\n" \ 28 + " bez %1, 1b\n" \ 29 + : "=&r" (ret), "=&r" (tmp) \ 30 + : "r" (~mask), \ 31 + "r" ((u32)__new << shif), \ 32 + "r" (__ptr) \ 33 + : "memory"); \ 34 + __ret = (__typeof__(*(ptr))) \ 35 + ((ret & mask) >> shif); \ 36 + break; \ 37 + } \ 18 38 case 4: \ 19 39 asm volatile ( \ 20 40 "1: ldex.w %0, (%3) \n" \
+12
arch/csky/include/asm/spinlock.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_CSKY_SPINLOCK_H 4 + #define __ASM_CSKY_SPINLOCK_H 5 + 6 + #include <asm/qspinlock.h> 7 + #include <asm/qrwlock.h> 8 + 9 + /* See include/linux/spinlock.h */ 10 + #define smp_mb__after_spinlock() smp_mb() 11 + 12 + #endif /* __ASM_CSKY_SPINLOCK_H */
+9
arch/csky/include/asm/spinlock_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_CSKY_SPINLOCK_TYPES_H 4 + #define __ASM_CSKY_SPINLOCK_TYPES_H 5 + 6 + #include <asm-generic/qspinlock_types.h> 7 + #include <asm-generic/qrwlock_types.h> 8 + 9 + #endif /* __ASM_CSKY_SPINLOCK_TYPES_H */