Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/atomic: arm: fix sync ops

The sync_*() ops on arch/arm are defined in terms of the regular bitops
with no special handling. This is not correct, as UP kernels elide
barriers for the fully-ordered operations, and so the required ordering
is lost when such UP kernels are run under a hypervisor on an SMP
system.

Fix this by defining sync ops with the required barriers.

Note: On 32-bit arm, the sync_*() ops are currently only used by Xen,
which requires ARMv7, but the semantics can be implemented for ARMv6+.

Fixes: e54d2f61528165bb ("xen/arm: sync_bitops")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-2-mark.rutland@arm.com

authored by

Mark Rutland and committed by
Peter Zijlstra
dda5f312 497cc42b

+65 -7
+17
arch/arm/include/asm/assembler.h
··· 394 394 #endif 395 395 .endm 396 396 397 + /* 398 + * Raw SMP data memory barrier 399 + */ 400 + .macro __smp_dmb mode 401 + #if __LINUX_ARM_ARCH__ >= 7 402 + .ifeqs "\mode","arm" 403 + dmb ish 404 + .else 405 + W(dmb) ish 406 + .endif 407 + #elif __LINUX_ARM_ARCH__ == 6 408 + mcr p15, 0, r0, c7, c10, 5 @ dmb 409 + #else 410 + .error "Incompatible SMP platform" 411 + #endif 412 + .endm 413 + 397 414 #if defined(CONFIG_CPU_V7M) 398 415 /* 399 416 * setmode is used to assert to be in svc mode during boot. For v7-M
+25 -4
arch/arm/include/asm/sync_bitops.h
··· 14 14 * ops which are SMP safe even on a UP kernel. 15 15 */ 16 16 17 + /* 18 + * Unordered 19 + */ 20 + 17 21 #define sync_set_bit(nr, p) _set_bit(nr, p) 18 22 #define sync_clear_bit(nr, p) _clear_bit(nr, p) 19 23 #define sync_change_bit(nr, p) _change_bit(nr, p) 20 - #define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p) 21 - #define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p) 22 - #define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p) 23 24 #define sync_test_bit(nr, addr) test_bit(nr, addr) 24 - #define arch_sync_cmpxchg arch_cmpxchg 25 25 26 + /* 27 + * Fully ordered 28 + */ 29 + 30 + int _sync_test_and_set_bit(int nr, volatile unsigned long * p); 31 + #define sync_test_and_set_bit(nr, p) _sync_test_and_set_bit(nr, p) 32 + 33 + int _sync_test_and_clear_bit(int nr, volatile unsigned long * p); 34 + #define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p) 35 + 36 + int _sync_test_and_change_bit(int nr, volatile unsigned long * p); 37 + #define sync_test_and_change_bit(nr, p) _sync_test_and_change_bit(nr, p) 38 + 39 + #define arch_sync_cmpxchg(ptr, old, new) \ 40 + ({ \ 41 + __typeof__(*(ptr)) __ret; \ 42 + __smp_mb__before_atomic(); \ 43 + __ret = arch_cmpxchg_relaxed((ptr), (old), (new)); \ 44 + __smp_mb__after_atomic(); \ 45 + __ret; \ 46 + }) 26 47 27 48 #endif
+11 -3
arch/arm/lib/bitops.h
··· 28 28 ENDPROC(\name ) 29 29 .endm 30 30 31 - .macro testop, name, instr, store 31 + .macro __testop, name, instr, store, barrier 32 32 ENTRY( \name ) 33 33 UNWIND( .fnstart ) 34 34 ands ip, r1, #3 ··· 38 38 mov r0, r0, lsr #5 39 39 add r1, r1, r0, lsl #2 @ Get word offset 40 40 mov r3, r2, lsl r3 @ create mask 41 - smp_dmb 41 + \barrier 42 42 #if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP) 43 43 .arch_extension mp 44 44 ALT_SMP(W(pldw) [r1]) ··· 50 50 strex ip, r2, [r1] 51 51 cmp ip, #0 52 52 bne 1b 53 - smp_dmb 53 + \barrier 54 54 cmp r0, #0 55 55 movne r0, #1 56 56 2: bx lr 57 57 UNWIND( .fnend ) 58 58 ENDPROC(\name ) 59 + .endm 60 + 61 + .macro testop, name, instr, store 62 + __testop \name, \instr, \store, smp_dmb 63 + .endm 64 + 65 + .macro sync_testop, name, instr, store 66 + __testop \name, \instr, \store, __smp_dmb 59 67 .endm 60 68 #else 61 69 .macro bitop, name, instr
+4
arch/arm/lib/testchangebit.S
··· 10 10 .text 11 11 12 12 testop _test_and_change_bit, eor, str 13 + 14 + #if __LINUX_ARM_ARCH__ >= 6 15 + sync_testop _sync_test_and_change_bit, eor, str 16 + #endif
+4
arch/arm/lib/testclearbit.S
··· 10 10 .text 11 11 12 12 testop _test_and_clear_bit, bicne, strne 13 + 14 + #if __LINUX_ARM_ARCH__ >= 6 15 + sync_testop _sync_test_and_clear_bit, bicne, strne 16 + #endif
+4
arch/arm/lib/testsetbit.S
··· 10 10 .text 11 11 12 12 testop _test_and_set_bit, orreq, streq 13 + 14 + #if __LINUX_ARM_ARCH__ >= 6 15 + sync_testop _sync_test_and_set_bit, orreq, streq 16 + #endif