Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/locking: Remove semicolon from "lock" prefix

Minimum version of binutils required to compile the kernel is 2.25.
This version correctly handles the "lock" prefix, so it is possible
to remove the semicolon, which was used to support ancient versions
of GNU as.

Due to the semicolon, the compiler considers "lock; insn" as two
separate instructions. Removing the semicolon makes asm length
calculations more accurate, consequently making scheduling and
inlining decisions of the compiler more accurate.

Removing the semicolon also enables assembler checks involving the
"lock" prefix. Trying to assemble e.g. "lock andl %eax, %ebx" results in:

Error: expecting lockable instruction after `lock'

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250228085149.2478245-1-ubizjak@gmail.com

Authored by Uros Bizjak and committed by Ingo Molnar
023f3290 337369f8

+16 -16
+1 -1
arch/x86/include/asm/alternative.h
··· 48 48 ".popsection\n" \ 49 49 "671:" 50 50 51 - #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " 51 + #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock " 52 52 53 53 #else /* ! CONFIG_SMP */ 54 54 #define LOCK_PREFIX_HERE ""
+4 -4
arch/x86/include/asm/barrier.h
··· 12 12 */ 13 13 14 14 #ifdef CONFIG_X86_32 15 - #define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \ 15 + #define mb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "mfence", \ 16 16 X86_FEATURE_XMM2) ::: "memory", "cc") 17 - #define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \ 17 + #define rmb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "lfence", \ 18 18 X86_FEATURE_XMM2) ::: "memory", "cc") 19 - #define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \ 19 + #define wmb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "sfence", \ 20 20 X86_FEATURE_XMM2) ::: "memory", "cc") 21 21 #else 22 22 #define __mb() asm volatile("mfence":::"memory") ··· 50 50 #define __dma_rmb() barrier() 51 51 #define __dma_wmb() barrier() 52 52 53 - #define __smp_mb() asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc") 53 + #define __smp_mb() asm volatile("lock addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc") 54 54 55 55 #define __smp_rmb() dma_rmb() 56 56 #define __smp_wmb() barrier()
+2 -2
arch/x86/include/asm/cmpxchg.h
··· 134 134 __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX) 135 135 136 136 #define __sync_cmpxchg(ptr, old, new, size) \ 137 - __raw_cmpxchg((ptr), (old), (new), (size), "lock; ") 137 + __raw_cmpxchg((ptr), (old), (new), (size), "lock ") 138 138 139 139 #define __cmpxchg_local(ptr, old, new, size) \ 140 140 __raw_cmpxchg((ptr), (old), (new), (size), "") ··· 222 222 __raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX) 223 223 224 224 #define __sync_try_cmpxchg(ptr, pold, new, size) \ 225 - __raw_try_cmpxchg((ptr), (pold), (new), (size), "lock; ") 225 + __raw_try_cmpxchg((ptr), (pold), (new), (size), "lock ") 226 226 227 227 #define __try_cmpxchg_local(ptr, pold, new, size) \ 228 228 __raw_try_cmpxchg((ptr), (pold), (new), (size), "")
+2 -2
arch/x86/include/asm/cmpxchg_32.h
··· 105 105 106 106 static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new) 107 107 { 108 - return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock; "); 108 + return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock "); 109 109 } 110 110 #define arch_cmpxchg64 arch_cmpxchg64 111 111 ··· 140 140 141 141 static __always_inline bool arch_try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new) 142 142 { 143 - return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock; "); 143 + return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock "); 144 144 } 145 145 #define arch_try_cmpxchg64 arch_try_cmpxchg64 146 146
+1 -1
arch/x86/include/asm/edac.h
··· 13 13 * are interrupt, DMA and SMP safe. 14 14 */ 15 15 for (i = 0; i < size / 4; i++, virt_addr++) 16 - asm volatile("lock; addl $0, %0"::"m" (*virt_addr)); 16 + asm volatile("lock addl $0, %0"::"m" (*virt_addr)); 17 17 } 18 18 19 19 #endif /* _ASM_X86_EDAC_H */
+6 -6
arch/x86/include/asm/sync_bitops.h
··· 31 31 */ 32 32 static inline void sync_set_bit(long nr, volatile unsigned long *addr) 33 33 { 34 - asm volatile("lock; " __ASM_SIZE(bts) " %1,%0" 34 + asm volatile("lock " __ASM_SIZE(bts) " %1,%0" 35 35 : "+m" (ADDR) 36 36 : "Ir" (nr) 37 37 : "memory"); ··· 49 49 */ 50 50 static inline void sync_clear_bit(long nr, volatile unsigned long *addr) 51 51 { 52 - asm volatile("lock; " __ASM_SIZE(btr) " %1,%0" 52 + asm volatile("lock " __ASM_SIZE(btr) " %1,%0" 53 53 : "+m" (ADDR) 54 54 : "Ir" (nr) 55 55 : "memory"); ··· 66 66 */ 67 67 static inline void sync_change_bit(long nr, volatile unsigned long *addr) 68 68 { 69 - asm volatile("lock; " __ASM_SIZE(btc) " %1,%0" 69 + asm volatile("lock " __ASM_SIZE(btc) " %1,%0" 70 70 : "+m" (ADDR) 71 71 : "Ir" (nr) 72 72 : "memory"); ··· 82 82 */ 83 83 static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr) 84 84 { 85 - return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr); 85 + return GEN_BINARY_RMWcc("lock " __ASM_SIZE(bts), *addr, c, "Ir", nr); 86 86 } 87 87 88 88 /** ··· 95 95 */ 96 96 static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr) 97 97 { 98 - return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr); 98 + return GEN_BINARY_RMWcc("lock " __ASM_SIZE(btr), *addr, c, "Ir", nr); 99 99 } 100 100 101 101 /** ··· 108 108 */ 109 109 static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr) 110 110 { 111 - return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr); 111 + return GEN_BINARY_RMWcc("lock " __ASM_SIZE(btc), *addr, c, "Ir", nr); 112 112 } 113 113 114 114 #define sync_test_bit(nr, addr) test_bit(nr, addr)