Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch,x86: Convert smp_mb__*()

x86 is strongly ordered and all its atomic ops imply a full barrier.

Implement the two new primitives as the old ones were.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-knswsr5mldkr0w1lrdxvc81w@git.kernel.org
Cc: Dave Jones <davej@redhat.com>
Cc: Jesse Brandeburg <jesse.brandeburg@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Peter Zijlstra; committed by Ingo Molnar.
d00a5692 ce3609f9

+9 -12
+1 -6
arch/x86/include/asm/atomic.h
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -7,6 +7,7 @@
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
 #include <asm/rmwcc.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -243,12 +242,6 @@
 	asm volatile(LOCK_PREFIX "orl %0,%1" \
 		: : "r" ((unsigned)(mask)), "m" (*(addr)) \
 		: "memory")
-
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
 
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
+4
arch/x86/include/asm/barrier.h
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -137,6 +137,10 @@
 
 #endif
 
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
 /*
  * Stop RDTSC speculation. This is needed when you need to use RDTSC
  * (or get_cycles or vread that possibly accesses the TSC) in a defined
+2 -4
arch/x86/include/asm/bitops.h
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h>
 #include <asm/rmwcc.h>
+#include <asm/barrier.h>
 
 #if BITS_PER_LONG == 32
 # define _BITOPS_LONG_SHIFT 5
@@ -103,7 +102,7 @@
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __always_inline void
@@ -156,10 +155,6 @@
 	barrier();
 	__clear_bit(nr, addr);
 }
-
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
 
 /**
  * __change_bit - Toggle a bit in memory
+1 -1
arch/x86/include/asm/sync_bitops.h
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -41,7 +41,7 @@
  *
  * sync_clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
+1 -1
arch/x86/kernel/apic/hw_nmi.c
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -57,7 +57,7 @@
 	}
 
 	clear_bit(0, &backtrace_flag);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static int __kprobes