Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/asm: Modernize sync_bitops.h

Add missing instruction suffixes and use rmwcc.h, just as was (more or less)
recently done for bitops.h as well, see:

22636f8c9511: x86/asm: Add instruction suffixes to bitops
288e4521f0f6: x86/asm: 'Simplify' GEN_*_RMWcc() macros

No change in functionality intended.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/5C9B93870200007800222289@prv1-mh.provo.novell.com
[ Cleaned up the changelog a bit. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Jan Beulich; committed by Ingo Molnar.
547571b5 28e3ace7

+9 -22
arch/x86/include/asm/sync_bitops.h
··· 14 14 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). 15 15 */ 16 16 17 + #include <asm/rmwcc.h> 18 + 17 19 #define ADDR (*(volatile long *)addr) 18 20 19 21 /** ··· 31 29 */ 32 30 static inline void sync_set_bit(long nr, volatile unsigned long *addr) 33 31 { 34 - asm volatile("lock; bts %1,%0" 32 + asm volatile("lock; " __ASM_SIZE(bts) " %1,%0" 35 33 : "+m" (ADDR) 36 34 : "Ir" (nr) 37 35 : "memory"); ··· 49 47 */ 50 48 static inline void sync_clear_bit(long nr, volatile unsigned long *addr) 51 49 { 52 - asm volatile("lock; btr %1,%0" 50 + asm volatile("lock; " __ASM_SIZE(btr) " %1,%0" 53 51 : "+m" (ADDR) 54 52 : "Ir" (nr) 55 53 : "memory"); ··· 66 64 */ 67 65 static inline void sync_change_bit(long nr, volatile unsigned long *addr) 68 66 { 69 - asm volatile("lock; btc %1,%0" 67 + asm volatile("lock; " __ASM_SIZE(btc) " %1,%0" 70 68 : "+m" (ADDR) 71 69 : "Ir" (nr) 72 70 : "memory"); ··· 80 78 * This operation is atomic and cannot be reordered. 81 79 * It also implies a memory barrier. 
82 80 */ 83 - static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr) 81 + static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr) 84 82 { 85 - unsigned char oldbit; 86 - 87 - asm volatile("lock; bts %2,%1\n\tsetc %0" 88 - : "=qm" (oldbit), "+m" (ADDR) 89 - : "Ir" (nr) : "memory"); 90 - return oldbit; 83 + return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr); 91 84 } 92 85 93 86 /** ··· 95 98 */ 96 99 static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr) 97 100 { 98 - unsigned char oldbit; 99 - 100 - asm volatile("lock; btr %2,%1\n\tsetc %0" 101 - : "=qm" (oldbit), "+m" (ADDR) 102 - : "Ir" (nr) : "memory"); 103 - return oldbit; 101 + return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr); 104 102 } 105 103 106 104 /** ··· 108 116 */ 109 117 static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr) 110 118 { 111 - unsigned char oldbit; 112 - 113 - asm volatile("lock; btc %2,%1\n\tsetc %0" 114 - : "=qm" (oldbit), "+m" (ADDR) 115 - : "Ir" (nr) : "memory"); 116 - return oldbit; 119 + return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr); 117 120 } 118 121 119 122 #define sync_test_bit(nr, addr) test_bit(nr, addr)