Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

x86, cmpxchg: Unify cmpxchg into cmpxchg.h

Everything that's actually common between the 32-bit and 64-bit headers
is moved into cmpxchg.h.

xchg/cmpxchg will fail with a link error if they're passed an
unsupported size (which includes 64-bit args on 32-bit systems).
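
For readers unfamiliar with the trick: sizeof() is a compile-time constant, so the optimizer keeps only the matching switch arm; an unsupported size leaves behind a call to a deliberately undefined extern, which the linker then rejects. Below is a minimal standalone sketch of the same pattern, not from this patch: do_swap() and __swap_wrong_size() are hypothetical names, a GCC builtin stands in for the inline asm, and, like the kernel's own version, it relies on the optimizer discarding dead arms (build with -O1 or higher).

/* Standalone sketch of the link-time size check (not kernel code). */
extern void __swap_wrong_size(void);	/* deliberately never defined */

#define do_swap(ptr, val) \
({ \
	__typeof__(*(ptr)) __v = (val); \
	switch (sizeof(*(ptr))) { \
	case 4: \
		/* supported size: the real work happens here */ \
		__v = __sync_lock_test_and_set((ptr), __v); \
		break; \
	default: \
		/* unsupported size: leaves an unresolved symbol */ \
		__swap_wrong_size(); \
	} \
	__v; \
})

int main(void)
{
	int ok = 1;
	do_swap(&ok, 2);	/* sizeof == 4: compiles and links */
	/*
	 * long long bad = 1;
	 * do_swap(&bad, 2);	-- would fail only at link time
	 */
	return 0;
}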

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

authored by Jeremy Fitzhardinge, committed by H. Peter Anvin
e9826380 00a41546

+155 -244

arch/x86/include/asm/cmpxchg.h  +155
+#ifndef ASM_X86_CMPXCHG_H
+#define ASM_X86_CMPXCHG_H
+
+#include <asm/alternative.h> /* Provides LOCK_PREFIX */
+
+/* Non-existent functions to indicate usage errors at link time. */
+extern void __xchg_wrong_size(void);
+extern void __cmpxchg_wrong_size(void);
+
+/*
+ * Constants for operation sizes. On 32-bit, the 64-bit size is set to
+ * -1 because sizeof will never return -1, thereby making those switch
+ * case statements guaranteed dead code which the compiler will
+ * eliminate, and allowing the "missing symbol in the default case" to
+ * indicate a usage error.
+ */
+#define __X86_CASE_B	1
+#define __X86_CASE_W	2
+#define __X86_CASE_L	4
+#ifdef CONFIG_64BIT
+#define __X86_CASE_Q	8
+#else
+#define __X86_CASE_Q	-1	/* sizeof will never return -1 */
+#endif
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
+ */
+#define __xchg(x, ptr, size) \
+({ \
+	__typeof(*(ptr)) __x = (x); \
+	switch (size) { \
+	case __X86_CASE_B: \
+	{ \
+		volatile u8 *__ptr = (volatile u8 *)(ptr); \
+		asm volatile("xchgb %0,%1" \
+			     : "=q" (__x), "+m" (*__ptr) \
+			     : "0" (__x) \
+			     : "memory"); \
+		break; \
+	} \
+	case __X86_CASE_W: \
+	{ \
+		volatile u16 *__ptr = (volatile u16 *)(ptr); \
+		asm volatile("xchgw %0,%1" \
+			     : "=r" (__x), "+m" (*__ptr) \
+			     : "0" (__x) \
+			     : "memory"); \
+		break; \
+	} \
+	case __X86_CASE_L: \
+	{ \
+		volatile u32 *__ptr = (volatile u32 *)(ptr); \
+		asm volatile("xchgl %0,%1" \
+			     : "=r" (__x), "+m" (*__ptr) \
+			     : "0" (__x) \
+			     : "memory"); \
+		break; \
+	} \
+	case __X86_CASE_Q: \
+	{ \
+		volatile u64 *__ptr = (volatile u64 *)(ptr); \
+		asm volatile("xchgq %0,%1" \
+			     : "=r" (__x), "+m" (*__ptr) \
+			     : "0" (__x) \
+			     : "memory"); \
+		break; \
+	} \
+	default: \
+		__xchg_wrong_size(); \
+	} \
+	__x; \
+})
+
+#define xchg(ptr, v) \
+	__xchg((v), (ptr), sizeof(*ptr))
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#define __raw_cmpxchg(ptr, old, new, size, lock) \
+({ \
+	__typeof__(*(ptr)) __ret; \
+	__typeof__(*(ptr)) __old = (old); \
+	__typeof__(*(ptr)) __new = (new); \
+	switch (size) { \
+	case __X86_CASE_B: \
+	{ \
+		volatile u8 *__ptr = (volatile u8 *)(ptr); \
+		asm volatile(lock "cmpxchgb %2,%1" \
+			     : "=a" (__ret), "+m" (*__ptr) \
+			     : "q" (__new), "0" (__old) \
+			     : "memory"); \
+		break; \
+	} \
+	case __X86_CASE_W: \
+	{ \
+		volatile u16 *__ptr = (volatile u16 *)(ptr); \
+		asm volatile(lock "cmpxchgw %2,%1" \
+			     : "=a" (__ret), "+m" (*__ptr) \
+			     : "r" (__new), "0" (__old) \
+			     : "memory"); \
+		break; \
+	} \
+	case __X86_CASE_L: \
+	{ \
+		volatile u32 *__ptr = (volatile u32 *)(ptr); \
+		asm volatile(lock "cmpxchgl %2,%1" \
+			     : "=a" (__ret), "+m" (*__ptr) \
+			     : "r" (__new), "0" (__old) \
+			     : "memory"); \
+		break; \
+	} \
+	case __X86_CASE_Q: \
+	{ \
+		volatile u64 *__ptr = (volatile u64 *)(ptr); \
+		asm volatile(lock "cmpxchgq %2,%1" \
+			     : "=a" (__ret), "+m" (*__ptr) \
+			     : "r" (__new), "0" (__old) \
+			     : "memory"); \
+		break; \
+	} \
+	default: \
+		__cmpxchg_wrong_size(); \
+	} \
+	__ret; \
+})
+
+#define __cmpxchg(ptr, old, new, size) \
+	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
+
+#define __sync_cmpxchg(ptr, old, new, size) \
+	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
+
+#define __cmpxchg_local(ptr, old, new, size) \
+	__raw_cmpxchg((ptr), (old), (new), (size), "")
+
 #ifdef CONFIG_X86_32
 # include "cmpxchg_32.h"
 #else
 # include "cmpxchg_64.h"
 #endif
+
+#ifdef __HAVE_ARCH_CMPXCHG
+#define cmpxchg(ptr, old, new) \
+	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define sync_cmpxchg(ptr, old, new) \
+	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define cmpxchg_local(ptr, old, new) \
+	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
+#endif
+
+#endif	/* ASM_X86_CMPXCHG_H */
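
To show how the unified macros are consumed, here is a sketch of the classic cmpxchg() retry loop in kernel style; my_counter_add() is an illustrative name, not part of this patch.

/* Illustrative cmpxchg() retry loop (sketch; not from this patch). */
static inline unsigned long my_counter_add(unsigned long *p, unsigned long n)
{
	unsigned long cur = *p, old;

	for (;;) {
		/* attempt to publish cur + n if *p is still cur */
		old = cmpxchg(p, cur, cur + n);
		if (old == cur)		/* nobody raced us: done */
			return cur + n;
		cur = old;		/* lost the race: retry */
	}
}

cmpxchg() returns the value it actually found at the location, so comparing the return against the expected value is how success is detected; that is exactly the convention documented in the header comment above.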
arch/x86/include/asm/cmpxchg_32.h  -113
 #ifndef _ASM_X86_CMPXCHG_32_H
 #define _ASM_X86_CMPXCHG_32_H
 
-#include <asm/alternative.h> /* Provides LOCK_PREFIX */
-
 /*
  * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
  * need to test for the feature in boot_cpu_data.
  */
-
-extern void __xchg_wrong_size(void);
-extern void __cmpxchg_wrong_size(void);
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
- * Since this is generally used to protect other memory information, we
- * use "asm volatile" and "memory" clobbers to prevent gcc from moving
- * information around.
- */
-#define __xchg(x, ptr, size) \
-({ \
-	__typeof(*(ptr)) __x = (x); \
-	switch (size) { \
-	case 1: \
-	{ \
-		volatile u8 *__ptr = (volatile u8 *)(ptr); \
-		asm volatile("xchgb %0,%1" \
-			     : "=q" (__x), "+m" (*__ptr) \
-			     : "0" (__x) \
-			     : "memory"); \
-		break; \
-	} \
-	case 2: \
-	{ \
-		volatile u16 *__ptr = (volatile u16 *)(ptr); \
-		asm volatile("xchgw %0,%1" \
-			     : "=r" (__x), "+m" (*__ptr) \
-			     : "0" (__x) \
-			     : "memory"); \
-		break; \
-	} \
-	case 4: \
-	{ \
-		volatile u32 *__ptr = (volatile u32 *)(ptr); \
-		asm volatile("xchgl %0,%1" \
-			     : "=r" (__x), "+m" (*__ptr) \
-			     : "0" (__x) \
-			     : "memory"); \
-		break; \
-	} \
-	default: \
-		__xchg_wrong_size(); \
-	} \
-	__x; \
-})
-
-#define xchg(ptr, v) \
-	__xchg((v), (ptr), sizeof(*ptr))
 
 /*
  * CMPXCHG8B only writes to the target if we had the previous
···
 		     : "memory");
 }
 
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-#define __raw_cmpxchg(ptr, old, new, size, lock) \
-({ \
-	__typeof__(*(ptr)) __ret; \
-	__typeof__(*(ptr)) __old = (old); \
-	__typeof__(*(ptr)) __new = (new); \
-	switch (size) { \
-	case 1: \
-	{ \
-		volatile u8 *__ptr = (volatile u8 *)(ptr); \
-		asm volatile(lock "cmpxchgb %2,%1" \
-			     : "=a" (__ret), "+m" (*__ptr) \
-			     : "q" (__new), "0" (__old) \
-			     : "memory"); \
-		break; \
-	} \
-	case 2: \
-	{ \
-		volatile u16 *__ptr = (volatile u16 *)(ptr); \
-		asm volatile(lock "cmpxchgw %2,%1" \
-			     : "=a" (__ret), "+m" (*__ptr) \
-			     : "r" (__new), "0" (__old) \
-			     : "memory"); \
-		break; \
-	} \
-	case 4: \
-	{ \
-		volatile u32 *__ptr = (volatile u32 *)(ptr); \
-		asm volatile(lock "cmpxchgl %2,%1" \
-			     : "=a" (__ret), "+m" (*__ptr) \
-			     : "r" (__new), "0" (__old) \
-			     : "memory"); \
-		break; \
-	} \
-	default: \
-		__cmpxchg_wrong_size(); \
-	} \
-	__ret; \
-})
-
-#define __cmpxchg(ptr, old, new, size) \
-	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
-
-#define __sync_cmpxchg(ptr, old, new, size) \
-	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
-
-#define __cmpxchg_local(ptr, old, new, size) \
-	__raw_cmpxchg((ptr), (old), (new), (size), "")
-
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-
-#define cmpxchg(ptr, old, new) \
-	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define sync_cmpxchg(ptr, old, new) \
-	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define cmpxchg_local(ptr, old, new) \
-	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64
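
The 32-bit header now keeps only what is genuinely 32-bit specific: the CMPXCHG8B-based 64-bit helpers. As the retained comment warns, those must be feature-tested before use, since CMPXCHG8B is absent on 486-class CPUs. A hedged sketch of the kind of guard a caller would use; my_cmpxchg64_checked() is a made-up name, and the exact feature-test macro varies by kernel version.

/*
 * Illustrative feature guard for the CMPXCHG8B-based helpers on 32-bit
 * (sketch only; follows the retained comment's advice to feature-test).
 */
#include <asm/cpufeature.h>

static inline u64 my_cmpxchg64_checked(volatile u64 *p, u64 old, u64 new)
{
	/* CMPXCHG8B is missing on 486-class CPUs: test before use */
	BUG_ON(!boot_cpu_has(X86_FEATURE_CX8));
	return cmpxchg64(p, old, new);
}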
arch/x86/include/asm/cmpxchg_64.h  -131
 #ifndef _ASM_X86_CMPXCHG_64_H
 #define _ASM_X86_CMPXCHG_64_H
 
-#include <asm/alternative.h> /* Provides LOCK_PREFIX */
-
-extern void __xchg_wrong_size(void);
-extern void __cmpxchg_wrong_size(void);
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
- * Since this is generally used to protect other memory information, we
- * use "asm volatile" and "memory" clobbers to prevent gcc from moving
- * information around.
- */
-#define __xchg(x, ptr, size) \
-({ \
-	__typeof(*(ptr)) __x = (x); \
-	switch (size) { \
-	case 1: \
-	{ \
-		volatile u8 *__ptr = (volatile u8 *)(ptr); \
-		asm volatile("xchgb %0,%1" \
-			     : "=q" (__x), "+m" (*__ptr) \
-			     : "0" (__x) \
-			     : "memory"); \
-		break; \
-	} \
-	case 2: \
-	{ \
-		volatile u16 *__ptr = (volatile u16 *)(ptr); \
-		asm volatile("xchgw %0,%1" \
-			     : "=r" (__x), "+m" (*__ptr) \
-			     : "0" (__x) \
-			     : "memory"); \
-		break; \
-	} \
-	case 4: \
-	{ \
-		volatile u32 *__ptr = (volatile u32 *)(ptr); \
-		asm volatile("xchgl %0,%1" \
-			     : "=r" (__x), "+m" (*__ptr) \
-			     : "0" (__x) \
-			     : "memory"); \
-		break; \
-	} \
-	case 8: \
-	{ \
-		volatile u64 *__ptr = (volatile u64 *)(ptr); \
-		asm volatile("xchgq %0,%1" \
-			     : "=r" (__x), "+m" (*__ptr) \
-			     : "0" (__x) \
-			     : "memory"); \
-		break; \
-	} \
-	default: \
-		__xchg_wrong_size(); \
-	} \
-	__x; \
-})
-
-#define xchg(ptr, v) \
-	__xchg((v), (ptr), sizeof(*ptr))
-
 static inline void set_64bit(volatile u64 *ptr, u64 val)
 {
 	*ptr = val;
 }
 
 #define __HAVE_ARCH_CMPXCHG 1
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-#define __raw_cmpxchg(ptr, old, new, size, lock) \
-({ \
-	__typeof__(*(ptr)) __ret; \
-	__typeof__(*(ptr)) __old = (old); \
-	__typeof__(*(ptr)) __new = (new); \
-	switch (size) { \
-	case 1: \
-	{ \
-		volatile u8 *__ptr = (volatile u8 *)(ptr); \
-		asm volatile(lock "cmpxchgb %2,%1" \
-			     : "=a" (__ret), "+m" (*__ptr) \
-			     : "q" (__new), "0" (__old) \
-			     : "memory"); \
-		break; \
-	} \
-	case 2: \
-	{ \
-		volatile u16 *__ptr = (volatile u16 *)(ptr); \
-		asm volatile(lock "cmpxchgw %2,%1" \
-			     : "=a" (__ret), "+m" (*__ptr) \
-			     : "r" (__new), "0" (__old) \
-			     : "memory"); \
-		break; \
-	} \
-	case 4: \
-	{ \
-		volatile u32 *__ptr = (volatile u32 *)(ptr); \
-		asm volatile(lock "cmpxchgl %2,%1" \
-			     : "=a" (__ret), "+m" (*__ptr) \
-			     : "r" (__new), "0" (__old) \
-			     : "memory"); \
-		break; \
-	} \
-	case 8: \
-	{ \
-		volatile u64 *__ptr = (volatile u64 *)(ptr); \
-		asm volatile(lock "cmpxchgq %2,%1" \
-			     : "=a" (__ret), "+m" (*__ptr) \
-			     : "r" (__new), "0" (__old) \
-			     : "memory"); \
-		break; \
-	} \
-	default: \
-		__cmpxchg_wrong_size(); \
-	} \
-	__ret; \
-})
-
-#define __cmpxchg(ptr, old, new, size) \
-	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
-
-#define __sync_cmpxchg(ptr, old, new, size) \
-	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
-
-#define __cmpxchg_local(ptr, old, new, size) \
-	__raw_cmpxchg((ptr), (old), (new), (size), "")
-
-#define cmpxchg(ptr, old, new) \
-	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define sync_cmpxchg(ptr, old, new) \
-	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define cmpxchg_local(ptr, old, new) \
-	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 
 #define cmpxchg64(ptr, o, n) \
 ({ \
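
A note on the three lock flavours the files now share via cmpxchg.h: __cmpxchg() uses LOCK_PREFIX, which the alternatives machinery patches to a no-op on uniprocessor kernels; __sync_cmpxchg() hard-codes "lock" for memory that must stay atomic even on a UP build, typically because it is shared with a hypervisor on a machine that may really be SMP; __cmpxchg_local() omits the prefix because it only needs atomicity against the local CPU. A hedged sketch of choosing between the wrappers; both function names are illustrative, not from this patch.

/* Sketch: picking a cmpxchg flavour (illustrative names only). */
static inline int claim_shared_slot(u32 *slot, u32 me)
{
	/* Contended by other CPUs: take the (patchable) LOCK prefix. */
	return cmpxchg(slot, 0, me) == 0;
}

static inline int claim_percpu_slot(u32 *slot, u32 me)
{
	/*
	 * Only ever touched by the local CPU (e.g. per-CPU data with
	 * preemption disabled): the unlocked form suffices.
	 */
	return cmpxchg_local(slot, 0, me) == 0;
}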