Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/atomic/x86: Switch atomic.h to use atomic-instrumented.h

Add arch_ prefix to all atomic operations and include
<asm-generic/atomic-instrumented.h>. This will allow us
to add KASAN instrumentation to all atomic ops.

Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: kasan-dev@googlegroups.com
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/54f0eb64260b84199e538652e079a89b5423ad41.1517246437.git.dvyukov@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Dmitry Vyukov and committed by
Ingo Molnar
8bf705d1 b06ed71a

+194 -190
+52 -50
arch/x86/include/asm/atomic.h
··· 17 17 #define ATOMIC_INIT(i) { (i) } 18 18 19 19 /** 20 - * atomic_read - read atomic variable 20 + * arch_atomic_read - read atomic variable 21 21 * @v: pointer of type atomic_t 22 22 * 23 23 * Atomically reads the value of @v. 24 24 */ 25 - static __always_inline int atomic_read(const atomic_t *v) 25 + static __always_inline int arch_atomic_read(const atomic_t *v) 26 26 { 27 27 return READ_ONCE((v)->counter); 28 28 } 29 29 30 30 /** 31 - * atomic_set - set atomic variable 31 + * arch_atomic_set - set atomic variable 32 32 * @v: pointer of type atomic_t 33 33 * @i: required value 34 34 * 35 35 * Atomically sets the value of @v to @i. 36 36 */ 37 - static __always_inline void atomic_set(atomic_t *v, int i) 37 + static __always_inline void arch_atomic_set(atomic_t *v, int i) 38 38 { 39 39 WRITE_ONCE(v->counter, i); 40 40 } 41 41 42 42 /** 43 - * atomic_add - add integer to atomic variable 43 + * arch_atomic_add - add integer to atomic variable 44 44 * @i: integer value to add 45 45 * @v: pointer of type atomic_t 46 46 * 47 47 * Atomically adds @i to @v. 48 48 */ 49 - static __always_inline void atomic_add(int i, atomic_t *v) 49 + static __always_inline void arch_atomic_add(int i, atomic_t *v) 50 50 { 51 51 asm volatile(LOCK_PREFIX "addl %1,%0" 52 52 : "+m" (v->counter) ··· 54 54 } 55 55 56 56 /** 57 - * atomic_sub - subtract integer from atomic variable 57 + * arch_atomic_sub - subtract integer from atomic variable 58 58 * @i: integer value to subtract 59 59 * @v: pointer of type atomic_t 60 60 * 61 61 * Atomically subtracts @i from @v. 
62 62 */ 63 - static __always_inline void atomic_sub(int i, atomic_t *v) 63 + static __always_inline void arch_atomic_sub(int i, atomic_t *v) 64 64 { 65 65 asm volatile(LOCK_PREFIX "subl %1,%0" 66 66 : "+m" (v->counter) ··· 68 68 } 69 69 70 70 /** 71 - * atomic_sub_and_test - subtract value from variable and test result 71 + * arch_atomic_sub_and_test - subtract value from variable and test result 72 72 * @i: integer value to subtract 73 73 * @v: pointer of type atomic_t 74 74 * ··· 76 76 * true if the result is zero, or false for all 77 77 * other cases. 78 78 */ 79 - static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) 79 + static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) 80 80 { 81 81 GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); 82 82 } 83 83 84 84 /** 85 - * atomic_inc - increment atomic variable 85 + * arch_atomic_inc - increment atomic variable 86 86 * @v: pointer of type atomic_t 87 87 * 88 88 * Atomically increments @v by 1. 89 89 */ 90 - static __always_inline void atomic_inc(atomic_t *v) 90 + static __always_inline void arch_atomic_inc(atomic_t *v) 91 91 { 92 92 asm volatile(LOCK_PREFIX "incl %0" 93 93 : "+m" (v->counter)); 94 94 } 95 95 96 96 /** 97 - * atomic_dec - decrement atomic variable 97 + * arch_atomic_dec - decrement atomic variable 98 98 * @v: pointer of type atomic_t 99 99 * 100 100 * Atomically decrements @v by 1. 101 101 */ 102 - static __always_inline void atomic_dec(atomic_t *v) 102 + static __always_inline void arch_atomic_dec(atomic_t *v) 103 103 { 104 104 asm volatile(LOCK_PREFIX "decl %0" 105 105 : "+m" (v->counter)); 106 106 } 107 107 108 108 /** 109 - * atomic_dec_and_test - decrement and test 109 + * arch_atomic_dec_and_test - decrement and test 110 110 * @v: pointer of type atomic_t 111 111 * 112 112 * Atomically decrements @v by 1 and 113 113 * returns true if the result is 0, or false for all other 114 114 * cases. 
115 115 */ 116 - static __always_inline bool atomic_dec_and_test(atomic_t *v) 116 + static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) 117 117 { 118 118 GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); 119 119 } 120 120 121 121 /** 122 - * atomic_inc_and_test - increment and test 122 + * arch_atomic_inc_and_test - increment and test 123 123 * @v: pointer of type atomic_t 124 124 * 125 125 * Atomically increments @v by 1 126 126 * and returns true if the result is zero, or false for all 127 127 * other cases. 128 128 */ 129 - static __always_inline bool atomic_inc_and_test(atomic_t *v) 129 + static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) 130 130 { 131 131 GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); 132 132 } 133 133 134 134 /** 135 - * atomic_add_negative - add and test if negative 135 + * arch_atomic_add_negative - add and test if negative 136 136 * @i: integer value to add 137 137 * @v: pointer of type atomic_t 138 138 * ··· 140 140 * if the result is negative, or false when 141 141 * result is greater than or equal to zero. 
142 142 */ 143 - static __always_inline bool atomic_add_negative(int i, atomic_t *v) 143 + static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v) 144 144 { 145 145 GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); 146 146 } 147 147 148 148 /** 149 - * atomic_add_return - add integer and return 149 + * arch_atomic_add_return - add integer and return 150 150 * @i: integer value to add 151 151 * @v: pointer of type atomic_t 152 152 * 153 153 * Atomically adds @i to @v and returns @i + @v 154 154 */ 155 - static __always_inline int atomic_add_return(int i, atomic_t *v) 155 + static __always_inline int arch_atomic_add_return(int i, atomic_t *v) 156 156 { 157 157 return i + xadd(&v->counter, i); 158 158 } 159 159 160 160 /** 161 - * atomic_sub_return - subtract integer and return 161 + * arch_atomic_sub_return - subtract integer and return 162 162 * @v: pointer of type atomic_t 163 163 * @i: integer value to subtract 164 164 * 165 165 * Atomically subtracts @i from @v and returns @v - @i 166 166 */ 167 - static __always_inline int atomic_sub_return(int i, atomic_t *v) 167 + static __always_inline int arch_atomic_sub_return(int i, atomic_t *v) 168 168 { 169 - return atomic_add_return(-i, v); 169 + return arch_atomic_add_return(-i, v); 170 170 } 171 171 172 - #define atomic_inc_return(v) (atomic_add_return(1, v)) 173 - #define atomic_dec_return(v) (atomic_sub_return(1, v)) 172 + #define arch_atomic_inc_return(v) (arch_atomic_add_return(1, v)) 173 + #define arch_atomic_dec_return(v) (arch_atomic_sub_return(1, v)) 174 174 175 - static __always_inline int atomic_fetch_add(int i, atomic_t *v) 175 + static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v) 176 176 { 177 177 return xadd(&v->counter, i); 178 178 } 179 179 180 - static __always_inline int atomic_fetch_sub(int i, atomic_t *v) 180 + static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v) 181 181 { 182 182 return xadd(&v->counter, -i); 183 183 } 184 184 185 
- static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) 185 + static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new) 186 186 { 187 - return cmpxchg(&v->counter, old, new); 187 + return arch_cmpxchg(&v->counter, old, new); 188 188 } 189 189 190 - #define atomic_try_cmpxchg atomic_try_cmpxchg 191 - static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) 190 + #define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg 191 + static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) 192 192 { 193 193 return try_cmpxchg(&v->counter, old, new); 194 194 } 195 195 196 - static inline int atomic_xchg(atomic_t *v, int new) 196 + static inline int arch_atomic_xchg(atomic_t *v, int new) 197 197 { 198 198 return xchg(&v->counter, new); 199 199 } 200 200 201 - static inline void atomic_and(int i, atomic_t *v) 201 + static inline void arch_atomic_and(int i, atomic_t *v) 202 202 { 203 203 asm volatile(LOCK_PREFIX "andl %1,%0" 204 204 : "+m" (v->counter) ··· 206 206 : "memory"); 207 207 } 208 208 209 - static inline int atomic_fetch_and(int i, atomic_t *v) 209 + static inline int arch_atomic_fetch_and(int i, atomic_t *v) 210 210 { 211 - int val = atomic_read(v); 211 + int val = arch_atomic_read(v); 212 212 213 - do { } while (!atomic_try_cmpxchg(v, &val, val & i)); 213 + do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i)); 214 214 215 215 return val; 216 216 } 217 217 218 - static inline void atomic_or(int i, atomic_t *v) 218 + static inline void arch_atomic_or(int i, atomic_t *v) 219 219 { 220 220 asm volatile(LOCK_PREFIX "orl %1,%0" 221 221 : "+m" (v->counter) ··· 223 223 : "memory"); 224 224 } 225 225 226 - static inline int atomic_fetch_or(int i, atomic_t *v) 226 + static inline int arch_atomic_fetch_or(int i, atomic_t *v) 227 227 { 228 - int val = atomic_read(v); 228 + int val = arch_atomic_read(v); 229 229 230 - do { } while (!atomic_try_cmpxchg(v, &val, val | i)); 230 + do { } while 
(!arch_atomic_try_cmpxchg(v, &val, val | i)); 231 231 232 232 return val; 233 233 } 234 234 235 - static inline void atomic_xor(int i, atomic_t *v) 235 + static inline void arch_atomic_xor(int i, atomic_t *v) 236 236 { 237 237 asm volatile(LOCK_PREFIX "xorl %1,%0" 238 238 : "+m" (v->counter) ··· 240 240 : "memory"); 241 241 } 242 242 243 - static inline int atomic_fetch_xor(int i, atomic_t *v) 243 + static inline int arch_atomic_fetch_xor(int i, atomic_t *v) 244 244 { 245 - int val = atomic_read(v); 245 + int val = arch_atomic_read(v); 246 246 247 - do { } while (!atomic_try_cmpxchg(v, &val, val ^ i)); 247 + do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i)); 248 248 249 249 return val; 250 250 } 251 251 252 252 /** 253 - * __atomic_add_unless - add unless the number is already a given value 253 + * __arch_atomic_add_unless - add unless the number is already a given value 254 254 * @v: pointer of type atomic_t 255 255 * @a: the amount to add to v... 256 256 * @u: ...unless v is equal to u. ··· 258 258 * Atomically adds @a to @v, so long as @v was not already @u. 259 259 * Returns the old value of @v. 260 260 */ 261 - static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) 261 + static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u) 262 262 { 263 - int c = atomic_read(v); 263 + int c = arch_atomic_read(v); 264 264 265 265 do { 266 266 if (unlikely(c == u)) 267 267 break; 268 - } while (!atomic_try_cmpxchg(v, &c, c + a)); 268 + } while (!arch_atomic_try_cmpxchg(v, &c, c + a)); 269 269 270 270 return c; 271 271 } ··· 275 275 #else 276 276 # include <asm/atomic64_64.h> 277 277 #endif 278 + 279 + #include <asm-generic/atomic-instrumented.h> 278 280 279 281 #endif /* _ASM_X86_ATOMIC_H */
+76 -74
arch/x86/include/asm/atomic64_32.h
··· 62 62 #undef ATOMIC64_EXPORT 63 63 64 64 /** 65 - * atomic64_cmpxchg - cmpxchg atomic64 variable 65 + * arch_atomic64_cmpxchg - cmpxchg atomic64 variable 66 66 * @v: pointer to type atomic64_t 67 67 * @o: expected value 68 68 * @n: new value ··· 71 71 * the old value. 72 72 */ 73 73 74 - static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) 74 + static inline long long arch_atomic64_cmpxchg(atomic64_t *v, long long o, 75 + long long n) 75 76 { 76 - return cmpxchg64(&v->counter, o, n); 77 + return arch_cmpxchg64(&v->counter, o, n); 77 78 } 78 79 79 80 /** 80 - * atomic64_xchg - xchg atomic64 variable 81 + * arch_atomic64_xchg - xchg atomic64 variable 81 82 * @v: pointer to type atomic64_t 82 83 * @n: value to assign 83 84 * 84 85 * Atomically xchgs the value of @v to @n and returns 85 86 * the old value. 86 87 */ 87 - static inline long long atomic64_xchg(atomic64_t *v, long long n) 88 + static inline long long arch_atomic64_xchg(atomic64_t *v, long long n) 88 89 { 89 90 long long o; 90 91 unsigned high = (unsigned)(n >> 32); ··· 97 96 } 98 97 99 98 /** 100 - * atomic64_set - set atomic64 variable 99 + * arch_atomic64_set - set atomic64 variable 101 100 * @v: pointer to type atomic64_t 102 101 * @i: value to assign 103 102 * 104 103 * Atomically sets the value of @v to @n. 105 104 */ 106 - static inline void atomic64_set(atomic64_t *v, long long i) 105 + static inline void arch_atomic64_set(atomic64_t *v, long long i) 107 106 { 108 107 unsigned high = (unsigned)(i >> 32); 109 108 unsigned low = (unsigned)i; ··· 113 112 } 114 113 115 114 /** 116 - * atomic64_read - read atomic64 variable 115 + * arch_atomic64_read - read atomic64 variable 117 116 * @v: pointer to type atomic64_t 118 117 * 119 118 * Atomically reads the value of @v and returns it. 
120 119 */ 121 - static inline long long atomic64_read(const atomic64_t *v) 120 + static inline long long arch_atomic64_read(const atomic64_t *v) 122 121 { 123 122 long long r; 124 123 alternative_atomic64(read, "=&A" (r), "c" (v) : "memory"); ··· 126 125 } 127 126 128 127 /** 129 - * atomic64_add_return - add and return 128 + * arch_atomic64_add_return - add and return 130 129 * @i: integer value to add 131 130 * @v: pointer to type atomic64_t 132 131 * 133 132 * Atomically adds @i to @v and returns @i + *@v 134 133 */ 135 - static inline long long atomic64_add_return(long long i, atomic64_t *v) 134 + static inline long long arch_atomic64_add_return(long long i, atomic64_t *v) 136 135 { 137 136 alternative_atomic64(add_return, 138 137 ASM_OUTPUT2("+A" (i), "+c" (v)), ··· 143 142 /* 144 143 * Other variants with different arithmetic operators: 145 144 */ 146 - static inline long long atomic64_sub_return(long long i, atomic64_t *v) 145 + static inline long long arch_atomic64_sub_return(long long i, atomic64_t *v) 147 146 { 148 147 alternative_atomic64(sub_return, 149 148 ASM_OUTPUT2("+A" (i), "+c" (v)), ··· 151 150 return i; 152 151 } 153 152 154 - static inline long long atomic64_inc_return(atomic64_t *v) 153 + static inline long long arch_atomic64_inc_return(atomic64_t *v) 155 154 { 156 155 long long a; 157 156 alternative_atomic64(inc_return, "=&A" (a), ··· 159 158 return a; 160 159 } 161 160 162 - static inline long long atomic64_dec_return(atomic64_t *v) 161 + static inline long long arch_atomic64_dec_return(atomic64_t *v) 163 162 { 164 163 long long a; 165 164 alternative_atomic64(dec_return, "=&A" (a), ··· 168 167 } 169 168 170 169 /** 171 - * atomic64_add - add integer to atomic64 variable 170 + * arch_atomic64_add - add integer to atomic64 variable 172 171 * @i: integer value to add 173 172 * @v: pointer to type atomic64_t 174 173 * 175 174 * Atomically adds @i to @v. 
176 175 */ 177 - static inline long long atomic64_add(long long i, atomic64_t *v) 176 + static inline long long arch_atomic64_add(long long i, atomic64_t *v) 178 177 { 179 178 __alternative_atomic64(add, add_return, 180 179 ASM_OUTPUT2("+A" (i), "+c" (v)), ··· 183 182 } 184 183 185 184 /** 186 - * atomic64_sub - subtract the atomic64 variable 185 + * arch_atomic64_sub - subtract the atomic64 variable 187 186 * @i: integer value to subtract 188 187 * @v: pointer to type atomic64_t 189 188 * 190 189 * Atomically subtracts @i from @v. 191 190 */ 192 - static inline long long atomic64_sub(long long i, atomic64_t *v) 191 + static inline long long arch_atomic64_sub(long long i, atomic64_t *v) 193 192 { 194 193 __alternative_atomic64(sub, sub_return, 195 194 ASM_OUTPUT2("+A" (i), "+c" (v)), ··· 198 197 } 199 198 200 199 /** 201 - * atomic64_sub_and_test - subtract value from variable and test result 200 + * arch_atomic64_sub_and_test - subtract value from variable and test result 202 201 * @i: integer value to subtract 203 202 * @v: pointer to type atomic64_t 204 203 * ··· 206 205 * true if the result is zero, or false for all 207 206 * other cases. 208 207 */ 209 - static inline int atomic64_sub_and_test(long long i, atomic64_t *v) 208 + static inline int arch_atomic64_sub_and_test(long long i, atomic64_t *v) 210 209 { 211 - return atomic64_sub_return(i, v) == 0; 210 + return arch_atomic64_sub_return(i, v) == 0; 212 211 } 213 212 214 213 /** 215 - * atomic64_inc - increment atomic64 variable 214 + * arch_atomic64_inc - increment atomic64 variable 216 215 * @v: pointer to type atomic64_t 217 216 * 218 217 * Atomically increments @v by 1. 
219 218 */ 220 - static inline void atomic64_inc(atomic64_t *v) 219 + static inline void arch_atomic64_inc(atomic64_t *v) 221 220 { 222 221 __alternative_atomic64(inc, inc_return, /* no output */, 223 222 "S" (v) : "memory", "eax", "ecx", "edx"); 224 223 } 225 224 226 225 /** 227 - * atomic64_dec - decrement atomic64 variable 226 + * arch_atomic64_dec - decrement atomic64 variable 228 227 * @v: pointer to type atomic64_t 229 228 * 230 229 * Atomically decrements @v by 1. 231 230 */ 232 - static inline void atomic64_dec(atomic64_t *v) 231 + static inline void arch_atomic64_dec(atomic64_t *v) 233 232 { 234 233 __alternative_atomic64(dec, dec_return, /* no output */, 235 234 "S" (v) : "memory", "eax", "ecx", "edx"); 236 235 } 237 236 238 237 /** 239 - * atomic64_dec_and_test - decrement and test 238 + * arch_atomic64_dec_and_test - decrement and test 240 239 * @v: pointer to type atomic64_t 241 240 * 242 241 * Atomically decrements @v by 1 and 243 242 * returns true if the result is 0, or false for all other 244 243 * cases. 245 244 */ 246 - static inline int atomic64_dec_and_test(atomic64_t *v) 245 + static inline int arch_atomic64_dec_and_test(atomic64_t *v) 247 246 { 248 - return atomic64_dec_return(v) == 0; 247 + return arch_atomic64_dec_return(v) == 0; 249 248 } 250 249 251 250 /** ··· 256 255 * and returns true if the result is zero, or false for all 257 256 * other cases. 258 257 */ 259 - static inline int atomic64_inc_and_test(atomic64_t *v) 258 + static inline int arch_atomic64_inc_and_test(atomic64_t *v) 260 259 { 261 - return atomic64_inc_return(v) == 0; 260 + return arch_atomic64_inc_return(v) == 0; 262 261 } 263 262 264 263 /** 265 - * atomic64_add_negative - add and test if negative 264 + * arch_atomic64_add_negative - add and test if negative 266 265 * @i: integer value to add 267 266 * @v: pointer to type atomic64_t 268 267 * ··· 270 269 * if the result is negative, or false when 271 270 * result is greater than or equal to zero. 
272 271 */ 273 - static inline int atomic64_add_negative(long long i, atomic64_t *v) 272 + static inline int arch_atomic64_add_negative(long long i, atomic64_t *v) 274 273 { 275 - return atomic64_add_return(i, v) < 0; 274 + return arch_atomic64_add_return(i, v) < 0; 276 275 } 277 276 278 277 /** 279 - * atomic64_add_unless - add unless the number is a given value 278 + * arch_atomic64_add_unless - add unless the number is a given value 280 279 * @v: pointer of type atomic64_t 281 280 * @a: the amount to add to v... 282 281 * @u: ...unless v is equal to u. ··· 284 283 * Atomically adds @a to @v, so long as it was not @u. 285 284 * Returns non-zero if the add was done, zero otherwise. 286 285 */ 287 - static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) 286 + static inline int arch_atomic64_add_unless(atomic64_t *v, long long a, 287 + long long u) 288 288 { 289 289 unsigned low = (unsigned)u; 290 290 unsigned high = (unsigned)(u >> 32); ··· 296 294 } 297 295 298 296 299 - static inline int atomic64_inc_not_zero(atomic64_t *v) 297 + static inline int arch_atomic64_inc_not_zero(atomic64_t *v) 300 298 { 301 299 int r; 302 300 alternative_atomic64(inc_not_zero, "=&a" (r), ··· 304 302 return r; 305 303 } 306 304 307 - static inline long long atomic64_dec_if_positive(atomic64_t *v) 305 + static inline long long arch_atomic64_dec_if_positive(atomic64_t *v) 308 306 { 309 307 long long r; 310 308 alternative_atomic64(dec_if_positive, "=&A" (r), ··· 315 313 #undef alternative_atomic64 316 314 #undef __alternative_atomic64 317 315 318 - static inline void atomic64_and(long long i, atomic64_t *v) 316 + static inline void arch_atomic64_and(long long i, atomic64_t *v) 319 317 { 320 318 long long old, c = 0; 321 319 322 - while ((old = atomic64_cmpxchg(v, c, c & i)) != c) 320 + while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c) 323 321 c = old; 324 322 } 325 323 326 - static inline long long atomic64_fetch_and(long long i, atomic64_t *v) 324 + static 
inline long long arch_atomic64_fetch_and(long long i, atomic64_t *v) 327 325 { 328 326 long long old, c = 0; 329 327 330 - while ((old = atomic64_cmpxchg(v, c, c & i)) != c) 331 - c = old; 332 - 333 - return old; 334 - } 335 - 336 - static inline void atomic64_or(long long i, atomic64_t *v) 337 - { 338 - long long old, c = 0; 339 - 340 - while ((old = atomic64_cmpxchg(v, c, c | i)) != c) 341 - c = old; 342 - } 343 - 344 - static inline long long atomic64_fetch_or(long long i, atomic64_t *v) 345 - { 346 - long long old, c = 0; 347 - 348 - while ((old = atomic64_cmpxchg(v, c, c | i)) != c) 328 + while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c) 349 329 c = old; 350 330 351 331 return old; 352 332 } 353 333 354 - static inline void atomic64_xor(long long i, atomic64_t *v) 334 + static inline void arch_atomic64_or(long long i, atomic64_t *v) 355 335 { 356 336 long long old, c = 0; 357 337 358 - while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c) 338 + while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c) 359 339 c = old; 360 340 } 361 341 362 - static inline long long atomic64_fetch_xor(long long i, atomic64_t *v) 342 + static inline long long arch_atomic64_fetch_or(long long i, atomic64_t *v) 363 343 { 364 344 long long old, c = 0; 365 345 366 - while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c) 367 - c = old; 368 - 369 - return old; 370 - } 371 - 372 - static inline long long atomic64_fetch_add(long long i, atomic64_t *v) 373 - { 374 - long long old, c = 0; 375 - 376 - while ((old = atomic64_cmpxchg(v, c, c + i)) != c) 346 + while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c) 377 347 c = old; 378 348 379 349 return old; 380 350 } 381 351 382 - #define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v)) 352 + static inline void arch_atomic64_xor(long long i, atomic64_t *v) 353 + { 354 + long long old, c = 0; 355 + 356 + while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c) 357 + c = old; 358 + } 359 + 360 + static inline long long 
arch_atomic64_fetch_xor(long long i, atomic64_t *v) 361 + { 362 + long long old, c = 0; 363 + 364 + while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c) 365 + c = old; 366 + 367 + return old; 368 + } 369 + 370 + static inline long long arch_atomic64_fetch_add(long long i, atomic64_t *v) 371 + { 372 + long long old, c = 0; 373 + 374 + while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c) 375 + c = old; 376 + 377 + return old; 378 + } 379 + 380 + #define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), (v)) 383 381 384 382 #endif /* _ASM_X86_ATOMIC64_32_H */
+54 -54
arch/x86/include/asm/atomic64_64.h
··· 11 11 #define ATOMIC64_INIT(i) { (i) } 12 12 13 13 /** 14 - * atomic64_read - read atomic64 variable 14 + * arch_atomic64_read - read atomic64 variable 15 15 * @v: pointer of type atomic64_t 16 16 * 17 17 * Atomically reads the value of @v. 18 18 * Doesn't imply a read memory barrier. 19 19 */ 20 - static inline long atomic64_read(const atomic64_t *v) 20 + static inline long arch_atomic64_read(const atomic64_t *v) 21 21 { 22 22 return READ_ONCE((v)->counter); 23 23 } 24 24 25 25 /** 26 - * atomic64_set - set atomic64 variable 26 + * arch_atomic64_set - set atomic64 variable 27 27 * @v: pointer to type atomic64_t 28 28 * @i: required value 29 29 * 30 30 * Atomically sets the value of @v to @i. 31 31 */ 32 - static inline void atomic64_set(atomic64_t *v, long i) 32 + static inline void arch_atomic64_set(atomic64_t *v, long i) 33 33 { 34 34 WRITE_ONCE(v->counter, i); 35 35 } 36 36 37 37 /** 38 - * atomic64_add - add integer to atomic64 variable 38 + * arch_atomic64_add - add integer to atomic64 variable 39 39 * @i: integer value to add 40 40 * @v: pointer to type atomic64_t 41 41 * 42 42 * Atomically adds @i to @v. 43 43 */ 44 - static __always_inline void atomic64_add(long i, atomic64_t *v) 44 + static __always_inline void arch_atomic64_add(long i, atomic64_t *v) 45 45 { 46 46 asm volatile(LOCK_PREFIX "addq %1,%0" 47 47 : "=m" (v->counter) ··· 49 49 } 50 50 51 51 /** 52 - * atomic64_sub - subtract the atomic64 variable 52 + * arch_atomic64_sub - subtract the atomic64 variable 53 53 * @i: integer value to subtract 54 54 * @v: pointer to type atomic64_t 55 55 * 56 56 * Atomically subtracts @i from @v. 
57 57 */ 58 - static inline void atomic64_sub(long i, atomic64_t *v) 58 + static inline void arch_atomic64_sub(long i, atomic64_t *v) 59 59 { 60 60 asm volatile(LOCK_PREFIX "subq %1,%0" 61 61 : "=m" (v->counter) ··· 63 63 } 64 64 65 65 /** 66 - * atomic64_sub_and_test - subtract value from variable and test result 66 + * arch_atomic64_sub_and_test - subtract value from variable and test result 67 67 * @i: integer value to subtract 68 68 * @v: pointer to type atomic64_t 69 69 * ··· 71 71 * true if the result is zero, or false for all 72 72 * other cases. 73 73 */ 74 - static inline bool atomic64_sub_and_test(long i, atomic64_t *v) 74 + static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) 75 75 { 76 76 GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); 77 77 } 78 78 79 79 /** 80 - * atomic64_inc - increment atomic64 variable 80 + * arch_atomic64_inc - increment atomic64 variable 81 81 * @v: pointer to type atomic64_t 82 82 * 83 83 * Atomically increments @v by 1. 84 84 */ 85 - static __always_inline void atomic64_inc(atomic64_t *v) 85 + static __always_inline void arch_atomic64_inc(atomic64_t *v) 86 86 { 87 87 asm volatile(LOCK_PREFIX "incq %0" 88 88 : "=m" (v->counter) ··· 90 90 } 91 91 92 92 /** 93 - * atomic64_dec - decrement atomic64 variable 93 + * arch_atomic64_dec - decrement atomic64 variable 94 94 * @v: pointer to type atomic64_t 95 95 * 96 96 * Atomically decrements @v by 1. 97 97 */ 98 - static __always_inline void atomic64_dec(atomic64_t *v) 98 + static __always_inline void arch_atomic64_dec(atomic64_t *v) 99 99 { 100 100 asm volatile(LOCK_PREFIX "decq %0" 101 101 : "=m" (v->counter) ··· 103 103 } 104 104 105 105 /** 106 - * atomic64_dec_and_test - decrement and test 106 + * arch_atomic64_dec_and_test - decrement and test 107 107 * @v: pointer to type atomic64_t 108 108 * 109 109 * Atomically decrements @v by 1 and 110 110 * returns true if the result is 0, or false for all other 111 111 * cases. 
112 112 */ 113 - static inline bool atomic64_dec_and_test(atomic64_t *v) 113 + static inline bool arch_atomic64_dec_and_test(atomic64_t *v) 114 114 { 115 115 GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); 116 116 } 117 117 118 118 /** 119 - * atomic64_inc_and_test - increment and test 119 + * arch_atomic64_inc_and_test - increment and test 120 120 * @v: pointer to type atomic64_t 121 121 * 122 122 * Atomically increments @v by 1 123 123 * and returns true if the result is zero, or false for all 124 124 * other cases. 125 125 */ 126 - static inline bool atomic64_inc_and_test(atomic64_t *v) 126 + static inline bool arch_atomic64_inc_and_test(atomic64_t *v) 127 127 { 128 128 GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); 129 129 } 130 130 131 131 /** 132 - * atomic64_add_negative - add and test if negative 132 + * arch_atomic64_add_negative - add and test if negative 133 133 * @i: integer value to add 134 134 * @v: pointer to type atomic64_t 135 135 * ··· 137 137 * if the result is negative, or false when 138 138 * result is greater than or equal to zero. 
139 139 */ 140 - static inline bool atomic64_add_negative(long i, atomic64_t *v) 140 + static inline bool arch_atomic64_add_negative(long i, atomic64_t *v) 141 141 { 142 142 GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); 143 143 } 144 144 145 145 /** 146 - * atomic64_add_return - add and return 146 + * arch_atomic64_add_return - add and return 147 147 * @i: integer value to add 148 148 * @v: pointer to type atomic64_t 149 149 * 150 150 * Atomically adds @i to @v and returns @i + @v 151 151 */ 152 - static __always_inline long atomic64_add_return(long i, atomic64_t *v) 152 + static __always_inline long arch_atomic64_add_return(long i, atomic64_t *v) 153 153 { 154 154 return i + xadd(&v->counter, i); 155 155 } 156 156 157 - static inline long atomic64_sub_return(long i, atomic64_t *v) 157 + static inline long arch_atomic64_sub_return(long i, atomic64_t *v) 158 158 { 159 - return atomic64_add_return(-i, v); 159 + return arch_atomic64_add_return(-i, v); 160 160 } 161 161 162 - static inline long atomic64_fetch_add(long i, atomic64_t *v) 162 + static inline long arch_atomic64_fetch_add(long i, atomic64_t *v) 163 163 { 164 164 return xadd(&v->counter, i); 165 165 } 166 166 167 - static inline long atomic64_fetch_sub(long i, atomic64_t *v) 167 + static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v) 168 168 { 169 169 return xadd(&v->counter, -i); 170 170 } 171 171 172 - #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) 173 - #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) 172 + #define arch_atomic64_inc_return(v) (arch_atomic64_add_return(1, (v))) 173 + #define arch_atomic64_dec_return(v) (arch_atomic64_sub_return(1, (v))) 174 174 175 - static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) 175 + static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new) 176 176 { 177 - return cmpxchg(&v->counter, old, new); 177 + return arch_cmpxchg(&v->counter, old, new); 178 178 } 179 179 180 - 
#define atomic64_try_cmpxchg atomic64_try_cmpxchg 181 - static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new) 180 + #define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg 181 + static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new) 182 182 { 183 183 return try_cmpxchg(&v->counter, old, new); 184 184 } 185 185 186 - static inline long atomic64_xchg(atomic64_t *v, long new) 186 + static inline long arch_atomic64_xchg(atomic64_t *v, long new) 187 187 { 188 188 return xchg(&v->counter, new); 189 189 } 190 190 191 191 /** 192 - * atomic64_add_unless - add unless the number is a given value 192 + * arch_atomic64_add_unless - add unless the number is a given value 193 193 * @v: pointer of type atomic64_t 194 194 * @a: the amount to add to v... 195 195 * @u: ...unless v is equal to u. ··· 197 197 * Atomically adds @a to @v, so long as it was not @u. 198 198 * Returns the old value of @v. 199 199 */ 200 - static inline bool atomic64_add_unless(atomic64_t *v, long a, long u) 200 + static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u) 201 201 { 202 - s64 c = atomic64_read(v); 202 + s64 c = arch_atomic64_read(v); 203 203 do { 204 204 if (unlikely(c == u)) 205 205 return false; 206 - } while (!atomic64_try_cmpxchg(v, &c, c + a)); 206 + } while (!arch_atomic64_try_cmpxchg(v, &c, c + a)); 207 207 return true; 208 208 } 209 209 210 - #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 210 + #define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0) 211 211 212 212 /* 213 - * atomic64_dec_if_positive - decrement by 1 if old value positive 213 + * arch_atomic64_dec_if_positive - decrement by 1 if old value positive 214 214 * @v: pointer of type atomic_t 215 215 * 216 216 * The function returns the old value of *v minus 1, even if 217 217 * the atomic variable, v, was not decremented. 
218 218 */ 219 - static inline long atomic64_dec_if_positive(atomic64_t *v) 219 + static inline long arch_atomic64_dec_if_positive(atomic64_t *v) 220 220 { 221 - s64 dec, c = atomic64_read(v); 221 + s64 dec, c = arch_atomic64_read(v); 222 222 do { 223 223 dec = c - 1; 224 224 if (unlikely(dec < 0)) 225 225 break; 226 - } while (!atomic64_try_cmpxchg(v, &c, dec)); 226 + } while (!arch_atomic64_try_cmpxchg(v, &c, dec)); 227 227 return dec; 228 228 } 229 229 230 - static inline void atomic64_and(long i, atomic64_t *v) 230 + static inline void arch_atomic64_and(long i, atomic64_t *v) 231 231 { 232 232 asm volatile(LOCK_PREFIX "andq %1,%0" 233 233 : "+m" (v->counter) ··· 235 235 : "memory"); 236 236 } 237 237 238 - static inline long atomic64_fetch_and(long i, atomic64_t *v) 238 + static inline long arch_atomic64_fetch_and(long i, atomic64_t *v) 239 239 { 240 - s64 val = atomic64_read(v); 240 + s64 val = arch_atomic64_read(v); 241 241 242 242 do { 243 - } while (!atomic64_try_cmpxchg(v, &val, val & i)); 243 + } while (!arch_atomic64_try_cmpxchg(v, &val, val & i)); 244 244 return val; 245 245 } 246 246 247 - static inline void atomic64_or(long i, atomic64_t *v) 247 + static inline void arch_atomic64_or(long i, atomic64_t *v) 248 248 { 249 249 asm volatile(LOCK_PREFIX "orq %1,%0" 250 250 : "+m" (v->counter) ··· 252 252 : "memory"); 253 253 } 254 254 255 - static inline long atomic64_fetch_or(long i, atomic64_t *v) 255 + static inline long arch_atomic64_fetch_or(long i, atomic64_t *v) 256 256 { 257 - s64 val = atomic64_read(v); 257 + s64 val = arch_atomic64_read(v); 258 258 259 259 do { 260 - } while (!atomic64_try_cmpxchg(v, &val, val | i)); 260 + } while (!arch_atomic64_try_cmpxchg(v, &val, val | i)); 261 261 return val; 262 262 } 263 263 264 - static inline void atomic64_xor(long i, atomic64_t *v) 264 + static inline void arch_atomic64_xor(long i, atomic64_t *v) 265 265 { 266 266 asm volatile(LOCK_PREFIX "xorq %1,%0" 267 267 : "+m" (v->counter) ··· 269 269 : "memory"); 
270 270 } 271 271 272 - static inline long atomic64_fetch_xor(long i, atomic64_t *v) 272 + static inline long arch_atomic64_fetch_xor(long i, atomic64_t *v) 273 273 { 274 - s64 val = atomic64_read(v); 274 + s64 val = arch_atomic64_read(v); 275 275 276 276 do { 277 - } while (!atomic64_try_cmpxchg(v, &val, val ^ i)); 277 + } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i)); 278 278 return val; 279 279 } 280 280
+6 -6
arch/x86/include/asm/cmpxchg.h
··· 145 145 # include <asm/cmpxchg_64.h> 146 146 #endif 147 147 148 - #define cmpxchg(ptr, old, new) \ 148 + #define arch_cmpxchg(ptr, old, new) \ 149 149 __cmpxchg(ptr, old, new, sizeof(*(ptr))) 150 150 151 - #define sync_cmpxchg(ptr, old, new) \ 151 + #define arch_sync_cmpxchg(ptr, old, new) \ 152 152 __sync_cmpxchg(ptr, old, new, sizeof(*(ptr))) 153 153 154 - #define cmpxchg_local(ptr, old, new) \ 154 + #define arch_cmpxchg_local(ptr, old, new) \ 155 155 __cmpxchg_local(ptr, old, new, sizeof(*(ptr))) 156 156 157 157 ··· 221 221 #define __try_cmpxchg(ptr, pold, new, size) \ 222 222 __raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX) 223 223 224 - #define try_cmpxchg(ptr, pold, new) \ 224 + #define try_cmpxchg(ptr, pold, new) \ 225 225 __try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr))) 226 226 227 227 /* ··· 250 250 __ret; \ 251 251 }) 252 252 253 - #define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ 253 + #define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \ 254 254 __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2) 255 255 256 - #define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \ 256 + #define arch_cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \ 257 257 __cmpxchg_double(, p1, p2, o1, o2, n1, n2) 258 258 259 259 #endif /* ASM_X86_CMPXCHG_H */
+4 -4
arch/x86/include/asm/cmpxchg_32.h
··· 36 36 } 37 37 38 38 #ifdef CONFIG_X86_CMPXCHG64 39 - #define cmpxchg64(ptr, o, n) \ 39 + #define arch_cmpxchg64(ptr, o, n) \ 40 40 ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ 41 41 (unsigned long long)(n))) 42 - #define cmpxchg64_local(ptr, o, n) \ 42 + #define arch_cmpxchg64_local(ptr, o, n) \ 43 43 ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ 44 44 (unsigned long long)(n))) 45 45 #endif ··· 76 76 * to simulate the cmpxchg8b on the 80386 and 80486 CPU. 77 77 */ 78 78 79 - #define cmpxchg64(ptr, o, n) \ 79 + #define arch_cmpxchg64(ptr, o, n) \ 80 80 ({ \ 81 81 __typeof__(*(ptr)) __ret; \ 82 82 __typeof__(*(ptr)) __old = (o); \ ··· 93 93 __ret; }) 94 94 95 95 96 - #define cmpxchg64_local(ptr, o, n) \ 96 + #define arch_cmpxchg64_local(ptr, o, n) \ 97 97 ({ \ 98 98 __typeof__(*(ptr)) __ret; \ 99 99 __typeof__(*(ptr)) __old = (o); \
+2 -2
arch/x86/include/asm/cmpxchg_64.h
··· 7 7 *ptr = val; 8 8 } 9 9 10 - #define cmpxchg64(ptr, o, n) \ 10 + #define arch_cmpxchg64(ptr, o, n) \ 11 11 ({ \ 12 12 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 13 13 cmpxchg((ptr), (o), (n)); \ 14 14 }) 15 15 16 - #define cmpxchg64_local(ptr, o, n) \ 16 + #define arch_cmpxchg64_local(ptr, o, n) \ 17 17 ({ \ 18 18 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 19 19 cmpxchg_local((ptr), (o), (n)); \