
arch: Remove cmpxchg_double

No more users, remove the monster.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20230531132323.991907085@infradead.org
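The removed users were converted, earlier in this series, to the u128-based cmpxchg128() family, which takes a single naturally aligned 128-bit value instead of two adjacent longs and returns the previous value rather than a success flag. Below is a minimal user-space sketch of those semantics, using compiler built-ins rather than the kernel API; the demo function name and the -mcx16 build flag are illustrative assumptions, not part of this commit.

/* Sketch of cmpxchg128() semantics: the previous value is returned and
 * compared against the expected one to detect success.  Build with
 * "gcc -mcx16" on x86-64 so the built-in can emit cmpxchg16b. */
#include <stdbool.h>
#include <stdio.h>

static __uint128_t cmpxchg128_demo(__uint128_t *ptr, __uint128_t old,
                                   __uint128_t new)
{
        /* On failure, 'old' is updated to the value actually observed,
         * so the return value is the previous contents either way. */
        __atomic_compare_exchange_n(ptr, &old, new, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return old;
}

int main(void)
{
        _Alignas(16) __uint128_t v = 1;
        bool ok = cmpxchg128_demo(&v, 1, 2) == 1;       /* succeeds */

        printf("ok=%d v=%llu\n", ok, (unsigned long long)v);    /* ok=1 v=2 */
        return 0;
}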

+5 -371
-2
Documentation/core-api/this_cpu_ops.rst
···
       this_cpu_add_return(pcp, val)
       this_cpu_xchg(pcp, nval)
       this_cpu_cmpxchg(pcp, oval, nval)
-      this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
       this_cpu_sub(pcp, val)
       this_cpu_inc(pcp)
       this_cpu_dec(pcp)
···
       __this_cpu_add_return(pcp, val)
       __this_cpu_xchg(pcp, nval)
       __this_cpu_cmpxchg(pcp, oval, nval)
-      __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
       __this_cpu_sub(pcp, val)
       __this_cpu_inc(pcp)
       __this_cpu_dec(pcp)
-33
arch/arm64/include/asm/atomic_ll_sc.h
···
 
 #undef __CMPXCHG_CASE
 
-#define __CMPXCHG_DBL(name, mb, rel, cl)                               \
-static __always_inline long                                            \
-__ll_sc__cmpxchg_double##name(unsigned long old1,                      \
-                              unsigned long old2,                      \
-                              unsigned long new1,                      \
-                              unsigned long new2,                      \
-                              volatile void *ptr)                      \
-{                                                                      \
-        unsigned long tmp, ret;                                        \
-                                                                       \
-        asm volatile("// __cmpxchg_double" #name "\n"                  \
-        "       prfm    pstl1strm, %2\n"                               \
-        "1:     ldxp    %0, %1, %2\n"                                  \
-        "       eor     %0, %0, %3\n"                                  \
-        "       eor     %1, %1, %4\n"                                  \
-        "       orr     %1, %0, %1\n"                                  \
-        "       cbnz    %1, 2f\n"                                      \
-        "       st" #rel "xp    %w0, %5, %6, %2\n"                     \
-        "       cbnz    %w0, 1b\n"                                     \
-        "       " #mb "\n"                                             \
-        "2:"                                                           \
-        : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr)         \
-        : "r" (old1), "r" (old2), "r" (new1), "r" (new2)               \
-        : cl);                                                         \
-                                                                       \
-        return ret;                                                    \
-}
-
-__CMPXCHG_DBL(   ,        ,  ,         )
-__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
-
-#undef __CMPXCHG_DBL
-
 union __u128_halves {
         u128 full;
         struct {
-36
arch/arm64/include/asm/atomic_lse.h
···
 
 #undef __CMPXCHG_CASE
 
-#define __CMPXCHG_DBL(name, mb, cl...)                                 \
-static __always_inline long                                            \
-__lse__cmpxchg_double##name(unsigned long old1,                        \
-                            unsigned long old2,                        \
-                            unsigned long new1,                        \
-                            unsigned long new2,                        \
-                            volatile void *ptr)                        \
-{                                                                      \
-        unsigned long oldval1 = old1;                                  \
-        unsigned long oldval2 = old2;                                  \
-        register unsigned long x0 asm ("x0") = old1;                   \
-        register unsigned long x1 asm ("x1") = old2;                   \
-        register unsigned long x2 asm ("x2") = new1;                   \
-        register unsigned long x3 asm ("x3") = new2;                   \
-        register unsigned long x4 asm ("x4") = (unsigned long)ptr;     \
-                                                                       \
-        asm volatile(                                                  \
-        __LSE_PREAMBLE                                                 \
-        "       casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n" \
-        "       eor     %[old1], %[old1], %[oldval1]\n"                \
-        "       eor     %[old2], %[old2], %[oldval2]\n"                \
-        "       orr     %[old1], %[old1], %[old2]"                     \
-        : [old1] "+&r" (x0), [old2] "+&r" (x1),                        \
-          [v] "+Q" (*(__uint128_t *)ptr)                               \
-        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),            \
-          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)             \
-        : cl);                                                         \
-                                                                       \
-        return x0;                                                     \
-}
-
-__CMPXCHG_DBL(   ,   )
-__CMPXCHG_DBL(_mb, al, "memory")
-
-#undef __CMPXCHG_DBL
-
 #define __CMPXCHG128(name, mb, cl...)                                  \
 static __always_inline u128                                            \
 __lse__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new)        \
-46
arch/arm64/include/asm/cmpxchg.h
···
 
 #undef __CMPXCHG_CASE
 
-#define __CMPXCHG_DBL(name)                                            \
-static inline long __cmpxchg_double##name(unsigned long old1,          \
-                                          unsigned long old2,          \
-                                          unsigned long new1,          \
-                                          unsigned long new2,          \
-                                          volatile void *ptr)          \
-{                                                                      \
-        return __lse_ll_sc_body(_cmpxchg_double##name,                 \
-                                old1, old2, new1, new2, ptr);          \
-}
-
-__CMPXCHG_DBL(   )
-__CMPXCHG_DBL(_mb)
-
-#undef __CMPXCHG_DBL
-
 #define __CMPXCHG128(name)                                             \
 static inline u128 __cmpxchg128##name(volatile u128 *ptr,              \
                                       u128 old, u128 new)              \
···
 #define arch_cmpxchg64_release         arch_cmpxchg_release
 #define arch_cmpxchg64                 arch_cmpxchg
 #define arch_cmpxchg64_local           arch_cmpxchg_local
-
-/* cmpxchg_double */
-#define system_has_cmpxchg_double()    1
-
-#define __cmpxchg_double_check(ptr1, ptr2)                             \
-({                                                                     \
-        if (sizeof(*(ptr1)) != 8)                                      \
-                BUILD_BUG();                                           \
-        VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
-})
-
-#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)                \
-({                                                                     \
-        int __ret;                                                     \
-        __cmpxchg_double_check(ptr1, ptr2);                            \
-        __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
-                                     (unsigned long)(n1), (unsigned long)(n2), \
-                                     ptr1);                            \
-        __ret;                                                         \
-})
-
-#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)          \
-({                                                                     \
-        int __ret;                                                     \
-        __cmpxchg_double_check(ptr1, ptr2);                            \
-        __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
-                                  (unsigned long)(n1), (unsigned long)(n2), \
-                                  ptr1);                               \
-        __ret;                                                         \
-})
 
 /* cmpxchg128 */
 #define system_has_cmpxchg128()        1
-10
arch/arm64/include/asm/percpu.h
···
  * preemption point when TIF_NEED_RESCHED gets set while preemption is
  * disabled.
  */
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)          \
-({                                                                     \
-        int __ret;                                                     \
-        preempt_disable_notrace();                                     \
-        __ret = cmpxchg_double_local(   raw_cpu_ptr(&(ptr1)),          \
-                                        raw_cpu_ptr(&(ptr2)),          \
-                                        o1, o2, n1, n2);               \
-        preempt_enable_notrace();                                      \
-        __ret;                                                         \
-})
 
 #define _pcp_protect(op, pcp, ...)                                     \
 ({                                                                     \
-34
arch/s390/include/asm/cmpxchg.h
···
 #define arch_cmpxchg_local     arch_cmpxchg
 #define arch_cmpxchg64_local   arch_cmpxchg
 
-#define system_has_cmpxchg_double()    1
-
-static __always_inline int __cmpxchg_double(unsigned long p1, unsigned long p2,
-                                            unsigned long o1, unsigned long o2,
-                                            unsigned long n1, unsigned long n2)
-{
-        union register_pair old = { .even = o1, .odd = o2, };
-        union register_pair new = { .even = n1, .odd = n2, };
-        int cc;
-
-        asm volatile(
-                "       cdsg    %[old],%[new],%[ptr]\n"
-                "       ipm     %[cc]\n"
-                "       srl     %[cc],28\n"
-                : [cc] "=&d" (cc), [old] "+&d" (old.pair)
-                : [new] "d" (new.pair),
-                  [ptr] "QS" (*(unsigned long *)p1), "Q" (*(unsigned long *)p2)
-                : "memory", "cc");
-        return !cc;
-}
-
-#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2)                    \
-({                                                                     \
-        typeof(p1) __p1 = (p1);                                        \
-        typeof(p2) __p2 = (p2);                                        \
-                                                                       \
-        BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));                   \
-        BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));                   \
-        VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2)); \
-        __cmpxchg_double((unsigned long)__p1, (unsigned long)__p2,     \
-                         (unsigned long)(o1), (unsigned long)(o2),     \
-                         (unsigned long)(n1), (unsigned long)(n2));    \
-})
-
 #define system_has_cmpxchg128()        1
 
 static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
-18
arch/s390/include/asm/percpu.h
···
 #define this_cpu_xchg_4(pcp, nval)     arch_this_cpu_xchg(pcp, nval)
 #define this_cpu_xchg_8(pcp, nval)     arch_this_cpu_xchg(pcp, nval)
 
-#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2)       \
-({                                                                     \
-        typeof(pcp1) *p1__;                                            \
-        typeof(pcp2) *p2__;                                            \
-        int ret__;                                                     \
-                                                                       \
-        preempt_disable_notrace();                                     \
-        p1__ = raw_cpu_ptr(&(pcp1));                                   \
-        p2__ = raw_cpu_ptr(&(pcp2));                                   \
-        ret__ = __cmpxchg_double((unsigned long)p1__, (unsigned long)p2__, \
-                                 (unsigned long)(o1), (unsigned long)(o2), \
-                                 (unsigned long)(n1), (unsigned long)(n2)); \
-        preempt_enable_notrace();                                      \
-        ret__;                                                         \
-})
-
-#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-
 #include <asm-generic/percpu.h>
 
 #endif /* __ARCH_S390_PERCPU__ */
-25
arch/x86/include/asm/cmpxchg.h
···
 #define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
 #define xadd(ptr, inc)         __xadd((ptr), (inc), LOCK_PREFIX)
 
-#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)                  \
-({                                                                     \
-        bool __ret;                                                    \
-        __typeof__(*(p1)) __old1 = (o1), __new1 = (n1);                \
-        __typeof__(*(p2)) __old2 = (o2), __new2 = (n2);                \
-        BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));                   \
-        BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));                   \
-        VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));           \
-        VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));   \
-        asm volatile(pfx "cmpxchg%c5b %1"                              \
-                     CC_SET(e)                                         \
-                     : CC_OUT(e) (__ret),                              \
-                       "+m" (*(p1)), "+m" (*(p2)),                     \
-                       "+a" (__old1), "+d" (__old2)                    \
-                     : "i" (2 * sizeof(long)),                         \
-                       "b" (__new1), "c" (__new2));                    \
-        __ret;                                                         \
-})
-
-#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2)                    \
-        __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)
-
-#define arch_cmpxchg_double_local(p1, p2, o1, o2, n1, n2)              \
-        __cmpxchg_double(, p1, p2, o1, o2, n1, n2)
-
 #endif /* ASM_X86_CMPXCHG_H */
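The helper removed above selected cmpxchg8b or cmpxchg16b via the "cmpxchg%c5b" operand-size trick and reported success through ZF (CC_SET(e)/CC_OUT(e)). Callers wanting that boolean convention on top of the surviving primitives can compare the value returned by cmpxchg128(), or use try_cmpxchg128() (its __arch_try_cmpxchg128 backend is visible in the cmpxchg_64.h hunk below). A kernel-context sketch with illustrative function names, not code from this commit:

/* Sketch only: mapping the removed bool-returning arch_cmpxchg_double()
 * convention onto the value-returning u128 primitives. */
static inline bool pair_update(u128 *p, u128 old, u128 new)
{
        /* Value-returning form: compare against the expected value ... */
        return cmpxchg128(p, old, new) == old;
}

static inline bool pair_try_update(u128 *p, u128 *expected, u128 new)
{
        /* ... or the try_ form, which updates *expected on failure. */
        return try_cmpxchg128(p, expected, new);
}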
-1
arch/x86/include/asm/cmpxchg_32.h
···
 
 #endif
 
-#define system_has_cmpxchg_double()    boot_cpu_has(X86_FEATURE_CX8)
 #define system_has_cmpxchg64()         boot_cpu_has(X86_FEATURE_CX8)
 
 #endif /* _ASM_X86_CMPXCHG_32_H */
-1
arch/x86/include/asm/cmpxchg_64.h
···
         return __arch_try_cmpxchg128(ptr, oldp, new,);
 }
 
-#define system_has_cmpxchg_double()    boot_cpu_has(X86_FEATURE_CX16)
 #define system_has_cmpxchg128()        boot_cpu_has(X86_FEATURE_CX16)
 
 #endif /* _ASM_X86_CMPXCHG_64_H */
-42
arch/x86/include/asm/percpu.h
···
 #define this_cpu_cmpxchg_2(pcp, oval, nval)    percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
 #define this_cpu_cmpxchg_4(pcp, oval, nval)    percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
 
-#ifdef CONFIG_X86_CMPXCHG64
-#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)            \
-({                                                                     \
-        bool __ret;                                                    \
-        typeof(pcp1) __o1 = (o1), __n1 = (n1);                         \
-        typeof(pcp2) __o2 = (o2), __n2 = (n2);                         \
-        asm volatile("cmpxchg8b "__percpu_arg(1)                       \
-                     CC_SET(z)                                         \
-                     : CC_OUT(z) (__ret), "+m" (pcp1), "+m" (pcp2), "+a" (__o1), "+d" (__o2) \
-                     : "b" (__n1), "c" (__n2));                        \
-        __ret;                                                         \
-})
-
-#define raw_cpu_cmpxchg_double_4       percpu_cmpxchg8b_double
-#define this_cpu_cmpxchg_double_4      percpu_cmpxchg8b_double
-#endif /* CONFIG_X86_CMPXCHG64 */
-
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
···
 #define this_cpu_add_return_8(pcp, val)        percpu_add_return_op(8, volatile, pcp, val)
 #define this_cpu_xchg_8(pcp, nval)             percpu_xchg_op(8, volatile, pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval)    percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
-
-/*
- * Pretty complex macro to generate cmpxchg16 instruction. The instruction
- * is not supported on early AMD64 processors so we must be able to emulate
- * it in software. The address used in the cmpxchg16 instruction must be
- * aligned to a 16 byte boundary.
- */
-#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)           \
-({                                                                     \
-        bool __ret;                                                    \
-        typeof(pcp1) __o1 = (o1), __n1 = (n1);                         \
-        typeof(pcp2) __o2 = (o2), __n2 = (n2);                         \
-        asm volatile(ALTERNATIVE("leaq %P1, %%rsi; call this_cpu_cmpxchg16b_emu", \
-                                 "cmpxchg16b " __percpu_arg(1), X86_FEATURE_CX16) \
-                     "setz %0"                                         \
-                     : "=a" (__ret), "+m" (pcp1)                       \
-                     : "b" (__n1), "c" (__n2),                         \
-                       "a" (__o1), "d" (__o2)                          \
-                     : "memory", "rsi");                               \
-        __ret;                                                         \
-})
-
-#define raw_cpu_cmpxchg_double_8       percpu_cmpxchg16b_double
-#define this_cpu_cmpxchg_double_8      percpu_cmpxchg16b_double
-
 #endif
 
 static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
-58
include/asm-generic/percpu.h
···
         __old;                                                         \
 })
 
-#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-({                                                                     \
-        typeof(pcp1) *__p1 = raw_cpu_ptr(&(pcp1));                     \
-        typeof(pcp2) *__p2 = raw_cpu_ptr(&(pcp2));                     \
-        int __ret = 0;                                                 \
-        if (*__p1 == (oval1) && *__p2 == (oval2)) {                    \
-                *__p1 = nval1;                                         \
-                *__p2 = nval2;                                         \
-                __ret = 1;                                             \
-        }                                                              \
-        (__ret);                                                       \
-})
-
 #define __this_cpu_generic_read_nopreempt(pcp)                         \
 ({                                                                     \
         typeof(pcp) ___ret;                                            \
···
         unsigned long __flags;                                         \
         raw_local_irq_save(__flags);                                   \
         __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval);              \
-        raw_local_irq_restore(__flags);                                \
-        __ret;                                                         \
-})
-
-#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-({                                                                     \
-        int __ret;                                                     \
-        unsigned long __flags;                                         \
-        raw_local_irq_save(__flags);                                   \
-        __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,             \
-                        oval1, oval2, nval1, nval2);                   \
         raw_local_irq_restore(__flags);                                \
         __ret;                                                         \
 })
···
         raw_cpu_generic_cmpxchg(pcp, oval, nval)
 #endif
 
-#ifndef raw_cpu_cmpxchg_double_1
-#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-        raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef raw_cpu_cmpxchg_double_2
-#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-        raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef raw_cpu_cmpxchg_double_4
-#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-        raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef raw_cpu_cmpxchg_double_8
-#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-        raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-
 #ifndef this_cpu_read_1
 #define this_cpu_read_1(pcp)           this_cpu_generic_read(pcp)
 #endif
···
 #ifndef this_cpu_cmpxchg128
 #define this_cpu_cmpxchg128(pcp, oval, nval)                           \
         this_cpu_generic_cmpxchg(pcp, oval, nval)
-#endif
-
-#ifndef this_cpu_cmpxchg_double_1
-#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-        this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef this_cpu_cmpxchg_double_2
-#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-        this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef this_cpu_cmpxchg_double_4
-#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-        this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef this_cpu_cmpxchg_double_8
-#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-        this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
 #endif
 
 #endif /* _ASM_GENERIC_PERCPU_H_ */
+1 -16
include/linux/atomic/atomic-instrumented.h
···
         arch_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
 
-#define cmpxchg_double(ptr, ...) \
-({ \
-        typeof(ptr) __ai_ptr = (ptr); \
-        kcsan_mb(); \
-        instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
-        arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
-})
-
-
-#define cmpxchg_double_local(ptr, ...) \
-({ \
-        typeof(ptr) __ai_ptr = (ptr); \
-        instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
-        arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
-})
 
 #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// 82d1be694fab30414527d0877c29fa75ed5a0b74
+// 3611991b015450e119bcd7417a9431af7f3ba13c
-38
include/linux/percpu-defs.h
···
         pscr2_ret__;                                                   \
 })
 
-/*
- * Special handling for cmpxchg_double.  cmpxchg_double is passed two
- * percpu variables.  The first has to be aligned to a double word
- * boundary and the second has to follow directly thereafter.
- * We enforce this on all architectures even if they don't support
- * a double cmpxchg instruction, since it's a cheap requirement, and it
- * avoids breaking the requirement for architectures with the instruction.
- */
-#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)          \
-({                                                                     \
-        bool pdcrb_ret__;                                              \
-        __verify_pcpu_ptr(&(pcp1));                                    \
-        BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));                    \
-        VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1)));      \
-        VM_BUG_ON((unsigned long)(&(pcp2)) !=                          \
-                  (unsigned long)(&(pcp1)) + sizeof(pcp1));            \
-        switch(sizeof(pcp1)) {                                         \
-        case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
-        case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
-        case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
-        case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
-        default:                                                       \
-                __bad_size_call_parameter(); break;                    \
-        }                                                              \
-        pdcrb_ret__;                                                   \
-})
-
 #define __pcpu_size_call(stem, variable, ...)                          \
 do {                                                                   \
         __verify_pcpu_ptr(&(variable));                                \
···
         __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
 #define raw_cpu_try_cmpxchg(pcp, ovalp, nval)                          \
         __pcpu_size_call_return2bool(raw_cpu_try_cmpxchg_, pcp, ovalp, nval)
-#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-        __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
-
 #define raw_cpu_sub(pcp, val)          raw_cpu_add(pcp, -(val))
 #define raw_cpu_inc(pcp)               raw_cpu_add(pcp, 1)
 #define raw_cpu_dec(pcp)               raw_cpu_sub(pcp, 1)
···
         raw_cpu_cmpxchg(pcp, oval, nval);                              \
 })
 
-#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-({      __this_cpu_preempt_check("cmpxchg_double");                    \
-        raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \
-})
-
 #define __this_cpu_sub(pcp, val)       __this_cpu_add(pcp, -(typeof(pcp))(val))
 #define __this_cpu_inc(pcp)            __this_cpu_add(pcp, 1)
 #define __this_cpu_dec(pcp)            __this_cpu_sub(pcp, 1)
···
         __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
 #define this_cpu_try_cmpxchg(pcp, ovalp, nval)                         \
         __pcpu_size_call_return2bool(this_cpu_try_cmpxchg_, pcp, ovalp, nval)
-#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-        __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
-
 #define this_cpu_sub(pcp, val)         this_cpu_add(pcp, -(typeof(pcp))(val))
 #define this_cpu_inc(pcp)              this_cpu_add(pcp, 1)
 #define this_cpu_dec(pcp)              this_cpu_sub(pcp, 1)
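The comment deleted above documented the placement contract that made a generic fallback viable everywhere: two same-sized per-cpu words, the first aligned to a double-word boundary, the second immediately following. For reference, a layout satisfying those (now retired) checks looked roughly like this illustrative declaration, which does not appear in the tree:

/* The pair is aligned to 2 * sizeof(long) and contiguous, so
 * &pair.b == &pair.a + 1 -- exactly what the removed VM_BUG_ON()s
 * enforced.  Illustrative only. */
struct double_word_pair {
        unsigned long a;
        unsigned long b;
} __attribute__((__aligned__(2 * sizeof(unsigned long))));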
+4 -11
scripts/atomic/gen-atomic-instrumented.sh
···
 {
         local xchg="$1"; shift
         local order="$1"; shift
-        local mult="$1"; shift
 
         kcsan_barrier=""
         if [ "${xchg%_local}" = "${xchg}" ]; then
···
 EOF
 [ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n"
 cat <<EOF
-        instrument_atomic_read_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
-        instrument_read_write(__ai_oldp, ${mult}sizeof(*__ai_oldp)); \\
+        instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \\
+        instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \\
         arch_${xchg}${order}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\
 })
 EOF
···
 
 for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128" "try_cmpxchg" "try_cmpxchg64" "try_cmpxchg128"; do
         for order in "" "_acquire" "_release" "_relaxed"; do
-                gen_xchg "${xchg}" "${order}" ""
+                gen_xchg "${xchg}" "${order}"
                 printf "\n"
         done
 done
 
 for xchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local" "sync_cmpxchg" "try_cmpxchg_local" "try_cmpxchg64_local" "try_cmpxchg128_local"; do
-        gen_xchg "${xchg}" "" ""
+        gen_xchg "${xchg}" ""
         printf "\n"
 done
-
-gen_xchg "cmpxchg_double" "" "2 * "
-
-printf "\n\n"
-
-gen_xchg "cmpxchg_double_local" "" "2 * "
 
 cat <<EOF