Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

locking/atomic: scripts: add trivial raw_atomic*_<op>()

Currently a number of arch_atomic*_<op>() functions are optional, and
where an arch does not provide a given arch_atomic*_<op>() we will
define an implementation of arch_atomic*_<op>() in
atomic-arch-fallback.h.

Filling in the missing ops requires special care as we want to select
the optimal definition of each op (e.g. preferentially defining ops in
terms of their relaxed form rather than their fully-ordered form). The
ifdeffery necessary for this requires us to group ordering variants
together, which can be a bit painful to read, and is painful for
kerneldoc generation.
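
As an illustration (not part of this patch), the fallback ifdeffery for a
single op currently looks roughly like the following, grouping all ordering
variants of arch_atomic_add_return() together and preferring to build the
stronger forms from the relaxed form where an arch provides it:

  #ifndef arch_atomic_add_return_relaxed
  /* Arch only provides the fully-ordered op; reuse it for all orderings. */
  #define arch_atomic_add_return_acquire arch_atomic_add_return
  #define arch_atomic_add_return_release arch_atomic_add_return
  #define arch_atomic_add_return_relaxed arch_atomic_add_return
  #else /* arch_atomic_add_return_relaxed */

  #ifndef arch_atomic_add_return
  /* Build the fully-ordered op from the relaxed op plus explicit fences. */
  static __always_inline int
  arch_atomic_add_return(int i, atomic_t *v)
  {
  	int ret;
  	__atomic_pre_full_fence();
  	ret = arch_atomic_add_return_relaxed(i, v);
  	__atomic_post_full_fence();
  	return ret;
  }
  #define arch_atomic_add_return arch_atomic_add_return
  #endif

  #endif /* arch_atomic_add_return_relaxed */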

It would be easier to handle this if we generated ops into a separate
namespace, as this would remove the need to take special care with the
ifdeffery, and allow each ordering variant to be generated separately.

This patch adds a new set of raw_atomic_<op>() definitions, which are
currently trivial wrappers of their arch_atomic_<op>() equivalents. This
will allow us to move treewide users of arch_atomic_<op>() over to the
raw atomic ops before we rework the fallback generation to generate
raw_atomic_<op>() directly.
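
For instance, each generated raw_atomic_<op>() is simply a pass-through to
the corresponding arch op, along these lines (sketch only; the real
definitions are script-generated):

  static __always_inline int
  raw_atomic_read(const atomic_t *v)
  {
  	return arch_atomic_read(v);
  }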

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-18-mark.rutland@arm.com

Authored by Mark Rutland, committed by Peter Zijlstra
c9268ac6 7ed7a156

+2033 -312 (total)

include/linux/atomic.h (+1)

@@ -79,6 +79,7 @@
 
 #include <linux/atomic/atomic-arch-fallback.h>
 #include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-raw.h>
 #include <linux/atomic/atomic-instrumented.h>
 
 #endif /* _LINUX_ATOMIC_H */
include/linux/atomic/atomic-instrumented.h (+295 -300)

@@ -4,15 +4,10 @@
 // DO NOT MODIFY THIS FILE DIRECTLY
 
 /*
- * This file provides wrappers with KASAN instrumentation for atomic operations.
- * To use this functionality an arch's atomic.h file needs to define all
- * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
- * this file at the end. This file provides atomic_read() that forwards to
- * arch_atomic_read() for actual atomic operation.
- * Note: if an arch atomic operation is implemented by means of other atomic
- * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
- * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
- * double instrumentation.
+ * This file provoides atomic operations with explicit instrumentation (e.g.
+ * KASAN, KCSAN), which should be used unless it is necessary to avoid
+ * instrumentation. Where it is necessary to aovid instrumenation, the
+ * raw_atomic*() operations should be used.
  */
 #ifndef _LINUX_ATOMIC_INSTRUMENTED_H
 #define _LINUX_ATOMIC_INSTRUMENTED_H

The remaining hunks in this file apply the same mechanical substitution to
every instrumented wrapper: each atomic_<op>(), atomic64_<op>() and
atomic_long_<op>() body now calls the raw_atomic_<op>(), raw_atomic64_<op>()
or raw_atomic_long_<op>() equivalent instead of the arch_* variant, while the
surrounding kcsan_*() and instrument_atomic_*() calls are left unchanged.
For example:

 static __always_inline int
 atomic_read(const atomic_t *v)
 {
 	instrument_atomic_read(v, sizeof(*v));
-	return arch_atomic_read(v);
+	return raw_atomic_read(v);
 }
 
 static __always_inline int
 atomic_read_acquire(const atomic_t *v)
 {
 	instrument_atomic_read(v, sizeof(*v));
-	return arch_atomic_read_acquire(v);
+	return raw_atomic_read_acquire(v);
 }
instrument_atomic_read_write(v, sizeof(*v)); 1853 - return arch_atomic_long_dec_and_test(v); 1858 + return raw_atomic_long_dec_and_test(v); 1854 1859 } 1855 1860 1856 1861 static __always_inline bool ··· 1858 1863 { 1859 1864 kcsan_mb(); 1860 1865 instrument_atomic_read_write(v, sizeof(*v)); 1861 - return arch_atomic_long_inc_and_test(v); 1866 + return raw_atomic_long_inc_and_test(v); 1862 1867 } 1863 1868 1864 1869 static __always_inline bool ··· 1866 1871 { 1867 1872 kcsan_mb(); 1868 1873 instrument_atomic_read_write(v, sizeof(*v)); 1869 - return arch_atomic_long_add_negative(i, v); 1874 + return raw_atomic_long_add_negative(i, v); 1870 1875 } 1871 1876 1872 1877 static __always_inline bool 1873 1878 atomic_long_add_negative_acquire(long i, atomic_long_t *v) 1874 1879 { 1875 1880 instrument_atomic_read_write(v, sizeof(*v)); 1876 - return arch_atomic_long_add_negative_acquire(i, v); 1881 + return raw_atomic_long_add_negative_acquire(i, v); 1877 1882 } 1878 1883 1879 1884 static __always_inline bool ··· 1881 1886 { 1882 1887 kcsan_release(); 1883 1888 instrument_atomic_read_write(v, sizeof(*v)); 1884 - return arch_atomic_long_add_negative_release(i, v); 1889 + return raw_atomic_long_add_negative_release(i, v); 1885 1890 } 1886 1891 1887 1892 static __always_inline bool 1888 1893 atomic_long_add_negative_relaxed(long i, atomic_long_t *v) 1889 1894 { 1890 1895 instrument_atomic_read_write(v, sizeof(*v)); 1891 - return arch_atomic_long_add_negative_relaxed(i, v); 1896 + return raw_atomic_long_add_negative_relaxed(i, v); 1892 1897 } 1893 1898 1894 1899 static __always_inline long ··· 1896 1901 { 1897 1902 kcsan_mb(); 1898 1903 instrument_atomic_read_write(v, sizeof(*v)); 1899 - return arch_atomic_long_fetch_add_unless(v, a, u); 1904 + return raw_atomic_long_fetch_add_unless(v, a, u); 1900 1905 } 1901 1906 1902 1907 static __always_inline bool ··· 1904 1909 { 1905 1910 kcsan_mb(); 1906 1911 instrument_atomic_read_write(v, sizeof(*v)); 1907 - return arch_atomic_long_add_unless(v, a, u); 1912 + return raw_atomic_long_add_unless(v, a, u); 1908 1913 } 1909 1914 1910 1915 static __always_inline bool ··· 1912 1917 { 1913 1918 kcsan_mb(); 1914 1919 instrument_atomic_read_write(v, sizeof(*v)); 1915 - return arch_atomic_long_inc_not_zero(v); 1920 + return raw_atomic_long_inc_not_zero(v); 1916 1921 } 1917 1922 1918 1923 static __always_inline bool ··· 1920 1925 { 1921 1926 kcsan_mb(); 1922 1927 instrument_atomic_read_write(v, sizeof(*v)); 1923 - return arch_atomic_long_inc_unless_negative(v); 1928 + return raw_atomic_long_inc_unless_negative(v); 1924 1929 } 1925 1930 1926 1931 static __always_inline bool ··· 1928 1933 { 1929 1934 kcsan_mb(); 1930 1935 instrument_atomic_read_write(v, sizeof(*v)); 1931 - return arch_atomic_long_dec_unless_positive(v); 1936 + return raw_atomic_long_dec_unless_positive(v); 1932 1937 } 1933 1938 1934 1939 static __always_inline long ··· 1936 1941 { 1937 1942 kcsan_mb(); 1938 1943 instrument_atomic_read_write(v, sizeof(*v)); 1939 - return arch_atomic_long_dec_if_positive(v); 1944 + return raw_atomic_long_dec_if_positive(v); 1940 1945 } 1941 1946 1942 1947 #define xchg(ptr, ...) \ ··· 1944 1949 typeof(ptr) __ai_ptr = (ptr); \ 1945 1950 kcsan_mb(); \ 1946 1951 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 1947 - arch_xchg(__ai_ptr, __VA_ARGS__); \ 1952 + raw_xchg(__ai_ptr, __VA_ARGS__); \ 1948 1953 }) 1949 1954 1950 1955 #define xchg_acquire(ptr, ...) 
\ 1951 1956 ({ \ 1952 1957 typeof(ptr) __ai_ptr = (ptr); \ 1953 1958 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 1954 - arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ 1959 + raw_xchg_acquire(__ai_ptr, __VA_ARGS__); \ 1955 1960 }) 1956 1961 1957 1962 #define xchg_release(ptr, ...) \ ··· 1959 1964 typeof(ptr) __ai_ptr = (ptr); \ 1960 1965 kcsan_release(); \ 1961 1966 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 1962 - arch_xchg_release(__ai_ptr, __VA_ARGS__); \ 1967 + raw_xchg_release(__ai_ptr, __VA_ARGS__); \ 1963 1968 }) 1964 1969 1965 1970 #define xchg_relaxed(ptr, ...) \ 1966 1971 ({ \ 1967 1972 typeof(ptr) __ai_ptr = (ptr); \ 1968 1973 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 1969 - arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ 1974 + raw_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ 1970 1975 }) 1971 1976 1972 1977 #define cmpxchg(ptr, ...) \ ··· 1974 1979 typeof(ptr) __ai_ptr = (ptr); \ 1975 1980 kcsan_mb(); \ 1976 1981 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 1977 - arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ 1982 + raw_cmpxchg(__ai_ptr, __VA_ARGS__); \ 1978 1983 }) 1979 1984 1980 1985 #define cmpxchg_acquire(ptr, ...) \ 1981 1986 ({ \ 1982 1987 typeof(ptr) __ai_ptr = (ptr); \ 1983 1988 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 1984 - arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ 1989 + raw_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ 1985 1990 }) 1986 1991 1987 1992 #define cmpxchg_release(ptr, ...) \ ··· 1989 1994 typeof(ptr) __ai_ptr = (ptr); \ 1990 1995 kcsan_release(); \ 1991 1996 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 1992 - arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ 1997 + raw_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ 1993 1998 }) 1994 1999 1995 2000 #define cmpxchg_relaxed(ptr, ...) \ 1996 2001 ({ \ 1997 2002 typeof(ptr) __ai_ptr = (ptr); \ 1998 2003 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 1999 - arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ 2004 + raw_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ 2000 2005 }) 2001 2006 2002 2007 #define cmpxchg64(ptr, ...) \ ··· 2004 2009 typeof(ptr) __ai_ptr = (ptr); \ 2005 2010 kcsan_mb(); \ 2006 2011 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2007 - arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ 2012 + raw_cmpxchg64(__ai_ptr, __VA_ARGS__); \ 2008 2013 }) 2009 2014 2010 2015 #define cmpxchg64_acquire(ptr, ...) \ 2011 2016 ({ \ 2012 2017 typeof(ptr) __ai_ptr = (ptr); \ 2013 2018 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2014 - arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ 2019 + raw_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ 2015 2020 }) 2016 2021 2017 2022 #define cmpxchg64_release(ptr, ...) \ ··· 2019 2024 typeof(ptr) __ai_ptr = (ptr); \ 2020 2025 kcsan_release(); \ 2021 2026 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2022 - arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ 2027 + raw_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ 2023 2028 }) 2024 2029 2025 2030 #define cmpxchg64_relaxed(ptr, ...) \ 2026 2031 ({ \ 2027 2032 typeof(ptr) __ai_ptr = (ptr); \ 2028 2033 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2029 - arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ 2034 + raw_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ 2030 2035 }) 2031 2036 2032 2037 #define cmpxchg128(ptr, ...) 
\ ··· 2034 2039 typeof(ptr) __ai_ptr = (ptr); \ 2035 2040 kcsan_mb(); \ 2036 2041 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2037 - arch_cmpxchg128(__ai_ptr, __VA_ARGS__); \ 2042 + raw_cmpxchg128(__ai_ptr, __VA_ARGS__); \ 2038 2043 }) 2039 2044 2040 2045 #define cmpxchg128_acquire(ptr, ...) \ 2041 2046 ({ \ 2042 2047 typeof(ptr) __ai_ptr = (ptr); \ 2043 2048 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2044 - arch_cmpxchg128_acquire(__ai_ptr, __VA_ARGS__); \ 2049 + raw_cmpxchg128_acquire(__ai_ptr, __VA_ARGS__); \ 2045 2050 }) 2046 2051 2047 2052 #define cmpxchg128_release(ptr, ...) \ ··· 2049 2054 typeof(ptr) __ai_ptr = (ptr); \ 2050 2055 kcsan_release(); \ 2051 2056 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2052 - arch_cmpxchg128_release(__ai_ptr, __VA_ARGS__); \ 2057 + raw_cmpxchg128_release(__ai_ptr, __VA_ARGS__); \ 2053 2058 }) 2054 2059 2055 2060 #define cmpxchg128_relaxed(ptr, ...) \ 2056 2061 ({ \ 2057 2062 typeof(ptr) __ai_ptr = (ptr); \ 2058 2063 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2059 - arch_cmpxchg128_relaxed(__ai_ptr, __VA_ARGS__); \ 2064 + raw_cmpxchg128_relaxed(__ai_ptr, __VA_ARGS__); \ 2060 2065 }) 2061 2066 2062 2067 #define try_cmpxchg(ptr, oldp, ...) \ ··· 2066 2071 kcsan_mb(); \ 2067 2072 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2068 2073 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2069 - arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2074 + raw_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2070 2075 }) 2071 2076 2072 2077 #define try_cmpxchg_acquire(ptr, oldp, ...) \ ··· 2075 2080 typeof(oldp) __ai_oldp = (oldp); \ 2076 2081 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2077 2082 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2078 - arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2083 + raw_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2079 2084 }) 2080 2085 2081 2086 #define try_cmpxchg_release(ptr, oldp, ...) \ ··· 2085 2090 kcsan_release(); \ 2086 2091 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2087 2092 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2088 - arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2093 + raw_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2089 2094 }) 2090 2095 2091 2096 #define try_cmpxchg_relaxed(ptr, oldp, ...) \ ··· 2094 2099 typeof(oldp) __ai_oldp = (oldp); \ 2095 2100 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2096 2101 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2097 - arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2102 + raw_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2098 2103 }) 2099 2104 2100 2105 #define try_cmpxchg64(ptr, oldp, ...) \ ··· 2104 2109 kcsan_mb(); \ 2105 2110 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2106 2111 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2107 - arch_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2112 + raw_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2108 2113 }) 2109 2114 2110 2115 #define try_cmpxchg64_acquire(ptr, oldp, ...) 
\ ··· 2113 2118 typeof(oldp) __ai_oldp = (oldp); \ 2114 2119 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2115 2120 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2116 - arch_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2121 + raw_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2117 2122 }) 2118 2123 2119 2124 #define try_cmpxchg64_release(ptr, oldp, ...) \ ··· 2123 2128 kcsan_release(); \ 2124 2129 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2125 2130 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2126 - arch_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2131 + raw_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2127 2132 }) 2128 2133 2129 2134 #define try_cmpxchg64_relaxed(ptr, oldp, ...) \ ··· 2132 2137 typeof(oldp) __ai_oldp = (oldp); \ 2133 2138 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2134 2139 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2135 - arch_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2140 + raw_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2136 2141 }) 2137 2142 2138 2143 #define try_cmpxchg128(ptr, oldp, ...) \ ··· 2142 2147 kcsan_mb(); \ 2143 2148 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2144 2149 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2145 - arch_try_cmpxchg128(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2150 + raw_try_cmpxchg128(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2146 2151 }) 2147 2152 2148 2153 #define try_cmpxchg128_acquire(ptr, oldp, ...) \ ··· 2151 2156 typeof(oldp) __ai_oldp = (oldp); \ 2152 2157 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2153 2158 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2154 - arch_try_cmpxchg128_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2159 + raw_try_cmpxchg128_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2155 2160 }) 2156 2161 2157 2162 #define try_cmpxchg128_release(ptr, oldp, ...) \ ··· 2161 2166 kcsan_release(); \ 2162 2167 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2163 2168 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2164 - arch_try_cmpxchg128_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2169 + raw_try_cmpxchg128_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2165 2170 }) 2166 2171 2167 2172 #define try_cmpxchg128_relaxed(ptr, oldp, ...) \ ··· 2170 2175 typeof(oldp) __ai_oldp = (oldp); \ 2171 2176 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2172 2177 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2173 - arch_try_cmpxchg128_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2178 + raw_try_cmpxchg128_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2174 2179 }) 2175 2180 2176 2181 #define cmpxchg_local(ptr, ...) \ 2177 2182 ({ \ 2178 2183 typeof(ptr) __ai_ptr = (ptr); \ 2179 2184 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2180 - arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ 2185 + raw_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ 2181 2186 }) 2182 2187 2183 2188 #define cmpxchg64_local(ptr, ...) \ 2184 2189 ({ \ 2185 2190 typeof(ptr) __ai_ptr = (ptr); \ 2186 2191 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2187 - arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ 2192 + raw_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ 2188 2193 }) 2189 2194 2190 2195 #define cmpxchg128_local(ptr, ...) 
\ 2191 2196 ({ \ 2192 2197 typeof(ptr) __ai_ptr = (ptr); \ 2193 2198 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2194 - arch_cmpxchg128_local(__ai_ptr, __VA_ARGS__); \ 2199 + raw_cmpxchg128_local(__ai_ptr, __VA_ARGS__); \ 2195 2200 }) 2196 2201 2197 2202 #define sync_cmpxchg(ptr, ...) \ ··· 2199 2204 typeof(ptr) __ai_ptr = (ptr); \ 2200 2205 kcsan_mb(); \ 2201 2206 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2202 - arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ 2207 + raw_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ 2203 2208 }) 2204 2209 2205 2210 #define try_cmpxchg_local(ptr, oldp, ...) \ ··· 2208 2213 typeof(oldp) __ai_oldp = (oldp); \ 2209 2214 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2210 2215 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2211 - arch_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2216 + raw_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2212 2217 }) 2213 2218 2214 2219 #define try_cmpxchg64_local(ptr, oldp, ...) \ ··· 2217 2222 typeof(oldp) __ai_oldp = (oldp); \ 2218 2223 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2219 2224 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2220 - arch_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2225 + raw_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2221 2226 }) 2222 2227 2223 2228 #define try_cmpxchg128_local(ptr, oldp, ...) \ ··· 2226 2231 typeof(oldp) __ai_oldp = (oldp); \ 2227 2232 instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ 2228 2233 instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ 2229 - arch_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2234 + raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ 2230 2235 }) 2231 2236 2232 2237 2233 2238 #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ 2234 - // 3611991b015450e119bcd7417a9431af7f3ba13c 2239 + // f6502977180430e61c1a7c4e5e665f04f501fb8d
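
For orientation (annotation, not part of the diff): after this patch each instrumented op is a thin layer over the corresponding raw op, which in turn is a trivial pass-through to the arch op. The sketch below paraphrases the generated headers in this diff for a single operation; it assumes the usual kernel-internal definitions (atomic_long_t, instrument_atomic_read_write(), __always_inline) and is illustrative only.

	/* include/linux/atomic/atomic-raw.h (generated, uninstrumented) */
	static __always_inline void
	raw_atomic_long_inc(atomic_long_t *v)
	{
		arch_atomic_long_inc(v);	/* straight to the arch implementation */
	}

	/* include/linux/atomic/atomic-instrumented.h (generated, instrumented) */
	static __always_inline void
	atomic_long_inc(atomic_long_t *v)
	{
		instrument_atomic_read_write(v, sizeof(*v));	/* KASAN/KCSAN hooks */
		raw_atomic_long_inc(v);		/* previously called arch_atomic_long_inc(v) */
	}

Ordinary kernel code keeps calling atomic_long_inc() and gets instrumentation; code that has to avoid instrumentation can call the raw_atomic_long_*() form directly.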
+1645
include/linux/atomic/atomic-raw.h
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + // Generated by scripts/atomic/gen-atomic-raw.sh 4 + // DO NOT MODIFY THIS FILE DIRECTLY 5 + 6 + #ifndef _LINUX_ATOMIC_RAW_H 7 + #define _LINUX_ATOMIC_RAW_H 8 + 9 + static __always_inline int 10 + raw_atomic_read(const atomic_t *v) 11 + { 12 + return arch_atomic_read(v); 13 + } 14 + 15 + static __always_inline int 16 + raw_atomic_read_acquire(const atomic_t *v) 17 + { 18 + return arch_atomic_read_acquire(v); 19 + } 20 + 21 + static __always_inline void 22 + raw_atomic_set(atomic_t *v, int i) 23 + { 24 + arch_atomic_set(v, i); 25 + } 26 + 27 + static __always_inline void 28 + raw_atomic_set_release(atomic_t *v, int i) 29 + { 30 + arch_atomic_set_release(v, i); 31 + } 32 + 33 + static __always_inline void 34 + raw_atomic_add(int i, atomic_t *v) 35 + { 36 + arch_atomic_add(i, v); 37 + } 38 + 39 + static __always_inline int 40 + raw_atomic_add_return(int i, atomic_t *v) 41 + { 42 + return arch_atomic_add_return(i, v); 43 + } 44 + 45 + static __always_inline int 46 + raw_atomic_add_return_acquire(int i, atomic_t *v) 47 + { 48 + return arch_atomic_add_return_acquire(i, v); 49 + } 50 + 51 + static __always_inline int 52 + raw_atomic_add_return_release(int i, atomic_t *v) 53 + { 54 + return arch_atomic_add_return_release(i, v); 55 + } 56 + 57 + static __always_inline int 58 + raw_atomic_add_return_relaxed(int i, atomic_t *v) 59 + { 60 + return arch_atomic_add_return_relaxed(i, v); 61 + } 62 + 63 + static __always_inline int 64 + raw_atomic_fetch_add(int i, atomic_t *v) 65 + { 66 + return arch_atomic_fetch_add(i, v); 67 + } 68 + 69 + static __always_inline int 70 + raw_atomic_fetch_add_acquire(int i, atomic_t *v) 71 + { 72 + return arch_atomic_fetch_add_acquire(i, v); 73 + } 74 + 75 + static __always_inline int 76 + raw_atomic_fetch_add_release(int i, atomic_t *v) 77 + { 78 + return arch_atomic_fetch_add_release(i, v); 79 + } 80 + 81 + static __always_inline int 82 + raw_atomic_fetch_add_relaxed(int i, atomic_t *v) 83 + { 84 + return arch_atomic_fetch_add_relaxed(i, v); 85 + } 86 + 87 + static __always_inline void 88 + raw_atomic_sub(int i, atomic_t *v) 89 + { 90 + arch_atomic_sub(i, v); 91 + } 92 + 93 + static __always_inline int 94 + raw_atomic_sub_return(int i, atomic_t *v) 95 + { 96 + return arch_atomic_sub_return(i, v); 97 + } 98 + 99 + static __always_inline int 100 + raw_atomic_sub_return_acquire(int i, atomic_t *v) 101 + { 102 + return arch_atomic_sub_return_acquire(i, v); 103 + } 104 + 105 + static __always_inline int 106 + raw_atomic_sub_return_release(int i, atomic_t *v) 107 + { 108 + return arch_atomic_sub_return_release(i, v); 109 + } 110 + 111 + static __always_inline int 112 + raw_atomic_sub_return_relaxed(int i, atomic_t *v) 113 + { 114 + return arch_atomic_sub_return_relaxed(i, v); 115 + } 116 + 117 + static __always_inline int 118 + raw_atomic_fetch_sub(int i, atomic_t *v) 119 + { 120 + return arch_atomic_fetch_sub(i, v); 121 + } 122 + 123 + static __always_inline int 124 + raw_atomic_fetch_sub_acquire(int i, atomic_t *v) 125 + { 126 + return arch_atomic_fetch_sub_acquire(i, v); 127 + } 128 + 129 + static __always_inline int 130 + raw_atomic_fetch_sub_release(int i, atomic_t *v) 131 + { 132 + return arch_atomic_fetch_sub_release(i, v); 133 + } 134 + 135 + static __always_inline int 136 + raw_atomic_fetch_sub_relaxed(int i, atomic_t *v) 137 + { 138 + return arch_atomic_fetch_sub_relaxed(i, v); 139 + } 140 + 141 + static __always_inline void 142 + raw_atomic_inc(atomic_t *v) 143 + { 144 + arch_atomic_inc(v); 145 + } 146 + 147 + 
static __always_inline int 148 + raw_atomic_inc_return(atomic_t *v) 149 + { 150 + return arch_atomic_inc_return(v); 151 + } 152 + 153 + static __always_inline int 154 + raw_atomic_inc_return_acquire(atomic_t *v) 155 + { 156 + return arch_atomic_inc_return_acquire(v); 157 + } 158 + 159 + static __always_inline int 160 + raw_atomic_inc_return_release(atomic_t *v) 161 + { 162 + return arch_atomic_inc_return_release(v); 163 + } 164 + 165 + static __always_inline int 166 + raw_atomic_inc_return_relaxed(atomic_t *v) 167 + { 168 + return arch_atomic_inc_return_relaxed(v); 169 + } 170 + 171 + static __always_inline int 172 + raw_atomic_fetch_inc(atomic_t *v) 173 + { 174 + return arch_atomic_fetch_inc(v); 175 + } 176 + 177 + static __always_inline int 178 + raw_atomic_fetch_inc_acquire(atomic_t *v) 179 + { 180 + return arch_atomic_fetch_inc_acquire(v); 181 + } 182 + 183 + static __always_inline int 184 + raw_atomic_fetch_inc_release(atomic_t *v) 185 + { 186 + return arch_atomic_fetch_inc_release(v); 187 + } 188 + 189 + static __always_inline int 190 + raw_atomic_fetch_inc_relaxed(atomic_t *v) 191 + { 192 + return arch_atomic_fetch_inc_relaxed(v); 193 + } 194 + 195 + static __always_inline void 196 + raw_atomic_dec(atomic_t *v) 197 + { 198 + arch_atomic_dec(v); 199 + } 200 + 201 + static __always_inline int 202 + raw_atomic_dec_return(atomic_t *v) 203 + { 204 + return arch_atomic_dec_return(v); 205 + } 206 + 207 + static __always_inline int 208 + raw_atomic_dec_return_acquire(atomic_t *v) 209 + { 210 + return arch_atomic_dec_return_acquire(v); 211 + } 212 + 213 + static __always_inline int 214 + raw_atomic_dec_return_release(atomic_t *v) 215 + { 216 + return arch_atomic_dec_return_release(v); 217 + } 218 + 219 + static __always_inline int 220 + raw_atomic_dec_return_relaxed(atomic_t *v) 221 + { 222 + return arch_atomic_dec_return_relaxed(v); 223 + } 224 + 225 + static __always_inline int 226 + raw_atomic_fetch_dec(atomic_t *v) 227 + { 228 + return arch_atomic_fetch_dec(v); 229 + } 230 + 231 + static __always_inline int 232 + raw_atomic_fetch_dec_acquire(atomic_t *v) 233 + { 234 + return arch_atomic_fetch_dec_acquire(v); 235 + } 236 + 237 + static __always_inline int 238 + raw_atomic_fetch_dec_release(atomic_t *v) 239 + { 240 + return arch_atomic_fetch_dec_release(v); 241 + } 242 + 243 + static __always_inline int 244 + raw_atomic_fetch_dec_relaxed(atomic_t *v) 245 + { 246 + return arch_atomic_fetch_dec_relaxed(v); 247 + } 248 + 249 + static __always_inline void 250 + raw_atomic_and(int i, atomic_t *v) 251 + { 252 + arch_atomic_and(i, v); 253 + } 254 + 255 + static __always_inline int 256 + raw_atomic_fetch_and(int i, atomic_t *v) 257 + { 258 + return arch_atomic_fetch_and(i, v); 259 + } 260 + 261 + static __always_inline int 262 + raw_atomic_fetch_and_acquire(int i, atomic_t *v) 263 + { 264 + return arch_atomic_fetch_and_acquire(i, v); 265 + } 266 + 267 + static __always_inline int 268 + raw_atomic_fetch_and_release(int i, atomic_t *v) 269 + { 270 + return arch_atomic_fetch_and_release(i, v); 271 + } 272 + 273 + static __always_inline int 274 + raw_atomic_fetch_and_relaxed(int i, atomic_t *v) 275 + { 276 + return arch_atomic_fetch_and_relaxed(i, v); 277 + } 278 + 279 + static __always_inline void 280 + raw_atomic_andnot(int i, atomic_t *v) 281 + { 282 + arch_atomic_andnot(i, v); 283 + } 284 + 285 + static __always_inline int 286 + raw_atomic_fetch_andnot(int i, atomic_t *v) 287 + { 288 + return arch_atomic_fetch_andnot(i, v); 289 + } 290 + 291 + static __always_inline int 292 + 
raw_atomic_fetch_andnot_acquire(int i, atomic_t *v) 293 + { 294 + return arch_atomic_fetch_andnot_acquire(i, v); 295 + } 296 + 297 + static __always_inline int 298 + raw_atomic_fetch_andnot_release(int i, atomic_t *v) 299 + { 300 + return arch_atomic_fetch_andnot_release(i, v); 301 + } 302 + 303 + static __always_inline int 304 + raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v) 305 + { 306 + return arch_atomic_fetch_andnot_relaxed(i, v); 307 + } 308 + 309 + static __always_inline void 310 + raw_atomic_or(int i, atomic_t *v) 311 + { 312 + arch_atomic_or(i, v); 313 + } 314 + 315 + static __always_inline int 316 + raw_atomic_fetch_or(int i, atomic_t *v) 317 + { 318 + return arch_atomic_fetch_or(i, v); 319 + } 320 + 321 + static __always_inline int 322 + raw_atomic_fetch_or_acquire(int i, atomic_t *v) 323 + { 324 + return arch_atomic_fetch_or_acquire(i, v); 325 + } 326 + 327 + static __always_inline int 328 + raw_atomic_fetch_or_release(int i, atomic_t *v) 329 + { 330 + return arch_atomic_fetch_or_release(i, v); 331 + } 332 + 333 + static __always_inline int 334 + raw_atomic_fetch_or_relaxed(int i, atomic_t *v) 335 + { 336 + return arch_atomic_fetch_or_relaxed(i, v); 337 + } 338 + 339 + static __always_inline void 340 + raw_atomic_xor(int i, atomic_t *v) 341 + { 342 + arch_atomic_xor(i, v); 343 + } 344 + 345 + static __always_inline int 346 + raw_atomic_fetch_xor(int i, atomic_t *v) 347 + { 348 + return arch_atomic_fetch_xor(i, v); 349 + } 350 + 351 + static __always_inline int 352 + raw_atomic_fetch_xor_acquire(int i, atomic_t *v) 353 + { 354 + return arch_atomic_fetch_xor_acquire(i, v); 355 + } 356 + 357 + static __always_inline int 358 + raw_atomic_fetch_xor_release(int i, atomic_t *v) 359 + { 360 + return arch_atomic_fetch_xor_release(i, v); 361 + } 362 + 363 + static __always_inline int 364 + raw_atomic_fetch_xor_relaxed(int i, atomic_t *v) 365 + { 366 + return arch_atomic_fetch_xor_relaxed(i, v); 367 + } 368 + 369 + static __always_inline int 370 + raw_atomic_xchg(atomic_t *v, int i) 371 + { 372 + return arch_atomic_xchg(v, i); 373 + } 374 + 375 + static __always_inline int 376 + raw_atomic_xchg_acquire(atomic_t *v, int i) 377 + { 378 + return arch_atomic_xchg_acquire(v, i); 379 + } 380 + 381 + static __always_inline int 382 + raw_atomic_xchg_release(atomic_t *v, int i) 383 + { 384 + return arch_atomic_xchg_release(v, i); 385 + } 386 + 387 + static __always_inline int 388 + raw_atomic_xchg_relaxed(atomic_t *v, int i) 389 + { 390 + return arch_atomic_xchg_relaxed(v, i); 391 + } 392 + 393 + static __always_inline int 394 + raw_atomic_cmpxchg(atomic_t *v, int old, int new) 395 + { 396 + return arch_atomic_cmpxchg(v, old, new); 397 + } 398 + 399 + static __always_inline int 400 + raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new) 401 + { 402 + return arch_atomic_cmpxchg_acquire(v, old, new); 403 + } 404 + 405 + static __always_inline int 406 + raw_atomic_cmpxchg_release(atomic_t *v, int old, int new) 407 + { 408 + return arch_atomic_cmpxchg_release(v, old, new); 409 + } 410 + 411 + static __always_inline int 412 + raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) 413 + { 414 + return arch_atomic_cmpxchg_relaxed(v, old, new); 415 + } 416 + 417 + static __always_inline bool 418 + raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new) 419 + { 420 + return arch_atomic_try_cmpxchg(v, old, new); 421 + } 422 + 423 + static __always_inline bool 424 + raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) 425 + { 426 + return arch_atomic_try_cmpxchg_acquire(v, old, new); 427 
+ } 428 + 429 + static __always_inline bool 430 + raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) 431 + { 432 + return arch_atomic_try_cmpxchg_release(v, old, new); 433 + } 434 + 435 + static __always_inline bool 436 + raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) 437 + { 438 + return arch_atomic_try_cmpxchg_relaxed(v, old, new); 439 + } 440 + 441 + static __always_inline bool 442 + raw_atomic_sub_and_test(int i, atomic_t *v) 443 + { 444 + return arch_atomic_sub_and_test(i, v); 445 + } 446 + 447 + static __always_inline bool 448 + raw_atomic_dec_and_test(atomic_t *v) 449 + { 450 + return arch_atomic_dec_and_test(v); 451 + } 452 + 453 + static __always_inline bool 454 + raw_atomic_inc_and_test(atomic_t *v) 455 + { 456 + return arch_atomic_inc_and_test(v); 457 + } 458 + 459 + static __always_inline bool 460 + raw_atomic_add_negative(int i, atomic_t *v) 461 + { 462 + return arch_atomic_add_negative(i, v); 463 + } 464 + 465 + static __always_inline bool 466 + raw_atomic_add_negative_acquire(int i, atomic_t *v) 467 + { 468 + return arch_atomic_add_negative_acquire(i, v); 469 + } 470 + 471 + static __always_inline bool 472 + raw_atomic_add_negative_release(int i, atomic_t *v) 473 + { 474 + return arch_atomic_add_negative_release(i, v); 475 + } 476 + 477 + static __always_inline bool 478 + raw_atomic_add_negative_relaxed(int i, atomic_t *v) 479 + { 480 + return arch_atomic_add_negative_relaxed(i, v); 481 + } 482 + 483 + static __always_inline int 484 + raw_atomic_fetch_add_unless(atomic_t *v, int a, int u) 485 + { 486 + return arch_atomic_fetch_add_unless(v, a, u); 487 + } 488 + 489 + static __always_inline bool 490 + raw_atomic_add_unless(atomic_t *v, int a, int u) 491 + { 492 + return arch_atomic_add_unless(v, a, u); 493 + } 494 + 495 + static __always_inline bool 496 + raw_atomic_inc_not_zero(atomic_t *v) 497 + { 498 + return arch_atomic_inc_not_zero(v); 499 + } 500 + 501 + static __always_inline bool 502 + raw_atomic_inc_unless_negative(atomic_t *v) 503 + { 504 + return arch_atomic_inc_unless_negative(v); 505 + } 506 + 507 + static __always_inline bool 508 + raw_atomic_dec_unless_positive(atomic_t *v) 509 + { 510 + return arch_atomic_dec_unless_positive(v); 511 + } 512 + 513 + static __always_inline int 514 + raw_atomic_dec_if_positive(atomic_t *v) 515 + { 516 + return arch_atomic_dec_if_positive(v); 517 + } 518 + 519 + static __always_inline s64 520 + raw_atomic64_read(const atomic64_t *v) 521 + { 522 + return arch_atomic64_read(v); 523 + } 524 + 525 + static __always_inline s64 526 + raw_atomic64_read_acquire(const atomic64_t *v) 527 + { 528 + return arch_atomic64_read_acquire(v); 529 + } 530 + 531 + static __always_inline void 532 + raw_atomic64_set(atomic64_t *v, s64 i) 533 + { 534 + arch_atomic64_set(v, i); 535 + } 536 + 537 + static __always_inline void 538 + raw_atomic64_set_release(atomic64_t *v, s64 i) 539 + { 540 + arch_atomic64_set_release(v, i); 541 + } 542 + 543 + static __always_inline void 544 + raw_atomic64_add(s64 i, atomic64_t *v) 545 + { 546 + arch_atomic64_add(i, v); 547 + } 548 + 549 + static __always_inline s64 550 + raw_atomic64_add_return(s64 i, atomic64_t *v) 551 + { 552 + return arch_atomic64_add_return(i, v); 553 + } 554 + 555 + static __always_inline s64 556 + raw_atomic64_add_return_acquire(s64 i, atomic64_t *v) 557 + { 558 + return arch_atomic64_add_return_acquire(i, v); 559 + } 560 + 561 + static __always_inline s64 562 + raw_atomic64_add_return_release(s64 i, atomic64_t *v) 563 + { 564 + return arch_atomic64_add_return_release(i, 
v); 565 + } 566 + 567 + static __always_inline s64 568 + raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v) 569 + { 570 + return arch_atomic64_add_return_relaxed(i, v); 571 + } 572 + 573 + static __always_inline s64 574 + raw_atomic64_fetch_add(s64 i, atomic64_t *v) 575 + { 576 + return arch_atomic64_fetch_add(i, v); 577 + } 578 + 579 + static __always_inline s64 580 + raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v) 581 + { 582 + return arch_atomic64_fetch_add_acquire(i, v); 583 + } 584 + 585 + static __always_inline s64 586 + raw_atomic64_fetch_add_release(s64 i, atomic64_t *v) 587 + { 588 + return arch_atomic64_fetch_add_release(i, v); 589 + } 590 + 591 + static __always_inline s64 592 + raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) 593 + { 594 + return arch_atomic64_fetch_add_relaxed(i, v); 595 + } 596 + 597 + static __always_inline void 598 + raw_atomic64_sub(s64 i, atomic64_t *v) 599 + { 600 + arch_atomic64_sub(i, v); 601 + } 602 + 603 + static __always_inline s64 604 + raw_atomic64_sub_return(s64 i, atomic64_t *v) 605 + { 606 + return arch_atomic64_sub_return(i, v); 607 + } 608 + 609 + static __always_inline s64 610 + raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v) 611 + { 612 + return arch_atomic64_sub_return_acquire(i, v); 613 + } 614 + 615 + static __always_inline s64 616 + raw_atomic64_sub_return_release(s64 i, atomic64_t *v) 617 + { 618 + return arch_atomic64_sub_return_release(i, v); 619 + } 620 + 621 + static __always_inline s64 622 + raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v) 623 + { 624 + return arch_atomic64_sub_return_relaxed(i, v); 625 + } 626 + 627 + static __always_inline s64 628 + raw_atomic64_fetch_sub(s64 i, atomic64_t *v) 629 + { 630 + return arch_atomic64_fetch_sub(i, v); 631 + } 632 + 633 + static __always_inline s64 634 + raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) 635 + { 636 + return arch_atomic64_fetch_sub_acquire(i, v); 637 + } 638 + 639 + static __always_inline s64 640 + raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v) 641 + { 642 + return arch_atomic64_fetch_sub_release(i, v); 643 + } 644 + 645 + static __always_inline s64 646 + raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) 647 + { 648 + return arch_atomic64_fetch_sub_relaxed(i, v); 649 + } 650 + 651 + static __always_inline void 652 + raw_atomic64_inc(atomic64_t *v) 653 + { 654 + arch_atomic64_inc(v); 655 + } 656 + 657 + static __always_inline s64 658 + raw_atomic64_inc_return(atomic64_t *v) 659 + { 660 + return arch_atomic64_inc_return(v); 661 + } 662 + 663 + static __always_inline s64 664 + raw_atomic64_inc_return_acquire(atomic64_t *v) 665 + { 666 + return arch_atomic64_inc_return_acquire(v); 667 + } 668 + 669 + static __always_inline s64 670 + raw_atomic64_inc_return_release(atomic64_t *v) 671 + { 672 + return arch_atomic64_inc_return_release(v); 673 + } 674 + 675 + static __always_inline s64 676 + raw_atomic64_inc_return_relaxed(atomic64_t *v) 677 + { 678 + return arch_atomic64_inc_return_relaxed(v); 679 + } 680 + 681 + static __always_inline s64 682 + raw_atomic64_fetch_inc(atomic64_t *v) 683 + { 684 + return arch_atomic64_fetch_inc(v); 685 + } 686 + 687 + static __always_inline s64 688 + raw_atomic64_fetch_inc_acquire(atomic64_t *v) 689 + { 690 + return arch_atomic64_fetch_inc_acquire(v); 691 + } 692 + 693 + static __always_inline s64 694 + raw_atomic64_fetch_inc_release(atomic64_t *v) 695 + { 696 + return arch_atomic64_fetch_inc_release(v); 697 + } 698 + 699 + static __always_inline s64 700 + raw_atomic64_fetch_inc_relaxed(atomic64_t *v) 701 + { 
702 + return arch_atomic64_fetch_inc_relaxed(v); 703 + } 704 + 705 + static __always_inline void 706 + raw_atomic64_dec(atomic64_t *v) 707 + { 708 + arch_atomic64_dec(v); 709 + } 710 + 711 + static __always_inline s64 712 + raw_atomic64_dec_return(atomic64_t *v) 713 + { 714 + return arch_atomic64_dec_return(v); 715 + } 716 + 717 + static __always_inline s64 718 + raw_atomic64_dec_return_acquire(atomic64_t *v) 719 + { 720 + return arch_atomic64_dec_return_acquire(v); 721 + } 722 + 723 + static __always_inline s64 724 + raw_atomic64_dec_return_release(atomic64_t *v) 725 + { 726 + return arch_atomic64_dec_return_release(v); 727 + } 728 + 729 + static __always_inline s64 730 + raw_atomic64_dec_return_relaxed(atomic64_t *v) 731 + { 732 + return arch_atomic64_dec_return_relaxed(v); 733 + } 734 + 735 + static __always_inline s64 736 + raw_atomic64_fetch_dec(atomic64_t *v) 737 + { 738 + return arch_atomic64_fetch_dec(v); 739 + } 740 + 741 + static __always_inline s64 742 + raw_atomic64_fetch_dec_acquire(atomic64_t *v) 743 + { 744 + return arch_atomic64_fetch_dec_acquire(v); 745 + } 746 + 747 + static __always_inline s64 748 + raw_atomic64_fetch_dec_release(atomic64_t *v) 749 + { 750 + return arch_atomic64_fetch_dec_release(v); 751 + } 752 + 753 + static __always_inline s64 754 + raw_atomic64_fetch_dec_relaxed(atomic64_t *v) 755 + { 756 + return arch_atomic64_fetch_dec_relaxed(v); 757 + } 758 + 759 + static __always_inline void 760 + raw_atomic64_and(s64 i, atomic64_t *v) 761 + { 762 + arch_atomic64_and(i, v); 763 + } 764 + 765 + static __always_inline s64 766 + raw_atomic64_fetch_and(s64 i, atomic64_t *v) 767 + { 768 + return arch_atomic64_fetch_and(i, v); 769 + } 770 + 771 + static __always_inline s64 772 + raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v) 773 + { 774 + return arch_atomic64_fetch_and_acquire(i, v); 775 + } 776 + 777 + static __always_inline s64 778 + raw_atomic64_fetch_and_release(s64 i, atomic64_t *v) 779 + { 780 + return arch_atomic64_fetch_and_release(i, v); 781 + } 782 + 783 + static __always_inline s64 784 + raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) 785 + { 786 + return arch_atomic64_fetch_and_relaxed(i, v); 787 + } 788 + 789 + static __always_inline void 790 + raw_atomic64_andnot(s64 i, atomic64_t *v) 791 + { 792 + arch_atomic64_andnot(i, v); 793 + } 794 + 795 + static __always_inline s64 796 + raw_atomic64_fetch_andnot(s64 i, atomic64_t *v) 797 + { 798 + return arch_atomic64_fetch_andnot(i, v); 799 + } 800 + 801 + static __always_inline s64 802 + raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) 803 + { 804 + return arch_atomic64_fetch_andnot_acquire(i, v); 805 + } 806 + 807 + static __always_inline s64 808 + raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) 809 + { 810 + return arch_atomic64_fetch_andnot_release(i, v); 811 + } 812 + 813 + static __always_inline s64 814 + raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) 815 + { 816 + return arch_atomic64_fetch_andnot_relaxed(i, v); 817 + } 818 + 819 + static __always_inline void 820 + raw_atomic64_or(s64 i, atomic64_t *v) 821 + { 822 + arch_atomic64_or(i, v); 823 + } 824 + 825 + static __always_inline s64 826 + raw_atomic64_fetch_or(s64 i, atomic64_t *v) 827 + { 828 + return arch_atomic64_fetch_or(i, v); 829 + } 830 + 831 + static __always_inline s64 832 + raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v) 833 + { 834 + return arch_atomic64_fetch_or_acquire(i, v); 835 + } 836 + 837 + static __always_inline s64 838 + raw_atomic64_fetch_or_release(s64 i, atomic64_t *v) 839 + { 840 + return 
arch_atomic64_fetch_or_release(i, v); 841 + } 842 + 843 + static __always_inline s64 844 + raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) 845 + { 846 + return arch_atomic64_fetch_or_relaxed(i, v); 847 + } 848 + 849 + static __always_inline void 850 + raw_atomic64_xor(s64 i, atomic64_t *v) 851 + { 852 + arch_atomic64_xor(i, v); 853 + } 854 + 855 + static __always_inline s64 856 + raw_atomic64_fetch_xor(s64 i, atomic64_t *v) 857 + { 858 + return arch_atomic64_fetch_xor(i, v); 859 + } 860 + 861 + static __always_inline s64 862 + raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) 863 + { 864 + return arch_atomic64_fetch_xor_acquire(i, v); 865 + } 866 + 867 + static __always_inline s64 868 + raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v) 869 + { 870 + return arch_atomic64_fetch_xor_release(i, v); 871 + } 872 + 873 + static __always_inline s64 874 + raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) 875 + { 876 + return arch_atomic64_fetch_xor_relaxed(i, v); 877 + } 878 + 879 + static __always_inline s64 880 + raw_atomic64_xchg(atomic64_t *v, s64 i) 881 + { 882 + return arch_atomic64_xchg(v, i); 883 + } 884 + 885 + static __always_inline s64 886 + raw_atomic64_xchg_acquire(atomic64_t *v, s64 i) 887 + { 888 + return arch_atomic64_xchg_acquire(v, i); 889 + } 890 + 891 + static __always_inline s64 892 + raw_atomic64_xchg_release(atomic64_t *v, s64 i) 893 + { 894 + return arch_atomic64_xchg_release(v, i); 895 + } 896 + 897 + static __always_inline s64 898 + raw_atomic64_xchg_relaxed(atomic64_t *v, s64 i) 899 + { 900 + return arch_atomic64_xchg_relaxed(v, i); 901 + } 902 + 903 + static __always_inline s64 904 + raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) 905 + { 906 + return arch_atomic64_cmpxchg(v, old, new); 907 + } 908 + 909 + static __always_inline s64 910 + raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) 911 + { 912 + return arch_atomic64_cmpxchg_acquire(v, old, new); 913 + } 914 + 915 + static __always_inline s64 916 + raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) 917 + { 918 + return arch_atomic64_cmpxchg_release(v, old, new); 919 + } 920 + 921 + static __always_inline s64 922 + raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) 923 + { 924 + return arch_atomic64_cmpxchg_relaxed(v, old, new); 925 + } 926 + 927 + static __always_inline bool 928 + raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) 929 + { 930 + return arch_atomic64_try_cmpxchg(v, old, new); 931 + } 932 + 933 + static __always_inline bool 934 + raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) 935 + { 936 + return arch_atomic64_try_cmpxchg_acquire(v, old, new); 937 + } 938 + 939 + static __always_inline bool 940 + raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) 941 + { 942 + return arch_atomic64_try_cmpxchg_release(v, old, new); 943 + } 944 + 945 + static __always_inline bool 946 + raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) 947 + { 948 + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); 949 + } 950 + 951 + static __always_inline bool 952 + raw_atomic64_sub_and_test(s64 i, atomic64_t *v) 953 + { 954 + return arch_atomic64_sub_and_test(i, v); 955 + } 956 + 957 + static __always_inline bool 958 + raw_atomic64_dec_and_test(atomic64_t *v) 959 + { 960 + return arch_atomic64_dec_and_test(v); 961 + } 962 + 963 + static __always_inline bool 964 + raw_atomic64_inc_and_test(atomic64_t *v) 965 + { 966 + return arch_atomic64_inc_and_test(v); 967 + } 968 + 969 + static __always_inline bool 970 + 
raw_atomic64_add_negative(s64 i, atomic64_t *v) 971 + { 972 + return arch_atomic64_add_negative(i, v); 973 + } 974 + 975 + static __always_inline bool 976 + raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v) 977 + { 978 + return arch_atomic64_add_negative_acquire(i, v); 979 + } 980 + 981 + static __always_inline bool 982 + raw_atomic64_add_negative_release(s64 i, atomic64_t *v) 983 + { 984 + return arch_atomic64_add_negative_release(i, v); 985 + } 986 + 987 + static __always_inline bool 988 + raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v) 989 + { 990 + return arch_atomic64_add_negative_relaxed(i, v); 991 + } 992 + 993 + static __always_inline s64 994 + raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) 995 + { 996 + return arch_atomic64_fetch_add_unless(v, a, u); 997 + } 998 + 999 + static __always_inline bool 1000 + raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u) 1001 + { 1002 + return arch_atomic64_add_unless(v, a, u); 1003 + } 1004 + 1005 + static __always_inline bool 1006 + raw_atomic64_inc_not_zero(atomic64_t *v) 1007 + { 1008 + return arch_atomic64_inc_not_zero(v); 1009 + } 1010 + 1011 + static __always_inline bool 1012 + raw_atomic64_inc_unless_negative(atomic64_t *v) 1013 + { 1014 + return arch_atomic64_inc_unless_negative(v); 1015 + } 1016 + 1017 + static __always_inline bool 1018 + raw_atomic64_dec_unless_positive(atomic64_t *v) 1019 + { 1020 + return arch_atomic64_dec_unless_positive(v); 1021 + } 1022 + 1023 + static __always_inline s64 1024 + raw_atomic64_dec_if_positive(atomic64_t *v) 1025 + { 1026 + return arch_atomic64_dec_if_positive(v); 1027 + } 1028 + 1029 + static __always_inline long 1030 + raw_atomic_long_read(const atomic_long_t *v) 1031 + { 1032 + return arch_atomic_long_read(v); 1033 + } 1034 + 1035 + static __always_inline long 1036 + raw_atomic_long_read_acquire(const atomic_long_t *v) 1037 + { 1038 + return arch_atomic_long_read_acquire(v); 1039 + } 1040 + 1041 + static __always_inline void 1042 + raw_atomic_long_set(atomic_long_t *v, long i) 1043 + { 1044 + arch_atomic_long_set(v, i); 1045 + } 1046 + 1047 + static __always_inline void 1048 + raw_atomic_long_set_release(atomic_long_t *v, long i) 1049 + { 1050 + arch_atomic_long_set_release(v, i); 1051 + } 1052 + 1053 + static __always_inline void 1054 + raw_atomic_long_add(long i, atomic_long_t *v) 1055 + { 1056 + arch_atomic_long_add(i, v); 1057 + } 1058 + 1059 + static __always_inline long 1060 + raw_atomic_long_add_return(long i, atomic_long_t *v) 1061 + { 1062 + return arch_atomic_long_add_return(i, v); 1063 + } 1064 + 1065 + static __always_inline long 1066 + raw_atomic_long_add_return_acquire(long i, atomic_long_t *v) 1067 + { 1068 + return arch_atomic_long_add_return_acquire(i, v); 1069 + } 1070 + 1071 + static __always_inline long 1072 + raw_atomic_long_add_return_release(long i, atomic_long_t *v) 1073 + { 1074 + return arch_atomic_long_add_return_release(i, v); 1075 + } 1076 + 1077 + static __always_inline long 1078 + raw_atomic_long_add_return_relaxed(long i, atomic_long_t *v) 1079 + { 1080 + return arch_atomic_long_add_return_relaxed(i, v); 1081 + } 1082 + 1083 + static __always_inline long 1084 + raw_atomic_long_fetch_add(long i, atomic_long_t *v) 1085 + { 1086 + return arch_atomic_long_fetch_add(i, v); 1087 + } 1088 + 1089 + static __always_inline long 1090 + raw_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) 1091 + { 1092 + return arch_atomic_long_fetch_add_acquire(i, v); 1093 + } 1094 + 1095 + static __always_inline long 1096 + 
raw_atomic_long_fetch_add_release(long i, atomic_long_t *v)
1097 + {
1098 + 	return arch_atomic_long_fetch_add_release(i, v);
1099 + }
1100 + 
1101 + static __always_inline long
1102 + raw_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
1103 + {
1104 + 	return arch_atomic_long_fetch_add_relaxed(i, v);
1105 + }
1106 + 
1107 + static __always_inline void
1108 + raw_atomic_long_sub(long i, atomic_long_t *v)
1109 + {
1110 + 	arch_atomic_long_sub(i, v);
1111 + }
1112 + 
1113 + static __always_inline long
1114 + raw_atomic_long_sub_return(long i, atomic_long_t *v)
1115 + {
1116 + 	return arch_atomic_long_sub_return(i, v);
1117 + }
1118 + 
1119 + static __always_inline long
1120 + raw_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
1121 + {
1122 + 	return arch_atomic_long_sub_return_acquire(i, v);
1123 + }
1124 + 
1125 + static __always_inline long
1126 + raw_atomic_long_sub_return_release(long i, atomic_long_t *v)
1127 + {
1128 + 	return arch_atomic_long_sub_return_release(i, v);
1129 + }
1130 + 
1131 + static __always_inline long
1132 + raw_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
1133 + {
1134 + 	return arch_atomic_long_sub_return_relaxed(i, v);
1135 + }
1136 + 
1137 + static __always_inline long
1138 + raw_atomic_long_fetch_sub(long i, atomic_long_t *v)
1139 + {
1140 + 	return arch_atomic_long_fetch_sub(i, v);
1141 + }
1142 + 
1143 + static __always_inline long
1144 + raw_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
1145 + {
1146 + 	return arch_atomic_long_fetch_sub_acquire(i, v);
1147 + }
1148 + 
1149 + static __always_inline long
1150 + raw_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
1151 + {
1152 + 	return arch_atomic_long_fetch_sub_release(i, v);
1153 + }
1154 + 
1155 + static __always_inline long
1156 + raw_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
1157 + {
1158 + 	return arch_atomic_long_fetch_sub_relaxed(i, v);
1159 + }
1160 + 
1161 + static __always_inline void
1162 + raw_atomic_long_inc(atomic_long_t *v)
1163 + {
1164 + 	arch_atomic_long_inc(v);
1165 + }
1166 + 
1167 + static __always_inline long
1168 + raw_atomic_long_inc_return(atomic_long_t *v)
1169 + {
1170 + 	return arch_atomic_long_inc_return(v);
1171 + }
1172 + 
1173 + static __always_inline long
1174 + raw_atomic_long_inc_return_acquire(atomic_long_t *v)
1175 + {
1176 + 	return arch_atomic_long_inc_return_acquire(v);
1177 + }
1178 + 
1179 + static __always_inline long
1180 + raw_atomic_long_inc_return_release(atomic_long_t *v)
1181 + {
1182 + 	return arch_atomic_long_inc_return_release(v);
1183 + }
1184 + 
1185 + static __always_inline long
1186 + raw_atomic_long_inc_return_relaxed(atomic_long_t *v)
1187 + {
1188 + 	return arch_atomic_long_inc_return_relaxed(v);
1189 + }
1190 + 
1191 + static __always_inline long
1192 + raw_atomic_long_fetch_inc(atomic_long_t *v)
1193 + {
1194 + 	return arch_atomic_long_fetch_inc(v);
1195 + }
1196 + 
1197 + static __always_inline long
1198 + raw_atomic_long_fetch_inc_acquire(atomic_long_t *v)
1199 + {
1200 + 	return arch_atomic_long_fetch_inc_acquire(v);
1201 + }
1202 + 
1203 + static __always_inline long
1204 + raw_atomic_long_fetch_inc_release(atomic_long_t *v)
1205 + {
1206 + 	return arch_atomic_long_fetch_inc_release(v);
1207 + }
1208 + 
1209 + static __always_inline long
1210 + raw_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
1211 + {
1212 + 	return arch_atomic_long_fetch_inc_relaxed(v);
1213 + }
1214 + 
1215 + static __always_inline void
1216 + raw_atomic_long_dec(atomic_long_t *v)
1217 + {
1218 + 	arch_atomic_long_dec(v);
1219 + }
1220 + 
1221 + static __always_inline long
1222 + raw_atomic_long_dec_return(atomic_long_t *v)
1223 + {
1224 + 	return arch_atomic_long_dec_return(v);
1225 + }
1226 + 
1227 + static __always_inline long
1228 + raw_atomic_long_dec_return_acquire(atomic_long_t *v)
1229 + {
1230 + 	return arch_atomic_long_dec_return_acquire(v);
1231 + }
1232 + 
1233 + static __always_inline long
1234 + raw_atomic_long_dec_return_release(atomic_long_t *v)
1235 + {
1236 + 	return arch_atomic_long_dec_return_release(v);
1237 + }
1238 + 
1239 + static __always_inline long
1240 + raw_atomic_long_dec_return_relaxed(atomic_long_t *v)
1241 + {
1242 + 	return arch_atomic_long_dec_return_relaxed(v);
1243 + }
1244 + 
1245 + static __always_inline long
1246 + raw_atomic_long_fetch_dec(atomic_long_t *v)
1247 + {
1248 + 	return arch_atomic_long_fetch_dec(v);
1249 + }
1250 + 
1251 + static __always_inline long
1252 + raw_atomic_long_fetch_dec_acquire(atomic_long_t *v)
1253 + {
1254 + 	return arch_atomic_long_fetch_dec_acquire(v);
1255 + }
1256 + 
1257 + static __always_inline long
1258 + raw_atomic_long_fetch_dec_release(atomic_long_t *v)
1259 + {
1260 + 	return arch_atomic_long_fetch_dec_release(v);
1261 + }
1262 + 
1263 + static __always_inline long
1264 + raw_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
1265 + {
1266 + 	return arch_atomic_long_fetch_dec_relaxed(v);
1267 + }
1268 + 
1269 + static __always_inline void
1270 + raw_atomic_long_and(long i, atomic_long_t *v)
1271 + {
1272 + 	arch_atomic_long_and(i, v);
1273 + }
1274 + 
1275 + static __always_inline long
1276 + raw_atomic_long_fetch_and(long i, atomic_long_t *v)
1277 + {
1278 + 	return arch_atomic_long_fetch_and(i, v);
1279 + }
1280 + 
1281 + static __always_inline long
1282 + raw_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
1283 + {
1284 + 	return arch_atomic_long_fetch_and_acquire(i, v);
1285 + }
1286 + 
1287 + static __always_inline long
1288 + raw_atomic_long_fetch_and_release(long i, atomic_long_t *v)
1289 + {
1290 + 	return arch_atomic_long_fetch_and_release(i, v);
1291 + }
1292 + 
1293 + static __always_inline long
1294 + raw_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
1295 + {
1296 + 	return arch_atomic_long_fetch_and_relaxed(i, v);
1297 + }
1298 + 
1299 + static __always_inline void
1300 + raw_atomic_long_andnot(long i, atomic_long_t *v)
1301 + {
1302 + 	arch_atomic_long_andnot(i, v);
1303 + }
1304 + 
1305 + static __always_inline long
1306 + raw_atomic_long_fetch_andnot(long i, atomic_long_t *v)
1307 + {
1308 + 	return arch_atomic_long_fetch_andnot(i, v);
1309 + }
1310 + 
1311 + static __always_inline long
1312 + raw_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
1313 + {
1314 + 	return arch_atomic_long_fetch_andnot_acquire(i, v);
1315 + }
1316 + 
1317 + static __always_inline long
1318 + raw_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
1319 + {
1320 + 	return arch_atomic_long_fetch_andnot_release(i, v);
1321 + }
1322 + 
1323 + static __always_inline long
1324 + raw_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
1325 + {
1326 + 	return arch_atomic_long_fetch_andnot_relaxed(i, v);
1327 + }
1328 + 
1329 + static __always_inline void
1330 + raw_atomic_long_or(long i, atomic_long_t *v)
1331 + {
1332 + 	arch_atomic_long_or(i, v);
1333 + }
1334 + 
1335 + static __always_inline long
1336 + raw_atomic_long_fetch_or(long i, atomic_long_t *v)
1337 + {
1338 + 	return arch_atomic_long_fetch_or(i, v);
1339 + }
1340 + 
1341 + static __always_inline long
1342 + raw_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
1343 + {
1344 + 	return arch_atomic_long_fetch_or_acquire(i, v);
1345 + }
1346 + 
1347 + static __always_inline long
1348 + raw_atomic_long_fetch_or_release(long i, atomic_long_t *v)
1349 + {
1350 + 	return arch_atomic_long_fetch_or_release(i, v);
1351 + }
1352 + 
1353 + static __always_inline long
1354 + raw_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
1355 + {
1356 + 	return arch_atomic_long_fetch_or_relaxed(i, v);
1357 + }
1358 + 
1359 + static __always_inline void
1360 + raw_atomic_long_xor(long i, atomic_long_t *v)
1361 + {
1362 + 	arch_atomic_long_xor(i, v);
1363 + }
1364 + 
1365 + static __always_inline long
1366 + raw_atomic_long_fetch_xor(long i, atomic_long_t *v)
1367 + {
1368 + 	return arch_atomic_long_fetch_xor(i, v);
1369 + }
1370 + 
1371 + static __always_inline long
1372 + raw_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
1373 + {
1374 + 	return arch_atomic_long_fetch_xor_acquire(i, v);
1375 + }
1376 + 
1377 + static __always_inline long
1378 + raw_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
1379 + {
1380 + 	return arch_atomic_long_fetch_xor_release(i, v);
1381 + }
1382 + 
1383 + static __always_inline long
1384 + raw_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
1385 + {
1386 + 	return arch_atomic_long_fetch_xor_relaxed(i, v);
1387 + }
1388 + 
1389 + static __always_inline long
1390 + raw_atomic_long_xchg(atomic_long_t *v, long i)
1391 + {
1392 + 	return arch_atomic_long_xchg(v, i);
1393 + }
1394 + 
1395 + static __always_inline long
1396 + raw_atomic_long_xchg_acquire(atomic_long_t *v, long i)
1397 + {
1398 + 	return arch_atomic_long_xchg_acquire(v, i);
1399 + }
1400 + 
1401 + static __always_inline long
1402 + raw_atomic_long_xchg_release(atomic_long_t *v, long i)
1403 + {
1404 + 	return arch_atomic_long_xchg_release(v, i);
1405 + }
1406 + 
1407 + static __always_inline long
1408 + raw_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
1409 + {
1410 + 	return arch_atomic_long_xchg_relaxed(v, i);
1411 + }
1412 + 
1413 + static __always_inline long
1414 + raw_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
1415 + {
1416 + 	return arch_atomic_long_cmpxchg(v, old, new);
1417 + }
1418 + 
1419 + static __always_inline long
1420 + raw_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
1421 + {
1422 + 	return arch_atomic_long_cmpxchg_acquire(v, old, new);
1423 + }
1424 + 
1425 + static __always_inline long
1426 + raw_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
1427 + {
1428 + 	return arch_atomic_long_cmpxchg_release(v, old, new);
1429 + }
1430 + 
1431 + static __always_inline long
1432 + raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
1433 + {
1434 + 	return arch_atomic_long_cmpxchg_relaxed(v, old, new);
1435 + }
1436 + 
1437 + static __always_inline bool
1438 + raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
1439 + {
1440 + 	return arch_atomic_long_try_cmpxchg(v, old, new);
1441 + }
1442 + 
1443 + static __always_inline bool
1444 + raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
1445 + {
1446 + 	return arch_atomic_long_try_cmpxchg_acquire(v, old, new);
1447 + }
1448 + 
1449 + static __always_inline bool
1450 + raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
1451 + {
1452 + 	return arch_atomic_long_try_cmpxchg_release(v, old, new);
1453 + }
1454 + 
1455 + static __always_inline bool
1456 + raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
1457 + {
1458 + 	return arch_atomic_long_try_cmpxchg_relaxed(v, old, new);
1459 + }
1460 + 
1461 + static __always_inline bool
1462 + raw_atomic_long_sub_and_test(long i, atomic_long_t *v)
1463 + {
1464 + 	return arch_atomic_long_sub_and_test(i, v);
1465 + }
1466 + 
1467 + static __always_inline bool
1468 + raw_atomic_long_dec_and_test(atomic_long_t *v)
1469 + {
1470 + 	return arch_atomic_long_dec_and_test(v);
1471 + }
1472 + 
1473 + static __always_inline bool
1474 + raw_atomic_long_inc_and_test(atomic_long_t *v)
1475 + {
1476 + 	return arch_atomic_long_inc_and_test(v);
1477 + }
1478 + 
1479 + static __always_inline bool
1480 + raw_atomic_long_add_negative(long i, atomic_long_t *v)
1481 + {
1482 + 	return arch_atomic_long_add_negative(i, v);
1483 + }
1484 + 
1485 + static __always_inline bool
1486 + raw_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
1487 + {
1488 + 	return arch_atomic_long_add_negative_acquire(i, v);
1489 + }
1490 + 
1491 + static __always_inline bool
1492 + raw_atomic_long_add_negative_release(long i, atomic_long_t *v)
1493 + {
1494 + 	return arch_atomic_long_add_negative_release(i, v);
1495 + }
1496 + 
1497 + static __always_inline bool
1498 + raw_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
1499 + {
1500 + 	return arch_atomic_long_add_negative_relaxed(i, v);
1501 + }
1502 + 
1503 + static __always_inline long
1504 + raw_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
1505 + {
1506 + 	return arch_atomic_long_fetch_add_unless(v, a, u);
1507 + }
1508 + 
1509 + static __always_inline bool
1510 + raw_atomic_long_add_unless(atomic_long_t *v, long a, long u)
1511 + {
1512 + 	return arch_atomic_long_add_unless(v, a, u);
1513 + }
1514 + 
1515 + static __always_inline bool
1516 + raw_atomic_long_inc_not_zero(atomic_long_t *v)
1517 + {
1518 + 	return arch_atomic_long_inc_not_zero(v);
1519 + }
1520 + 
1521 + static __always_inline bool
1522 + raw_atomic_long_inc_unless_negative(atomic_long_t *v)
1523 + {
1524 + 	return arch_atomic_long_inc_unless_negative(v);
1525 + }
1526 + 
1527 + static __always_inline bool
1528 + raw_atomic_long_dec_unless_positive(atomic_long_t *v)
1529 + {
1530 + 	return arch_atomic_long_dec_unless_positive(v);
1531 + }
1532 + 
1533 + static __always_inline long
1534 + raw_atomic_long_dec_if_positive(atomic_long_t *v)
1535 + {
1536 + 	return arch_atomic_long_dec_if_positive(v);
1537 + }
1538 + 
1539 + #define raw_xchg(...) \
1540 + 	arch_xchg(__VA_ARGS__)
1541 + 
1542 + #define raw_xchg_acquire(...) \
1543 + 	arch_xchg_acquire(__VA_ARGS__)
1544 + 
1545 + #define raw_xchg_release(...) \
1546 + 	arch_xchg_release(__VA_ARGS__)
1547 + 
1548 + #define raw_xchg_relaxed(...) \
1549 + 	arch_xchg_relaxed(__VA_ARGS__)
1550 + 
1551 + #define raw_cmpxchg(...) \
1552 + 	arch_cmpxchg(__VA_ARGS__)
1553 + 
1554 + #define raw_cmpxchg_acquire(...) \
1555 + 	arch_cmpxchg_acquire(__VA_ARGS__)
1556 + 
1557 + #define raw_cmpxchg_release(...) \
1558 + 	arch_cmpxchg_release(__VA_ARGS__)
1559 + 
1560 + #define raw_cmpxchg_relaxed(...) \
1561 + 	arch_cmpxchg_relaxed(__VA_ARGS__)
1562 + 
1563 + #define raw_cmpxchg64(...) \
1564 + 	arch_cmpxchg64(__VA_ARGS__)
1565 + 
1566 + #define raw_cmpxchg64_acquire(...) \
1567 + 	arch_cmpxchg64_acquire(__VA_ARGS__)
1568 + 
1569 + #define raw_cmpxchg64_release(...) \
1570 + 	arch_cmpxchg64_release(__VA_ARGS__)
1571 + 
1572 + #define raw_cmpxchg64_relaxed(...) \
1573 + 	arch_cmpxchg64_relaxed(__VA_ARGS__)
1574 + 
1575 + #define raw_cmpxchg128(...) \
1576 + 	arch_cmpxchg128(__VA_ARGS__)
1577 + 
1578 + #define raw_cmpxchg128_acquire(...) \
1579 + 	arch_cmpxchg128_acquire(__VA_ARGS__)
1580 + 
1581 + #define raw_cmpxchg128_release(...) \
1582 + 	arch_cmpxchg128_release(__VA_ARGS__)
1583 + 
1584 + #define raw_cmpxchg128_relaxed(...) \
1585 + 	arch_cmpxchg128_relaxed(__VA_ARGS__)
1586 + 
1587 + #define raw_try_cmpxchg(...) \
1588 + 	arch_try_cmpxchg(__VA_ARGS__)
1589 + 
1590 + #define raw_try_cmpxchg_acquire(...) \
1591 + 	arch_try_cmpxchg_acquire(__VA_ARGS__)
1592 + 
1593 + #define raw_try_cmpxchg_release(...) \
1594 + 	arch_try_cmpxchg_release(__VA_ARGS__)
1595 + 
1596 + #define raw_try_cmpxchg_relaxed(...) \
1597 + 	arch_try_cmpxchg_relaxed(__VA_ARGS__)
1598 + 
1599 + #define raw_try_cmpxchg64(...) \
1600 + 	arch_try_cmpxchg64(__VA_ARGS__)
1601 + 
1602 + #define raw_try_cmpxchg64_acquire(...) \
1603 + 	arch_try_cmpxchg64_acquire(__VA_ARGS__)
1604 + 
1605 + #define raw_try_cmpxchg64_release(...) \
1606 + 	arch_try_cmpxchg64_release(__VA_ARGS__)
1607 + 
1608 + #define raw_try_cmpxchg64_relaxed(...) \
1609 + 	arch_try_cmpxchg64_relaxed(__VA_ARGS__)
1610 + 
1611 + #define raw_try_cmpxchg128(...) \
1612 + 	arch_try_cmpxchg128(__VA_ARGS__)
1613 + 
1614 + #define raw_try_cmpxchg128_acquire(...) \
1615 + 	arch_try_cmpxchg128_acquire(__VA_ARGS__)
1616 + 
1617 + #define raw_try_cmpxchg128_release(...) \
1618 + 	arch_try_cmpxchg128_release(__VA_ARGS__)
1619 + 
1620 + #define raw_try_cmpxchg128_relaxed(...) \
1621 + 	arch_try_cmpxchg128_relaxed(__VA_ARGS__)
1622 + 
1623 + #define raw_cmpxchg_local(...) \
1624 + 	arch_cmpxchg_local(__VA_ARGS__)
1625 + 
1626 + #define raw_cmpxchg64_local(...) \
1627 + 	arch_cmpxchg64_local(__VA_ARGS__)
1628 + 
1629 + #define raw_cmpxchg128_local(...) \
1630 + 	arch_cmpxchg128_local(__VA_ARGS__)
1631 + 
1632 + #define raw_sync_cmpxchg(...) \
1633 + 	arch_sync_cmpxchg(__VA_ARGS__)
1634 + 
1635 + #define raw_try_cmpxchg_local(...) \
1636 + 	arch_try_cmpxchg_local(__VA_ARGS__)
1637 + 
1638 + #define raw_try_cmpxchg64_local(...) \
1639 + 	arch_try_cmpxchg64_local(__VA_ARGS__)
1640 + 
1641 + #define raw_try_cmpxchg128_local(...) \
1642 + 	arch_try_cmpxchg128_local(__VA_ARGS__)
1643 + 
1644 + #endif /* _LINUX_ATOMIC_RAW_H */
1645 + // 01d54200571b3857755a07c10074a4fd58cef6b1
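Editor's note: the snippet below is not part of the patch; it is a minimal, hypothetical usage sketch showing how the raw_ namespace above is meant to be consumed. Code that must not be instrumented (e.g. KASAN/KCSAN internals) calls the raw_ wrappers, which compile straight down to the arch_ ops; the function name and the refcount-style loop are illustrative only.

#include <linux/atomic.h>

/* Hypothetical, uninstrumented get-ref helper built on the raw_ ops. */
static bool example_get_ref(atomic_long_t *refs)
{
	long old = raw_atomic_long_read(refs);

	/* raw_atomic_long_try_cmpxchg() updates 'old' on failure. */
	do {
		if (!old)
			return false;
	} while (!raw_atomic_long_try_cmpxchg(refs, &old, old + 1));

	return true;
}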
+7 -12
scripts/atomic/gen-atomic-instrumented.sh
··· 73 73 ${atomicname}(${params})
74 74 {
75 75 ${checks}
76 - 	${retstmt}arch_${atomicname}(${args});
76 + 	${retstmt}raw_${atomicname}(${args});
77 77 }
78 78 EOF
79 79 
··· 105 105 cat <<EOF
106 106 	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \\
107 107 	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \\
108 - 	arch_${xchg}${order}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\
108 + 	raw_${xchg}${order}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\
109 109 })
110 110 EOF
111 111 
··· 119 119 [ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n"
120 120 cat <<EOF
121 121 	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \\
122 - 	arch_${xchg}${order}(__ai_ptr, __VA_ARGS__); \\
122 + 	raw_${xchg}${order}(__ai_ptr, __VA_ARGS__); \\
123 123 })
124 124 EOF
125 125 
··· 133 133 // DO NOT MODIFY THIS FILE DIRECTLY
134 134 
135 135 /*
136 - * This file provides wrappers with KASAN instrumentation for atomic operations.
137 - * To use this functionality an arch's atomic.h file needs to define all
138 - * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
139 - * this file at the end. This file provides atomic_read() that forwards to
140 - * arch_atomic_read() for actual atomic operation.
141 - * Note: if an arch atomic operation is implemented by means of other atomic
142 - * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
143 - * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
144 - * double instrumentation.
136 + * This file provides atomic operations with explicit instrumentation (e.g.
137 + * KASAN, KCSAN), which should be used unless it is necessary to avoid
138 + * instrumentation. Where it is necessary to avoid instrumentation, the
139 + * raw_atomic*() operations should be used.
145 140 */
146 141 #ifndef _LINUX_ATOMIC_INSTRUMENTED_H
147 142 #define _LINUX_ATOMIC_INSTRUMENTED_H
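Editor's note: with the template change above, the instrumented wrappers now bottom out in the raw_ layer instead of calling arch_ directly. As a rough sketch of the resulting generated output for cmpxchg() (the opening lines of gen_xchg() sit outside this hunk, so the __ai_ptr/kcsan_mb() lines are an approximation), the macro ends up looking like:

#define cmpxchg(ptr, ...) \
({ \
	typeof(ptr) __ai_ptr = (ptr); \
	kcsan_mb(); \
	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
	raw_cmpxchg(__ai_ptr, __VA_ARGS__); \
})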
+84
scripts/atomic/gen-atomic-raw.sh
··· 1 + #!/bin/sh
2 + # SPDX-License-Identifier: GPL-2.0
3 + 
4 + ATOMICDIR=$(dirname $0)
5 + 
6 + . ${ATOMICDIR}/atomic-tbl.sh
7 + 
8 + #gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
9 + gen_proto_order_variant()
10 + {
11 + 	local meta="$1"; shift
12 + 	local pfx="$1"; shift
13 + 	local name="$1"; shift
14 + 	local sfx="$1"; shift
15 + 	local order="$1"; shift
16 + 	local atomic="$1"; shift
17 + 	local int="$1"; shift
18 + 
19 + 	local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
20 + 
21 + 	local ret="$(gen_ret_type "${meta}" "${int}")"
22 + 	local params="$(gen_params "${int}" "${atomic}" "$@")"
23 + 	local args="$(gen_args "$@")"
24 + 	local retstmt="$(gen_ret_stmt "${meta}")"
25 + 
26 + cat <<EOF
27 + static __always_inline ${ret}
28 + raw_${atomicname}(${params})
29 + {
30 + 	${retstmt}arch_${atomicname}(${args});
31 + }
32 + 
33 + EOF
34 + }
35 + 
36 + gen_xchg()
37 + {
38 + 	local xchg="$1"; shift
39 + 	local order="$1"; shift
40 + 
41 + cat <<EOF
42 + #define raw_${xchg}${order}(...) \\
43 + 	arch_${xchg}${order}(__VA_ARGS__)
44 + EOF
45 + }
46 + 
47 + cat << EOF
48 + // SPDX-License-Identifier: GPL-2.0
49 + 
50 + // Generated by $0
51 + // DO NOT MODIFY THIS FILE DIRECTLY
52 + 
53 + #ifndef _LINUX_ATOMIC_RAW_H
54 + #define _LINUX_ATOMIC_RAW_H
55 + 
56 + EOF
57 + 
58 + grep '^[a-z]' "$1" | while read name meta args; do
59 + 	gen_proto "${meta}" "${name}" "atomic" "int" ${args}
60 + done
61 + 
62 + grep '^[a-z]' "$1" | while read name meta args; do
63 + 	gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
64 + done
65 + 
66 + grep '^[a-z]' "$1" | while read name meta args; do
67 + 	gen_proto "${meta}" "${name}" "atomic_long" "long" ${args}
68 + done
69 + 
70 + for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128" "try_cmpxchg" "try_cmpxchg64" "try_cmpxchg128"; do
71 + 	for order in "" "_acquire" "_release" "_relaxed"; do
72 + 		gen_xchg "${xchg}" "${order}"
73 + 		printf "\n"
74 + 	done
75 + done
76 + 
77 + for xchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local" "sync_cmpxchg" "try_cmpxchg_local" "try_cmpxchg64_local" "try_cmpxchg128_local"; do
78 + 	gen_xchg "${xchg}" ""
79 + 	printf "\n"
80 + done
81 + 
82 + cat <<EOF
83 + #endif /* _LINUX_ATOMIC_RAW_H */
84 + EOF
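Editor's note: the script runs the same template over the atomic, atomic64 and atomic_long namespaces; only the atomic_long output appears in the hunk further up. For the 32-bit pass ("atomic"/"int"), gen_proto_order_variant() fills the heredoc to emit wrappers of this shape (one representative op shown; not copied from the patch):

static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
	return arch_atomic_fetch_add_acquire(i, v);
}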
+1
scripts/atomic/gen-atomics.sh
··· 11 11 gen-atomic-instrumented.sh linux/atomic/atomic-instrumented.h
12 12 gen-atomic-long.sh linux/atomic/atomic-long.h
13 13 gen-atomic-fallback.sh linux/atomic/atomic-arch-fallback.h
14 + gen-atomic-raw.sh linux/atomic/atomic-raw.h
14 15 EOF
15 16 while read script header args; do
16 17 	/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}