Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

atomics/treewide: Rename __atomic_add_unless() => atomic_fetch_add_unless()

While __atomic_add_unless() was originally intended as a building-block
for atomic_add_unless(), it's now used in a number of places around the
kernel. It's the only common atomic operation named __atomic*(), rather
than atomic_*(), and for consistency it would be better named
atomic_fetch_add_unless().

This lack of consistency is slightly confusing, and gets in the way of
scripting atomics. Given that, let's clean things up and promote it to
an official part of the atomics API, in the form of
atomic_fetch_add_unless().

This patch converts definitions and invocations over to the new name,
including the instrumented version, using the following script:

----
git grep -w __atomic_add_unless | while read line; do
sed -i '{s/\<__atomic_add_unless\>/atomic_fetch_add_unless/}' "${line%%:*}";
done
git grep -w __arch_atomic_add_unless | while read line; do
sed -i '{s/\<__arch_atomic_add_unless\>/arch_atomic_fetch_add_unless/}' "${line%%:*}";
done
----

Note that we do not have atomic{64,_long}_fetch_add_unless(), which will
be introduced by later patches.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Palmer Dabbelt <palmer@sifive.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-2-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Mark Rutland and committed by Ingo Molnar.
Commit: bfc18e38 (parent: 356c6fe7)

+50 -50
+2 -2
arch/alpha/include/asm/atomic.h
··· 206 206 #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 207 207 208 208 /** 209 - * __atomic_add_unless - add unless the number is a given value 209 + * atomic_fetch_add_unless - add unless the number is a given value 210 210 * @v: pointer of type atomic_t 211 211 * @a: the amount to add to v... 212 212 * @u: ...unless v is equal to u. ··· 214 214 * Atomically adds @a to @v, so long as it was not @u. 215 215 * Returns the old value of @v. 216 216 */ 217 - static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 217 + static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) 218 218 { 219 219 int c, new, old; 220 220 smp_mb();
+2 -2
arch/arc/include/asm/atomic.h
··· 309 309 #undef ATOMIC_OP 310 310 311 311 /** 312 - * __atomic_add_unless - add unless the number is a given value 312 + * atomic_fetch_add_unless - add unless the number is a given value 313 313 * @v: pointer of type atomic_t 314 314 * @a: the amount to add to v... 315 315 * @u: ...unless v is equal to u. ··· 317 317 * Atomically adds @a to @v, so long as it was not @u. 318 318 * Returns the old value of @v 319 319 */ 320 - #define __atomic_add_unless(v, a, u) \ 320 + #define atomic_fetch_add_unless(v, a, u) \ 321 321 ({ \ 322 322 int c, old; \ 323 323 \
+2 -2
arch/arm/include/asm/atomic.h
··· 130 130 } 131 131 #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed 132 132 133 - static inline int __atomic_add_unless(atomic_t *v, int a, int u) 133 + static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 134 134 { 135 135 int oldval, newval; 136 136 unsigned long tmp; ··· 215 215 return ret; 216 216 } 217 217 218 - static inline int __atomic_add_unless(atomic_t *v, int a, int u) 218 + static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 219 219 { 220 220 int c, old; 221 221
+1 -1
arch/arm64/include/asm/atomic.h
··· 125 125 #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) 126 126 #define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) 127 127 #define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0) 128 - #define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,) 128 + #define atomic_fetch_add_unless(v, a, u) ___atomic_add_unless(v, a, u,) 129 129 #define atomic_andnot atomic_andnot 130 130 131 131 /*
+1 -1
arch/h8300/include/asm/atomic.h
··· 94 94 return ret; 95 95 } 96 96 97 - static inline int __atomic_add_unless(atomic_t *v, int a, int u) 97 + static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 98 98 { 99 99 int ret; 100 100 h8300flags flags;
+2 -2
arch/hexagon/include/asm/atomic.h
··· 164 164 #undef ATOMIC_OP 165 165 166 166 /** 167 - * __atomic_add_unless - add unless the number is a given value 167 + * atomic_fetch_add_unless - add unless the number is a given value 168 168 * @v: pointer to value 169 169 * @a: amount to add 170 170 * @u: unless value is equal to u ··· 173 173 * 174 174 */ 175 175 176 - static inline int __atomic_add_unless(atomic_t *v, int a, int u) 176 + static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 177 177 { 178 178 int __oldval; 179 179 register int tmp;
+1 -1
arch/ia64/include/asm/atomic.h
··· 215 215 (cmpxchg(&((v)->counter), old, new)) 216 216 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 217 217 218 - static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 218 + static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) 219 219 { 220 220 int c, old; 221 221 c = atomic_read(v);
+1 -1
arch/m68k/include/asm/atomic.h
··· 211 211 return c != 0; 212 212 } 213 213 214 - static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 214 + static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) 215 215 { 216 216 int c, old; 217 217 c = atomic_read(v);
+2 -2
arch/mips/include/asm/atomic.h
··· 275 275 #define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) 276 276 277 277 /** 278 - * __atomic_add_unless - add unless the number is a given value 278 + * atomic_fetch_add_unless - add unless the number is a given value 279 279 * @v: pointer of type atomic_t 280 280 * @a: the amount to add to v... 281 281 * @u: ...unless v is equal to u. ··· 283 283 * Atomically adds @a to @v, so long as it was not @u. 284 284 * Returns the old value of @v. 285 285 */ 286 - static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 286 + static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) 287 287 { 288 288 int c, old; 289 289 c = atomic_read(v);
+2 -2
arch/openrisc/include/asm/atomic.h
··· 100 100 * 101 101 * This is often used through atomic_inc_not_zero() 102 102 */ 103 - static inline int __atomic_add_unless(atomic_t *v, int a, int u) 103 + static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 104 104 { 105 105 int old, tmp; 106 106 ··· 119 119 120 120 return old; 121 121 } 122 - #define __atomic_add_unless __atomic_add_unless 122 + #define atomic_fetch_add_unless atomic_fetch_add_unless 123 123 124 124 #include <asm-generic/atomic.h> 125 125
+2 -2
arch/parisc/include/asm/atomic.h
··· 78 78 #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 79 79 80 80 /** 81 - * __atomic_add_unless - add unless the number is a given value 81 + * atomic_fetch_add_unless - add unless the number is a given value 82 82 * @v: pointer of type atomic_t 83 83 * @a: the amount to add to v... 84 84 * @u: ...unless v is equal to u. ··· 86 86 * Atomically adds @a to @v, so long as it was not @u. 87 87 * Returns the old value of @v. 88 88 */ 89 - static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 89 + static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) 90 90 { 91 91 int c, old; 92 92 c = atomic_read(v);
+4 -4
arch/powerpc/include/asm/atomic.h
··· 218 218 #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) 219 219 220 220 /** 221 - * __atomic_add_unless - add unless the number is a given value 221 + * atomic_fetch_add_unless - add unless the number is a given value 222 222 * @v: pointer of type atomic_t 223 223 * @a: the amount to add to v... 224 224 * @u: ...unless v is equal to u. ··· 226 226 * Atomically adds @a to @v, so long as it was not @u. 227 227 * Returns the old value of @v. 228 228 */ 229 - static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 229 + static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) 230 230 { 231 231 int t; 232 232 233 233 __asm__ __volatile__ ( 234 234 PPC_ATOMIC_ENTRY_BARRIER 235 - "1: lwarx %0,0,%1 # __atomic_add_unless\n\ 235 + "1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\ 236 236 cmpw 0,%0,%3 \n\ 237 237 beq 2f \n\ 238 238 add %0,%2,%0 \n" ··· 538 538 539 539 __asm__ __volatile__ ( 540 540 PPC_ATOMIC_ENTRY_BARRIER 541 - "1: ldarx %0,0,%1 # __atomic_add_unless\n\ 541 + "1: ldarx %0,0,%1 # atomic_fetch_add_unless\n\ 542 542 cmpd 0,%0,%3 \n\ 543 543 beq 2f \n\ 544 544 add %0,%2,%0 \n"
+2 -2
arch/riscv/include/asm/atomic.h
··· 332 332 #undef ATOMIC_OP 333 333 334 334 /* This is required to provide a full barrier on success. */ 335 - static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) 335 + static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 336 336 { 337 337 int prev, rc; 338 338 ··· 381 381 */ 382 382 static __always_inline int atomic_inc_not_zero(atomic_t *v) 383 383 { 384 - return __atomic_add_unless(v, 1, 0); 384 + return atomic_fetch_add_unless(v, 1, 0); 385 385 } 386 386 387 387 #ifndef CONFIG_GENERIC_ATOMIC64
+1 -1
arch/s390/include/asm/atomic.h
··· 90 90 return __atomic_cmpxchg(&v->counter, old, new); 91 91 } 92 92 93 - static inline int __atomic_add_unless(atomic_t *v, int a, int u) 93 + static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 94 94 { 95 95 int c, old; 96 96 c = atomic_read(v);
+2 -2
arch/sh/include/asm/atomic.h
··· 46 46 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) 47 47 48 48 /** 49 - * __atomic_add_unless - add unless the number is a given value 49 + * atomic_fetch_add_unless - add unless the number is a given value 50 50 * @v: pointer of type atomic_t 51 51 * @a: the amount to add to v... 52 52 * @u: ...unless v is equal to u. ··· 54 54 * Atomically adds @a to @v, so long as it was not @u. 55 55 * Returns the old value of @v. 56 56 */ 57 - static inline int __atomic_add_unless(atomic_t *v, int a, int u) 57 + static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 58 58 { 59 59 int c, old; 60 60 c = atomic_read(v);
+1 -1
arch/sparc/include/asm/atomic_32.h
··· 27 27 int atomic_fetch_xor(int, atomic_t *); 28 28 int atomic_cmpxchg(atomic_t *, int, int); 29 29 int atomic_xchg(atomic_t *, int); 30 - int __atomic_add_unless(atomic_t *, int, int); 30 + int atomic_fetch_add_unless(atomic_t *, int, int); 31 31 void atomic_set(atomic_t *, int); 32 32 33 33 #define atomic_set_release(v, i) atomic_set((v), (i))
+1 -1
arch/sparc/include/asm/atomic_64.h
··· 89 89 return xchg(&v->counter, new); 90 90 } 91 91 92 - static inline int __atomic_add_unless(atomic_t *v, int a, int u) 92 + static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 93 93 { 94 94 int c, old; 95 95 c = atomic_read(v);
+2 -2
arch/sparc/lib/atomic32.c
··· 95 95 } 96 96 EXPORT_SYMBOL(atomic_cmpxchg); 97 97 98 - int __atomic_add_unless(atomic_t *v, int a, int u) 98 + int atomic_fetch_add_unless(atomic_t *v, int a, int u) 99 99 { 100 100 int ret; 101 101 unsigned long flags; ··· 107 107 spin_unlock_irqrestore(ATOMIC_HASH(v), flags); 108 108 return ret; 109 109 } 110 - EXPORT_SYMBOL(__atomic_add_unless); 110 + EXPORT_SYMBOL(atomic_fetch_add_unless); 111 111 112 112 /* Atomic operations are already serializing */ 113 113 void atomic_set(atomic_t *v, int i)
+2 -2
arch/x86/include/asm/atomic.h
··· 254 254 } 255 255 256 256 /** 257 - * __arch_atomic_add_unless - add unless the number is already a given value 257 + * arch_atomic_fetch_add_unless - add unless the number is already a given value 258 258 * @v: pointer of type atomic_t 259 259 * @a: the amount to add to v... 260 260 * @u: ...unless v is equal to u. ··· 262 262 * Atomically adds @a to @v, so long as @v was not already @u. 263 263 * Returns the old value of @v. 264 264 */ 265 - static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u) 265 + static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) 266 266 { 267 267 int c = arch_atomic_read(v); 268 268
+2 -2
arch/xtensa/include/asm/atomic.h
··· 275 275 #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 276 276 277 277 /** 278 - * __atomic_add_unless - add unless the number is a given value 278 + * atomic_fetch_add_unless - add unless the number is a given value 279 279 * @v: pointer of type atomic_t 280 280 * @a: the amount to add to v... 281 281 * @u: ...unless v is equal to u. ··· 283 283 * Atomically adds @a to @v, so long as it was not @u. 284 284 * Returns the old value of @v. 285 285 */ 286 - static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 286 + static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) 287 287 { 288 288 int c, old; 289 289 c = atomic_read(v);
+1 -1
drivers/block/rbd.c
··· 61 61 { 62 62 unsigned int counter; 63 63 64 - counter = (unsigned int)__atomic_add_unless(v, 1, 0); 64 + counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0); 65 65 if (counter <= (unsigned int)INT_MAX) 66 66 return (int)counter; 67 67
+1 -1
drivers/infiniband/core/rdma_core.c
··· 121 121 * this lock. 122 122 */ 123 123 if (!exclusive) 124 - return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ? 124 + return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ? 125 125 -EBUSY : 0; 126 126 127 127 /* lock is either WRITE or DESTROY - should be exclusive */
+1 -1
fs/afs/rxrpc.c
··· 648 648 trace_afs_notify_call(rxcall, call); 649 649 call->need_attention = true; 650 650 651 - u = __atomic_add_unless(&call->usage, 1, 0); 651 + u = atomic_fetch_add_unless(&call->usage, 1, 0); 652 652 if (u != 0) { 653 653 trace_afs_call(call, afs_call_trace_wake, u, 654 654 atomic_read(&call->net->nr_outstanding_calls),
+2 -2
include/asm-generic/atomic-instrumented.h
··· 84 84 } 85 85 #endif 86 86 87 - static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) 87 + static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 88 88 { 89 89 kasan_check_write(v, sizeof(*v)); 90 - return __arch_atomic_add_unless(v, a, u); 90 + return arch_atomic_fetch_add_unless(v, a, u); 91 91 } 92 92 93 93
+2 -2
include/asm-generic/atomic.h
··· 221 221 #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) 222 222 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) 223 223 224 - #ifndef __atomic_add_unless 225 - static inline int __atomic_add_unless(atomic_t *v, int a, int u) 224 + #ifndef atomic_fetch_add_unless 225 + static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) 226 226 { 227 227 int c, old; 228 228 c = atomic_read(v);
+1 -1
include/linux/atomic.h
··· 530 530 */ 531 531 static inline int atomic_add_unless(atomic_t *v, int a, int u) 532 532 { 533 - return __atomic_add_unless(v, a, u) != u; 533 + return atomic_fetch_add_unless(v, a, u) != u; 534 534 } 535 535 536 536 /**
+2 -2
kernel/bpf/syscall.c
··· 575 575 { 576 576 int refold; 577 577 578 - refold = __atomic_add_unless(&map->refcnt, 1, 0); 578 + refold = atomic_fetch_add_unless(&map->refcnt, 1, 0); 579 579 580 580 if (refold >= BPF_MAX_REFCNT) { 581 581 __bpf_map_put(map, false); ··· 1142 1142 { 1143 1143 int refold; 1144 1144 1145 - refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0); 1145 + refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0); 1146 1146 1147 1147 if (refold >= BPF_MAX_REFCNT) { 1148 1148 __bpf_prog_put(prog, false);
+1 -1
net/rxrpc/call_object.c
··· 415 415 bool rxrpc_queue_call(struct rxrpc_call *call) 416 416 { 417 417 const void *here = __builtin_return_address(0); 418 - int n = __atomic_add_unless(&call->usage, 1, 0); 418 + int n = atomic_fetch_add_unless(&call->usage, 1, 0); 419 419 if (n == 0) 420 420 return false; 421 421 if (rxrpc_queue_work(&call->processor))
+2 -2
net/rxrpc/conn_object.c
··· 266 266 bool rxrpc_queue_conn(struct rxrpc_connection *conn) 267 267 { 268 268 const void *here = __builtin_return_address(0); 269 - int n = __atomic_add_unless(&conn->usage, 1, 0); 269 + int n = atomic_fetch_add_unless(&conn->usage, 1, 0); 270 270 if (n == 0) 271 271 return false; 272 272 if (rxrpc_queue_work(&conn->processor)) ··· 309 309 const void *here = __builtin_return_address(0); 310 310 311 311 if (conn) { 312 - int n = __atomic_add_unless(&conn->usage, 1, 0); 312 + int n = atomic_fetch_add_unless(&conn->usage, 1, 0); 313 313 if (n > 0) 314 314 trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here); 315 315 else
+1 -1
net/rxrpc/local_object.c
··· 305 305 const void *here = __builtin_return_address(0); 306 306 307 307 if (local) { 308 - int n = __atomic_add_unless(&local->usage, 1, 0); 308 + int n = atomic_fetch_add_unless(&local->usage, 1, 0); 309 309 if (n > 0) 310 310 trace_rxrpc_local(local, rxrpc_local_got, n + 1, here); 311 311 else
+1 -1
net/rxrpc/peer_object.c
··· 406 406 const void *here = __builtin_return_address(0); 407 407 408 408 if (peer) { 409 - int n = __atomic_add_unless(&peer->usage, 1, 0); 409 + int n = atomic_fetch_add_unless(&peer->usage, 1, 0); 410 410 if (n > 0) 411 411 trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here); 412 412 else