Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rxrpc: Use refcount_t rather than atomic_t

Move to using refcount_t rather than atomic_t for refcounts in rxrpc.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by David Howells and committed by David S. Miller
a0575429 33912c26

+119 -122
+1 -1
include/trace/events/rxrpc.h
··· 583 583 TP_fast_assign( 584 584 __entry->conn = conn ? conn->debug_id : 0; 585 585 __entry->channel = channel; 586 - __entry->usage = conn ? atomic_read(&conn->usage) : -2; 586 + __entry->usage = conn ? refcount_read(&conn->ref) : -2; 587 587 __entry->op = op; 588 588 __entry->cid = conn ? conn->proto.cid : 0; 589 589 ),
+1 -1
net/rxrpc/af_rxrpc.c
··· 351 351 */ 352 352 void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call) 353 353 { 354 - _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); 354 + _enter("%d{%d}", call->debug_id, refcount_read(&call->ref)); 355 355 356 356 mutex_lock(&call->user_mutex); 357 357 rxrpc_release_call(rxrpc_sk(sock->sk), call);
+5 -13
net/rxrpc/ar-internal.h
··· 15 15 #include <keys/rxrpc-type.h> 16 16 #include "protocol.h" 17 17 18 - #if 0 19 - #define CHECK_SLAB_OKAY(X) \ 20 - BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \ 21 - (POISON_FREE << 8 | POISON_FREE)) 22 - #else 23 - #define CHECK_SLAB_OKAY(X) do {} while (0) 24 - #endif 25 - 26 18 #define FCRYPT_BSIZE 8 27 19 struct rxrpc_crypt { 28 20 union { ··· 271 279 struct rxrpc_local { 272 280 struct rcu_head rcu; 273 281 atomic_t active_users; /* Number of users of the local endpoint */ 274 - atomic_t usage; /* Number of references to the structure */ 282 + refcount_t ref; /* Number of references to the structure */ 275 283 struct rxrpc_net *rxnet; /* The network ns in which this resides */ 276 284 struct hlist_node link; 277 285 struct socket *socket; /* my UDP socket */ ··· 296 304 */ 297 305 struct rxrpc_peer { 298 306 struct rcu_head rcu; /* This must be first */ 299 - atomic_t usage; 307 + refcount_t ref; 300 308 unsigned long hash_key; 301 309 struct hlist_node hash_link; 302 310 struct rxrpc_local *local; ··· 398 406 */ 399 407 struct rxrpc_bundle { 400 408 struct rxrpc_conn_parameters params; 401 - atomic_t usage; 409 + refcount_t ref; 402 410 unsigned int debug_id; 403 411 bool try_upgrade; /* True if the bundle is attempting upgrade */ 404 412 bool alloc_conn; /* True if someone's getting a conn */ ··· 419 427 struct rxrpc_conn_proto proto; 420 428 struct rxrpc_conn_parameters params; 421 429 422 - atomic_t usage; 430 + refcount_t ref; 423 431 struct rcu_head rcu; 424 432 struct list_head cache_link; 425 433 ··· 601 609 int error; /* Local error incurred */ 602 610 enum rxrpc_call_state state; /* current state of call */ 603 611 enum rxrpc_call_completion completion; /* Call completion condition */ 604 - atomic_t usage; 612 + refcount_t ref; 605 613 u16 service_id; /* service ID */ 606 614 u8 security_ix; /* Security type */ 607 615 enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
+2 -2
net/rxrpc/call_accept.c
··· 91 91 (head + 1) & (size - 1)); 92 92 93 93 trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service, 94 - atomic_read(&conn->usage), here); 94 + refcount_read(&conn->ref), here); 95 95 } 96 96 97 97 /* Now it gets complicated, because calls get registered with the ··· 104 104 call->state = RXRPC_CALL_SERVER_PREALLOC; 105 105 106 106 trace_rxrpc_call(call->debug_id, rxrpc_call_new_service, 107 - atomic_read(&call->usage), 107 + refcount_read(&call->ref), 108 108 here, (const void *)user_call_ID); 109 109 110 110 write_lock(&rx->call_lock);
+23 -21
net/rxrpc/call_object.c
··· 112 112 found_extant_call: 113 113 rxrpc_get_call(call, rxrpc_call_got); 114 114 read_unlock(&rx->call_lock); 115 - _leave(" = %p [%d]", call, atomic_read(&call->usage)); 115 + _leave(" = %p [%d]", call, refcount_read(&call->ref)); 116 116 return call; 117 117 } 118 118 ··· 160 160 spin_lock_init(&call->notify_lock); 161 161 spin_lock_init(&call->input_lock); 162 162 rwlock_init(&call->state_lock); 163 - atomic_set(&call->usage, 1); 163 + refcount_set(&call->ref, 1); 164 164 call->debug_id = debug_id; 165 165 call->tx_total_len = -1; 166 166 call->next_rx_timo = 20 * HZ; ··· 299 299 call->interruptibility = p->interruptibility; 300 300 call->tx_total_len = p->tx_total_len; 301 301 trace_rxrpc_call(call->debug_id, rxrpc_call_new_client, 302 - atomic_read(&call->usage), 302 + refcount_read(&call->ref), 303 303 here, (const void *)p->user_call_ID); 304 304 if (p->kernel) 305 305 __set_bit(RXRPC_CALL_KERNEL, &call->flags); ··· 352 352 goto error_attached_to_socket; 353 353 354 354 trace_rxrpc_call(call->debug_id, rxrpc_call_connected, 355 - atomic_read(&call->usage), here, NULL); 355 + refcount_read(&call->ref), here, NULL); 356 356 357 357 rxrpc_start_call_timer(call); 358 358 ··· 372 372 __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 373 373 RX_CALL_DEAD, -EEXIST); 374 374 trace_rxrpc_call(call->debug_id, rxrpc_call_error, 375 - atomic_read(&call->usage), here, ERR_PTR(-EEXIST)); 375 + refcount_read(&call->ref), here, ERR_PTR(-EEXIST)); 376 376 rxrpc_release_call(rx, call); 377 377 mutex_unlock(&call->user_mutex); 378 378 rxrpc_put_call(call, rxrpc_call_put); ··· 386 386 */ 387 387 error_attached_to_socket: 388 388 trace_rxrpc_call(call->debug_id, rxrpc_call_error, 389 - atomic_read(&call->usage), here, ERR_PTR(ret)); 389 + refcount_read(&call->ref), here, ERR_PTR(ret)); 390 390 set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); 391 391 __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 392 392 RX_CALL_DEAD, ret); ···
442 442 bool rxrpc_queue_call(struct rxrpc_call *call) 443 443 { 444 444 const void *here = __builtin_return_address(0); 445 - int n = atomic_fetch_add_unless(&call->usage, 1, 0); 446 - if (n == 0) 445 + int n; 446 + 447 + if (!__refcount_inc_not_zero(&call->ref, &n)) 447 448 return false; 448 449 if (rxrpc_queue_work(&call->processor)) 449 450 trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1, ··· 460 459 bool __rxrpc_queue_call(struct rxrpc_call *call) 461 460 { 462 461 const void *here = __builtin_return_address(0); 463 - int n = atomic_read(&call->usage); 462 + int n = refcount_read(&call->ref); 464 463 ASSERTCMP(n, >=, 1); 465 464 if (rxrpc_queue_work(&call->processor)) 466 465 trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n, ··· 477 476 { 478 477 const void *here = __builtin_return_address(0); 479 478 if (call) { 480 - int n = atomic_read(&call->usage); 479 + int n = refcount_read(&call->ref); 481 480 482 481 trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n, 483 482 here, NULL); ··· 487 486 bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) 488 487 { 489 488 const void *here = __builtin_return_address(0); 490 - int n = atomic_fetch_add_unless(&call->usage, 1, 0); 489 + int n; 491 490 492 - if (n == 0) 491 + if (!__refcount_inc_not_zero(&call->ref, &n)) 493 492 return false; 494 - trace_rxrpc_call(call->debug_id, op, n, here, NULL); 493 + trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL); 495 494 return true; 496 495 } 497 496 ··· 501 500 void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) 502 501 { 503 502 const void *here = __builtin_return_address(0); 504 - int n = atomic_inc_return(&call->usage); 503 + int n; 505 504 506 - trace_rxrpc_call(call->debug_id, op, n, here, NULL); 505 + __refcount_inc(&call->ref, &n); 506 + trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL); 507 507 } 508 508 509 509 /* ··· 529 527 struct rxrpc_connection *conn = call->conn; 530 528 bool put = false; 531 529
532 - _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); 530 + _enter("{%d,%d}", call->debug_id, refcount_read(&call->ref)); 533 531 534 532 trace_rxrpc_call(call->debug_id, rxrpc_call_release, 535 - atomic_read(&call->usage), 533 + refcount_read(&call->ref), 536 534 here, (const void *)call->flags); 537 535 538 536 ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); ··· 621 619 struct rxrpc_net *rxnet = call->rxnet; 622 620 const void *here = __builtin_return_address(0); 623 621 unsigned int debug_id = call->debug_id; 622 + bool dead; 624 623 int n; 625 624 626 625 ASSERT(call != NULL); 627 626 628 - n = atomic_dec_return(&call->usage); 627 + dead = __refcount_dec_and_test(&call->ref, &n); 629 628 trace_rxrpc_call(debug_id, op, n, here, NULL); 630 - ASSERTCMP(n, >=, 0); 631 - if (n == 0) { 629 + if (dead) { 632 630 _debug("call %d dead", call->debug_id); 633 631 ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); ··· 718 716 list_del_init(&call->link); 719 717 720 718 pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n", 721 - call, atomic_read(&call->usage), 719 + call, refcount_read(&call->ref), 722 720 rxrpc_call_states[call->state], 723 721 call->flags, call->events); 724 722
+16 -14
net/rxrpc/conn_client.c
··· 102 102 if (!idr_is_empty(&rxrpc_client_conn_ids)) { 103 103 idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) { 104 104 pr_err("AF_RXRPC: Leaked client conn %p {%d}\n", 105 - conn, atomic_read(&conn->usage)); 105 + conn, refcount_read(&conn->ref)); 106 106 } 107 107 BUG(); 108 108 } ··· 122 122 if (bundle) { 123 123 bundle->params = *cp; 124 124 rxrpc_get_peer(bundle->params.peer); 125 - atomic_set(&bundle->usage, 1); 125 + refcount_set(&bundle->ref, 1); 126 126 spin_lock_init(&bundle->channel_lock); 127 127 INIT_LIST_HEAD(&bundle->waiting_calls); 128 128 } ··· 131 131 132 132 struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle) 133 133 { 134 - atomic_inc(&bundle->usage); 134 + refcount_inc(&bundle->ref); 135 135 return bundle; 136 136 } 137 137 ··· 144 144 void rxrpc_put_bundle(struct rxrpc_bundle *bundle) 145 145 { 146 146 unsigned int d = bundle->debug_id; 147 - unsigned int u = atomic_dec_return(&bundle->usage); 147 + bool dead; 148 + int r; 148 149 149 - _debug("PUT B=%x %u", d, u); 150 - if (u == 0) 150 + dead = __refcount_dec_and_test(&bundle->ref, &r); 151 + 152 + _debug("PUT B=%x %d", d, r); 153 + if (dead) 151 154 rxrpc_free_bundle(bundle); 152 155 } 153 156 ··· 172 169 return ERR_PTR(-ENOMEM); 173 170 } 174 171 175 - atomic_set(&conn->usage, 1); 172 + refcount_set(&conn->ref, 1); 176 173 conn->bundle = bundle; 177 174 conn->params = bundle->params; 178 175 conn->out_clientflag = RXRPC_CLIENT_INITIATED; ··· 198 195 key_get(conn->params.key); 199 196 200 197 trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client, 201 - atomic_read(&conn->usage), 198 + refcount_read(&conn->ref), 202 199 __builtin_return_address(0)); 203 200 204 201 atomic_inc(&rxnet->nr_client_conns); ··· 969 966 { 970 967 const void *here = __builtin_return_address(0); 971 968 unsigned int debug_id = conn->debug_id; 972 - int n; 969 + bool dead; 970 + int r; 973 971 974 - n = atomic_dec_return(&conn->usage);
975 - trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here); 976 - if (n <= 0) { 977 - ASSERTCMP(n, >=, 0); 972 + dead = __refcount_dec_and_test(&conn->ref, &r); 973 + trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, r - 1, here); 974 + if (dead) 978 975 rxrpc_kill_client_conn(conn); 979 - } 980 976 } 981 977 982 978 /*
+25 -24
net/rxrpc/conn_object.c
··· 104 104 goto not_found; 105 105 *_peer = peer; 106 106 conn = rxrpc_find_service_conn_rcu(peer, skb); 107 - if (!conn || atomic_read(&conn->usage) == 0) 107 + if (!conn || refcount_read(&conn->ref) == 0) 108 108 goto not_found; 109 109 _leave(" = %p", conn); 110 110 return conn; ··· 114 114 */ 115 115 conn = idr_find(&rxrpc_client_conn_ids, 116 116 sp->hdr.cid >> RXRPC_CIDSHIFT); 117 - if (!conn || atomic_read(&conn->usage) == 0) { 117 + if (!conn || refcount_read(&conn->ref) == 0) { 118 118 _debug("no conn"); 119 119 goto not_found; 120 120 } ··· 263 263 bool rxrpc_queue_conn(struct rxrpc_connection *conn) 264 264 { 265 265 const void *here = __builtin_return_address(0); 266 - int n = atomic_fetch_add_unless(&conn->usage, 1, 0); 267 - if (n == 0) 266 + int r; 267 + 268 + if (!__refcount_inc_not_zero(&conn->ref, &r)) 268 269 return false; 269 270 if (rxrpc_queue_work(&conn->processor)) 270 271 trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, r + 1, here); 271 272 else 272 273 rxrpc_put_connection(conn); 273 274 return true; ··· 281 280 { 282 281 const void *here = __builtin_return_address(0); 283 282 if (conn) { 284 - int n = atomic_read(&conn->usage); 283 + int n = refcount_read(&conn->ref); 285 284 286 285 trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here); 287 286 } ··· 293 292 struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn) 294 293 { 295 294 const void *here = __builtin_return_address(0); 296 - int n = atomic_inc_return(&conn->usage); 295 + int r; 297 296 298 - trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here); 297 + __refcount_inc(&conn->ref, &r); 298 + trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r, here); 299 299 return conn; 300 300 } 301 301 ··· 307 305 rxrpc_get_connection_maybe(struct rxrpc_connection *conn) 308 306 { 309 307 const void *here = __builtin_return_address(0); 308 + int r; 310 309 311 310 if (conn) {
312 - int n = atomic_fetch_add_unless(&conn->usage, 1, 0); 313 - if (n > 0) 314 - trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here); 311 + if (__refcount_inc_not_zero(&conn->ref, &r)) 312 + trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r + 1, here); 315 313 else 316 314 conn = NULL; 317 315 } ··· 335 333 { 336 334 const void *here = __builtin_return_address(0); 337 335 unsigned int debug_id = conn->debug_id; 338 - int n; 336 + int r; 339 337 340 - n = atomic_dec_return(&conn->usage); 341 - trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here); 342 - ASSERTCMP(n, >=, 0); 343 - if (n == 1) 338 + __refcount_dec(&conn->ref, &r); 339 + trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, r - 1, here); 340 + if (r - 1 == 1) 344 341 rxrpc_set_service_reap_timer(conn->params.local->rxnet, 345 342 jiffies + rxrpc_connection_expiry); 346 343 } ··· 352 351 struct rxrpc_connection *conn = 353 352 container_of(rcu, struct rxrpc_connection, rcu); 354 353 355 - _enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage)); 354 + _enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref)); 356 355 357 - ASSERTCMP(atomic_read(&conn->usage), ==, 0); 356 + ASSERTCMP(refcount_read(&conn->ref), ==, 0); 358 357 359 358 _net("DESTROY CONN %d", conn->debug_id); 360 359 ··· 393 392 394 393 write_lock(&rxnet->conn_lock); 395 394 list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) { 396 - ASSERTCMP(atomic_read(&conn->usage), >, 0); 397 - if (likely(atomic_read(&conn->usage) > 1)) 395 + ASSERTCMP(refcount_read(&conn->ref), >, 0); 396 + if (likely(refcount_read(&conn->ref) > 1)) 398 397 continue; 399 398 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) 400 399 continue; ··· 406 405 expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ; 407 406 408 407 _debug("reap CONN %d { u=%d,t=%ld }", 409 408 conn->debug_id, refcount_read(&conn->ref), 410 409 (long)expire_at - (long)now); 411 410
412 411 if (time_before(now, expire_at)) { ··· 419 418 /* The usage count sits at 1 whilst the object is unused on the 420 419 * list; we reduce that to 0 to make the object unavailable. 421 420 */ 422 - if (atomic_cmpxchg(&conn->usage, 1, 0) != 1) 421 + if (!refcount_dec_if_one(&conn->ref)) 423 422 continue; 424 423 trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL); ··· 443 442 link); 444 443 list_del_init(&conn->link); 445 444 446 - ASSERTCMP(atomic_read(&conn->usage), ==, 0); 445 + ASSERTCMP(refcount_read(&conn->ref), ==, 0); 447 446 rxrpc_kill_connection(conn); 448 447 } ··· 471 470 write_lock(&rxnet->conn_lock); 472 471 list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) { 473 472 pr_err("AF_RXRPC: Leaked conn %p {%d}\n", 474 473 conn, refcount_read(&conn->ref)); 475 474 leak = true; 476 475 } 477 476 write_unlock(&rxnet->conn_lock);
+4 -4
net/rxrpc/conn_service.c
··· 9 9 #include "ar-internal.h" 10 10 11 11 static struct rxrpc_bundle rxrpc_service_dummy_bundle = { 12 - .usage = ATOMIC_INIT(1), 12 + .ref = REFCOUNT_INIT(1), 13 13 .debug_id = UINT_MAX, 14 14 .channel_lock = __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock), 15 15 }; ··· 99 99 return; 100 100 101 101 found_extant_conn: 102 - if (atomic_read(&cursor->usage) == 0) 102 + if (refcount_read(&cursor->ref) == 0) 103 103 goto replace_old_connection; 104 104 write_sequnlock_bh(&peer->service_conn_lock); 105 105 /* We should not be able to get here. rxrpc_incoming_connection() is ··· 132 132 * the rxrpc_connections list. 133 133 */ 134 134 conn->state = RXRPC_CONN_SERVICE_PREALLOC; 135 - atomic_set(&conn->usage, 2); 135 + refcount_set(&conn->ref, 2); 136 136 conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle); 137 137 138 138 atomic_inc(&rxnet->nr_conns); ··· 142 142 write_unlock(&rxnet->conn_lock); 143 143 144 144 trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service, 145 - atomic_read(&conn->usage), 145 + refcount_read(&conn->ref), 146 146 __builtin_return_address(0)); 147 147 } 148 148
+1 -3
net/rxrpc/input.c
··· 1154 1154 */ 1155 1155 static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) 1156 1156 { 1157 - CHECK_SLAB_OKAY(&local->usage); 1158 - 1159 1157 if (rxrpc_get_local_maybe(local)) { 1160 1158 skb_queue_tail(&local->reject_queue, skb); 1161 1159 rxrpc_queue_local(local); ··· 1411 1413 } 1412 1414 } 1413 1415 1414 - if (!call || atomic_read(&call->usage) == 0) { 1416 + if (!call || refcount_read(&call->ref) == 0) { 1415 1417 if (rxrpc_to_client(sp) || 1416 1418 sp->hdr.type != RXRPC_PACKET_TYPE_DATA) 1417 1419 goto bad_message;
+16 -15
net/rxrpc/local_object.c
··· 79 79 80 80 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); 81 81 if (local) { 82 - atomic_set(&local->usage, 1); 82 + refcount_set(&local->ref, 1); 83 83 atomic_set(&local->active_users, 1); 84 84 local->rxnet = rxnet; 85 85 INIT_HLIST_NODE(&local->link); ··· 265 265 struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local) 266 266 { 267 267 const void *here = __builtin_return_address(0); 268 - int n; 268 + int r; 269 269 270 - n = atomic_inc_return(&local->usage); 271 - trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here); 270 + __refcount_inc(&local->ref, &r); 271 + trace_rxrpc_local(local->debug_id, rxrpc_local_got, r + 1, here); 272 272 return local; 273 273 } 274 274 ··· 278 278 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) 279 279 { 280 280 const void *here = __builtin_return_address(0); 281 + int r; 281 282 282 283 if (local) { 283 - int n = atomic_fetch_add_unless(&local->usage, 1, 0); 284 - if (n > 0) 284 + if (__refcount_inc_not_zero(&local->ref, &r)) 285 285 trace_rxrpc_local(local->debug_id, rxrpc_local_got, 286 - n + 1, here); 286 + r + 1, here); 287 287 else 288 288 local = NULL; 289 289 } ··· 297 297 { 298 298 const void *here = __builtin_return_address(0); 299 299 unsigned int debug_id = local->debug_id; 300 - int n = atomic_read(&local->usage); 300 + int r = refcount_read(&local->ref); 301 301 302 302 if (rxrpc_queue_work(&local->processor)) 303 - trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here); 303 + trace_rxrpc_local(debug_id, rxrpc_local_queued, r + 1, here); 304 304 else 305 305 rxrpc_put_local(local); 306 306 } ··· 312 312 { 313 313 const void *here = __builtin_return_address(0); 314 314 unsigned int debug_id; 315 - int n; 315 + bool dead; 316 + int r; 316 317 317 318 if (local) { 318 319 debug_id = local->debug_id; 319 320 320 - n = atomic_dec_return(&local->usage); 321 - trace_rxrpc_local(debug_id, rxrpc_local_put, n, here); 321 + dead = __refcount_dec_and_test(&local->ref, &r);
322 + trace_rxrpc_local(debug_id, rxrpc_local_put, r, here); 322 323 323 - if (n == 0) 324 + if (dead) 324 325 call_rcu(&local->rcu, rxrpc_local_rcu); 325 326 } 326 327 } ··· 406 405 bool again; 407 406 408 407 trace_rxrpc_local(local->debug_id, rxrpc_local_processing, 409 - atomic_read(&local->usage), NULL); 408 + refcount_read(&local->ref), NULL); 410 409 411 410 do { 412 411 again = false; ··· 462 461 mutex_lock(&rxnet->local_mutex); 463 462 hlist_for_each_entry(local, &rxnet->local_endpoints, link) { 464 463 pr_err("AF_RXRPC: Leaked local %p {%d}\n", 465 464 local, refcount_read(&local->ref)); 466 465 } 467 466 mutex_unlock(&rxnet->local_mutex); 468 467 BUG();
+21 -19
net/rxrpc/peer_object.c
··· 121 121 122 122 hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) { 123 123 if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 && 124 - atomic_read(&peer->usage) > 0) 124 + refcount_read(&peer->ref) > 0) 125 125 return peer; 126 126 } ··· 140 140 peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key); 141 141 if (peer) { 142 142 _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport); 143 - _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage)); 143 + _leave(" = %p {u=%d}", peer, refcount_read(&peer->ref)); 144 144 } 145 145 return peer; 146 146 } ··· 216 216 217 217 peer = kzalloc(sizeof(struct rxrpc_peer), gfp); 218 218 if (peer) { 219 - atomic_set(&peer->usage, 1); 219 + refcount_set(&peer->ref, 1); 220 220 peer->local = rxrpc_get_local(local); 221 221 INIT_HLIST_HEAD(&peer->error_targets); 222 222 peer->service_conns = RB_ROOT; ··· 378 378 379 379 _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport); 380 380 381 - _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage)); 381 + _leave(" = %p {u=%d}", peer, refcount_read(&peer->ref)); 382 382 return peer; 383 383 } ··· 388 388 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer) 389 389 { 390 390 const void *here = __builtin_return_address(0); 391 - int n; 391 + int r; 392 392 393 - n = atomic_inc_return(&peer->usage); 394 - trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here); 393 + __refcount_inc(&peer->ref, &r); 394 + trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here); 395 395 return peer; 396 396 } ··· 401 401 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer) 402 402 { 403 403 const void *here = __builtin_return_address(0); 404 + int r; 404 405 405 406 if (peer) { 406 - int n = atomic_fetch_add_unless(&peer->usage, 1, 0); 407 - if (n > 0) 408 - trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here); 407 + if (__refcount_inc_not_zero(&peer->ref, &r))
408 + trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here); 409 409 else 410 410 peer = NULL; 411 411 } ··· 436 436 { 437 437 const void *here = __builtin_return_address(0); 438 438 unsigned int debug_id; 439 - int n; 439 + bool dead; 440 + int r; 440 441 441 442 if (peer) { 442 443 debug_id = peer->debug_id; 443 - n = atomic_dec_return(&peer->usage); 444 - trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here); 444 + dead = __refcount_dec_and_test(&peer->ref, &r); 445 + trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here); 445 - if (n == 0) 446 + if (dead) 446 447 __rxrpc_put_peer(peer); 447 448 } 448 449 } ··· 456 455 { 457 456 const void *here = __builtin_return_address(0); 458 457 unsigned int debug_id = peer->debug_id; 459 - int n; 458 + bool dead; 459 + int r; 460 460 461 - n = atomic_dec_return(&peer->usage); 462 - trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here); 463 - if (n == 0) { 461 + dead = __refcount_dec_and_test(&peer->ref, &r); 462 + trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here); 463 + if (dead) { 464 464 hash_del_rcu(&peer->hash_link); 465 465 list_del_init(&peer->keepalive_link); 466 466 rxrpc_free_peer(peer); ··· 483 481 hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) { 484 482 pr_err("Leaked peer %u {%u} %pISp\n", 485 483 peer->debug_id, 486 484 refcount_read(&peer->ref), 487 485 &peer->srx.transport); 488 486 } 489 487 }
+4 -4
net/rxrpc/proc.c
··· 107 107 call->cid, 108 108 call->call_id, 109 109 rxrpc_is_service_call(call) ? "Svc" : "Clt", 110 - atomic_read(&call->usage), 110 + refcount_read(&call->ref), 111 111 rxrpc_call_states[call->state], 112 112 call->abort_code, 113 113 call->debug_id, ··· 189 189 conn->service_id, 190 190 conn->proto.cid, 191 191 rxrpc_conn_is_service(conn) ? "Svc" : "Clt", 192 - atomic_read(&conn->usage), 192 + refcount_read(&conn->ref), 193 193 rxrpc_conn_states[conn->state], 194 194 key_serial(conn->params.key), 195 195 atomic_read(&conn->serial), ··· 239 239 " %3u %5u %6llus %8u %8u\n", 240 240 lbuff, 241 241 rbuff, 242 - atomic_read(&peer->usage), 242 + refcount_read(&peer->ref), 243 243 peer->cong_cwnd, 244 244 peer->mtu, 245 245 now - peer->last_tx_at, ··· 357 357 seq_printf(seq, 358 358 "UDP %-47.47s %3u %3u\n", 359 359 lbuff, 360 - atomic_read(&local->usage), 360 + refcount_read(&local->ref), 361 361 atomic_read(&local->active_users)); 362 362 363 363 return 0;
-1
net/rxrpc/skbuff.c
··· 71 71 const void *here = __builtin_return_address(0); 72 72 if (skb) { 73 73 int n; 74 - CHECK_SLAB_OKAY(&skb->users); 75 74 n = atomic_dec_return(select_skb_count(skb)); 76 75 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, 77 76 rxrpc_skb(skb)->rx_flags, here);