Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

afs: Use refcount_t rather than atomic_t

Use refcount_t rather than atomic_t in afs to make use of the count
checking facilities provided.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
Link: https://lore.kernel.org/r/165911277768.3745403.423349776836296452.stgit@warthog.procyon.org.uk/ # v1

+110 -107
+29 -32
fs/afs/cell.c
··· 158 158 cell->name[i] = tolower(name[i]); 159 159 cell->name[i] = 0; 160 160 161 - atomic_set(&cell->ref, 1); 161 + refcount_set(&cell->ref, 1); 162 162 atomic_set(&cell->active, 0); 163 163 INIT_WORK(&cell->manager, afs_manage_cell_work); 164 164 cell->volumes = RB_ROOT; ··· 287 287 cell = candidate; 288 288 candidate = NULL; 289 289 atomic_set(&cell->active, 2); 290 - trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 2, afs_cell_trace_insert); 290 + trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 2, afs_cell_trace_insert); 291 291 rb_link_node_rcu(&cell->net_node, parent, pp); 292 292 rb_insert_color(&cell->net_node, &net->cells); 293 293 up_write(&net->cells_lock); ··· 295 295 afs_queue_cell(cell, afs_cell_trace_get_queue_new); 296 296 297 297 wait_for_cell: 298 - trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), atomic_read(&cell->active), 298 + trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), atomic_read(&cell->active), 299 299 afs_cell_trace_wait); 300 300 _debug("wait_for_cell"); 301 301 wait_var_event(&cell->state, ··· 490 490 { 491 491 struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu); 492 492 struct afs_net *net = cell->net; 493 - int u; 493 + int r; 494 494 495 495 _enter("%p{%s}", cell, cell->name); 496 496 497 - u = atomic_read(&cell->ref); 498 - ASSERTCMP(u, ==, 0); 499 - trace_afs_cell(cell->debug_id, u, atomic_read(&cell->active), afs_cell_trace_free); 497 + r = refcount_read(&cell->ref); 498 + ASSERTCMP(r, ==, 0); 499 + trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free); 500 500 501 501 afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers)); 502 502 afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias); ··· 539 539 */ 540 540 struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason) 541 541 { 542 - int u; 542 + int r; 543 543 544 - if (atomic_read(&cell->ref) <= 0) 545 - BUG(); 546 - 547 - u = atomic_inc_return(&cell->ref); 548 
- trace_afs_cell(cell->debug_id, u, atomic_read(&cell->active), reason); 544 + __refcount_inc(&cell->ref, &r); 545 + trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason); 549 546 return cell; 550 547 } 551 548 ··· 553 556 { 554 557 if (cell) { 555 558 unsigned int debug_id = cell->debug_id; 556 - unsigned int u, a; 559 + unsigned int a; 560 + bool zero; 561 + int r; 557 562 558 563 a = atomic_read(&cell->active); 559 - u = atomic_dec_return(&cell->ref); 560 - trace_afs_cell(debug_id, u, a, reason); 561 - if (u == 0) { 564 + zero = __refcount_dec_and_test(&cell->ref, &r); 565 + trace_afs_cell(debug_id, r - 1, a, reason); 566 + if (zero) { 562 567 a = atomic_read(&cell->active); 563 568 WARN(a != 0, "Cell active count %u > 0\n", a); 564 569 call_rcu(&cell->rcu, afs_cell_destroy); ··· 573 574 */ 574 575 struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason) 575 576 { 576 - int u, a; 577 + int r, a; 577 578 578 - if (atomic_read(&cell->ref) <= 0) 579 - BUG(); 580 - 581 - u = atomic_read(&cell->ref); 579 + r = refcount_read(&cell->ref); 580 + WARN_ON(r == 0); 582 581 a = atomic_inc_return(&cell->active); 583 - trace_afs_cell(cell->debug_id, u, a, reason); 582 + trace_afs_cell(cell->debug_id, r, a, reason); 584 583 return cell; 585 584 } 586 585 ··· 590 593 { 591 594 unsigned int debug_id; 592 595 time64_t now, expire_delay; 593 - int u, a; 596 + int r, a; 594 597 595 598 if (!cell) 596 599 return; ··· 604 607 expire_delay = afs_cell_gc_delay; 605 608 606 609 debug_id = cell->debug_id; 607 - u = atomic_read(&cell->ref); 610 + r = refcount_read(&cell->ref); 608 611 a = atomic_dec_return(&cell->active); 609 - trace_afs_cell(debug_id, u, a, reason); 612 + trace_afs_cell(debug_id, r, a, reason); 610 613 WARN_ON(a == 0); 611 614 if (a == 1) 612 615 /* 'cell' may now be garbage collected. 
*/ ··· 618 621 */ 619 622 void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason) 620 623 { 621 - int u, a; 624 + int r, a; 622 625 623 - u = atomic_read(&cell->ref); 626 + r = refcount_read(&cell->ref); 624 627 a = atomic_read(&cell->active); 625 - trace_afs_cell(cell->debug_id, u, a, reason); 628 + trace_afs_cell(cell->debug_id, r, a, reason); 626 629 } 627 630 628 631 /* ··· 736 739 active = 1; 737 740 if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) { 738 741 rb_erase(&cell->net_node, &net->cells); 739 - trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 0, 742 + trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 0, 740 743 afs_cell_trace_unuse_delete); 741 744 smp_store_release(&cell->state, AFS_CELL_REMOVED); 742 745 } ··· 863 866 bool sched_cell = false; 864 867 865 868 active = atomic_read(&cell->active); 866 - trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 869 + trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 867 870 active, afs_cell_trace_manage); 868 871 869 872 ASSERTCMP(active, >=, 1); ··· 871 874 if (purging) { 872 875 if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) { 873 876 active = atomic_dec_return(&cell->active); 874 - trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 877 + trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 875 878 active, afs_cell_trace_unuse_pin); 876 879 } 877 880 }
+1 -1
fs/afs/cmservice.c
··· 213 213 */ 214 214 if (call->server) { 215 215 trace_afs_server(call->server, 216 - atomic_read(&call->server->ref), 216 + refcount_read(&call->server->ref), 217 217 atomic_read(&call->server->active), 218 218 afs_server_trace_callback); 219 219 afs_break_callbacks(call->server, call->count, call->request);
+8 -8
fs/afs/internal.h
··· 122 122 }; 123 123 struct afs_operation *op; 124 124 unsigned int server_index; 125 - atomic_t usage; 125 + refcount_t ref; 126 126 enum afs_call_state state; 127 127 spinlock_t state_lock; 128 128 int error; /* error code */ ··· 365 365 struct hlist_node proc_link; /* /proc cell list link */ 366 366 time64_t dns_expiry; /* Time AFSDB/SRV record expires */ 367 367 time64_t last_inactive; /* Time of last drop of usage count */ 368 - atomic_t ref; /* Struct refcount */ 368 + refcount_t ref; /* Struct refcount */ 369 369 atomic_t active; /* Active usage counter */ 370 370 unsigned long flags; 371 371 #define AFS_CELL_FL_NO_GC 0 /* The cell was added manually, don't auto-gc */ ··· 410 410 #define AFS_VLSERVER_FL_IS_YFS 2 /* Server is YFS not AFS */ 411 411 #define AFS_VLSERVER_FL_RESPONDING 3 /* VL server is responding */ 412 412 rwlock_t lock; /* Lock on addresses */ 413 - atomic_t usage; 413 + refcount_t ref; 414 414 unsigned int rtt; /* Server's current RTT in uS */ 415 415 416 416 /* Probe state */ ··· 446 446 447 447 struct afs_vlserver_list { 448 448 struct rcu_head rcu; 449 - atomic_t usage; 449 + refcount_t ref; 450 450 u8 nr_servers; 451 451 u8 index; /* Server currently in use */ 452 452 u8 preferred; /* Preferred server */ ··· 517 517 #define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */ 518 518 #define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */ 519 519 #define AFS_SERVER_FL_HAS_FS64 19 /* Fileserver supports FS.{Fetch,Store}Data64 */ 520 - atomic_t ref; /* Object refcount */ 520 + refcount_t ref; /* Object refcount */ 521 521 atomic_t active; /* Active user count */ 522 522 u32 addr_version; /* Address list version */ 523 523 unsigned int rtt; /* Server's current RTT in uS */ ··· 571 571 struct rcu_head rcu; 572 572 afs_volid_t vid; /* volume ID */ 573 573 }; 574 - atomic_t usage; 574 + refcount_t ref; 575 575 time64_t update_at; /* Time at which to next update */ 576 576 struct afs_cell 
*cell; /* Cell to which belongs (pins ref) */ 577 577 struct rb_node cell_node; /* Link in cell->volumes */ ··· 1493 1493 */ 1494 1494 static inline struct afs_vlserver *afs_get_vlserver(struct afs_vlserver *vlserver) 1495 1495 { 1496 - atomic_inc(&vlserver->usage); 1496 + refcount_inc(&vlserver->ref); 1497 1497 return vlserver; 1498 1498 } 1499 1499 1500 1500 static inline struct afs_vlserver_list *afs_get_vlserverlist(struct afs_vlserver_list *vllist) 1501 1501 { 1502 1502 if (vllist) 1503 - atomic_inc(&vllist->usage); 1503 + refcount_inc(&vllist->ref); 1504 1504 return vllist; 1505 1505 } 1506 1506
+3 -3
fs/afs/proc.c
··· 47 47 48 48 /* display one cell per line on subsequent lines */ 49 49 seq_printf(m, "%3u %3u %6lld %2u %2u %s\n", 50 - atomic_read(&cell->ref), 50 + refcount_read(&cell->ref), 51 51 atomic_read(&cell->active), 52 52 cell->dns_expiry - ktime_get_real_seconds(), 53 53 vllist ? vllist->nr_servers : 0, ··· 217 217 } 218 218 219 219 seq_printf(m, "%3d %08llx %s %s\n", 220 - atomic_read(&vol->usage), vol->vid, 220 + refcount_read(&vol->ref), vol->vid, 221 221 afs_vol_types[vol->type], 222 222 vol->name); 223 223 ··· 388 388 alist = rcu_dereference(server->addresses); 389 389 seq_printf(m, "%pU %3d %3d\n", 390 390 &server->uuid, 391 - atomic_read(&server->ref), 391 + refcount_read(&server->ref), 392 392 atomic_read(&server->active)); 393 393 seq_printf(m, " - info: fl=%lx rtt=%u brk=%x\n", 394 394 server->flags, server->rtt, server->cb_s_break);
+14 -12
fs/afs/rxrpc.c
··· 145 145 call->type = type; 146 146 call->net = net; 147 147 call->debug_id = atomic_inc_return(&rxrpc_debug_id); 148 - atomic_set(&call->usage, 1); 148 + refcount_set(&call->ref, 1); 149 149 INIT_WORK(&call->async_work, afs_process_async_call); 150 150 init_waitqueue_head(&call->waitq); 151 151 spin_lock_init(&call->state_lock); ··· 163 163 void afs_put_call(struct afs_call *call) 164 164 { 165 165 struct afs_net *net = call->net; 166 - int n = atomic_dec_return(&call->usage); 167 - int o = atomic_read(&net->nr_outstanding_calls); 166 + bool zero; 167 + int r, o; 168 168 169 - trace_afs_call(call, afs_call_trace_put, n, o, 169 + zero = __refcount_dec_and_test(&call->ref, &r); 170 + o = atomic_read(&net->nr_outstanding_calls); 171 + trace_afs_call(call, afs_call_trace_put, r - 1, o, 170 172 __builtin_return_address(0)); 171 173 172 - ASSERTCMP(n, >=, 0); 173 - if (n == 0) { 174 + if (zero) { 174 175 ASSERT(!work_pending(&call->async_work)); 175 176 ASSERT(call->type->name != NULL); 176 177 ··· 199 198 static struct afs_call *afs_get_call(struct afs_call *call, 200 199 enum afs_call_trace why) 201 200 { 202 - int u = atomic_inc_return(&call->usage); 201 + int r; 203 202 204 - trace_afs_call(call, why, u, 203 + __refcount_inc(&call->ref, &r); 204 + 205 + trace_afs_call(call, why, r + 1, 205 206 atomic_read(&call->net->nr_outstanding_calls), 206 207 __builtin_return_address(0)); 207 208 return call; ··· 671 668 unsigned long call_user_ID) 672 669 { 673 670 struct afs_call *call = (struct afs_call *)call_user_ID; 674 - int u; 671 + int r; 675 672 676 673 trace_afs_notify_call(rxcall, call); 677 674 call->need_attention = true; 678 675 679 - u = atomic_fetch_add_unless(&call->usage, 1, 0); 680 - if (u != 0) { 681 - trace_afs_call(call, afs_call_trace_wake, u + 1, 676 + if (__refcount_inc_not_zero(&call->ref, &r)) { 677 + trace_afs_call(call, afs_call_trace_wake, r + 1, 682 678 atomic_read(&call->net->nr_outstanding_calls), 683 679 __builtin_return_address(0)); 684 680
+23 -17
fs/afs/server.c
··· 228 228 if (!server) 229 229 goto enomem; 230 230 231 - atomic_set(&server->ref, 1); 231 + refcount_set(&server->ref, 1); 232 232 atomic_set(&server->active, 1); 233 233 server->debug_id = atomic_inc_return(&afs_server_debug_id); 234 234 RCU_INIT_POINTER(server->addresses, alist); ··· 352 352 struct afs_server *afs_get_server(struct afs_server *server, 353 353 enum afs_server_trace reason) 354 354 { 355 - unsigned int u = atomic_inc_return(&server->ref); 355 + int r; 356 356 357 - trace_afs_server(server, u, atomic_read(&server->active), reason); 357 + __refcount_inc(&server->ref, &r); 358 + trace_afs_server(server, r + 1, atomic_read(&server->active), reason); 358 359 return server; 359 360 } 360 361 ··· 365 364 static struct afs_server *afs_maybe_use_server(struct afs_server *server, 366 365 enum afs_server_trace reason) 367 366 { 368 - unsigned int r = atomic_fetch_add_unless(&server->ref, 1, 0); 369 367 unsigned int a; 368 + int r; 370 369 371 - if (r == 0) 370 + if (!__refcount_inc_not_zero(&server->ref, &r)) 372 371 return NULL; 373 372 374 373 a = atomic_inc_return(&server->active); 375 - trace_afs_server(server, r, a, reason); 374 + trace_afs_server(server, r + 1, a, reason); 376 375 return server; 377 376 } 378 377 ··· 381 380 */ 382 381 struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_trace reason) 383 382 { 384 - unsigned int r = atomic_inc_return(&server->ref); 385 - unsigned int a = atomic_inc_return(&server->active); 383 + unsigned int a; 384 + int r; 386 385 387 - trace_afs_server(server, r, a, reason); 386 + __refcount_inc(&server->ref, &r); 387 + a = atomic_inc_return(&server->active); 388 + 389 + trace_afs_server(server, r + 1, a, reason); 388 390 return server; 389 391 } 390 392 ··· 397 393 void afs_put_server(struct afs_net *net, struct afs_server *server, 398 394 enum afs_server_trace reason) 399 395 { 400 - unsigned int usage; 396 + bool zero; 397 + int r; 401 398 402 399 if (!server) 403 400 return; 404 401 405 
- usage = atomic_dec_return(&server->ref); 406 - trace_afs_server(server, usage, atomic_read(&server->active), reason); 407 - if (unlikely(usage == 0)) 402 + zero = __refcount_dec_and_test(&server->ref, &r); 403 + trace_afs_server(server, r - 1, atomic_read(&server->active), reason); 404 + if (unlikely(zero)) 408 405 __afs_put_server(net, server); 409 406 } 410 407 ··· 441 436 { 442 437 struct afs_server *server = container_of(rcu, struct afs_server, rcu); 443 438 444 - trace_afs_server(server, atomic_read(&server->ref), 439 + trace_afs_server(server, refcount_read(&server->ref), 445 440 atomic_read(&server->active), afs_server_trace_free); 446 441 afs_put_addrlist(rcu_access_pointer(server->addresses)); 447 442 kfree(server); ··· 492 487 493 488 active = atomic_read(&server->active); 494 489 if (active == 0) { 495 - trace_afs_server(server, atomic_read(&server->ref), 490 + trace_afs_server(server, refcount_read(&server->ref), 496 491 active, afs_server_trace_gc); 497 492 next = rcu_dereference_protected( 498 493 server->uuid_next, lockdep_is_held(&net->fs_lock.lock)); ··· 558 553 _debug("manage %pU %u", &server->uuid, active); 559 554 560 555 if (purging) { 561 - trace_afs_server(server, atomic_read(&server->ref), 556 + trace_afs_server(server, refcount_read(&server->ref), 562 557 active, afs_server_trace_purging); 563 558 if (active != 0) 564 559 pr_notice("Can't purge s=%08x\n", server->debug_id); ··· 638 633 639 634 _enter(""); 640 635 641 - trace_afs_server(server, atomic_read(&server->ref), atomic_read(&server->active), 636 + trace_afs_server(server, refcount_read(&server->ref), 637 + atomic_read(&server->active), 642 638 afs_server_trace_update); 643 639 644 640 alist = afs_vl_lookup_addrs(op->volume->cell, op->key, &server->uuid);
+6 -13
fs/afs/vl_list.c
··· 17 17 vlserver = kzalloc(struct_size(vlserver, name, name_len + 1), 18 18 GFP_KERNEL); 19 19 if (vlserver) { 20 - atomic_set(&vlserver->usage, 1); 20 + refcount_set(&vlserver->ref, 1); 21 21 rwlock_init(&vlserver->lock); 22 22 init_waitqueue_head(&vlserver->probe_wq); 23 23 spin_lock_init(&vlserver->probe_lock); ··· 39 39 40 40 void afs_put_vlserver(struct afs_net *net, struct afs_vlserver *vlserver) 41 41 { 42 - if (vlserver) { 43 - unsigned int u = atomic_dec_return(&vlserver->usage); 44 - //_debug("VL PUT %p{%u}", vlserver, u); 45 - 46 - if (u == 0) 47 - call_rcu(&vlserver->rcu, afs_vlserver_rcu); 48 - } 42 + if (vlserver && 43 + refcount_dec_and_test(&vlserver->ref)) 44 + call_rcu(&vlserver->rcu, afs_vlserver_rcu); 49 45 } 50 46 51 47 struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int nr_servers) ··· 50 54 51 55 vllist = kzalloc(struct_size(vllist, servers, nr_servers), GFP_KERNEL); 52 56 if (vllist) { 53 - atomic_set(&vllist->usage, 1); 57 + refcount_set(&vllist->ref, 1); 54 58 rwlock_init(&vllist->lock); 55 59 } 56 60 ··· 60 64 void afs_put_vlserverlist(struct afs_net *net, struct afs_vlserver_list *vllist) 61 65 { 62 66 if (vllist) { 63 - unsigned int u = atomic_dec_return(&vllist->usage); 64 - 65 - //_debug("VLLS PUT %p{%u}", vllist, u); 66 - if (u == 0) { 67 + if (refcount_dec_and_test(&vllist->ref)) { 67 68 int i; 68 69 69 70 for (i = 0; i < vllist->nr_servers; i++) {
+13 -8
fs/afs/volume.c
··· 52 52 struct afs_cell *cell = volume->cell; 53 53 54 54 if (!hlist_unhashed(&volume->proc_link)) { 55 - trace_afs_volume(volume->vid, atomic_read(&volume->usage), 55 + trace_afs_volume(volume->vid, refcount_read(&volume->ref), 56 56 afs_volume_trace_remove); 57 57 write_seqlock(&cell->volume_lock); 58 58 hlist_del_rcu(&volume->proc_link); ··· 87 87 volume->type_force = params->force; 88 88 volume->name_len = vldb->name_len; 89 89 90 - atomic_set(&volume->usage, 1); 90 + refcount_set(&volume->ref, 1); 91 91 INIT_HLIST_NODE(&volume->proc_link); 92 92 rwlock_init(&volume->servers_lock); 93 93 rwlock_init(&volume->cb_v_break_lock); ··· 228 228 afs_remove_volume_from_cell(volume); 229 229 afs_put_serverlist(net, rcu_access_pointer(volume->servers)); 230 230 afs_put_cell(volume->cell, afs_cell_trace_put_vol); 231 - trace_afs_volume(volume->vid, atomic_read(&volume->usage), 231 + trace_afs_volume(volume->vid, refcount_read(&volume->ref), 232 232 afs_volume_trace_free); 233 233 kfree_rcu(volume, rcu); 234 234 ··· 242 242 enum afs_volume_trace reason) 243 243 { 244 244 if (volume) { 245 - int u = atomic_inc_return(&volume->usage); 246 - trace_afs_volume(volume->vid, u, reason); 245 + int r; 246 + 247 + __refcount_inc(&volume->ref, &r); 248 + trace_afs_volume(volume->vid, r + 1, reason); 247 249 } 248 250 return volume; 249 251 } ··· 259 257 { 260 258 if (volume) { 261 259 afs_volid_t vid = volume->vid; 262 - int u = atomic_dec_return(&volume->usage); 263 - trace_afs_volume(vid, u, reason); 264 - if (u == 0) 260 + bool zero; 261 + int r; 262 + 263 + zero = __refcount_dec_and_test(&volume->ref, &r); 264 + trace_afs_volume(vid, r - 1, reason); 265 + if (zero) 265 266 afs_destroy_volume(net, volume); 266 267 } 267 268 }
+13 -13
include/trace/events/afs.h
··· 728 728 729 729 TRACE_EVENT(afs_call, 730 730 TP_PROTO(struct afs_call *call, enum afs_call_trace op, 731 - int usage, int outstanding, const void *where), 731 + int ref, int outstanding, const void *where), 732 732 733 - TP_ARGS(call, op, usage, outstanding, where), 733 + TP_ARGS(call, op, ref, outstanding, where), 734 734 735 735 TP_STRUCT__entry( 736 736 __field(unsigned int, call ) 737 737 __field(int, op ) 738 - __field(int, usage ) 738 + __field(int, ref ) 739 739 __field(int, outstanding ) 740 740 __field(const void *, where ) 741 741 ), ··· 743 743 TP_fast_assign( 744 744 __entry->call = call->debug_id; 745 745 __entry->op = op; 746 - __entry->usage = usage; 746 + __entry->ref = ref; 747 747 __entry->outstanding = outstanding; 748 748 __entry->where = where; 749 749 ), 750 750 751 - TP_printk("c=%08x %s u=%d o=%d sp=%pSR", 751 + TP_printk("c=%08x %s r=%d o=%d sp=%pSR", 752 752 __entry->call, 753 753 __print_symbolic(__entry->op, afs_call_traces), 754 - __entry->usage, 754 + __entry->ref, 755 755 __entry->outstanding, 756 756 __entry->where) 757 757 ); ··· 1476 1476 __entry->reason = reason; 1477 1477 ), 1478 1478 1479 - TP_printk("V=%llx %s u=%d", 1479 + TP_printk("V=%llx %s ur=%d", 1480 1480 __entry->vid, 1481 1481 __print_symbolic(__entry->reason, afs_volume_traces), 1482 1482 __entry->ref) 1483 1483 ); 1484 1484 1485 1485 TRACE_EVENT(afs_cell, 1486 - TP_PROTO(unsigned int cell_debug_id, int usage, int active, 1486 + TP_PROTO(unsigned int cell_debug_id, int ref, int active, 1487 1487 enum afs_cell_trace reason), 1488 1488 1489 - TP_ARGS(cell_debug_id, usage, active, reason), 1489 + TP_ARGS(cell_debug_id, ref, active, reason), 1490 1490 1491 1491 TP_STRUCT__entry( 1492 1492 __field(unsigned int, cell ) 1493 - __field(int, usage ) 1493 + __field(int, ref ) 1494 1494 __field(int, active ) 1495 1495 __field(int, reason ) 1496 1496 ), 1497 1497 1498 1498 TP_fast_assign( 1499 1499 __entry->cell = cell_debug_id; 1500 - __entry->usage = usage; 1500 + 
__entry->ref = ref; 1501 1501 __entry->active = active; 1502 1502 __entry->reason = reason; 1503 1503 ), 1504 1504 1505 - TP_printk("L=%08x %s u=%d a=%d", 1505 + TP_printk("L=%08x %s r=%d a=%d", 1506 1506 __entry->cell, 1507 1507 __print_symbolic(__entry->reason, afs_cell_traces), 1508 - __entry->usage, 1508 + __entry->ref, 1509 1509 __entry->active) 1510 1510 ); 1511 1511