Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/atomic, kref: Add kref_read()

Since we need to change the implementation, stop exposing internals.

Provide kref_read() to read the current reference count; typically
used for debug messages.

Kills two anti-patterns:

atomic_read(&kref->refcount)
kref->refcount.counter

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Peter Zijlstra and committed by Ingo Molnar
2c935bc5 1e24edca

+121 -117
+1 -1
drivers/block/drbd/drbd_req.c
··· 520 520 /* Completion does it's own kref_put. If we are going to 521 521 * kref_sub below, we need req to be still around then. */ 522 522 int at_least = k_put + !!c_put; 523 - int refcount = atomic_read(&req->kref.refcount); 523 + int refcount = kref_read(&req->kref); 524 524 if (refcount < at_least) 525 525 drbd_err(device, 526 526 "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
+4 -4
drivers/block/rbd.c
··· 1535 1535 static void rbd_obj_request_get(struct rbd_obj_request *obj_request) 1536 1536 { 1537 1537 dout("%s: obj %p (was %d)\n", __func__, obj_request, 1538 - atomic_read(&obj_request->kref.refcount)); 1538 + kref_read(&obj_request->kref)); 1539 1539 kref_get(&obj_request->kref); 1540 1540 } 1541 1541 ··· 1544 1544 { 1545 1545 rbd_assert(obj_request != NULL); 1546 1546 dout("%s: obj %p (was %d)\n", __func__, obj_request, 1547 - atomic_read(&obj_request->kref.refcount)); 1547 + kref_read(&obj_request->kref)); 1548 1548 kref_put(&obj_request->kref, rbd_obj_request_destroy); 1549 1549 } 1550 1550 1551 1551 static void rbd_img_request_get(struct rbd_img_request *img_request) 1552 1552 { 1553 1553 dout("%s: img %p (was %d)\n", __func__, img_request, 1554 - atomic_read(&img_request->kref.refcount)); 1554 + kref_read(&img_request->kref)); 1555 1555 kref_get(&img_request->kref); 1556 1556 } 1557 1557 ··· 1562 1562 { 1563 1563 rbd_assert(img_request != NULL); 1564 1564 dout("%s: img %p (was %d)\n", __func__, img_request, 1565 - atomic_read(&img_request->kref.refcount)); 1565 + kref_read(&img_request->kref)); 1566 1566 if (img_request_child_test(img_request)) 1567 1567 kref_put(&img_request->kref, rbd_parent_request_destroy); 1568 1568 else
+1 -1
drivers/block/virtio_blk.c
··· 767 767 /* Stop all the virtqueues. */ 768 768 vdev->config->reset(vdev); 769 769 770 - refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount); 770 + refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref); 771 771 put_disk(vblk->disk); 772 772 vdev->config->del_vqs(vdev); 773 773 kfree(vblk->vqs);
+1 -1
drivers/gpu/drm/drm_gem_cma_helper.c
··· 376 376 off = drm_vma_node_start(&obj->vma_node); 377 377 378 378 seq_printf(m, "%2d (%2d) %08llx %pad %p %zu", 379 - obj->name, obj->refcount.refcount.counter, 379 + obj->name, kref_read(&obj->refcount), 380 380 off, &cma_obj->paddr, cma_obj->vaddr, obj->size); 381 381 382 382 seq_printf(m, "\n");
+1 -1
drivers/gpu/drm/drm_info.c
··· 118 118 seq_printf(m, "%6d %8zd %7d %8d\n", 119 119 obj->name, obj->size, 120 120 obj->handle_count, 121 - atomic_read(&obj->refcount.refcount)); 121 + kref_read(&obj->refcount)); 122 122 return 0; 123 123 } 124 124
+2 -2
drivers/gpu/drm/drm_mode_object.c
··· 159 159 void drm_mode_object_unreference(struct drm_mode_object *obj) 160 160 { 161 161 if (obj->free_cb) { 162 - DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount)); 162 + DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, kref_read(&obj->refcount)); 163 163 kref_put(&obj->refcount, obj->free_cb); 164 164 } 165 165 } ··· 176 176 void drm_mode_object_reference(struct drm_mode_object *obj) 177 177 { 178 178 if (obj->free_cb) { 179 - DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount)); 179 + DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, kref_read(&obj->refcount)); 180 180 kref_get(&obj->refcount); 181 181 } 182 182 }
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gem.c
··· 486 486 487 487 seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", 488 488 etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I', 489 - obj->name, obj->refcount.refcount.counter, 489 + obj->name, kref_read(&obj->refcount), 490 490 off, etnaviv_obj->vaddr, obj->size); 491 491 492 492 rcu_read_lock();
+1 -1
drivers/gpu/drm/i915/i915_gem_object.h
··· 256 256 static inline bool 257 257 i915_gem_object_is_dead(const struct drm_i915_gem_object *obj) 258 258 { 259 - return atomic_read(&obj->base.refcount.refcount) == 0; 259 + return kref_read(&obj->base.refcount) == 0; 260 260 } 261 261 262 262 static inline bool
+1 -1
drivers/gpu/drm/msm/msm_gem.c
··· 640 640 641 641 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t", 642 642 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 643 - obj->name, obj->refcount.refcount.counter, 643 + obj->name, kref_read(&obj->refcount), 644 644 off, msm_obj->vaddr); 645 645 646 646 for (id = 0; id < priv->num_aspaces; id++)
+1 -1
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 527 527 * caller should have a reference on the fence, 528 528 * else fence could get freed here 529 529 */ 530 - WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1); 530 + WARN_ON(kref_read(&fence->base.refcount) <= 1); 531 531 532 532 /* 533 533 * This needs uevents to work correctly, but dma_fence_add_callback relies on
+1 -1
drivers/gpu/drm/omapdrm/omap_gem.c
··· 1033 1033 off = drm_vma_node_start(&obj->vma_node); 1034 1034 1035 1035 seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", 1036 - omap_obj->flags, obj->name, obj->refcount.refcount.counter, 1036 + omap_obj->flags, obj->name, kref_read(&obj->refcount), 1037 1037 off, &omap_obj->paddr, omap_obj->paddr_cnt, 1038 1038 omap_obj->vaddr, omap_obj->roll); 1039 1039
+2 -2
drivers/gpu/drm/ttm/ttm_bo.c
··· 140 140 struct ttm_bo_device *bdev = bo->bdev; 141 141 size_t acc_size = bo->acc_size; 142 142 143 - BUG_ON(atomic_read(&bo->list_kref.refcount)); 144 - BUG_ON(atomic_read(&bo->kref.refcount)); 143 + BUG_ON(kref_read(&bo->list_kref)); 144 + BUG_ON(kref_read(&bo->kref)); 145 145 BUG_ON(atomic_read(&bo->cpu_writers)); 146 146 BUG_ON(bo->mem.mm_node != NULL); 147 147 BUG_ON(!list_empty(&bo->lru));
+1 -1
drivers/gpu/drm/ttm/ttm_object.c
··· 304 304 * Verify that the ref->obj pointer was actually valid! 305 305 */ 306 306 rmb(); 307 - if (unlikely(atomic_read(&ref->kref.refcount) == 0)) 307 + if (unlikely(kref_read(&ref->kref) == 0)) 308 308 goto out_false; 309 309 310 310 rcu_read_unlock();
+3 -3
drivers/infiniband/hw/cxgb3/iwch_cm.h
··· 55 55 56 56 #define put_ep(ep) { \ 57 57 PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \ 58 - ep, atomic_read(&((ep)->kref.refcount))); \ 59 - WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \ 58 + ep, kref_read(&((ep)->kref))); \ 59 + WARN_ON(kref_read(&((ep)->kref)) < 1); \ 60 60 kref_put(&((ep)->kref), __free_ep); \ 61 61 } 62 62 63 63 #define get_ep(ep) { \ 64 64 PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \ 65 - ep, atomic_read(&((ep)->kref.refcount))); \ 65 + ep, kref_read(&((ep)->kref))); \ 66 66 kref_get(&((ep)->kref)); \ 67 67 } 68 68
+1 -1
drivers/infiniband/hw/cxgb3/iwch_qp.c
··· 961 961 case IWCH_QP_STATE_RTS: 962 962 switch (attrs->next_state) { 963 963 case IWCH_QP_STATE_CLOSING: 964 - BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); 964 + BUG_ON(kref_read(&qhp->ep->com.kref) < 2); 965 965 qhp->attr.state = IWCH_QP_STATE_CLOSING; 966 966 if (!internal) { 967 967 abort=0;
+3 -3
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
··· 654 654 655 655 #define c4iw_put_ep(ep) { \ 656 656 PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \ 657 - ep, atomic_read(&((ep)->kref.refcount))); \ 658 - WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \ 657 + ep, kref_read(&((ep)->kref))); \ 658 + WARN_ON(kref_read(&((ep)->kref)) < 1); \ 659 659 kref_put(&((ep)->kref), _c4iw_free_ep); \ 660 660 } 661 661 662 662 #define c4iw_get_ep(ep) { \ 663 663 PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \ 664 - ep, atomic_read(&((ep)->kref.refcount))); \ 664 + ep, kref_read(&((ep)->kref))); \ 665 665 kref_get(&((ep)->kref)); \ 666 666 } 667 667 void _c4iw_free_ep(struct kref *kref);
+1 -1
drivers/infiniband/hw/cxgb4/qp.c
··· 1503 1503 case C4IW_QP_STATE_RTS: 1504 1504 switch (attrs->next_state) { 1505 1505 case C4IW_QP_STATE_CLOSING: 1506 - BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); 1506 + BUG_ON(kref_read(&qhp->ep->com.kref) < 2); 1507 1507 t4_set_wq_in_error(&qhp->wq); 1508 1508 set_state(qhp, C4IW_QP_STATE_CLOSING); 1509 1509 ep = qhp->ep;
+3 -3
drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
··· 80 80 left = PAGE_SIZE; 81 81 82 82 mutex_lock(&us_ibdev->usdev_lock); 83 - if (atomic_read(&us_ibdev->vf_cnt.refcount) > 0) { 83 + if (kref_read(&us_ibdev->vf_cnt) > 0) { 84 84 char *busname; 85 85 86 86 /* ··· 99 99 PCI_FUNC(us_ibdev->pdev->devfn), 100 100 netdev_name(us_ibdev->netdev), 101 101 us_ibdev->ufdev->mac, 102 - atomic_read(&us_ibdev->vf_cnt.refcount)); 102 + kref_read(&us_ibdev->vf_cnt)); 103 103 UPDATE_PTR_LEFT(n, ptr, left); 104 104 105 105 for (res_type = USNIC_VNIC_RES_TYPE_EOL; ··· 147 147 us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev); 148 148 149 149 return scnprintf(buf, PAGE_SIZE, "%u\n", 150 - atomic_read(&us_ibdev->vf_cnt.refcount)); 150 + kref_read(&us_ibdev->vf_cnt)); 151 151 } 152 152 153 153 static ssize_t
+2 -2
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
··· 291 291 qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ], 292 292 us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]); 293 293 props->max_qp = qp_per_vf * 294 - atomic_read(&us_ibdev->vf_cnt.refcount); 294 + kref_read(&us_ibdev->vf_cnt); 295 295 props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT | 296 296 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; 297 297 props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] * 298 - atomic_read(&us_ibdev->vf_cnt.refcount); 298 + kref_read(&us_ibdev->vf_cnt); 299 299 props->max_pd = USNIC_UIOM_MAX_PD_CNT; 300 300 props->max_mr = USNIC_UIOM_MAX_MR_CNT; 301 301 props->local_ca_ack_delay = 0;
+1 -1
drivers/misc/genwqe/card_dev.c
··· 1396 1396 * application which will decrease this reference from 1397 1397 * 1/unused to 0/illegal and not from 2/used 1/empty. 1398 1398 */ 1399 - rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount); 1399 + rc = kref_read(&cd->cdev_genwqe.kobj.kref); 1400 1400 if (rc != 1) { 1401 1401 dev_err(&pci_dev->dev, 1402 1402 "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
+1 -1
drivers/misc/mei/debugfs.c
··· 67 67 me_cl->props.max_number_of_connections, 68 68 me_cl->props.max_msg_length, 69 69 me_cl->props.single_recv_buf, 70 - atomic_read(&me_cl->refcnt.refcount)); 70 + kref_read(&me_cl->refcnt)); 71 71 72 72 mei_me_cl_put(me_cl); 73 73 }
+1 -1
drivers/pci/hotplug/pnv_php.c
··· 155 155 pnv_php_detach_device_nodes(dn); 156 156 157 157 of_node_put(dn); 158 - refcount = atomic_read(&dn->kobj.kref.refcount); 158 + refcount = kref_read(&dn->kobj.kref); 159 159 if (refcount != 1) 160 160 pr_warn("Invalid refcount %d on <%s>\n", 161 161 refcount, of_node_full_name(dn));
+1 -1
drivers/pci/slot.c
··· 345 345 void pci_destroy_slot(struct pci_slot *slot) 346 346 { 347 347 dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n", 348 - slot->number, atomic_read(&slot->kobj.kref.refcount) - 1); 348 + slot->number, kref_read(&slot->kobj.kref) - 1); 349 349 350 350 mutex_lock(&pci_slot_mutex); 351 351 kobject_put(&slot->kobj);
+4 -4
drivers/scsi/bnx2fc/bnx2fc_io.c
··· 74 74 &io_req->req_flags)) { 75 75 /* Handle internally generated ABTS timeout */ 76 76 BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n", 77 - io_req->refcount.refcount.counter); 77 + kref_read(&io_req->refcount)); 78 78 if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, 79 79 &io_req->req_flags))) { 80 80 /* ··· 1141 1141 return SUCCESS; 1142 1142 } 1143 1143 BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n", 1144 - io_req->refcount.refcount.counter); 1144 + kref_read(&io_req->refcount)); 1145 1145 1146 1146 /* Hold IO request across abort processing */ 1147 1147 kref_get(&io_req->refcount); ··· 1299 1299 { 1300 1300 BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl " 1301 1301 "refcnt = %d, cmd_type = %d\n", 1302 - io_req->refcount.refcount.counter, io_req->cmd_type); 1302 + kref_read(&io_req->refcount), io_req->cmd_type); 1303 1303 bnx2fc_scsi_done(io_req, DID_ERROR); 1304 1304 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1305 1305 if (io_req->wait_for_comp) ··· 1318 1318 BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x" 1319 1319 "refcnt = %d, cmd_type = %d\n", 1320 1320 io_req->xid, 1321 - io_req->refcount.refcount.counter, io_req->cmd_type); 1321 + kref_read(&io_req->refcount), io_req->cmd_type); 1322 1322 1323 1323 if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, 1324 1324 &io_req->req_flags)) {
+2 -2
drivers/scsi/cxgbi/libcxgbi.h
··· 301 301 { 302 302 log_debug(1 << CXGBI_DBG_SOCK, 303 303 "%s, put csk 0x%p, ref %u-1.\n", 304 - fn, csk, atomic_read(&csk->refcnt.refcount)); 304 + fn, csk, kref_read(&csk->refcnt)); 305 305 kref_put(&csk->refcnt, cxgbi_sock_free); 306 306 } 307 307 #define cxgbi_sock_put(csk) __cxgbi_sock_put(__func__, csk) ··· 310 310 { 311 311 log_debug(1 << CXGBI_DBG_SOCK, 312 312 "%s, get csk 0x%p, ref %u+1.\n", 313 - fn, csk, atomic_read(&csk->refcnt.refcount)); 313 + fn, csk, kref_read(&csk->refcnt)); 314 314 kref_get(&csk->refcnt); 315 315 } 316 316 #define cxgbi_sock_get(csk) __cxgbi_sock_get(__func__, csk)
+1 -1
drivers/scsi/lpfc/lpfc_debugfs.c
··· 607 607 len += snprintf(buf+len, size-len, "usgmap:%x ", 608 608 ndlp->nlp_usg_map); 609 609 len += snprintf(buf+len, size-len, "refcnt:%x", 610 - atomic_read(&ndlp->kref.refcount)); 610 + kref_read(&ndlp->kref)); 611 611 len += snprintf(buf+len, size-len, "\n"); 612 612 } 613 613 spin_unlock_irq(shost->host_lock);
+1 -1
drivers/scsi/lpfc/lpfc_els.c
··· 3688 3688 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3689 3689 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n", 3690 3690 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 3691 - atomic_read(&ndlp->kref.refcount), 3691 + kref_read(&ndlp->kref), 3692 3692 ndlp->nlp_usg_map, ndlp); 3693 3693 if (NLP_CHK_NODE_ACT(ndlp)) { 3694 3694 lpfc_nlp_put(ndlp);
+20 -20
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 3440 3440 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 3441 3441 "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n", 3442 3442 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 3443 - atomic_read(&ndlp->kref.refcount), 3443 + kref_read(&ndlp->kref), 3444 3444 ndlp->nlp_usg_map, ndlp); 3445 3445 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 3446 3446 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; ··· 3861 3861 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 3862 3862 "0003 rpi:%x DID:%x flg:%x %d map%x %p\n", 3863 3863 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 3864 - atomic_read(&ndlp->kref.refcount), 3864 + kref_read(&ndlp->kref), 3865 3865 ndlp->nlp_usg_map, ndlp); 3866 3866 3867 3867 if (vport->port_state < LPFC_VPORT_READY) { ··· 4238 4238 "0277 lpfc_enable_node: ndlp:x%p " 4239 4239 "usgmap:x%x refcnt:%d\n", 4240 4240 (void *)ndlp, ndlp->nlp_usg_map, 4241 - atomic_read(&ndlp->kref.refcount)); 4241 + kref_read(&ndlp->kref)); 4242 4242 return NULL; 4243 4243 } 4244 4244 /* The ndlp should not already be in active mode */ ··· 4248 4248 "0278 lpfc_enable_node: ndlp:x%p " 4249 4249 "usgmap:x%x refcnt:%d\n", 4250 4250 (void *)ndlp, ndlp->nlp_usg_map, 4251 - atomic_read(&ndlp->kref.refcount)); 4251 + kref_read(&ndlp->kref)); 4252 4252 return NULL; 4253 4253 } 4254 4254 ··· 4272 4272 "0008 rpi:%x DID:%x flg:%x refcnt:%d " 4273 4273 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, 4274 4274 ndlp->nlp_flag, 4275 - atomic_read(&ndlp->kref.refcount), 4275 + kref_read(&ndlp->kref), 4276 4276 ndlp->nlp_usg_map, ndlp); 4277 4277 } 4278 4278 ··· 4546 4546 (bf_get(lpfc_sli_intf_if_type, 4547 4547 &phba->sli4_hba.sli_intf) == 4548 4548 LPFC_SLI_INTF_IF_TYPE_2) && 4549 - (atomic_read(&ndlp->kref.refcount) > 0)) { 4549 + (kref_read(&ndlp->kref) > 0)) { 4550 4550 mbox->context1 = lpfc_nlp_get(ndlp); 4551 4551 mbox->mbox_cmpl = 4552 4552 lpfc_sli4_unreg_rpi_cmpl_clr; ··· 4695 4695 "0280 lpfc_cleanup_node: ndlp:x%p " 4696 4696 "usgmap:x%x refcnt:%d\n", 4697 4697 (void *)ndlp, ndlp->nlp_usg_map, 4698 - atomic_read(&ndlp->kref.refcount)); 4698 + kref_read(&ndlp->kref)); 4699 4699 lpfc_dequeue_node(vport, ndlp); 4700 4700 } else { 4701 4701 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 4702 4702 "0281 lpfc_cleanup_node: ndlp:x%p " 4703 4703 "usgmap:x%x refcnt:%d\n", 4704 4704 (void *)ndlp, ndlp->nlp_usg_map, 4705 - atomic_read(&ndlp->kref.refcount)); 4705 + kref_read(&ndlp->kref)); 4706 4706 lpfc_disable_node(vport, ndlp); 4707 4707 } 4708 4708 ··· 4791 4791 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4792 4792 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n", 4793 4793 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 4794 - atomic_read(&ndlp->kref.refcount), 4794 + kref_read(&ndlp->kref), 4795 4795 ndlp->nlp_usg_map, ndlp); 4796 4796 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) 4797 4797 != NULL) { ··· 5557 5557 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 5558 5558 "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n", 5559 5559 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 5560 - atomic_read(&ndlp->kref.refcount), 5560 + kref_read(&ndlp->kref), 5561 5561 ndlp->nlp_usg_map, ndlp); 5562 5562 /* 5563 5563 * Start issuing Fabric-Device Management Interface (FDMI) command to ··· 5728 5728 "0007 rpi:%x DID:%x flg:%x refcnt:%d " 5729 5729 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, 5730 5730 ndlp->nlp_flag, 5731 - atomic_read(&ndlp->kref.refcount), 5731 + kref_read(&ndlp->kref), 5732 5732 ndlp->nlp_usg_map, ndlp); 5733 5733 5734 5734 ndlp->active_rrqs_xri_bitmap = ··· 5767 5767 "0279 lpfc_nlp_release: ndlp:x%p did %x " 5768 5768 "usgmap:x%x refcnt:%d rpi:%x\n", 5769 5769 (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map, 5770 - atomic_read(&ndlp->kref.refcount), ndlp->nlp_rpi); 5770 + kref_read(&ndlp->kref), ndlp->nlp_rpi); 5771 5771 5772 5772 /* remove ndlp from action. */ 5773 5773 lpfc_nlp_remove(ndlp->vport, ndlp); ··· 5804 5804 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 5805 5805 "node get: did:x%x flg:x%x refcnt:x%x", 5806 5806 ndlp->nlp_DID, ndlp->nlp_flag, 5807 - atomic_read(&ndlp->kref.refcount)); 5807 + kref_read(&ndlp->kref)); 5808 5808 /* The check of ndlp usage to prevent incrementing the 5809 5809 * ndlp reference count that is in the process of being 5810 5810 * released. ··· 5817 5817 "0276 lpfc_nlp_get: ndlp:x%p " 5818 5818 "usgmap:x%x refcnt:%d\n", 5819 5819 (void *)ndlp, ndlp->nlp_usg_map, 5820 - atomic_read(&ndlp->kref.refcount)); 5820 + kref_read(&ndlp->kref)); 5821 5821 return NULL; 5822 5822 } else 5823 5823 kref_get(&ndlp->kref); ··· 5844 5844 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 5845 5845 "node put: did:x%x flg:x%x refcnt:x%x", 5846 5846 ndlp->nlp_DID, ndlp->nlp_flag, 5847 - atomic_read(&ndlp->kref.refcount)); 5847 + kref_read(&ndlp->kref)); 5848 5848 phba = ndlp->phba; 5849 5849 spin_lock_irqsave(&phba->ndlp_lock, flags); 5850 5850 /* Check the ndlp memory free acknowledge flag to avoid the ··· 5857 5857 "0274 lpfc_nlp_put: ndlp:x%p " 5858 5858 "usgmap:x%x refcnt:%d\n", 5859 5859 (void *)ndlp, ndlp->nlp_usg_map, 5860 - atomic_read(&ndlp->kref.refcount)); 5860 + kref_read(&ndlp->kref)); 5861 5861 return 1; 5862 5862 } 5863 5863 /* Check the ndlp inactivate log flag to avoid the possible ··· 5870 5870 "0275 lpfc_nlp_put: ndlp:x%p " 5871 5871 "usgmap:x%x refcnt:%d\n", 5872 5872 (void *)ndlp, ndlp->nlp_usg_map, 5873 - atomic_read(&ndlp->kref.refcount)); 5873 + kref_read(&ndlp->kref)); 5874 5874 return 1; 5875 5875 } 5876 5876 /* For last put, mark the ndlp usage flags to make sure no ··· 5878 5878 * in between the process when the final kref_put has been 5879 5879 * invoked on this ndlp. 5880 5880 */ 5881 - if (atomic_read(&ndlp->kref.refcount) == 1) { 5881 + if (kref_read(&ndlp->kref) == 1) { 5882 5882 /* Indicate ndlp is put to inactive state. */ 5883 5883 NLP_SET_IACT_REQ(ndlp); 5884 5884 /* Acknowledge ndlp memory free has been seen. */ ··· 5906 5906 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 5907 5907 "node not used: did:x%x flg:x%x refcnt:x%x", 5908 5908 ndlp->nlp_DID, ndlp->nlp_flag, 5909 - atomic_read(&ndlp->kref.refcount)); 5910 - if (atomic_read(&ndlp->kref.refcount) == 1) 5909 + kref_read(&ndlp->kref)); 5910 + if (kref_read(&ndlp->kref) == 1) 5911 5911 if (lpfc_nlp_put(ndlp)) 5912 5912 return 1; 5913 5913 return 0;
+1 -2
drivers/scsi/lpfc/lpfc_init.c
··· 2660 2660 "usgmap:x%x refcnt:%d\n", 2661 2661 ndlp->nlp_DID, (void *)ndlp, 2662 2662 ndlp->nlp_usg_map, 2663 - atomic_read( 2664 - &ndlp->kref.refcount)); 2663 + kref_read(&ndlp->kref)); 2665 2664 } 2666 2665 break; 2667 2666 }
+2 -2
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 371 371 */ 372 372 pr_debug("write_pending aborted cmd[%p] refcount %d " 373 373 "transport_state %x, t_state %x, se_cmd_flags %x\n", 374 - cmd,cmd->se_cmd.cmd_kref.refcount.counter, 374 + cmd, kref_read(&cmd->se_cmd.cmd_kref), 375 375 cmd->se_cmd.transport_state, 376 376 cmd->se_cmd.t_state, 377 377 cmd->se_cmd.se_cmd_flags); ··· 584 584 */ 585 585 pr_debug("queue_data_in aborted cmd[%p] refcount %d " 586 586 "transport_state %x, t_state %x, se_cmd_flags %x\n", 587 - cmd,cmd->se_cmd.cmd_kref.refcount.counter, 587 + cmd, kref_read(&cmd->se_cmd.cmd_kref), 588 588 cmd->se_cmd.transport_state, 589 589 cmd->se_cmd.t_state, 590 590 cmd->se_cmd.se_cmd_flags);
+1 -1
drivers/staging/android/ion/ion.c
··· 1300 1300 seq_printf(s, "%16s %16u %16zu %d %d\n", 1301 1301 buffer->task_comm, buffer->pid, 1302 1302 buffer->size, buffer->kmap_cnt, 1303 - atomic_read(&buffer->ref.refcount)); 1303 + kref_read(&buffer->ref)); 1304 1304 total_orphaned_size += buffer->size; 1305 1305 } 1306 1306 }
+1 -1
drivers/staging/comedi/comedi_buf.c
··· 188 188 { 189 189 struct comedi_buf_map *bm = s->async->buf_map; 190 190 191 - return bm && (atomic_read(&bm->refcount.refcount) > 1); 191 + return bm && (kref_read(&bm->refcount) > 1); 192 192 } 193 193 194 194 int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
+5 -5
drivers/target/target_core_pr.c
··· 788 788 * __core_scsi3_add_registration() 789 789 */ 790 790 dest_lun = rcu_dereference_check(deve_tmp->se_lun, 791 - atomic_read(&deve_tmp->pr_kref.refcount) != 0); 791 + kref_read(&deve_tmp->pr_kref) != 0); 792 792 793 793 pr_reg_atp = __core_scsi3_do_alloc_registration(dev, 794 794 nacl_tmp, dest_lun, deve_tmp, ··· 1463 1463 * For nacl->dynamic_node_acl=1 1464 1464 */ 1465 1465 lun_acl = rcu_dereference_check(se_deve->se_lun_acl, 1466 - atomic_read(&se_deve->pr_kref.refcount) != 0); 1466 + kref_read(&se_deve->pr_kref) != 0); 1467 1467 if (!lun_acl) 1468 1468 return 0; 1469 1469 ··· 1478 1478 * For nacl->dynamic_node_acl=1 1479 1479 */ 1480 1480 lun_acl = rcu_dereference_check(se_deve->se_lun_acl, 1481 - atomic_read(&se_deve->pr_kref.refcount) != 0); 1481 + kref_read(&se_deve->pr_kref) != 0); 1482 1482 if (!lun_acl) { 1483 1483 kref_put(&se_deve->pr_kref, target_pr_kref_release); 1484 1484 return; ··· 1759 1759 * 2nd loop which will never fail. 1760 1760 */ 1761 1761 dest_lun = rcu_dereference_check(dest_se_deve->se_lun, 1762 - atomic_read(&dest_se_deve->pr_kref.refcount) != 0); 1762 + kref_read(&dest_se_deve->pr_kref) != 0); 1763 1763 1764 1764 dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, 1765 1765 dest_node_acl, dest_lun, dest_se_deve, ··· 3466 3466 iport_ptr); 3467 3467 if (!dest_pr_reg) { 3468 3468 struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun, 3469 - atomic_read(&dest_se_deve->pr_kref.refcount) != 0); 3469 + kref_read(&dest_se_deve->pr_kref) != 0); 3470 3470 3471 3471 spin_unlock(&dev->dev_reservation_lock); 3472 3472 if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
+1 -1
drivers/target/tcm_fc/tfc_sess.c
··· 454 454 455 455 void ft_sess_put(struct ft_sess *sess) 456 456 { 457 - int sess_held = atomic_read(&sess->kref.refcount); 457 + int sess_held = kref_read(&sess->kref); 458 458 459 459 BUG_ON(!sess_held); 460 460 kref_put(&sess->kref, ft_sess_free);
+1 -1
drivers/usb/gadget/function/f_fs.c
··· 3687 3687 goto done; 3688 3688 3689 3689 if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent 3690 - || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) 3690 + || !kref_read(&opts->func_inst.group.cg_item.ci_kref)) 3691 3691 goto done; 3692 3692 3693 3693 ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
+1 -1
fs/exofs/sys.c
··· 122 122 list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) { 123 123 printk(KERN_INFO "%s: name %s ref %d\n", 124 124 __func__, kobject_name(k_name), 125 - (int)atomic_read(&k_name->kref.refcount)); 125 + (int)kref_read(&k_name->kref)); 126 126 } 127 127 #endif 128 128 }
+1 -1
fs/ocfs2/cluster/netdebug.c
··· 349 349 " func key: 0x%08x\n" 350 350 " func type: %u\n", 351 351 sc, 352 - atomic_read(&sc->sc_kref.refcount), 352 + kref_read(&sc->sc_kref), 353 353 &saddr, inet ? ntohs(sport) : 0, 354 354 &daddr, inet ? ntohs(dport) : 0, 355 355 sc->sc_node->nd_name,
+1 -1
fs/ocfs2/cluster/tcp.c
··· 97 97 typeof(sc) __sc = (sc); \ 98 98 mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \ 99 99 "pg_off %zu] " fmt, __sc, \ 100 - atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \ 100 + kref_read(&__sc->sc_kref), __sc->sc_sock, \ 101 101 __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \ 102 102 ##args); \ 103 103 } while (0)
+6 -6
fs/ocfs2/dlm/dlmdebug.c
··· 81 81 lock->ml.type, lock->ml.convert_type, lock->ml.node, 82 82 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), 83 83 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), 84 - atomic_read(&lock->lock_refs.refcount), 84 + kref_read(&lock->lock_refs), 85 85 (list_empty(&lock->ast_list) ? 'y' : 'n'), 86 86 (lock->ast_pending ? 'y' : 'n'), 87 87 (list_empty(&lock->bast_list) ? 'y' : 'n'), ··· 106 106 printk("lockres: %s, owner=%u, state=%u\n", 107 107 buf, res->owner, res->state); 108 108 printk(" last used: %lu, refcnt: %u, on purge list: %s\n", 109 - res->last_used, atomic_read(&res->refs.refcount), 109 + res->last_used, kref_read(&res->refs), 110 110 list_empty(&res->purge) ? "no" : "yes"); 111 111 printk(" on dirty list: %s, on reco list: %s, " 112 112 "migrating pending: %s\n", ··· 298 298 mle_type, mle->master, mle->new_master, 299 299 !list_empty(&mle->hb_events), 300 300 !!mle->inuse, 301 - atomic_read(&mle->mle_refs.refcount)); 301 + kref_read(&mle->mle_refs)); 302 302 303 303 out += snprintf(buf + out, len - out, "Maybe="); 304 304 out += stringify_nodemap(mle->maybe_map, O2NM_MAX_NODES, ··· 494 494 lock->ast_pending, lock->bast_pending, 495 495 lock->convert_pending, lock->lock_pending, 496 496 lock->cancel_pending, lock->unlock_pending, 497 - atomic_read(&lock->lock_refs.refcount)); 497 + kref_read(&lock->lock_refs)); 498 498 spin_unlock(&lock->spinlock); 499 499 500 500 return out; ··· 521 521 !list_empty(&res->recovering), 522 522 res->inflight_locks, res->migration_pending, 523 523 atomic_read(&res->asts_reserved), 524 - atomic_read(&res->refs.refcount)); 524 + kref_read(&res->refs)); 525 525 526 526 /* refmap */ 527 527 out += snprintf(buf + out, len - out, "RMAP:"); ··· 777 777 /* Purge Count: xxx Refs: xxx */ 778 778 out += snprintf(buf + out, len - out, 779 779 "Purge Count: %d Refs: %d\n", dlm->purge_count, 780 - atomic_read(&dlm->dlm_refs.refcount)); 780 + kref_read(&dlm->dlm_refs)); 781 781 782 782 /* Dead Node: xxx */ 783 783 out += snprintf(buf + out, len - out,
+1 -1
fs/ocfs2/dlm/dlmdomain.c
··· 2072 2072 INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks); 2073 2073 2074 2074 mlog(0, "context init: refcount %u\n", 2075 - atomic_read(&dlm->dlm_refs.refcount)); 2075 + kref_read(&dlm->dlm_refs)); 2076 2076 2077 2077 leave: 2078 2078 if (ret < 0 && dlm) {
+4 -4
fs/ocfs2/dlm/dlmmaster.c
··· 233 233 234 234 assert_spin_locked(&dlm->spinlock); 235 235 assert_spin_locked(&dlm->master_lock); 236 - if (!atomic_read(&mle->mle_refs.refcount)) { 236 + if (!kref_read(&mle->mle_refs)) { 237 237 /* this may or may not crash, but who cares. 238 238 * it's a BUG. */ 239 239 mlog(ML_ERROR, "bad mle: %p\n", mle); ··· 1124 1124 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); 1125 1125 1126 1126 /* 1127 - if (atomic_read(&mle->mle_refs.refcount) < 2) 1127 + if (kref_read(&mle->mle_refs) < 2) 1128 1128 mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, 1129 - atomic_read(&mle->mle_refs.refcount), 1129 + kref_read(&mle->mle_refs), 1130 1130 res->lockname.len, res->lockname.name); 1131 1131 */ 1132 1132 atomic_set(&mle->woken, 0); ··· 1979 1979 * on this mle. */ 1980 1980 spin_lock(&dlm->master_lock); 1981 1981 1982 - rr = atomic_read(&mle->mle_refs.refcount); 1982 + rr = kref_read(&mle->mle_refs); 1983 1983 if (mle->inuse > 0) { 1984 1984 if (extra_ref && rr < 3) 1985 1985 err = 1;
+1 -1
fs/ocfs2/dlm/dlmunlock.c
··· 251 251 mlog(0, "lock %u:%llu should be gone now! refs=%d\n", 252 252 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), 253 253 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), 254 - atomic_read(&lock->lock_refs.refcount)-1); 254 + kref_read(&lock->lock_refs)-1); 255 255 dlm_lock_put(lock); 256 256 } 257 257 if (actions & DLM_UNLOCK_CALL_AST)
+1 -1
include/drm/drm_framebuffer.h
··· 247 247 */ 248 248 static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb) 249 249 { 250 - return atomic_read(&fb->base.refcount.refcount); 250 + return kref_read(&fb->base.refcount); 251 251 } 252 252 253 253 /**
+2 -2
include/drm/ttm/ttm_bo_driver.h
··· 878 878 { 879 879 int ret; 880 880 881 - WARN_ON(!atomic_read(&bo->kref.refcount)); 881 + WARN_ON(!kref_read(&bo->kref)); 882 882 883 883 ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket); 884 884 if (likely(ret == 0)) ··· 903 903 { 904 904 int ret = 0; 905 905 906 - WARN_ON(!atomic_read(&bo->kref.refcount)); 906 + WARN_ON(!kref_read(&bo->kref)); 907 907 908 908 if (interruptible) 909 909 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+5
include/linux/kref.h
··· 35 35 atomic_set(&kref->refcount, 1); 36 36 } 37 37 38 + static inline int kref_read(const struct kref *kref) 39 + { 40 + return atomic_read(&kref->refcount); 41 + } 42 + 38 43 /** 39 44 * kref_get - increment refcount for object. 40 45 * @kref: object.
+1 -1
include/linux/sunrpc/cache.h
··· 198 198 199 199 static inline void cache_put(struct cache_head *h, struct cache_detail *cd) 200 200 { 201 - if (atomic_read(&h->ref.refcount) <= 2 && 201 + if (kref_read(&h->ref) <= 2 && 202 202 h->expiry_time < cd->nextcheck) 203 203 cd->nextcheck = h->expiry_time; 204 204 kref_put(&h->ref, cd->cache_put);
+2 -2
include/net/bluetooth/hci_core.h
··· 987 987 static inline void hci_dev_put(struct hci_dev *d) 988 988 { 989 989 BT_DBG("%s orig refcnt %d", d->name, 990 - atomic_read(&d->dev.kobj.kref.refcount)); 990 + kref_read(&d->dev.kobj.kref)); 991 991 992 992 put_device(&d->dev); 993 993 } ··· 995 995 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d) 996 996 { 997 997 BT_DBG("%s orig refcnt %d", d->name, 998 - atomic_read(&d->dev.kobj.kref.refcount)); 998 + kref_read(&d->dev.kobj.kref)); 999 999 1000 1000 get_device(&d->dev); 1001 1001 return d;
+1 -1
net/bluetooth/6lowpan.c
··· 920 920 BT_DBG("dev %p removing %speer %p", dev, 921 921 last ? "last " : "1 ", peer); 922 922 BT_DBG("chan %p orig refcnt %d", chan, 923 - atomic_read(&chan->kref.refcount)); 923 + kref_read(&chan->kref)); 924 924 925 925 l2cap_chan_put(chan); 926 926 break;
+2 -2
net/bluetooth/a2mp.c
··· 810 810 /* AMP Manager functions */ 811 811 struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr) 812 812 { 813 - BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount)); 813 + BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref)); 814 814 815 815 kref_get(&mgr->kref); 816 816 ··· 833 833 834 834 int amp_mgr_put(struct amp_mgr *mgr) 835 835 { 836 - BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount)); 836 + BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref)); 837 837 838 838 return kref_put(&mgr->kref, &amp_mgr_destroy); 839 839 }
+2 -2
net/bluetooth/amp.c
··· 24 24 void amp_ctrl_get(struct amp_ctrl *ctrl) 25 25 { 26 26 BT_DBG("ctrl %p orig refcnt %d", ctrl, 27 - atomic_read(&ctrl->kref.refcount)); 27 + kref_read(&ctrl->kref)); 28 28 29 29 kref_get(&ctrl->kref); 30 30 } ··· 42 42 int amp_ctrl_put(struct amp_ctrl *ctrl) 43 43 { 44 44 BT_DBG("ctrl %p orig refcnt %d", ctrl, 45 - atomic_read(&ctrl->kref.refcount)); 45 + kref_read(&ctrl->kref)); 46 46 47 47 return kref_put(&ctrl->kref, &amp_ctrl_destroy); 48 48 }
+2 -2
net/bluetooth/l2cap_core.c
··· 481 481 482 482 void l2cap_chan_hold(struct l2cap_chan *c) 483 483 { 484 - BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount)); 484 + BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref)); 485 485 486 486 kref_get(&c->kref); 487 487 } 488 488 489 489 void l2cap_chan_put(struct l2cap_chan *c) 490 490 { 491 - BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount)); 491 + BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref)); 492 492 493 493 kref_put(&c->kref, l2cap_chan_destroy); 494 494 }
+2 -2
net/ceph/messenger.c
··· 3425 3425 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) 3426 3426 { 3427 3427 dout("%s %p (was %d)\n", __func__, msg, 3428 - atomic_read(&msg->kref.refcount)); 3428 + kref_read(&msg->kref)); 3429 3429 kref_get(&msg->kref); 3430 3430 return msg; 3431 3431 } ··· 3434 3434 void ceph_msg_put(struct ceph_msg *msg) 3435 3435 { 3436 3436 dout("%s %p (was %d)\n", __func__, msg, 3437 - atomic_read(&msg->kref.refcount)); 3437 + kref_read(&msg->kref)); 3438 3438 kref_put(&msg->kref, ceph_msg_release); 3439 3439 } 3440 3440 EXPORT_SYMBOL(ceph_msg_put);
+5 -5
net/ceph/osd_client.c
··· 438 438 void ceph_osdc_get_request(struct ceph_osd_request *req) 439 439 { 440 440 dout("%s %p (was %d)\n", __func__, req, 441 - atomic_read(&req->r_kref.refcount)); 441 + kref_read(&req->r_kref)); 442 442 kref_get(&req->r_kref); 443 443 } 444 444 EXPORT_SYMBOL(ceph_osdc_get_request); ··· 447 447 { 448 448 if (req) { 449 449 dout("%s %p (was %d)\n", __func__, req, 450 - atomic_read(&req->r_kref.refcount)); 450 + kref_read(&req->r_kref)); 451 451 kref_put(&req->r_kref, ceph_osdc_release_request); 452 452 } 453 453 } ··· 487 487 struct ceph_msg *reply_msg = req->r_reply; 488 488 489 489 dout("%s req %p\n", __func__, req); 490 - WARN_ON(atomic_read(&req->r_kref.refcount) != 1); 490 + WARN_ON(kref_read(&req->r_kref) != 1); 491 491 request_release_checks(req); 492 492 493 - WARN_ON(atomic_read(&request_msg->kref.refcount) != 1); 494 - WARN_ON(atomic_read(&reply_msg->kref.refcount) != 1); 493 + WARN_ON(kref_read(&request_msg->kref) != 1); 494 + WARN_ON(kref_read(&reply_msg->kref) != 1); 495 495 target_destroy(&req->r_t); 496 496 497 497 request_init(req);
+1 -1
net/sunrpc/cache.c
··· 1358 1358 ifdebug(CACHE) 1359 1359 seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n", 1360 1360 convert_to_wallclock(cp->expiry_time), 1361 - atomic_read(&cp->ref.refcount), cp->flags); 1361 + kref_read(&cp->ref), cp->flags); 1362 1362 cache_get(cp); 1363 1363 if (cache_check(cd, cp, NULL)) 1364 1364 /* cache_check does a cache_put on failure */
+3 -3
net/sunrpc/svc_xprt.c
··· 490 490 svc_xprt_get(xprt); 491 491 492 492 dprintk("svc: transport %p dequeued, inuse=%d\n", 493 - xprt, atomic_read(&xprt->xpt_ref.refcount)); 493 + xprt, kref_read(&xprt->xpt_ref)); 494 494 } 495 495 spin_unlock_bh(&pool->sp_lock); 496 496 out: ··· 820 820 /* XPT_DATA|XPT_DEFERRED case: */ 821 821 dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", 822 822 rqstp, rqstp->rq_pool->sp_id, xprt, 823 - atomic_read(&xprt->xpt_ref.refcount)); 823 + kref_read(&xprt->xpt_ref)); 824 824 rqstp->rq_deferred = svc_deferred_dequeue(xprt); 825 825 if (rqstp->rq_deferred) 826 826 len = svc_deferred_recv(rqstp); ··· 978 978 * through, close it. */ 979 979 if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags)) 980 980 continue; 981 - if (atomic_read(&xprt->xpt_ref.refcount) > 1 || 981 + if (kref_read(&xprt->xpt_ref) > 1 || 982 982 test_bit(XPT_BUSY, &xprt->xpt_flags)) 983 983 continue; 984 984 list_del_init(le);
+2 -2
net/sunrpc/xprtrdma/svc_rdma_transport.c
··· 1201 1201 ib_drain_qp(rdma->sc_qp); 1202 1202 1203 1203 /* We should only be called from kref_put */ 1204 - if (atomic_read(&xprt->xpt_ref.refcount) != 0) 1204 + if (kref_read(&xprt->xpt_ref) != 0) 1205 1205 pr_err("svcrdma: sc_xprt still in use? (%d)\n", 1206 - atomic_read(&xprt->xpt_ref.refcount)); 1206 + kref_read(&xprt->xpt_ref)); 1207 1207 1208 1208 /* 1209 1209 * Destroy queued, but not processed read completions. Note