Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dlm: move lkb xarray lookup out of lock

This patch moves the xarray lookup functionality for the lkb out of the
ls_lkbxa_lock read lock handling. We can do that because the xarray can
be accessed locklessly by readers, e.g. via xa_load(). We then confirm
under ls_lkbxa_lock that the lkb is still part of the data structure and
take a reference while it is still part of ls_lkbxa, so that it cannot
be freed after the lookup. To check whether the lkb is still part of the
ls_lkbxa data structure we use kref_read(), as the last put removes it
from ls_lkbxa; therefore any reference still held means it is still part
of ls_lkbxa.

A similar approach was taken with the DLM rsb rhashtable, except that a
flag was used there instead of the refcounter, because the rsb
refcounter has a slightly different meaning.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>

authored by

Alexander Aring and committed by
David Teigland
c846f732 5be323b0

+23 -5
+1
fs/dlm/dlm_internal.h
··· 295 295 void *lkb_astparam; /* caller's ast arg */ 296 296 struct dlm_user_args *lkb_ua; 297 297 }; 298 + struct rcu_head rcu; 298 299 }; 299 300 300 301 /*
+14 -4
fs/dlm/lock.c
··· 1527 1527 { 1528 1528 struct dlm_lkb *lkb; 1529 1529 1530 - read_lock_bh(&ls->ls_lkbxa_lock); 1530 + rcu_read_lock(); 1531 1531 lkb = xa_load(&ls->ls_lkbxa, lkid); 1532 - if (lkb) 1533 - kref_get(&lkb->lkb_ref); 1534 - read_unlock_bh(&ls->ls_lkbxa_lock); 1532 + if (lkb) { 1533 + /* check if lkb is still part of lkbxa under lkbxa_lock as 1534 + * the lkb_ref is tight to the lkbxa data structure, see 1535 + * __put_lkb(). 1536 + */ 1537 + read_lock_bh(&ls->ls_lkbxa_lock); 1538 + if (kref_read(&lkb->lkb_ref)) 1539 + kref_get(&lkb->lkb_ref); 1540 + else 1541 + lkb = NULL; 1542 + read_unlock_bh(&ls->ls_lkbxa_lock); 1543 + } 1544 + rcu_read_unlock(); 1535 1545 1536 1546 *lkb_ret = lkb; 1537 1547 return lkb ? 0 : -ENOENT;
+8 -1
fs/dlm/memory.c
··· 115 115 return kmem_cache_zalloc(lkb_cache, GFP_ATOMIC); 116 116 } 117 117 118 - void dlm_free_lkb(struct dlm_lkb *lkb) 118 + static void __free_lkb_rcu(struct rcu_head *rcu) 119 119 { 120 + struct dlm_lkb *lkb = container_of(rcu, struct dlm_lkb, rcu); 121 + 120 122 if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { 121 123 struct dlm_user_args *ua; 122 124 ua = lkb->lkb_ua; ··· 129 127 } 130 128 131 129 kmem_cache_free(lkb_cache, lkb); 130 + } 131 + 132 + void dlm_free_lkb(struct dlm_lkb *lkb) 133 + { 134 + call_rcu(&lkb->rcu, __free_lkb_rcu); 132 135 } 133 136 134 137 struct dlm_mhandle *dlm_allocate_mhandle(void)