Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dlm: move lkb idr to xarray datastructure

According to the kernel documentation, idr is deprecated and xarray should
be used instead. This patch moves the lkb idr implementation to xarray.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>

authored by

Alexander Aring and committed by
David Teigland
f455eb84 1ffefc19

+49 -46
+3 -2
fs/dlm/dlm_internal.h
··· 37 37 #include <linux/rhashtable.h> 38 38 #include <linux/mutex.h> 39 39 #include <linux/idr.h> 40 + #include <linux/xarray.h> 40 41 #include <linux/ratelimit.h> 41 42 #include <linux/uaccess.h> 42 43 ··· 570 569 unsigned long ls_flags; /* LSFL_ */ 571 570 struct kobject ls_kobj; 572 571 573 - struct idr ls_lkbidr; 574 - rwlock_t ls_lkbidr_lock; 572 + struct xarray ls_lkbxa; 573 + rwlock_t ls_lkbxa_lock; 575 574 576 575 struct rhashtable ls_rsbtbl; 577 576 rwlock_t ls_rsbtbl_lock;
+16 -14
fs/dlm/lock.c
··· 1435 1435 } 1436 1436 1437 1437 static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret, 1438 - int start, int end) 1438 + unsigned long start, unsigned long end) 1439 1439 { 1440 + struct xa_limit limit; 1440 1441 struct dlm_lkb *lkb; 1441 1442 int rv; 1443 + 1444 + limit.max = end; 1445 + limit.min = start; 1442 1446 1443 1447 lkb = dlm_allocate_lkb(ls); 1444 1448 if (!lkb) ··· 1457 1453 INIT_LIST_HEAD(&lkb->lkb_ownqueue); 1458 1454 INIT_LIST_HEAD(&lkb->lkb_rsb_lookup); 1459 1455 1460 - write_lock_bh(&ls->ls_lkbidr_lock); 1461 - rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT); 1462 - if (rv >= 0) 1463 - lkb->lkb_id = rv; 1464 - write_unlock_bh(&ls->ls_lkbidr_lock); 1456 + write_lock_bh(&ls->ls_lkbxa_lock); 1457 + rv = xa_alloc(&ls->ls_lkbxa, &lkb->lkb_id, lkb, limit, GFP_ATOMIC); 1458 + write_unlock_bh(&ls->ls_lkbxa_lock); 1465 1459 1466 1460 if (rv < 0) { 1467 - log_error(ls, "create_lkb idr error %d", rv); 1461 + log_error(ls, "create_lkb xa error %d", rv); 1468 1462 dlm_free_lkb(lkb); 1469 1463 return rv; 1470 1464 } ··· 1473 1471 1474 1472 static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) 1475 1473 { 1476 - return _create_lkb(ls, lkb_ret, 1, 0); 1474 + return _create_lkb(ls, lkb_ret, 1, ULONG_MAX); 1477 1475 } 1478 1476 1479 1477 static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret) 1480 1478 { 1481 1479 struct dlm_lkb *lkb; 1482 1480 1483 - read_lock_bh(&ls->ls_lkbidr_lock); 1484 - lkb = idr_find(&ls->ls_lkbidr, lkid); 1481 + read_lock_bh(&ls->ls_lkbxa_lock); 1482 + lkb = xa_load(&ls->ls_lkbxa, lkid); 1485 1483 if (lkb) 1486 1484 kref_get(&lkb->lkb_ref); 1487 - read_unlock_bh(&ls->ls_lkbidr_lock); 1485 + read_unlock_bh(&ls->ls_lkbxa_lock); 1488 1486 1489 1487 *lkb_ret = lkb; 1490 1488 return lkb ? 
0 : -ENOENT; ··· 1509 1507 int rv; 1510 1508 1511 1509 rv = dlm_kref_put_write_lock_bh(&lkb->lkb_ref, kill_lkb, 1512 - &ls->ls_lkbidr_lock); 1510 + &ls->ls_lkbxa_lock); 1513 1511 if (rv) { 1514 - idr_remove(&ls->ls_lkbidr, lkid); 1515 - write_unlock_bh(&ls->ls_lkbidr_lock); 1512 + xa_erase(&ls->ls_lkbxa, lkid); 1513 + write_unlock_bh(&ls->ls_lkbxa_lock); 1516 1514 1517 1515 detach_lkb(lkb); 1518 1516
+30 -30
fs/dlm/lockspace.c
··· 420 420 if (error) 421 421 goto out_lsfree; 422 422 423 - idr_init(&ls->ls_lkbidr); 424 - rwlock_init(&ls->ls_lkbidr_lock); 423 + xa_init_flags(&ls->ls_lkbxa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH); 424 + rwlock_init(&ls->ls_lkbxa_lock); 425 425 426 426 INIT_LIST_HEAD(&ls->ls_waiters); 427 427 spin_lock_init(&ls->ls_waiters_lock); ··· 471 471 ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS); 472 472 if (!ls->ls_recover_buf) { 473 473 error = -ENOMEM; 474 - goto out_lkbidr; 474 + goto out_lkbxa; 475 475 } 476 476 477 477 ls->ls_slot = 0; ··· 572 572 spin_unlock_bh(&lslist_lock); 573 573 idr_destroy(&ls->ls_recover_idr); 574 574 kfree(ls->ls_recover_buf); 575 - out_lkbidr: 576 - idr_destroy(&ls->ls_lkbidr); 575 + out_lkbxa: 576 + xa_destroy(&ls->ls_lkbxa); 577 577 rhashtable_destroy(&ls->ls_rsbtbl); 578 578 out_lsfree: 579 579 if (do_unreg) ··· 633 633 ops_arg, ops_result, lockspace); 634 634 } 635 635 636 - static int lkb_idr_is_local(int id, void *p, void *data) 636 + static int lkb_idr_free(struct dlm_lkb *lkb) 637 637 { 638 - struct dlm_lkb *lkb = p; 639 - 640 - return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV; 641 - } 642 - 643 - static int lkb_idr_is_any(int id, void *p, void *data) 644 - { 645 - return 1; 646 - } 647 - 648 - static int lkb_idr_free(int id, void *p, void *data) 649 - { 650 - struct dlm_lkb *lkb = p; 651 - 652 638 if (lkb->lkb_lvbptr && test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) 653 639 dlm_free_lvb(lkb->lkb_lvbptr); 654 640 ··· 642 656 return 0; 643 657 } 644 658 645 - /* NOTE: We check the lkbidr here rather than the resource table. 659 + /* NOTE: We check the lkbxa here rather than the resource table. 
646 660 This is because there may be LKBs queued as ASTs that have been unlinked 647 661 from their RSBs and are pending deletion once the AST has been delivered */ 648 662 649 663 static int lockspace_busy(struct dlm_ls *ls, int force) 650 664 { 651 - int rv; 665 + struct dlm_lkb *lkb; 666 + unsigned long id; 667 + int rv = 0; 652 668 653 - read_lock_bh(&ls->ls_lkbidr_lock); 669 + read_lock_bh(&ls->ls_lkbxa_lock); 654 670 if (force == 0) { 655 - rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls); 671 + xa_for_each(&ls->ls_lkbxa, id, lkb) { 672 + rv = 1; 673 + break; 674 + } 656 675 } else if (force == 1) { 657 - rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls); 676 + xa_for_each(&ls->ls_lkbxa, id, lkb) { 677 + if (lkb->lkb_nodeid == 0 && 678 + lkb->lkb_grmode != DLM_LOCK_IV) { 679 + rv = 1; 680 + break; 681 + } 682 + } 658 683 } else { 659 684 rv = 0; 660 685 } 661 - read_unlock_bh(&ls->ls_lkbidr_lock); 686 + read_unlock_bh(&ls->ls_lkbxa_lock); 662 687 return rv; 663 688 } 664 689 ··· 682 685 683 686 static int release_lockspace(struct dlm_ls *ls, int force) 684 687 { 688 + struct dlm_lkb *lkb; 689 + unsigned long id; 685 690 int busy, rv; 686 691 687 692 busy = lockspace_busy(ls, force); ··· 740 741 kfree(ls->ls_recover_buf); 741 742 742 743 /* 743 - * Free all lkb's in idr 744 + * Free all lkb's in xa 744 745 */ 745 - 746 - idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls); 747 - idr_destroy(&ls->ls_lkbidr); 746 + xa_for_each(&ls->ls_lkbxa, id, lkb) { 747 + lkb_idr_free(lkb); 748 + } 749 + xa_destroy(&ls->ls_lkbxa); 748 750 749 751 /* 750 752 * Free all rsb's on rsbtbl