Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dlm: move recover idr to xarray datastructure

According to the kernel documentation (kdoc), idr is deprecated and xarrays
should be used nowadays. This patch moves the recover idr implementation to
the xarray data structure.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>

Authored by Alexander Aring and committed by David Teigland
fa0b54f1 f455eb84

+40 -36
+3 -4
fs/dlm/dlm_internal.h
··· 36 36 #include <linux/miscdevice.h> 37 37 #include <linux/rhashtable.h> 38 38 #include <linux/mutex.h> 39 - #include <linux/idr.h> 40 39 #include <linux/xarray.h> 41 40 #include <linux/ratelimit.h> 42 41 #include <linux/uaccess.h> ··· 316 317 int res_nodeid; 317 318 int res_master_nodeid; 318 319 int res_dir_nodeid; 319 - int res_id; /* for ls_recover_idr */ 320 + unsigned long res_id; /* for ls_recover_xa */ 320 321 uint32_t res_lvbseq; 321 322 uint32_t res_hash; 322 323 unsigned long res_toss_time; ··· 648 649 struct list_head ls_recover_list; 649 650 spinlock_t ls_recover_list_lock; 650 651 int ls_recover_list_count; 651 - struct idr ls_recover_idr; 652 - spinlock_t ls_recover_idr_lock; 652 + struct xarray ls_recover_xa; 653 + spinlock_t ls_recover_xa_lock; 653 654 wait_queue_head_t ls_wait_general; 654 655 wait_queue_head_t ls_recover_lock_wait; 655 656 spinlock_t ls_clear_proc_locks;
+4 -4
fs/dlm/lockspace.c
··· 481 481 482 482 INIT_LIST_HEAD(&ls->ls_recover_list); 483 483 spin_lock_init(&ls->ls_recover_list_lock); 484 - idr_init(&ls->ls_recover_idr); 485 - spin_lock_init(&ls->ls_recover_idr_lock); 484 + xa_init_flags(&ls->ls_recover_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH); 485 + spin_lock_init(&ls->ls_recover_xa_lock); 486 486 ls->ls_recover_list_count = 0; 487 487 init_waitqueue_head(&ls->ls_wait_general); 488 488 INIT_LIST_HEAD(&ls->ls_masters_list); ··· 570 570 spin_lock_bh(&lslist_lock); 571 571 list_del(&ls->ls_list); 572 572 spin_unlock_bh(&lslist_lock); 573 - idr_destroy(&ls->ls_recover_idr); 573 + xa_destroy(&ls->ls_recover_xa); 574 574 kfree(ls->ls_recover_buf); 575 575 out_lkbxa: 576 576 xa_destroy(&ls->ls_lkbxa); ··· 736 736 737 737 dlm_delete_debug_file(ls); 738 738 739 - idr_destroy(&ls->ls_recover_idr); 739 + xa_destroy(&ls->ls_recover_xa); 740 740 kfree(ls->ls_recover_buf); 741 741 742 742 /*
+33 -28
fs/dlm/recover.c
··· 293 293 spin_unlock_bh(&ls->ls_recover_list_lock); 294 294 } 295 295 296 - static int recover_idr_empty(struct dlm_ls *ls) 296 + static int recover_xa_empty(struct dlm_ls *ls) 297 297 { 298 298 int empty = 1; 299 299 300 - spin_lock_bh(&ls->ls_recover_idr_lock); 300 + spin_lock_bh(&ls->ls_recover_xa_lock); 301 301 if (ls->ls_recover_list_count) 302 302 empty = 0; 303 - spin_unlock_bh(&ls->ls_recover_idr_lock); 303 + spin_unlock_bh(&ls->ls_recover_xa_lock); 304 304 305 305 return empty; 306 306 } 307 307 308 - static int recover_idr_add(struct dlm_rsb *r) 308 + static int recover_xa_add(struct dlm_rsb *r) 309 309 { 310 310 struct dlm_ls *ls = r->res_ls; 311 + struct xa_limit limit = { 312 + .min = 1, 313 + .max = UINT_MAX, 314 + }; 315 + uint32_t id; 311 316 int rv; 312 317 313 - spin_lock_bh(&ls->ls_recover_idr_lock); 318 + spin_lock_bh(&ls->ls_recover_xa_lock); 314 319 if (r->res_id) { 315 320 rv = -1; 316 321 goto out_unlock; 317 322 } 318 - rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT); 323 + rv = xa_alloc(&ls->ls_recover_xa, &id, r, limit, GFP_ATOMIC); 319 324 if (rv < 0) 320 325 goto out_unlock; 321 326 322 - r->res_id = rv; 327 + r->res_id = id; 323 328 ls->ls_recover_list_count++; 324 329 dlm_hold_rsb(r); 325 330 rv = 0; 326 331 out_unlock: 327 - spin_unlock_bh(&ls->ls_recover_idr_lock); 332 + spin_unlock_bh(&ls->ls_recover_xa_lock); 328 333 return rv; 329 334 } 330 335 331 - static void recover_idr_del(struct dlm_rsb *r) 336 + static void recover_xa_del(struct dlm_rsb *r) 332 337 { 333 338 struct dlm_ls *ls = r->res_ls; 334 339 335 - spin_lock_bh(&ls->ls_recover_idr_lock); 336 - idr_remove(&ls->ls_recover_idr, r->res_id); 340 + spin_lock_bh(&ls->ls_recover_xa_lock); 341 + xa_erase_bh(&ls->ls_recover_xa, r->res_id); 337 342 r->res_id = 0; 338 343 ls->ls_recover_list_count--; 339 - spin_unlock_bh(&ls->ls_recover_idr_lock); 344 + spin_unlock_bh(&ls->ls_recover_xa_lock); 340 345 341 346 dlm_put_rsb(r); 342 347 } 343 348 344 - static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id) 349 + static struct dlm_rsb *recover_xa_find(struct dlm_ls *ls, uint64_t id) 345 350 { 346 351 struct dlm_rsb *r; 347 352 348 - spin_lock_bh(&ls->ls_recover_idr_lock); 349 - r = idr_find(&ls->ls_recover_idr, (int)id); 350 - spin_unlock_bh(&ls->ls_recover_idr_lock); 353 + spin_lock_bh(&ls->ls_recover_xa_lock); 354 + r = xa_load(&ls->ls_recover_xa, (int)id); 355 + spin_unlock_bh(&ls->ls_recover_xa_lock); 351 356 return r; 352 357 } 353 358 354 - static void recover_idr_clear(struct dlm_ls *ls) 359 + static void recover_xa_clear(struct dlm_ls *ls) 355 360 { 356 361 struct dlm_rsb *r; 357 - int id; 362 + unsigned long id; 358 363 359 - spin_lock_bh(&ls->ls_recover_idr_lock); 364 + spin_lock_bh(&ls->ls_recover_xa_lock); 360 365 361 - idr_for_each_entry(&ls->ls_recover_idr, r, id) { 362 - idr_remove(&ls->ls_recover_idr, id); 366 + xa_for_each(&ls->ls_recover_xa, id, r) { 367 + xa_erase_bh(&ls->ls_recover_xa, id); 363 368 r->res_id = 0; 364 369 r->res_recover_locks_count = 0; 365 370 ls->ls_recover_list_count--; ··· 377 372 ls->ls_recover_list_count); 378 373 ls->ls_recover_list_count = 0; 379 374 } 380 - spin_unlock_bh(&ls->ls_recover_idr_lock); 375 + spin_unlock_bh(&ls->ls_recover_xa_lock); 381 376 } 382 377 383 378 ··· 475 470 set_new_master(r); 476 471 error = 0; 477 472 } else { 478 - recover_idr_add(r); 473 + recover_xa_add(r); 479 474 error = dlm_send_rcom_lookup(r, dir_nodeid, seq); 480 475 } 481 476 ··· 556 551 557 552 log_rinfo(ls, "dlm_recover_masters %u of %u", count, total); 558 553 559 - error = dlm_wait_function(ls, &recover_idr_empty); 554 + error = dlm_wait_function(ls, &recover_xa_empty); 560 555 out: 561 556 if (error) 562 - recover_idr_clear(ls); 557 + recover_xa_clear(ls); 563 558 return error; 564 559 } 565 560 ··· 568 563 struct dlm_rsb *r; 569 564 int ret_nodeid, new_master; 570 565 571 - r = recover_idr_find(ls, le64_to_cpu(rc->rc_id)); 566 + r = recover_xa_find(ls, le64_to_cpu(rc->rc_id)); 
572 567 if (!r) { 573 568 log_error(ls, "dlm_recover_master_reply no id %llx", 574 569 (unsigned long long)le64_to_cpu(rc->rc_id)); ··· 587 582 r->res_nodeid = new_master; 588 583 set_new_master(r); 589 584 unlock_rsb(r); 590 - recover_idr_del(r); 585 + recover_xa_del(r); 591 586 592 - if (recover_idr_empty(ls)) 587 + if (recover_xa_empty(ls)) 593 588 wake_up(&ls->ls_wait_general); 594 589 out: 595 590 return 0;