Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dlm: drop own rsb pre allocation mechanism

This patch drops the DLM's own hand-rolled rsb pre-allocation mechanism, as
allocation is already handled by the kmem caches; we don't need another
layer on top of that to run a separate pre-allocation scheme.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>

authored by

Alexander Aring and committed by
David Teigland
1ffefc19 4db41bf4

+13 -99
+1 -8
fs/dlm/dlm_internal.h
··· 322 322 unsigned long res_toss_time; 323 323 uint32_t res_first_lkid; 324 324 struct list_head res_lookup; /* lkbs waiting on first */ 325 - union { 326 - struct list_head res_hashchain; 327 - struct rhash_head res_node; /* rsbtbl */ 328 - }; 325 + struct rhash_head res_node; /* rsbtbl */ 329 326 struct list_head res_grantqueue; 330 327 struct list_head res_convertqueue; 331 328 struct list_head res_waitqueue; ··· 592 595 593 596 spinlock_t ls_orphans_lock; 594 597 struct list_head ls_orphans; 595 - 596 - spinlock_t ls_new_rsb_spin; 597 - int ls_new_rsb_count; 598 - struct list_head ls_new_rsb; /* new rsb structs */ 599 598 600 599 struct list_head ls_nodes; /* current nodes in ls */ 601 600 struct list_head ls_nodes_gone; /* dead node list, recovery */
+12 -80
fs/dlm/lock.c
··· 389 389 put_rsb(r); 390 390 } 391 391 392 - static int pre_rsb_struct(struct dlm_ls *ls) 393 - { 394 - struct dlm_rsb *r1, *r2; 395 - int count = 0; 396 - 397 - spin_lock_bh(&ls->ls_new_rsb_spin); 398 - if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) { 399 - spin_unlock_bh(&ls->ls_new_rsb_spin); 400 - return 0; 401 - } 402 - spin_unlock_bh(&ls->ls_new_rsb_spin); 403 - 404 - r1 = dlm_allocate_rsb(ls); 405 - r2 = dlm_allocate_rsb(ls); 406 - 407 - spin_lock_bh(&ls->ls_new_rsb_spin); 408 - if (r1) { 409 - list_add(&r1->res_hashchain, &ls->ls_new_rsb); 410 - ls->ls_new_rsb_count++; 411 - } 412 - if (r2) { 413 - list_add(&r2->res_hashchain, &ls->ls_new_rsb); 414 - ls->ls_new_rsb_count++; 415 - } 416 - count = ls->ls_new_rsb_count; 417 - spin_unlock_bh(&ls->ls_new_rsb_spin); 418 - 419 - if (!count) 420 - return -ENOMEM; 421 - return 0; 422 - } 423 - 424 392 /* connected with timer_delete_sync() in dlm_ls_stop() to stop 425 393 * new timers when recovery is triggered and don't run them 426 394 * again until a dlm_timer_resume() tries it again. 
··· 620 652 struct dlm_rsb **r_ret) 621 653 { 622 654 struct dlm_rsb *r; 623 - int count; 624 655 625 - spin_lock_bh(&ls->ls_new_rsb_spin); 626 - if (list_empty(&ls->ls_new_rsb)) { 627 - count = ls->ls_new_rsb_count; 628 - spin_unlock_bh(&ls->ls_new_rsb_spin); 629 - log_debug(ls, "find_rsb retry %d %d %s", 630 - count, dlm_config.ci_new_rsb_count, 631 - (const char *)name); 632 - return -EAGAIN; 633 - } 634 - 635 - r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); 636 - list_del(&r->res_hashchain); 637 - ls->ls_new_rsb_count--; 638 - spin_unlock_bh(&ls->ls_new_rsb_spin); 656 + r = dlm_allocate_rsb(ls); 657 + if (!r) 658 + return -ENOMEM; 639 659 640 660 r->res_ls = ls; 641 661 r->res_length = len; ··· 748 792 } 749 793 750 794 retry: 751 - if (create) { 752 - error = pre_rsb_struct(ls); 753 - if (error < 0) 754 - goto out; 755 - } 756 - 757 - retry_lookup: 758 795 759 796 /* check if the rsb is in keep state under read lock - likely path */ 760 797 read_lock_bh(&ls->ls_rsbtbl_lock); ··· 781 832 if (!error) { 782 833 if (!rsb_flag(r, RSB_TOSS)) { 783 834 write_unlock_bh(&ls->ls_rsbtbl_lock); 784 - goto retry_lookup; 835 + goto retry; 785 836 } 786 837 } else { 787 838 write_unlock_bh(&ls->ls_rsbtbl_lock); ··· 847 898 goto out; 848 899 849 900 error = get_rsb_struct(ls, name, len, &r); 850 - if (error == -EAGAIN) 851 - goto retry; 852 - if (error) 901 + if (WARN_ON_ONCE(error)) 853 902 goto out; 854 903 855 904 r->res_hash = hash; ··· 899 952 */ 900 953 write_unlock_bh(&ls->ls_rsbtbl_lock); 901 954 dlm_free_rsb(r); 902 - goto retry_lookup; 955 + goto retry; 903 956 } else if (!error) { 904 957 list_add(&r->res_rsbs_list, &ls->ls_keep); 905 958 } ··· 923 976 int error; 924 977 925 978 retry: 926 - error = pre_rsb_struct(ls); 927 - if (error < 0) 928 - goto out; 929 - 930 - retry_lookup: 931 979 932 980 /* check if the rsb is in keep state under read lock - likely path */ 933 981 read_lock_bh(&ls->ls_rsbtbl_lock); ··· 957 1015 if (!error) { 958 1016 
if (!rsb_flag(r, RSB_TOSS)) { 959 1017 write_unlock_bh(&ls->ls_rsbtbl_lock); 960 - goto retry_lookup; 1018 + goto retry; 961 1019 } 962 1020 } else { 963 1021 write_unlock_bh(&ls->ls_rsbtbl_lock); ··· 1012 1070 */ 1013 1071 1014 1072 error = get_rsb_struct(ls, name, len, &r); 1015 - if (error == -EAGAIN) { 1016 - goto retry; 1017 - } 1018 - if (error) 1073 + if (WARN_ON_ONCE(error)) 1019 1074 goto out; 1020 1075 1021 1076 r->res_hash = hash; ··· 1029 1090 */ 1030 1091 write_unlock_bh(&ls->ls_rsbtbl_lock); 1031 1092 dlm_free_rsb(r); 1032 - goto retry_lookup; 1093 + goto retry; 1033 1094 } else if (!error) { 1034 1095 list_add(&r->res_rsbs_list, &ls->ls_keep); 1035 1096 } ··· 1243 1304 } 1244 1305 1245 1306 retry: 1246 - error = pre_rsb_struct(ls); 1247 - if (error < 0) 1248 - return error; 1249 - 1250 - retry_lookup: 1251 1307 1252 1308 /* check if the rsb is in keep state under read lock - likely path */ 1253 1309 read_lock_bh(&ls->ls_rsbtbl_lock); ··· 1288 1354 /* something as changed, very unlikely but 1289 1355 * try again 1290 1356 */ 1291 - goto retry_lookup; 1357 + goto retry; 1292 1358 } 1293 1359 } else { 1294 1360 write_unlock_bh(&ls->ls_rsbtbl_lock); ··· 1310 1376 1311 1377 not_found: 1312 1378 error = get_rsb_struct(ls, name, len, &r); 1313 - if (error == -EAGAIN) 1314 - goto retry; 1315 - if (error) 1379 + if (WARN_ON_ONCE(error)) 1316 1380 goto out; 1317 1381 1318 1382 r->res_hash = hash; ··· 1327 1395 */ 1328 1396 write_unlock_bh(&ls->ls_rsbtbl_lock); 1329 1397 dlm_free_rsb(r); 1330 - goto retry_lookup; 1398 + goto retry; 1331 1399 } else if (error) { 1332 1400 write_unlock_bh(&ls->ls_rsbtbl_lock); 1333 1401 /* should never happen */
-11
fs/dlm/lockspace.c
··· 428 428 INIT_LIST_HEAD(&ls->ls_orphans); 429 429 spin_lock_init(&ls->ls_orphans_lock); 430 430 431 - INIT_LIST_HEAD(&ls->ls_new_rsb); 432 - spin_lock_init(&ls->ls_new_rsb_spin); 433 - 434 431 INIT_LIST_HEAD(&ls->ls_nodes); 435 432 INIT_LIST_HEAD(&ls->ls_nodes_gone); 436 433 ls->ls_num_nodes = 0; ··· 685 688 686 689 static int release_lockspace(struct dlm_ls *ls, int force) 687 690 { 688 - struct dlm_rsb *rsb; 689 691 int busy, rv; 690 692 691 693 busy = lockspace_busy(ls, force); ··· 751 755 * Free all rsb's on rsbtbl 752 756 */ 753 757 rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL); 754 - 755 - while (!list_empty(&ls->ls_new_rsb)) { 756 - rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, 757 - res_hashchain); 758 - list_del(&rsb->res_hashchain); 759 - dlm_free_rsb(rsb); 760 - } 761 758 762 759 /* 763 760 * Free structures on any other lists