Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dlm: use rwlock for rsb hash table

The conversion to rhashtable introduced a single hash table lock per lockspace,
in place of per-bucket locks. To make this more scalable, switch to
using an rwlock for hash table access. The common-case fast path uses
it as a read lock.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>

Authored by Alexander Aring and committed by David Teigland
e9131359 b1f2381c

+206 -87
+2 -2
fs/dlm/debug_fs.c
··· 413 413 else 414 414 list = &ls->ls_keep; 415 415 416 - spin_lock_bh(&ls->ls_rsbtbl_lock); 416 + read_lock_bh(&ls->ls_rsbtbl_lock); 417 417 return seq_list_start(list, *pos); 418 418 } 419 419 ··· 434 434 { 435 435 struct dlm_ls *ls = seq->private; 436 436 437 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 437 + read_unlock_bh(&ls->ls_rsbtbl_lock); 438 438 } 439 439 440 440 static const struct seq_operations format1_seq_ops = {
+2 -2
fs/dlm/dir.c
··· 200 200 struct dlm_rsb *r; 201 201 int rv; 202 202 203 - spin_lock_bh(&ls->ls_rsbtbl_lock); 203 + read_lock_bh(&ls->ls_rsbtbl_lock); 204 204 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); 205 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 205 + read_unlock_bh(&ls->ls_rsbtbl_lock); 206 206 if (!rv) 207 207 return r; 208 208
+1 -1
fs/dlm/dlm_internal.h
··· 585 585 spinlock_t ls_lkbidr_spin; 586 586 587 587 struct rhashtable ls_rsbtbl; 588 - spinlock_t ls_rsbtbl_lock; 588 + rwlock_t ls_rsbtbl_lock; 589 589 590 590 struct list_head ls_toss; 591 591 struct list_head ls_keep;
+194 -75
fs/dlm/lock.c
··· 342 342 343 343 /* TODO move this to lib/refcount.c */ 344 344 static __must_check bool 345 - dlm_refcount_dec_and_lock_bh(refcount_t *r, spinlock_t *lock) 345 + dlm_refcount_dec_and_write_lock_bh(refcount_t *r, rwlock_t *lock) 346 346 __cond_acquires(lock) 347 347 { 348 348 if (refcount_dec_not_one(r)) 349 349 return false; 350 350 351 - spin_lock_bh(lock); 351 + write_lock_bh(lock); 352 352 if (!refcount_dec_and_test(r)) { 353 - spin_unlock_bh(lock); 353 + write_unlock_bh(lock); 354 354 return false; 355 355 } 356 356 ··· 358 358 } 359 359 360 360 /* TODO move this to include/linux/kref.h */ 361 - static inline int dlm_kref_put_lock_bh(struct kref *kref, 362 - void (*release)(struct kref *kref), 363 - spinlock_t *lock) 361 + static inline int dlm_kref_put_write_lock_bh(struct kref *kref, 362 + void (*release)(struct kref *kref), 363 + rwlock_t *lock) 364 364 { 365 - if (dlm_refcount_dec_and_lock_bh(&kref->refcount, lock)) { 365 + if (dlm_refcount_dec_and_write_lock_bh(&kref->refcount, lock)) { 366 366 release(kref); 367 367 return 1; 368 368 } ··· 378 378 struct dlm_ls *ls = r->res_ls; 379 379 int rv; 380 380 381 - rv = dlm_kref_put_lock_bh(&r->res_ref, toss_rsb, 382 - &ls->ls_rsbtbl_lock); 381 + rv = dlm_kref_put_write_lock_bh(&r->res_ref, toss_rsb, 382 + &ls->ls_rsbtbl_lock); 383 383 if (rv) 384 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 384 + write_unlock_bh(&ls->ls_rsbtbl_lock); 385 385 } 386 386 387 387 void dlm_put_rsb(struct dlm_rsb *r) ··· 603 603 * a caching handling and the other holders might to put 604 604 * this rsb out of the toss state. 
605 605 */ 606 - rv = spin_trylock(&ls->ls_rsbtbl_lock); 606 + rv = write_trylock(&ls->ls_rsbtbl_lock); 607 607 if (!rv) { 608 608 spin_unlock(&ls->ls_toss_q_lock); 609 609 /* rearm again try timer */ ··· 618 618 /* not necessary to held the ls_rsbtbl_lock when 619 619 * calling send_remove() 620 620 */ 621 - spin_unlock(&ls->ls_rsbtbl_lock); 621 + write_unlock(&ls->ls_rsbtbl_lock); 622 622 623 623 /* remove the rsb out of the toss queue its gone 624 624 * drom DLM now ··· 702 702 703 703 static int rsb_insert(struct dlm_rsb *rsb, struct rhashtable *rhash) 704 704 { 705 - int rv; 706 - 707 - rv = rhashtable_insert_fast(rhash, &rsb->res_node, 708 - dlm_rhash_rsb_params); 709 - if (rv == -EEXIST) { 710 - log_print("%s match", __func__); 711 - dlm_dump_rsb(rsb); 712 - } 713 - 714 - return rv; 705 + return rhashtable_insert_fast(rhash, &rsb->res_node, 706 + dlm_rhash_rsb_params); 715 707 } 716 708 717 709 /* ··· 798 806 goto out; 799 807 } 800 808 801 - spin_lock_bh(&ls->ls_rsbtbl_lock); 809 + retry_lookup: 802 810 811 + /* check if the rsb is in keep state under read lock - likely path */ 812 + read_lock_bh(&ls->ls_rsbtbl_lock); 803 813 error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); 804 - if (error) 814 + if (error) { 815 + read_unlock_bh(&ls->ls_rsbtbl_lock); 805 816 goto do_new; 817 + } 806 818 807 819 /* 808 820 * rsb is active, so we can't check master_nodeid without lock_rsb. 809 821 */ 810 822 811 - if (rsb_flag(r, RSB_TOSS)) 823 + if (rsb_flag(r, RSB_TOSS)) { 824 + read_unlock_bh(&ls->ls_rsbtbl_lock); 812 825 goto do_toss; 826 + } 813 827 814 828 kref_get(&r->res_ref); 815 - goto out_unlock; 829 + read_unlock_bh(&ls->ls_rsbtbl_lock); 830 + goto out; 816 831 817 832 818 833 do_toss: 834 + write_lock_bh(&ls->ls_rsbtbl_lock); 835 + 836 + /* retry lookup under write lock to see if its still in toss state 837 + * if not it's in keep state and we relookup - unlikely path. 
838 + */ 839 + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); 840 + if (!error) { 841 + if (!rsb_flag(r, RSB_TOSS)) { 842 + write_unlock_bh(&ls->ls_rsbtbl_lock); 843 + goto retry_lookup; 844 + } 845 + } else { 846 + write_unlock_bh(&ls->ls_rsbtbl_lock); 847 + goto do_new; 848 + } 849 + 819 850 /* 820 851 * rsb found inactive (master_nodeid may be out of date unless 821 852 * we are the dir_nodeid or were the master) No other thread ··· 852 837 log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s", 853 838 from_nodeid, r->res_master_nodeid, dir_nodeid, 854 839 r->res_name); 840 + write_unlock_bh(&ls->ls_rsbtbl_lock); 855 841 error = -ENOTBLK; 856 - goto out_unlock; 842 + goto out; 857 843 } 858 844 859 845 if ((r->res_master_nodeid != our_nodeid) && from_dir) { ··· 884 868 */ 885 869 kref_init(&r->res_ref); 886 870 rsb_delete_toss_timer(ls, r); 887 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 871 + write_unlock_bh(&ls->ls_rsbtbl_lock); 888 872 889 - goto out_unlock; 873 + goto out; 890 874 891 875 892 876 do_new: ··· 895 879 */ 896 880 897 881 if (error == -EBADR && !create) 898 - goto out_unlock; 882 + goto out; 899 883 900 884 error = get_rsb_struct(ls, name, len, &r); 901 - if (error == -EAGAIN) { 902 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 885 + if (error == -EAGAIN) 903 886 goto retry; 904 - } 905 887 if (error) 906 - goto out_unlock; 888 + goto out; 907 889 908 890 r->res_hash = hash; 909 891 r->res_dir_nodeid = dir_nodeid; ··· 923 909 dlm_free_rsb(r); 924 910 r = NULL; 925 911 error = -ENOTBLK; 926 - goto out_unlock; 912 + goto out; 927 913 } 928 914 929 915 if (from_other) { ··· 943 929 } 944 930 945 931 out_add: 932 + 933 + write_lock_bh(&ls->ls_rsbtbl_lock); 946 934 error = rsb_insert(r, &ls->ls_rsbtbl); 947 - if (!error) 935 + if (error == -EEXIST) { 936 + /* somebody else was faster and it seems the 937 + * rsb exists now, we do a whole relookup 938 + */ 939 + write_unlock_bh(&ls->ls_rsbtbl_lock); 940 + dlm_free_rsb(r); 941 + goto 
retry_lookup; 942 + } else if (!error) { 948 943 list_add(&r->res_rsbs_list, &ls->ls_keep); 949 - out_unlock: 950 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 944 + } 945 + write_unlock_bh(&ls->ls_rsbtbl_lock); 951 946 out: 952 947 *r_ret = r; 953 948 return error; ··· 980 957 if (error < 0) 981 958 goto out; 982 959 983 - spin_lock_bh(&ls->ls_rsbtbl_lock); 960 + retry_lookup: 984 961 962 + /* check if the rsb is in keep state under read lock - likely path */ 963 + read_lock_bh(&ls->ls_rsbtbl_lock); 985 964 error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); 986 - if (error) 965 + if (error) { 966 + read_unlock_bh(&ls->ls_rsbtbl_lock); 987 967 goto do_new; 968 + } 988 969 989 - if (rsb_flag(r, RSB_TOSS)) 970 + if (rsb_flag(r, RSB_TOSS)) { 971 + read_unlock_bh(&ls->ls_rsbtbl_lock); 990 972 goto do_toss; 973 + } 991 974 992 975 /* 993 976 * rsb is active, so we can't check master_nodeid without lock_rsb. 994 977 */ 995 978 996 979 kref_get(&r->res_ref); 997 - goto out_unlock; 980 + read_unlock_bh(&ls->ls_rsbtbl_lock); 981 + 982 + goto out; 998 983 999 984 1000 985 do_toss: 986 + write_lock_bh(&ls->ls_rsbtbl_lock); 987 + 988 + /* retry lookup under write lock to see if its still in toss state 989 + * if not it's in keep state and we relookup - unlikely path. 990 + */ 991 + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); 992 + if (!error) { 993 + if (!rsb_flag(r, RSB_TOSS)) { 994 + write_unlock_bh(&ls->ls_rsbtbl_lock); 995 + goto retry_lookup; 996 + } 997 + } else { 998 + write_unlock_bh(&ls->ls_rsbtbl_lock); 999 + goto do_new; 1000 + } 1001 + 1002 + 1001 1003 /* 1002 1004 * rsb found inactive. 
No other thread is using this rsb because 1003 1005 * it's on the toss list, so we can look at or update ··· 1035 987 log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d", 1036 988 from_nodeid, r->res_master_nodeid, dir_nodeid); 1037 989 dlm_print_rsb(r); 990 + write_unlock_bh(&ls->ls_rsbtbl_lock); 1038 991 error = -ENOTBLK; 1039 - goto out_unlock; 992 + goto out; 1040 993 } 1041 994 1042 995 if (!recover && (r->res_master_nodeid != our_nodeid) && ··· 1059 1010 */ 1060 1011 kref_init(&r->res_ref); 1061 1012 rsb_delete_toss_timer(ls, r); 1062 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 1013 + write_unlock_bh(&ls->ls_rsbtbl_lock); 1063 1014 1064 - goto out_unlock; 1015 + goto out; 1065 1016 1066 1017 1067 1018 do_new: ··· 1071 1022 1072 1023 error = get_rsb_struct(ls, name, len, &r); 1073 1024 if (error == -EAGAIN) { 1074 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 1075 1025 goto retry; 1076 1026 } 1077 1027 if (error) 1078 - goto out_unlock; 1028 + goto out; 1079 1029 1080 1030 r->res_hash = hash; 1081 1031 r->res_dir_nodeid = dir_nodeid; ··· 1082 1034 r->res_nodeid = (dir_nodeid == our_nodeid) ? 
0 : dir_nodeid; 1083 1035 kref_init(&r->res_ref); 1084 1036 1037 + write_lock_bh(&ls->ls_rsbtbl_lock); 1085 1038 error = rsb_insert(r, &ls->ls_rsbtbl); 1086 - if (!error) 1039 + if (error == -EEXIST) { 1040 + /* somebody else was faster and it seems the 1041 + * rsb exists now, we do a whole relookup 1042 + */ 1043 + write_unlock_bh(&ls->ls_rsbtbl_lock); 1044 + dlm_free_rsb(r); 1045 + goto retry_lookup; 1046 + } else if (!error) { 1087 1047 list_add(&r->res_rsbs_list, &ls->ls_keep); 1088 - out_unlock: 1089 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 1048 + } 1049 + write_unlock_bh(&ls->ls_rsbtbl_lock); 1050 + 1090 1051 out: 1091 1052 *r_ret = r; 1092 1053 return error; ··· 1308 1251 if (error < 0) 1309 1252 return error; 1310 1253 1311 - spin_lock_bh(&ls->ls_rsbtbl_lock); 1254 + retry_lookup: 1255 + 1256 + /* check if the rsb is in keep state under read lock - likely path */ 1257 + read_lock_bh(&ls->ls_rsbtbl_lock); 1312 1258 error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); 1313 1259 if (!error) { 1314 - if (rsb_flag(r, RSB_TOSS)) 1260 + if (rsb_flag(r, RSB_TOSS)) { 1261 + read_unlock_bh(&ls->ls_rsbtbl_lock); 1315 1262 goto do_toss; 1263 + } 1316 1264 1317 1265 /* because the rsb is active, we need to lock_rsb before 1318 1266 * checking/changing re_master_nodeid 1319 1267 */ 1320 1268 1321 1269 hold_rsb(r); 1322 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 1270 + read_unlock_bh(&ls->ls_rsbtbl_lock); 1323 1271 lock_rsb(r); 1324 1272 1325 1273 __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false, ··· 1336 1274 1337 1275 return 0; 1338 1276 } else { 1277 + read_unlock_bh(&ls->ls_rsbtbl_lock); 1339 1278 goto not_found; 1340 1279 } 1341 1280 1342 1281 do_toss: 1282 + /* unlikely path - relookup under write */ 1283 + write_lock_bh(&ls->ls_rsbtbl_lock); 1284 + 1285 + /* rsb_mod_timer() requires to held ls_rsbtbl_lock in write lock 1286 + * check if the rsb is still in toss state, if not relookup 1287 + */ 1288 + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, 
&r); 1289 + if (!error) { 1290 + if (!rsb_flag(r, RSB_TOSS)) { 1291 + write_unlock_bh(&ls->ls_rsbtbl_lock); 1292 + /* something as changed, very unlikely but 1293 + * try again 1294 + */ 1295 + goto retry_lookup; 1296 + } 1297 + } else { 1298 + write_unlock_bh(&ls->ls_rsbtbl_lock); 1299 + goto not_found; 1300 + } 1301 + 1343 1302 /* because the rsb is inactive (on toss list), it's not refcounted 1344 1303 * and lock_rsb is not used, but is protected by the rsbtbl lock 1345 1304 */ ··· 1370 1287 1371 1288 rsb_mod_timer(ls, r); 1372 1289 /* the rsb was inactive (on toss list) */ 1373 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 1290 + write_unlock_bh(&ls->ls_rsbtbl_lock); 1374 1291 1375 1292 return 0; 1376 1293 1377 1294 not_found: 1378 1295 error = get_rsb_struct(ls, name, len, &r); 1379 - if (error == -EAGAIN) { 1380 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 1296 + if (error == -EAGAIN) 1381 1297 goto retry; 1382 - } 1383 1298 if (error) 1384 - goto out_unlock; 1299 + goto out; 1385 1300 1386 1301 r->res_hash = hash; 1387 1302 r->res_dir_nodeid = our_nodeid; ··· 1388 1307 kref_init(&r->res_ref); 1389 1308 rsb_set_flag(r, RSB_TOSS); 1390 1309 1310 + write_lock_bh(&ls->ls_rsbtbl_lock); 1391 1311 error = rsb_insert(r, &ls->ls_rsbtbl); 1392 - if (error) { 1312 + if (error == -EEXIST) { 1313 + /* somebody else was faster and it seems the 1314 + * rsb exists now, we do a whole relookup 1315 + */ 1316 + write_unlock_bh(&ls->ls_rsbtbl_lock); 1317 + dlm_free_rsb(r); 1318 + goto retry_lookup; 1319 + } else if (error) { 1320 + write_unlock_bh(&ls->ls_rsbtbl_lock); 1393 1321 /* should never happen */ 1394 1322 dlm_free_rsb(r); 1395 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 1396 1323 goto retry; 1397 1324 } 1398 1325 1399 1326 list_add(&r->res_rsbs_list, &ls->ls_toss); 1400 1327 rsb_mod_timer(ls, r); 1328 + write_unlock_bh(&ls->ls_rsbtbl_lock); 1401 1329 1402 1330 if (result) 1403 1331 *result = DLM_LU_ADD; 1404 1332 *r_nodeid = from_nodeid; 1405 - out_unlock: 1406 - 
spin_unlock_bh(&ls->ls_rsbtbl_lock); 1333 + out: 1407 1334 return error; 1408 1335 } 1409 1336 ··· 1419 1330 { 1420 1331 struct dlm_rsb *r; 1421 1332 1422 - spin_lock_bh(&ls->ls_rsbtbl_lock); 1333 + read_lock_bh(&ls->ls_rsbtbl_lock); 1423 1334 list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) { 1424 1335 if (r->res_hash == hash) 1425 1336 dlm_dump_rsb(r); 1426 1337 } 1427 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 1338 + read_unlock_bh(&ls->ls_rsbtbl_lock); 1428 1339 } 1429 1340 1430 1341 void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) ··· 1432 1343 struct dlm_rsb *r = NULL; 1433 1344 int error; 1434 1345 1435 - spin_lock_bh(&ls->ls_rsbtbl_lock); 1346 + read_lock_bh(&ls->ls_rsbtbl_lock); 1436 1347 error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); 1437 1348 if (!error) 1438 1349 goto out; 1439 1350 1440 1351 dlm_dump_rsb(r); 1441 1352 out: 1442 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 1353 + read_unlock_bh(&ls->ls_rsbtbl_lock); 1443 1354 } 1444 1355 1445 1356 static void toss_rsb(struct kref *kref) ··· 1565 1476 can release the write_lock before the detach_lkb */ 1566 1477 1567 1478 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb);); 1479 + } 1480 + 1481 + /* TODO move this to lib/refcount.c */ 1482 + static __must_check bool 1483 + dlm_refcount_dec_and_lock_bh(refcount_t *r, spinlock_t *lock) 1484 + __cond_acquires(lock) 1485 + { 1486 + if (refcount_dec_not_one(r)) 1487 + return false; 1488 + 1489 + spin_lock_bh(lock); 1490 + if (!refcount_dec_and_test(r)) { 1491 + spin_unlock_bh(lock); 1492 + return false; 1493 + } 1494 + 1495 + return true; 1496 + } 1497 + 1498 + /* TODO move this to include/linux/kref.h */ 1499 + static inline int dlm_kref_put_lock_bh(struct kref *kref, 1500 + void (*release)(struct kref *kref), 1501 + spinlock_t *lock) 1502 + { 1503 + if (dlm_refcount_dec_and_lock_bh(&kref->refcount, lock)) { 1504 + release(kref); 1505 + return 1; 1506 + } 1507 + 1508 + return 0; 1568 1509 } 1569 1510 1570 1511 /* __put_lkb() is used 
when an lkb may not have an rsb attached to ··· 4366 4247 memset(name, 0, sizeof(name)); 4367 4248 memcpy(name, ms->m_extra, len); 4368 4249 4369 - spin_lock_bh(&ls->ls_rsbtbl_lock); 4250 + write_lock_bh(&ls->ls_rsbtbl_lock); 4370 4251 4371 4252 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); 4372 4253 if (rv) { 4373 4254 /* should not happen */ 4374 4255 log_error(ls, "%s from %d not found %s", __func__, 4375 4256 from_nodeid, name); 4376 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 4257 + write_unlock_bh(&ls->ls_rsbtbl_lock); 4377 4258 return; 4378 4259 } 4379 4260 ··· 4383 4264 log_error(ls, "receive_remove keep from %d master %d", 4384 4265 from_nodeid, r->res_master_nodeid); 4385 4266 dlm_print_rsb(r); 4386 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 4267 + write_unlock_bh(&ls->ls_rsbtbl_lock); 4387 4268 return; 4388 4269 } 4389 4270 4390 4271 log_debug(ls, "receive_remove from %d master %d first %x %s", 4391 4272 from_nodeid, r->res_master_nodeid, r->res_first_lkid, 4392 4273 name); 4393 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 4274 + write_unlock_bh(&ls->ls_rsbtbl_lock); 4394 4275 return; 4395 4276 } 4396 4277 ··· 4398 4279 log_error(ls, "receive_remove toss from %d master %d", 4399 4280 from_nodeid, r->res_master_nodeid); 4400 4281 dlm_print_rsb(r); 4401 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 4282 + write_unlock_bh(&ls->ls_rsbtbl_lock); 4402 4283 return; 4403 4284 } 4404 4285 4405 4286 list_del(&r->res_rsbs_list); 4406 4287 rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, 4407 4288 dlm_rhash_rsb_params); 4408 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 4289 + write_unlock_bh(&ls->ls_rsbtbl_lock); 4409 4290 4410 4291 free_toss_rsb(r); 4411 4292 } ··· 5473 5354 { 5474 5355 struct dlm_rsb *r; 5475 5356 5476 - spin_lock_bh(&ls->ls_rsbtbl_lock); 5357 + read_lock_bh(&ls->ls_rsbtbl_lock); 5477 5358 list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) { 5478 5359 if (!rsb_flag(r, RSB_RECOVER_GRANT)) 5479 5360 continue; ··· 5482 5363 continue; 5483 5364 } 5484 5365 
hold_rsb(r); 5485 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 5366 + read_unlock_bh(&ls->ls_rsbtbl_lock); 5486 5367 return r; 5487 5368 } 5488 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 5369 + read_unlock_bh(&ls->ls_rsbtbl_lock); 5489 5370 return NULL; 5490 5371 } 5491 5372
+1 -1
fs/dlm/lockspace.c
··· 424 424 425 425 INIT_LIST_HEAD(&ls->ls_toss); 426 426 INIT_LIST_HEAD(&ls->ls_keep); 427 - spin_lock_init(&ls->ls_rsbtbl_lock); 427 + rwlock_init(&ls->ls_rsbtbl_lock); 428 428 429 429 error = rhashtable_init(&ls->ls_rsbtbl, &dlm_rhash_rsb_params); 430 430 if (error)
+2 -2
fs/dlm/recover.c
··· 884 884 struct dlm_rsb *r, *safe; 885 885 unsigned int count = 0; 886 886 887 - spin_lock_bh(&ls->ls_rsbtbl_lock); 887 + write_lock_bh(&ls->ls_rsbtbl_lock); 888 888 list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) { 889 889 list_del(&r->res_rsbs_list); 890 890 rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, ··· 897 897 free_toss_rsb(r); 898 898 count++; 899 899 } 900 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 900 + write_unlock_bh(&ls->ls_rsbtbl_lock); 901 901 902 902 if (count) 903 903 log_rinfo(ls, "dlm_clear_toss %u done", count);
+4 -4
fs/dlm/recoverd.c
··· 32 32 goto out; 33 33 } 34 34 35 - spin_lock_bh(&ls->ls_rsbtbl_lock); 35 + read_lock_bh(&ls->ls_rsbtbl_lock); 36 36 list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) { 37 37 if (r->res_nodeid) 38 38 continue; ··· 40 40 list_add(&r->res_masters_list, &ls->ls_masters_list); 41 41 dlm_hold_rsb(r); 42 42 } 43 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 43 + read_unlock_bh(&ls->ls_rsbtbl_lock); 44 44 out: 45 45 write_unlock_bh(&ls->ls_masters_lock); 46 46 return error; ··· 62 62 { 63 63 struct dlm_rsb *r; 64 64 65 - spin_lock_bh(&ls->ls_rsbtbl_lock); 65 + read_lock_bh(&ls->ls_rsbtbl_lock); 66 66 list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) { 67 67 list_add(&r->res_root_list, root_list); 68 68 dlm_hold_rsb(r); 69 69 } 70 70 71 71 WARN_ON_ONCE(!list_empty(&ls->ls_toss)); 72 - spin_unlock_bh(&ls->ls_rsbtbl_lock); 72 + read_unlock_bh(&ls->ls_rsbtbl_lock); 73 73 } 74 74 75 75 static void dlm_release_root_list(struct list_head *root_list)