dlm: use spin_lock_bh for message processing

Use spin_lock_bh for all spinlocks involved in message processing,
in preparation for softirq message processing. DLM lock requests
from user space involve dlm processing in user context, in addition
to the standard kernel context, necessitating bh variants.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>

Authored by Alexander Aring and committed by David Teigland
578acf9a 308533b4
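
Why the _bh variants matter here, as a minimal standalone sketch (the names below are illustrative, not code from this patch): once message processing can run in softirq context, any spinlock it shares with process-context paths, such as lock requests arriving from user space, must be taken with bottom halves disabled in the process-context path. Otherwise a softirq arriving on the same CPU could preempt the lock holder and spin on the held lock forever.

#include <linux/spinlock.h>

/* hypothetical lock shared by both paths */
static DEFINE_SPINLOCK(example_lock);

/* Process context (e.g. a DLM lock request from user space): _bh
 * disables softirqs on this CPU, so the receive path cannot interrupt
 * the critical section and deadlock on example_lock. */
static void example_user_context_path(void)
{
	spin_lock_bh(&example_lock);
	/* ... update message-processing state ... */
	spin_unlock_bh(&example_lock);
}

/* Softirq context (e.g. the message processing itself): a plain
 * spin_lock() is sufficient here, because softirqs do not preempt
 * each other on the same CPU. */
static void example_softirq_path(void)
{
	spin_lock(&example_lock);
	/* ... read the same state ... */
	spin_unlock(&example_lock);
}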

14 files changed: +287 -258
+6 -6
fs/dlm/ast.c
···
 		cb->astparam = lkb->lkb_astparam;
 		INIT_WORK(&cb->work, dlm_callback_work);

-		spin_lock(&ls->ls_cb_lock);
+		spin_lock_bh(&ls->ls_cb_lock);
 		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags))
 			list_add(&cb->list, &ls->ls_cb_delay);
 		else
 			queue_work(ls->ls_callback_wq, &cb->work);
-		spin_unlock(&ls->ls_cb_lock);
+		spin_unlock_bh(&ls->ls_cb_lock);
 		break;
 	case DLM_ENQUEUE_CALLBACK_SUCCESS:
 		break;
···
 void dlm_callback_suspend(struct dlm_ls *ls)
 {
 	if (ls->ls_callback_wq) {
-		spin_lock(&ls->ls_cb_lock);
+		spin_lock_bh(&ls->ls_cb_lock);
 		set_bit(LSFL_CB_DELAY, &ls->ls_flags);
-		spin_unlock(&ls->ls_cb_lock);
+		spin_unlock_bh(&ls->ls_cb_lock);

 		flush_workqueue(ls->ls_callback_wq);
 	}
···
 		return;

 more:
-	spin_lock(&ls->ls_cb_lock);
+	spin_lock_bh(&ls->ls_cb_lock);
 	list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) {
 		list_del(&cb->list);
 		queue_work(ls->ls_callback_wq, &cb->work);
···
 	empty = list_empty(&ls->ls_cb_delay);
 	if (empty)
 		clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
-	spin_unlock(&ls->ls_cb_lock);
+	spin_unlock_bh(&ls->ls_cb_lock);

 	sum += count;
 	if (!empty) {
+14 -14
fs/dlm/debug_fs.c
···

 	tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;

-	spin_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
 	if (!RB_EMPTY_ROOT(tree)) {
 		for (node = rb_first(tree); node; node = rb_next(node)) {
 			r = rb_entry(node, struct dlm_rsb, res_hashnode);
···
 				dlm_hold_rsb(r);
 				ri->rsb = r;
 				ri->bucket = bucket;
-				spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+				spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
 				return ri;
 			}
 		}
 	}
-	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);

 	/*
 	 * move to the first rsb in the next non-empty bucket
···
 		}
 		tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;

-		spin_lock(&ls->ls_rsbtbl[bucket].lock);
+		spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
 		if (!RB_EMPTY_ROOT(tree)) {
 			node = rb_first(tree);
 			r = rb_entry(node, struct dlm_rsb, res_hashnode);
 			dlm_hold_rsb(r);
 			ri->rsb = r;
 			ri->bucket = bucket;
-			spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+			spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
 			*pos = n;
 			return ri;
 		}
-		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
 	}
 }
···
 	 * move to the next rsb in the same bucket
 	 */

-	spin_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
 	rp = ri->rsb;
 	next = rb_next(&rp->res_hashnode);

···
 		r = rb_entry(next, struct dlm_rsb, res_hashnode);
 		dlm_hold_rsb(r);
 		ri->rsb = r;
-		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
 		dlm_put_rsb(rp);
 		++*pos;
 		return ri;
 	}
-	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
 	dlm_put_rsb(rp);

 	/*
···
 		}
 		tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;

-		spin_lock(&ls->ls_rsbtbl[bucket].lock);
+		spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
 		if (!RB_EMPTY_ROOT(tree)) {
 			next = rb_first(tree);
 			r = rb_entry(next, struct dlm_rsb, res_hashnode);
 			dlm_hold_rsb(r);
 			ri->rsb = r;
 			ri->bucket = bucket;
-			spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+			spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
 			*pos = n;
 			return ri;
 		}
-		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
 	}
 }
···
 		goto out;
 	}

-	spin_lock(&ls->ls_waiters_lock);
+	spin_lock_bh(&ls->ls_waiters_lock);
 	memset(debug_buf, 0, sizeof(debug_buf));

 	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
···
 			break;
 		pos += ret;
 	}
-	spin_unlock(&ls->ls_waiters_lock);
+	spin_unlock_bh(&ls->ls_waiters_lock);
 	dlm_unlock_recovery(ls);

 	rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos);
+12 -12
fs/dlm/dir.c
···
 	hash = jhash(name, len, 0);
 	bucket = hash & (ls->ls_rsbtbl_size - 1);

-	spin_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
 	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r);
 	if (rv)
 		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss,
 					 name, len, &r);
-	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);

 	if (!rv)
 		return r;
···
 {
 	struct dlm_dir_dump *dd, *safe;

-	write_lock(&ls->ls_dir_dump_lock);
+	write_lock_bh(&ls->ls_dir_dump_lock);
 	list_for_each_entry_safe(dd, safe, &ls->ls_dir_dump_list, list) {
 		if (dd->nodeid_init == nodeid) {
 			log_error(ls, "drop dump seq %llu",
···
 			kfree(dd);
 		}
 	}
-	write_unlock(&ls->ls_dir_dump_lock);
+	write_unlock_bh(&ls->ls_dir_dump_lock);
 }

 static struct dlm_dir_dump *lookup_dir_dump(struct dlm_ls *ls, int nodeid)
 {
 	struct dlm_dir_dump *iter, *dd = NULL;

-	read_lock(&ls->ls_dir_dump_lock);
+	read_lock_bh(&ls->ls_dir_dump_lock);
 	list_for_each_entry(iter, &ls->ls_dir_dump_list, list) {
 		if (iter->nodeid_init == nodeid) {
 			dd = iter;
 			break;
 		}
 	}
-	read_unlock(&ls->ls_dir_dump_lock);
+	read_unlock_bh(&ls->ls_dir_dump_lock);

 	return dd;
 }
···
 	dd->seq_init = ls->ls_recover_seq;
 	dd->nodeid_init = nodeid;

-	write_lock(&ls->ls_dir_dump_lock);
+	write_lock_bh(&ls->ls_dir_dump_lock);
 	list_add(&dd->list, &ls->ls_dir_dump_list);
-	write_unlock(&ls->ls_dir_dump_lock);
+	write_unlock_bh(&ls->ls_dir_dump_lock);

 	return dd;
 }
···
 	struct dlm_dir_dump *dd;
 	__be16 be_namelen;

-	read_lock(&ls->ls_masters_lock);
+	read_lock_bh(&ls->ls_masters_lock);

 	if (inlen > 1) {
 		dd = lookup_dir_dump(ls, nodeid);
···
 		log_rinfo(ls, "dlm_recover_directory nodeid %d sent %u res out %u messages",
 			  nodeid, dd->sent_res, dd->sent_msg);

-		write_lock(&ls->ls_dir_dump_lock);
+		write_lock_bh(&ls->ls_dir_dump_lock);
 		list_del_init(&dd->list);
-		write_unlock(&ls->ls_dir_dump_lock);
+		write_unlock_bh(&ls->ls_dir_dump_lock);
 		kfree(dd);
 	}
 out:
-	read_unlock(&ls->ls_masters_lock);
+	read_unlock_bh(&ls->ls_masters_lock);
 }

+118 -88
fs/dlm/lock.c
···
 	hold_rsb(r);
 }

+/* TODO move this to lib/refcount.c */
+static __must_check bool
+dlm_refcount_dec_and_lock_bh(refcount_t *r, spinlock_t *lock)
+__cond_acquires(lock)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	spin_lock_bh(lock);
+	if (!refcount_dec_and_test(r)) {
+		spin_unlock_bh(lock);
+		return false;
+	}
+
+	return true;
+}
+
+/* TODO move this to include/linux/kref.h */
+static inline int dlm_kref_put_lock_bh(struct kref *kref,
+				       void (*release)(struct kref *kref),
+				       spinlock_t *lock)
+{
+	if (dlm_refcount_dec_and_lock_bh(&kref->refcount, lock)) {
+		release(kref);
+		return 1;
+	}
+
+	return 0;
+}
+
 /* When all references to the rsb are gone it's transferred to
    the tossed list for later disposal. */

···
 	uint32_t bucket = r->res_bucket;
 	int rv;

-	rv = kref_put_lock(&r->res_ref, toss_rsb,
-			   &ls->ls_rsbtbl[bucket].lock);
+	rv = dlm_kref_put_lock_bh(&r->res_ref, toss_rsb,
+				  &ls->ls_rsbtbl[bucket].lock);
 	if (rv)
-		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
 }

 void dlm_put_rsb(struct dlm_rsb *r)
···
 	struct dlm_rsb *r1, *r2;
 	int count = 0;

-	spin_lock(&ls->ls_new_rsb_spin);
+	spin_lock_bh(&ls->ls_new_rsb_spin);
 	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
-		spin_unlock(&ls->ls_new_rsb_spin);
+		spin_unlock_bh(&ls->ls_new_rsb_spin);
 		return 0;
 	}
-	spin_unlock(&ls->ls_new_rsb_spin);
+	spin_unlock_bh(&ls->ls_new_rsb_spin);

 	r1 = dlm_allocate_rsb(ls);
 	r2 = dlm_allocate_rsb(ls);

-	spin_lock(&ls->ls_new_rsb_spin);
+	spin_lock_bh(&ls->ls_new_rsb_spin);
 	if (r1) {
 		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
 		ls->ls_new_rsb_count++;
···
 		ls->ls_new_rsb_count++;
 	}
 	count = ls->ls_new_rsb_count;
-	spin_unlock(&ls->ls_new_rsb_spin);
+	spin_unlock_bh(&ls->ls_new_rsb_spin);

 	if (!count)
 		return -ENOMEM;
···
 	struct dlm_rsb *r;
 	int count;

-	spin_lock(&ls->ls_new_rsb_spin);
+	spin_lock_bh(&ls->ls_new_rsb_spin);
 	if (list_empty(&ls->ls_new_rsb)) {
 		count = ls->ls_new_rsb_count;
-		spin_unlock(&ls->ls_new_rsb_spin);
+		spin_unlock_bh(&ls->ls_new_rsb_spin);
 		log_debug(ls, "find_rsb retry %d %d %s",
 			  count, dlm_config.ci_new_rsb_count,
 			  (const char *)name);
···
 	/* Convert the empty list_head to a NULL rb_node for tree usage: */
 	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
 	ls->ls_new_rsb_count--;
-	spin_unlock(&ls->ls_new_rsb_spin);
+	spin_unlock_bh(&ls->ls_new_rsb_spin);

 	r->res_ls = ls;
 	r->res_length = len;
···
 		goto out;
 	}

-	spin_lock(&ls->ls_rsbtbl[b].lock);
+	spin_lock_bh(&ls->ls_rsbtbl[b].lock);

 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
 	if (error)
···

 	error = get_rsb_struct(ls, name, len, &r);
 	if (error == -EAGAIN) {
-		spin_unlock(&ls->ls_rsbtbl[b].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 		goto retry;
 	}
 	if (error)
···
 out_add:
 	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
-	spin_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 out:
 	*r_ret = r;
 	return error;
···
 	if (error < 0)
 		goto out;

-	spin_lock(&ls->ls_rsbtbl[b].lock);
+	spin_lock_bh(&ls->ls_rsbtbl[b].lock);

 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
 	if (error)
···

 	error = get_rsb_struct(ls, name, len, &r);
 	if (error == -EAGAIN) {
-		spin_unlock(&ls->ls_rsbtbl[b].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 		goto retry;
 	}
 	if (error)
···

 	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
-	spin_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 out:
 	*r_ret = r;
 	return error;
···
 	if (error < 0)
 		return error;

-	spin_lock(&ls->ls_rsbtbl[b].lock);
+	spin_lock_bh(&ls->ls_rsbtbl[b].lock);
 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
 	if (!error) {
 		/* because the rsb is active, we need to lock_rsb before
···
 		 */

 		hold_rsb(r);
-		spin_unlock(&ls->ls_rsbtbl[b].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 		lock_rsb(r);

 		__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false,
···

 	r->res_toss_time = jiffies;
 	/* the rsb was inactive (on toss list) */
-	spin_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[b].lock);

 	return 0;

 not_found:
 	error = get_rsb_struct(ls, name, len, &r);
 	if (error == -EAGAIN) {
-		spin_unlock(&ls->ls_rsbtbl[b].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 		goto retry;
 	}
 	if (error)
···
 	if (error) {
 		/* should never happen */
 		dlm_free_rsb(r);
-		spin_unlock(&ls->ls_rsbtbl[b].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 		goto retry;
 	}

···
 	*result = DLM_LU_ADD;
 	*r_nodeid = from_nodeid;
 out_unlock:
-	spin_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 	return error;
 }
···
 	int i;

 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
-		spin_lock(&ls->ls_rsbtbl[i].lock);
+		spin_lock_bh(&ls->ls_rsbtbl[i].lock);
 		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
 			r = rb_entry(n, struct dlm_rsb, res_hashnode);
 			if (r->res_hash == hash)
 				dlm_dump_rsb(r);
 		}
-		spin_unlock(&ls->ls_rsbtbl[i].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[i].lock);
 	}
 }
···
 	hash = jhash(name, len, 0);
 	b = hash & (ls->ls_rsbtbl_size - 1);

-	spin_lock(&ls->ls_rsbtbl[b].lock);
+	spin_lock_bh(&ls->ls_rsbtbl[b].lock);
 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
 	if (!error)
 		goto out_dump;
···
 out_dump:
 	dlm_dump_rsb(r);
 out:
-	spin_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 }

 static void toss_rsb(struct kref *kref)
···
 	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
 	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);

-	spin_lock(&ls->ls_lkbidr_spin);
+	spin_lock_bh(&ls->ls_lkbidr_spin);
 	rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT);
 	if (rv >= 0)
 		lkb->lkb_id = rv;
-	spin_unlock(&ls->ls_lkbidr_spin);
+	spin_unlock_bh(&ls->ls_lkbidr_spin);

 	if (rv < 0) {
 		log_error(ls, "create_lkb idr error %d", rv);
···
 {
 	struct dlm_lkb *lkb;

-	spin_lock(&ls->ls_lkbidr_spin);
+	spin_lock_bh(&ls->ls_lkbidr_spin);
 	lkb = idr_find(&ls->ls_lkbidr, lkid);
 	if (lkb)
 		kref_get(&lkb->lkb_ref);
-	spin_unlock(&ls->ls_lkbidr_spin);
+	spin_unlock_bh(&ls->ls_lkbidr_spin);

 	*lkb_ret = lkb;
 	return lkb ? 0 : -ENOENT;
···
 	uint32_t lkid = lkb->lkb_id;
 	int rv;

-	rv = kref_put_lock(&lkb->lkb_ref, kill_lkb,
-			   &ls->ls_lkbidr_spin);
+	rv = dlm_kref_put_lock_bh(&lkb->lkb_ref, kill_lkb,
+				  &ls->ls_lkbidr_spin);
 	if (rv) {
 		idr_remove(&ls->ls_lkbidr, lkid);
-		spin_unlock(&ls->ls_lkbidr_spin);
+		spin_unlock_bh(&ls->ls_lkbidr_spin);

 		detach_lkb(lkb);

···
 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
 	int error = 0;

-	spin_lock(&ls->ls_waiters_lock);
+	spin_lock_bh(&ls->ls_waiters_lock);

 	if (is_overlap_unlock(lkb) ||
 	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
···
 		log_error(ls, "addwait error %x %d flags %x %d %d %s",
 			  lkb->lkb_id, error, dlm_iflags_val(lkb), mstype,
 			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
-	spin_unlock(&ls->ls_waiters_lock);
+	spin_unlock_bh(&ls->ls_waiters_lock);
 	return error;
 }

···
 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
 	int error;

-	spin_lock(&ls->ls_waiters_lock);
+	spin_lock_bh(&ls->ls_waiters_lock);
 	error = _remove_from_waiters(lkb, mstype, NULL);
-	spin_unlock(&ls->ls_waiters_lock);
+	spin_unlock_bh(&ls->ls_waiters_lock);
 	return error;
 }

···
 	int error;

 	if (!local)
-		spin_lock(&ls->ls_waiters_lock);
+		spin_lock_bh(&ls->ls_waiters_lock);
 	else
 		WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) ||
 			     !dlm_locking_stopped(ls));
 	error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms);
 	if (!local)
-		spin_unlock(&ls->ls_waiters_lock);
+		spin_unlock_bh(&ls->ls_waiters_lock);
 	return error;
 }

···

 	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);

-	spin_lock(&ls->ls_rsbtbl[b].lock);
+	spin_lock_bh(&ls->ls_rsbtbl[b].lock);

 	if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags)) {
-		spin_unlock(&ls->ls_rsbtbl[b].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 		return;
 	}
···
 		set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags);
 	else
 		clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags);
-	spin_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[b].lock);

 	/*
 	 * While searching for rsb's to free, we found some that require
···
 		name = ls->ls_remove_names[i];
 		len = ls->ls_remove_lens[i];

-		spin_lock(&ls->ls_rsbtbl[b].lock);
+		spin_lock_bh(&ls->ls_rsbtbl[b].lock);
 		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
 		if (rv) {
-			spin_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 			log_debug(ls, "remove_name not toss %s", name);
 			continue;
 		}

 		if (r->res_master_nodeid != our_nodeid) {
-			spin_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 			log_debug(ls, "remove_name master %d dir %d our %d %s",
 				  r->res_master_nodeid, r->res_dir_nodeid,
 				  our_nodeid, name);
···

 		if (r->res_dir_nodeid == our_nodeid) {
 			/* should never happen */
-			spin_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 			log_error(ls, "remove_name dir %d master %d our %d %s",
 				  r->res_dir_nodeid, r->res_master_nodeid,
 				  our_nodeid, name);
···

 		if (!time_after_eq(jiffies, r->res_toss_time +
 				   dlm_config.ci_toss_secs * HZ)) {
-			spin_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 			log_debug(ls, "remove_name toss_time %lu now %lu %s",
 				  r->res_toss_time, jiffies, name);
 			continue;
 		}

 		if (!kref_put(&r->res_ref, kill_rsb)) {
-			spin_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 			log_error(ls, "remove_name in use %s", name);
 			continue;
 		}

 		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
 		send_remove(r);
-		spin_unlock(&ls->ls_rsbtbl[b].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[b].lock);

 		dlm_free_rsb(r);
 	}
···
 	hash = jhash(name, len, 0);
 	b = hash & (ls->ls_rsbtbl_size - 1);

-	spin_lock(&ls->ls_rsbtbl[b].lock);
+	spin_lock_bh(&ls->ls_rsbtbl[b].lock);

 	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
 	if (rv) {
···
 		/* should not happen */
 		log_error(ls, "receive_remove from %d not found %s",
 			  from_nodeid, name);
-		spin_unlock(&ls->ls_rsbtbl[b].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 		return;
 	}
 	if (r->res_master_nodeid != from_nodeid) {
···
 		log_error(ls, "receive_remove keep from %d master %d",
 			  from_nodeid, r->res_master_nodeid);
 		dlm_print_rsb(r);
-		spin_unlock(&ls->ls_rsbtbl[b].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 		return;
 	}

 	log_debug(ls, "receive_remove from %d master %d first %x %s",
 		  from_nodeid, r->res_master_nodeid, r->res_first_lkid,
 		  name);
-	spin_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 	return;
 }

···
 	log_error(ls, "receive_remove toss from %d master %d",
 		  from_nodeid, r->res_master_nodeid);
 	dlm_print_rsb(r);
-	spin_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 	return;
 }

 if (kref_put(&r->res_ref, kill_rsb)) {
 	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
-	spin_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 	dlm_free_rsb(r);
 } else {
 	log_error(ls, "receive_remove from %d rsb ref error",
 		  from_nodeid);
 	dlm_print_rsb(r);
-	spin_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
 }
 }

···
 			 int nodeid)
 {
 try_again:
-	read_lock(&ls->ls_requestqueue_lock);
+	read_lock_bh(&ls->ls_requestqueue_lock);
 	if (test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) {
 		/* If we were a member of this lockspace, left, and rejoined,
 		   other nodes may still be sending us messages from the
 		   lockspace generation before we left. */
 		if (WARN_ON_ONCE(!ls->ls_generation)) {
-			read_unlock(&ls->ls_requestqueue_lock);
+			read_unlock_bh(&ls->ls_requestqueue_lock);
 			log_limit(ls, "receive %d from %d ignore old gen",
 				  le32_to_cpu(ms->m_type), nodeid);
 			return;
 		}

-		read_unlock(&ls->ls_requestqueue_lock);
-		write_lock(&ls->ls_requestqueue_lock);
+		read_unlock_bh(&ls->ls_requestqueue_lock);
+		write_lock_bh(&ls->ls_requestqueue_lock);
 		/* recheck because we hold writelock now */
 		if (!test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) {
 			write_unlock_bh(&ls->ls_requestqueue_lock);
···
 		}

 		dlm_add_requestqueue(ls, nodeid, ms);
-		write_unlock(&ls->ls_requestqueue_lock);
+		write_unlock_bh(&ls->ls_requestqueue_lock);
 	} else {
 		_receive_message(ls, ms, 0);
-		read_unlock(&ls->ls_requestqueue_lock);
+		read_unlock_bh(&ls->ls_requestqueue_lock);
 	}
 }

···
 	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
 	   be inactive (in this ls) before transitioning to recovery mode */

-	read_lock(&ls->ls_recv_active);
+	read_lock_bh(&ls->ls_recv_active);
 	if (hd->h_cmd == DLM_MSG)
 		dlm_receive_message(ls, &p->message, nodeid);
 	else if (hd->h_cmd == DLM_RCOM)
···
 	else
 		log_error(ls, "invalid h_cmd %d from %d lockspace %x",
 			  hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace));
-	read_unlock(&ls->ls_recv_active);
+	read_unlock_bh(&ls->ls_recv_active);

 	dlm_put_lockspace(ls);
 }
···
 {
 	struct dlm_lkb *lkb = NULL, *iter;

-	spin_lock(&ls->ls_waiters_lock);
+	spin_lock_bh(&ls->ls_waiters_lock);
 	list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
 		if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) {
 			hold_lkb(iter);
···
 			break;
 		}
 	}
-	spin_unlock(&ls->ls_waiters_lock);
+	spin_unlock_bh(&ls->ls_waiters_lock);

 	return lkb;
 }
···
 	}

 	/* Forcibly remove from waiters list */
-	spin_lock(&ls->ls_waiters_lock);
+	spin_lock_bh(&ls->ls_waiters_lock);
 	list_del_init(&lkb->lkb_wait_reply);
-	spin_unlock(&ls->ls_waiters_lock);
+	spin_unlock_bh(&ls->ls_waiters_lock);

 	/*
 	 * The lkb is now clear of all prior waiters state and can be
···
 	struct rb_node *n;
 	struct dlm_rsb *r;

-	spin_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
 	for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
 		r = rb_entry(n, struct dlm_rsb, res_hashnode);

···
 			continue;
 		}
 		hold_rsb(r);
-		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
 		return r;
 	}
-	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
 	return NULL;
 }
···
 	}

 	/* add this new lkb to the per-process list of locks */
-	spin_lock(&ua->proc->locks_spin);
+	spin_lock_bh(&ua->proc->locks_spin);
 	hold_lkb(lkb);
 	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
-	spin_unlock(&ua->proc->locks_spin);
+	spin_unlock_bh(&ua->proc->locks_spin);
 	do_put = false;
 out_put:
 	trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false);
···
 	 * for the proc locks list.
 	 */

-	spin_lock(&ua->proc->locks_spin);
+	spin_lock_bh(&ua->proc->locks_spin);
 	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
-	spin_unlock(&ua->proc->locks_spin);
+	spin_unlock_bh(&ua->proc->locks_spin);
 out:
 	kfree(ua_tmp);
 	return rv;
···
 	if (error)
 		goto out_put;

-	spin_lock(&ua->proc->locks_spin);
+	spin_lock_bh(&ua->proc->locks_spin);
 	/* dlm_user_add_cb() may have already taken lkb off the proc list */
 	if (!list_empty(&lkb->lkb_ownqueue))
 		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
-	spin_unlock(&ua->proc->locks_spin);
+	spin_unlock_bh(&ua->proc->locks_spin);
 out_put:
 	trace_dlm_unlock_end(ls, lkb, flags, error);
 	dlm_put_lkb(lkb);
···
 {
 	struct dlm_lkb *lkb = NULL;

-	spin_lock(&ls->ls_clear_proc_locks);
+	spin_lock_bh(&ls->ls_clear_proc_locks);
 	if (list_empty(&proc->locks))
 		goto out;

···
 	else
 		set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
 out:
-	spin_unlock(&ls->ls_clear_proc_locks);
+	spin_unlock_bh(&ls->ls_clear_proc_locks);
 	return lkb;
 }
···
 		dlm_put_lkb(lkb);
 	}

-	spin_lock(&ls->ls_clear_proc_locks);
+	spin_lock_bh(&ls->ls_clear_proc_locks);

 	/* in-progress unlocks */
 	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
···
 		dlm_free_cb(cb);
 	}

-	spin_unlock(&ls->ls_clear_proc_locks);
+	spin_unlock_bh(&ls->ls_clear_proc_locks);
 	dlm_unlock_recovery(ls);
 }
···

 	while (1) {
 		lkb = NULL;
-		spin_lock(&proc->locks_spin);
+		spin_lock_bh(&proc->locks_spin);
 		if (!list_empty(&proc->locks)) {
 			lkb = list_entry(proc->locks.next, struct dlm_lkb,
 					 lkb_ownqueue);
 			list_del_init(&lkb->lkb_ownqueue);
 		}
-		spin_unlock(&proc->locks_spin);
+		spin_unlock_bh(&proc->locks_spin);

 		if (!lkb)
 			break;
···
 		dlm_put_lkb(lkb); /* ref from proc->locks list */
 	}

-	spin_lock(&proc->locks_spin);
+	spin_lock_bh(&proc->locks_spin);
 	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
 		list_del_init(&lkb->lkb_ownqueue);
 		set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
 		dlm_put_lkb(lkb);
 	}
-	spin_unlock(&proc->locks_spin);
+	spin_unlock_bh(&proc->locks_spin);

-	spin_lock(&proc->asts_spin);
+	spin_lock_bh(&proc->asts_spin);
 	list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
 		list_del(&cb->list);
 		dlm_free_cb(cb);
 	}
-	spin_unlock(&proc->asts_spin);
+	spin_unlock_bh(&proc->asts_spin);
 }

 /* pid of 0 means purge all orphans */
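
The two helpers added at the top of fs/dlm/lock.c mirror refcount_dec_and_lock()/kref_put_lock(), taking the spinlock with spin_lock_bh() instead of spin_lock(). On a successful put they return 1 with the lock still held, so the caller owns the matching spin_unlock_bh(), exactly as put_rsb() and __put_lkb() do above. A reduced sketch of that calling convention (example_release/example_put are hypothetical, not part of the patch):

static void example_release(struct kref *kref)
{
	/* runs with the lock held; tear the object down */
}

static void example_put(struct kref *ref, spinlock_t *lock)
{
	/* dlm_kref_put_lock_bh() returns 1 only when the refcount hit
	 * zero: the lock was acquired with spin_lock_bh() and
	 * example_release() already ran, so drop the lock here. */
	if (dlm_kref_put_lock_bh(ref, example_release, lock))
		spin_unlock_bh(lock);
}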
+2 -2
fs/dlm/lock.h
···

 static inline void lock_rsb(struct dlm_rsb *r)
 {
-	spin_lock(&r->res_lock);
+	spin_lock_bh(&r->res_lock);
 }

 static inline void unlock_rsb(struct dlm_rsb *r)
 {
-	spin_unlock(&r->res_lock);
+	spin_unlock_bh(&r->res_lock);
 }

 #endif
+25 -26
fs/dlm/lockspace.c
···
 {
 	struct dlm_ls *ls;

-	spin_lock(&lslist_lock);
+	spin_lock_bh(&lslist_lock);
 	list_for_each_entry(ls, &lslist, ls_list) {
 		if (time_after_eq(jiffies, ls->ls_scan_time +
 					   dlm_config.ci_scan_secs * HZ)) {
-			spin_unlock(&lslist_lock);
+			spin_unlock_bh(&lslist_lock);
 			return ls;
 		}
 	}
-	spin_unlock(&lslist_lock);
+	spin_unlock_bh(&lslist_lock);
 	return NULL;
 }
···
 {
 	struct dlm_ls *ls;

-	spin_lock(&lslist_lock);
+	spin_lock_bh(&lslist_lock);

 	list_for_each_entry(ls, &lslist, ls_list) {
 		if (ls->ls_global_id == id) {
···
 	}
 	ls = NULL;
 out:
-	spin_unlock(&lslist_lock);
+	spin_unlock_bh(&lslist_lock);
 	return ls;
 }
···
 {
 	struct dlm_ls *ls;

-	spin_lock(&lslist_lock);
+	spin_lock_bh(&lslist_lock);
 	list_for_each_entry(ls, &lslist, ls_list) {
 		if (ls->ls_local_handle == lockspace) {
 			atomic_inc(&ls->ls_count);
···
 	}
 	ls = NULL;
 out:
-	spin_unlock(&lslist_lock);
+	spin_unlock_bh(&lslist_lock);
 	return ls;
 }
···
 {
 	struct dlm_ls *ls;

-	spin_lock(&lslist_lock);
+	spin_lock_bh(&lslist_lock);
 	list_for_each_entry(ls, &lslist, ls_list) {
 		if (ls->ls_device.minor == minor) {
 			atomic_inc(&ls->ls_count);
···
 	}
 	ls = NULL;
 out:
-	spin_unlock(&lslist_lock);
+	spin_unlock_bh(&lslist_lock);
 	return ls;
 }
···

 retry:
 	wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0);

-	spin_lock(&lslist_lock);
+	spin_lock_bh(&lslist_lock);
 	if (atomic_read(&ls->ls_count) != 0) {
-		spin_unlock(&lslist_lock);
+		spin_unlock_bh(&lslist_lock);
 		goto retry;
 	}

 	WARN_ON(ls->ls_create_count != 0);
 	list_del(&ls->ls_list);
-	spin_unlock(&lslist_lock);
+	spin_unlock_bh(&lslist_lock);
 }

 static int threads_start(void)
···

 	error = 0;

-	spin_lock(&lslist_lock);
+	spin_lock_bh(&lslist_lock);
 	list_for_each_entry(ls, &lslist, ls_list) {
 		WARN_ON(ls->ls_create_count <= 0);
 		if (ls->ls_namelen != namelen)
···
 		error = 1;
 		break;
 	}
-	spin_unlock(&lslist_lock);
+	spin_unlock_bh(&lslist_lock);

 	if (error)
 		goto out;
···
 	INIT_LIST_HEAD(&ls->ls_dir_dump_list);
 	rwlock_init(&ls->ls_dir_dump_lock);

-	spin_lock(&lslist_lock);
+	spin_lock_bh(&lslist_lock);
 	ls->ls_create_count = 1;
 	list_add(&ls->ls_list, &lslist);
-	spin_unlock(&lslist_lock);
+	spin_unlock_bh(&lslist_lock);

 	if (flags & DLM_LSFL_FS) {
 		error = dlm_callback_start(ls);
···
 out_callback:
 	dlm_callback_stop(ls);
 out_delist:
-	spin_lock(&lslist_lock);
+	spin_lock_bh(&lslist_lock);
 	list_del(&ls->ls_list);
-	spin_unlock(&lslist_lock);
+	spin_unlock_bh(&lslist_lock);
 	idr_destroy(&ls->ls_recover_idr);
 	kfree(ls->ls_recover_buf);
 out_lkbidr:
···
 {
 	int rv;

-	spin_lock(&ls->ls_lkbidr_spin);
+	spin_lock_bh(&ls->ls_lkbidr_spin);
 	if (force == 0) {
 		rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
 	} else if (force == 1) {
···
 	} else {
 		rv = 0;
 	}
-	spin_unlock(&ls->ls_lkbidr_spin);
+	spin_unlock_bh(&ls->ls_lkbidr_spin);
 	return rv;
 }
···

 	busy = lockspace_busy(ls, force);

-	spin_lock(&lslist_lock);
+	spin_lock_bh(&lslist_lock);
 	if (ls->ls_create_count == 1) {
 		if (busy) {
 			rv = -EBUSY;
···
 	} else {
 		rv = -EINVAL;
 	}
-	spin_unlock(&lslist_lock);
+	spin_unlock_bh(&lslist_lock);

 	if (rv) {
 		log_debug(ls, "release_lockspace no remove %d", rv);
···

 restart:
 	count = 0;
-	spin_lock(&lslist_lock);
+	spin_lock_bh(&lslist_lock);
 	list_for_each_entry(ls, &lslist, ls_list) {
 		if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) {
 			count++;
 			continue;
 		}
-		spin_unlock(&lslist_lock);
+		spin_unlock_bh(&lslist_lock);
 		log_error(ls, "no userland control daemon, stopping lockspace");
 		dlm_ls_stop(ls);
 		goto restart;
 	}
-	spin_unlock(&lslist_lock);
+	spin_unlock_bh(&lslist_lock);

 	if (count)
 		log_print("dlm user daemon left %d lockspaces", count);
 }
-
+8 -8
fs/dlm/lowcomms.c
···
 {
 	struct processqueue_entry *pentry;

-	spin_lock(&processqueue_lock);
+	spin_lock_bh(&processqueue_lock);
 	pentry = list_first_entry_or_null(&processqueue,
 					  struct processqueue_entry, list);
 	if (WARN_ON_ONCE(!pentry)) {
 		process_dlm_messages_pending = false;
-		spin_unlock(&processqueue_lock);
+		spin_unlock_bh(&processqueue_lock);
 		return;
 	}

 	list_del(&pentry->list);
 	atomic_dec(&processqueue_count);
-	spin_unlock(&processqueue_lock);
+	spin_unlock_bh(&processqueue_lock);

 	for (;;) {
 		dlm_process_incoming_buffer(pentry->nodeid, pentry->buf,
 					    pentry->buflen);
 		free_processqueue_entry(pentry);

-		spin_lock(&processqueue_lock);
+		spin_lock_bh(&processqueue_lock);
 		pentry = list_first_entry_or_null(&processqueue,
 						  struct processqueue_entry, list);
 		if (!pentry) {
 			process_dlm_messages_pending = false;
-			spin_unlock(&processqueue_lock);
+			spin_unlock_bh(&processqueue_lock);
 			break;
 		}

 		list_del(&pentry->list);
 		atomic_dec(&processqueue_count);
-		spin_unlock(&processqueue_lock);
+		spin_unlock_bh(&processqueue_lock);
 	}
 }

···
 	memmove(con->rx_leftover_buf, pentry->buf + ret,
 		con->rx_leftover);

-	spin_lock(&processqueue_lock);
+	spin_lock_bh(&processqueue_lock);
 	ret = atomic_inc_return(&processqueue_count);
 	list_add_tail(&pentry->list, &processqueue);
 	if (!process_dlm_messages_pending) {
 		process_dlm_messages_pending = true;
 		queue_work(process_workqueue, &process_work);
 	}
-	spin_unlock(&processqueue_lock);
+	spin_unlock_bh(&processqueue_lock);

 	if (ret > DLM_MAX_PROCESS_BUFFERS)
 		return DLM_IO_FLUSH;
+11 -11
fs/dlm/member.c
···
 	 * message to the requestqueue without races.
 	 */

-	write_lock(&ls->ls_recv_active);
+	write_lock_bh(&ls->ls_recv_active);

 	/*
 	 * Abort any recovery that's in progress (see RECOVER_STOP,
···
 	 * dlm to quit any processing (see RUNNING, dlm_locking_stopped()).
 	 */

-	spin_lock(&ls->ls_recover_lock);
+	spin_lock_bh(&ls->ls_recover_lock);
 	set_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
 	new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
 	ls->ls_recover_seq++;

 	/* activate requestqueue and stop processing */
-	write_lock(&ls->ls_requestqueue_lock);
+	write_lock_bh(&ls->ls_requestqueue_lock);
 	set_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags);
-	write_unlock(&ls->ls_requestqueue_lock);
-	spin_unlock(&ls->ls_recover_lock);
+	write_unlock_bh(&ls->ls_requestqueue_lock);
+	spin_unlock_bh(&ls->ls_recover_lock);

 	/*
 	 * Let dlm_recv run again, now any normal messages will be saved on the
 	 * requestqueue for later.
 	 */

-	write_unlock(&ls->ls_recv_active);
+	write_unlock_bh(&ls->ls_recv_active);

 	/*
 	 * This in_recovery lock does two things:
···

 	dlm_recoverd_suspend(ls);

-	spin_lock(&ls->ls_recover_lock);
+	spin_lock_bh(&ls->ls_recover_lock);
 	kfree(ls->ls_slots);
 	ls->ls_slots = NULL;
 	ls->ls_num_slots = 0;
 	ls->ls_slots_size = 0;
 	ls->ls_recover_status = 0;
-	spin_unlock(&ls->ls_recover_lock);
+	spin_unlock_bh(&ls->ls_recover_lock);

 	dlm_recoverd_resume(ls);

···
 	if (error < 0)
 		goto fail_rv;

-	spin_lock(&ls->ls_recover_lock);
+	spin_lock_bh(&ls->ls_recover_lock);

 	/* the lockspace needs to be stopped before it can be started */

 	if (!dlm_locking_stopped(ls)) {
-		spin_unlock(&ls->ls_recover_lock);
+		spin_unlock_bh(&ls->ls_recover_lock);
 		log_error(ls, "start ignored: lockspace running");
 		error = -EINVAL;
 		goto fail;
···
 	rv->seq = ++ls->ls_recover_seq;
 	rv_old = ls->ls_recover_args;
 	ls->ls_recover_args = rv;
-	spin_unlock(&ls->ls_recover_lock);
+	spin_unlock_bh(&ls->ls_recover_lock);

 	if (rv_old) {
 		log_error(ls, "unused recovery %llx %d",
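
The dlm_ls_stop() hunk above shows the rwlock-as-barrier idiom this series relies on: the receive path holds ls_recv_active for read (see dlm_receive_buffer() in fs/dlm/lock.c), so taking it for write briefly fences out all message processing while LSFL_RECV_MSG_BLOCKED is set. A reduced sketch of the idiom (names are illustrative, not code from the patch):

#include <linux/spinlock.h>

/* stand-in for ls->ls_recv_active */
static DEFINE_RWLOCK(recv_active);

static void receive_path(void)
{
	read_lock_bh(&recv_active);	/* many receivers may run at once */
	/* ... deliver one message ... */
	read_unlock_bh(&recv_active);
}

static void stop_path(void)
{
	/* The write lock is granted only after every receiver has left
	 * its read-side section, and it blocks new receivers until
	 * released, so flags flipped in here are observed atomically by
	 * the receive path. */
	write_lock_bh(&recv_active);
	/* ... set LSFL_RECV_MSG_BLOCKED etc. ... */
	write_unlock_bh(&recv_active);
}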
+20 -20
fs/dlm/midcomms.c
···
 	node->users = 0;
 	midcomms_node_reset(node);

-	spin_lock(&nodes_lock);
+	spin_lock_bh(&nodes_lock);
 	hlist_add_head_rcu(&node->hlist, &node_hash[r]);
-	spin_unlock(&nodes_lock);
+	spin_unlock_bh(&nodes_lock);

 	node->debugfs = dlm_create_debug_comms_file(nodeid, node);
 	return 0;
···

 static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
 {
-	spin_lock(&node->state_lock);
+	spin_lock_bh(&node->state_lock);
 	pr_debug("receive passive fin ack from node %d with state %s\n",
 		 node->nodeid, dlm_state_str(node->state));

···
 		wake_up(&node->shutdown_wait);
 		break;
 	default:
-		spin_unlock(&node->state_lock);
+		spin_unlock_bh(&node->state_lock);
 		log_print("%s: unexpected state: %d",
 			  __func__, node->state);
 		WARN_ON_ONCE(1);
 		return;
 	}
-	spin_unlock(&node->state_lock);
+	spin_unlock_bh(&node->state_lock);
 }

 static void dlm_receive_buffer_3_2_trace(uint32_t seq,
···
 	if (is_expected_seq) {
 		switch (p->header.h_cmd) {
 		case DLM_FIN:
-			spin_lock(&node->state_lock);
+			spin_lock_bh(&node->state_lock);
 			pr_debug("receive fin msg from node %d with state %s\n",
 				 node->nodeid, dlm_state_str(node->state));

···
 				/* probably remove_member caught it, do nothing */
 				break;
 			default:
-				spin_unlock(&node->state_lock);
+				spin_unlock_bh(&node->state_lock);
 				log_print("%s: unexpected state: %d",
 					  __func__, node->state);
 				WARN_ON_ONCE(1);
 				return;
 			}
-			spin_unlock(&node->state_lock);
+			spin_unlock_bh(&node->state_lock);
 			break;
 		default:
 			WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
···

 static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
 {
-	spin_lock(&node->state_lock);
+	spin_lock_bh(&node->state_lock);
 	pr_debug("receive active fin ack from node %d with state %s\n",
 		 node->nodeid, dlm_state_str(node->state));

···
 		wake_up(&node->shutdown_wait);
 		break;
 	default:
-		spin_unlock(&node->state_lock);
+		spin_unlock_bh(&node->state_lock);
 		log_print("%s: unexpected state: %d",
 			  __func__, node->state);
 		WARN_ON_ONCE(1);
 		return;
 	}
-	spin_unlock(&node->state_lock);
+	spin_unlock_bh(&node->state_lock);
 }

 void dlm_midcomms_add_member(int nodeid)
···
 		return;
 	}

-	spin_lock(&node->state_lock);
+	spin_lock_bh(&node->state_lock);
 	if (!node->users) {
 		pr_debug("receive add member from node %d with state %s\n",
 			 node->nodeid, dlm_state_str(node->state));
···

 	node->users++;
 	pr_debug("node %d users inc count %d\n", nodeid, node->users);
-	spin_unlock(&node->state_lock);
+	spin_unlock_bh(&node->state_lock);

 	srcu_read_unlock(&nodes_srcu, idx);
 }
···
 		return;
 	}

-	spin_lock(&node->state_lock);
+	spin_lock_bh(&node->state_lock);
 	/* case of dlm_midcomms_addr() created node but
 	 * was not added before because dlm_midcomms_close()
 	 * removed the node
 	 */
 	if (!node->users) {
-		spin_unlock(&node->state_lock);
+		spin_unlock_bh(&node->state_lock);
 		srcu_read_unlock(&nodes_srcu, idx);
 		return;
 	}
···
 			break;
 		}
 	}
-	spin_unlock(&node->state_lock);
+	spin_unlock_bh(&node->state_lock);

 	srcu_read_unlock(&nodes_srcu, idx);
 }
···
 		return;
 	}

-	spin_lock(&node->state_lock);
+	spin_lock_bh(&node->state_lock);
 	pr_debug("receive active shutdown for node %d with state %s\n",
 		 node->nodeid, dlm_state_str(node->state));
 	switch (node->state) {
···
 		 */
 		break;
 	}
-	spin_unlock(&node->state_lock);
+	spin_unlock_bh(&node->state_lock);

 	if (DLM_DEBUG_FENCE_TERMINATION)
 		msleep(5000);
···
 	ret = dlm_lowcomms_close(nodeid);
 	dlm_delete_debug_comms_file(node->debugfs);

-	spin_lock(&nodes_lock);
+	spin_lock_bh(&nodes_lock);
 	hlist_del_rcu(&node->hlist);
-	spin_unlock(&nodes_lock);
+	spin_unlock_bh(&nodes_lock);
 	srcu_read_unlock(&nodes_srcu, idx);

 	/* wait that all readers left until flush send queue */
+13 -13
fs/dlm/rcom.c
···

 static void allow_sync_reply(struct dlm_ls *ls, __le64 *new_seq)
 {
-	spin_lock(&ls->ls_rcom_spin);
+	spin_lock_bh(&ls->ls_rcom_spin);
 	*new_seq = cpu_to_le64(++ls->ls_rcom_seq);
 	set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
-	spin_unlock(&ls->ls_rcom_spin);
+	spin_unlock_bh(&ls->ls_rcom_spin);
 }

 static void disallow_sync_reply(struct dlm_ls *ls)
 {
-	spin_lock(&ls->ls_rcom_spin);
+	spin_lock_bh(&ls->ls_rcom_spin);
 	clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
 	clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
-	spin_unlock(&ls->ls_rcom_spin);
+	spin_unlock_bh(&ls->ls_rcom_spin);
 }

 /*
···
 		goto do_create;
 	}

-	spin_lock(&ls->ls_recover_lock);
+	spin_lock_bh(&ls->ls_recover_lock);
 	status = ls->ls_recover_status;
 	num_slots = ls->ls_num_slots;
-	spin_unlock(&ls->ls_recover_lock);
+	spin_unlock_bh(&ls->ls_recover_lock);
 	len += num_slots * sizeof(struct rcom_slot);

 do_create:
···
 	if (!num_slots)
 		goto do_send;

-	spin_lock(&ls->ls_recover_lock);
+	spin_lock_bh(&ls->ls_recover_lock);
 	if (ls->ls_num_slots != num_slots) {
-		spin_unlock(&ls->ls_recover_lock);
+		spin_unlock_bh(&ls->ls_recover_lock);
 		log_debug(ls, "receive_rcom_status num_slots %d to %d",
 			  num_slots, ls->ls_num_slots);
 		rc->rc_result = 0;
···
 	}

 	dlm_slots_copy_out(ls, rc);
-	spin_unlock(&ls->ls_recover_lock);
+	spin_unlock_bh(&ls->ls_recover_lock);

 do_send:
 	send_rcom_stateless(msg, rc);
···

 static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in)
 {
-	spin_lock(&ls->ls_rcom_spin);
+	spin_lock_bh(&ls->ls_rcom_spin);
 	if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
 	    le64_to_cpu(rc_in->rc_id) != ls->ls_rcom_seq) {
 		log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
···
 	clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
 	wake_up(&ls->ls_wait_general);
 out:
-	spin_unlock(&ls->ls_rcom_spin);
+	spin_unlock_bh(&ls->ls_rcom_spin);
 }

 int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,
···
 		break;
 	}

-	spin_lock(&ls->ls_recover_lock);
+	spin_lock_bh(&ls->ls_recover_lock);
 	status = ls->ls_recover_status;
 	stop = dlm_recovery_stopped(ls);
 	seq = ls->ls_recover_seq;
-	spin_unlock(&ls->ls_recover_lock);
+	spin_unlock_bh(&ls->ls_recover_lock);

 	if (stop && (rc->rc_type != cpu_to_le32(DLM_RCOM_STATUS)))
 		goto ignore;
+26 -26
fs/dlm/recover.c
···
 uint32_t dlm_recover_status(struct dlm_ls *ls)
 {
 	uint32_t status;
-	spin_lock(&ls->ls_recover_lock);
+	spin_lock_bh(&ls->ls_recover_lock);
 	status = ls->ls_recover_status;
-	spin_unlock(&ls->ls_recover_lock);
+	spin_unlock_bh(&ls->ls_recover_lock);
 	return status;
 }
···

 void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
 {
-	spin_lock(&ls->ls_recover_lock);
+	spin_lock_bh(&ls->ls_recover_lock);
 	_set_recover_status(ls, status);
-	spin_unlock(&ls->ls_recover_lock);
+	spin_unlock_bh(&ls->ls_recover_lock);
 }

 static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
···

 	rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
 	if (!rv) {
-		spin_lock(&ls->ls_recover_lock);
+		spin_lock_bh(&ls->ls_recover_lock);
 		_set_recover_status(ls, DLM_RS_NODES_ALL);
 		ls->ls_num_slots = num_slots;
 		ls->ls_slots_size = slots_size;
 		ls->ls_slots = slots;
 		ls->ls_generation = gen;
-		spin_unlock(&ls->ls_recover_lock);
+		spin_unlock_bh(&ls->ls_recover_lock);
 	} else {
 		dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
 	}
···
 {
 	int empty;

-	spin_lock(&ls->ls_recover_list_lock);
+	spin_lock_bh(&ls->ls_recover_list_lock);
 	empty = list_empty(&ls->ls_recover_list);
-	spin_unlock(&ls->ls_recover_list_lock);
+	spin_unlock_bh(&ls->ls_recover_list_lock);

 	return empty;
 }
···
 {
 	struct dlm_ls *ls = r->res_ls;

-	spin_lock(&ls->ls_recover_list_lock);
+	spin_lock_bh(&ls->ls_recover_list_lock);
 	if (list_empty(&r->res_recover_list)) {
 		list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
 		ls->ls_recover_list_count++;
 		dlm_hold_rsb(r);
 	}
-	spin_unlock(&ls->ls_recover_list_lock);
+	spin_unlock_bh(&ls->ls_recover_list_lock);
 }

 static void recover_list_del(struct dlm_rsb *r)
 {
 	struct dlm_ls *ls = r->res_ls;

-	spin_lock(&ls->ls_recover_list_lock);
+	spin_lock_bh(&ls->ls_recover_list_lock);
 	list_del_init(&r->res_recover_list);
 	ls->ls_recover_list_count--;
-	spin_unlock(&ls->ls_recover_list_lock);
+	spin_unlock_bh(&ls->ls_recover_list_lock);

 	dlm_put_rsb(r);
 }
···
 {
 	struct dlm_rsb *r, *s;

-	spin_lock(&ls->ls_recover_list_lock);
+	spin_lock_bh(&ls->ls_recover_list_lock);
 	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
 		list_del_init(&r->res_recover_list);
 		r->res_recover_locks_count = 0;
···
 			  ls->ls_recover_list_count);
 		ls->ls_recover_list_count = 0;
 	}
-	spin_unlock(&ls->ls_recover_list_lock);
+	spin_unlock_bh(&ls->ls_recover_list_lock);
 }

 static int recover_idr_empty(struct dlm_ls *ls)
 {
 	int empty = 1;

-	spin_lock(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_idr_lock);
 	if (ls->ls_recover_list_count)
 		empty = 0;
-	spin_unlock(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_idr_lock);

 	return empty;
 }
···
 	struct dlm_ls *ls = r->res_ls;
 	int rv;

-	spin_lock(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_idr_lock);
 	if (r->res_id) {
 		rv = -1;
 		goto out_unlock;
···
 	dlm_hold_rsb(r);
 	rv = 0;
 out_unlock:
-	spin_unlock(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_idr_lock);
 	return rv;
 }
···
 {
 	struct dlm_ls *ls = r->res_ls;

-	spin_lock(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_idr_lock);
 	idr_remove(&ls->ls_recover_idr, r->res_id);
 	r->res_id = 0;
 	ls->ls_recover_list_count--;
-	spin_unlock(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_idr_lock);

 	dlm_put_rsb(r);
 }
···
 {
 	struct dlm_rsb *r;

-	spin_lock(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_idr_lock);
 	r = idr_find(&ls->ls_recover_idr, (int)id);
-	spin_unlock(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_idr_lock);
 	return r;
 }
···
 	struct dlm_rsb *r;
 	int id;

-	spin_lock(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_idr_lock);

 	idr_for_each_entry(&ls->ls_recover_idr, r, id) {
 		idr_remove(&ls->ls_recover_idr, id);
···
 			  ls->ls_recover_list_count);
 		ls->ls_recover_list_count = 0;
 	}
-	spin_unlock(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_idr_lock);
 }

···
 	int i;

 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
-		spin_lock(&ls->ls_rsbtbl[i].lock);
+		spin_lock_bh(&ls->ls_rsbtbl[i].lock);
 		for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
 			next = rb_next(n);
 			r = rb_entry(n, struct dlm_rsb, res_hashnode);
···
 			dlm_free_rsb(r);
 			count++;
 		}
-		spin_unlock(&ls->ls_rsbtbl[i].lock);
+		spin_unlock_bh(&ls->ls_rsbtbl[i].lock);
 	}

 	if (count)
+10 -10
fs/dlm/recoverd.c
···
 	struct dlm_rsb *r;
 	int i, error = 0;

-	write_lock(&ls->ls_masters_lock);
+	write_lock_bh(&ls->ls_masters_lock);
 	if (!list_empty(&ls->ls_masters_list)) {
 		log_error(ls, "root list not empty");
 		error = -EINVAL;
···
 		spin_unlock_bh(&ls->ls_rsbtbl[i].lock);
 	}
 out:
-	write_unlock(&ls->ls_masters_lock);
+	write_unlock_bh(&ls->ls_masters_lock);
 	return error;
 }
···
 {
 	struct dlm_rsb *r, *safe;

-	write_lock(&ls->ls_masters_lock);
+	write_lock_bh(&ls->ls_masters_lock);
 	list_for_each_entry_safe(r, safe, &ls->ls_masters_list, res_masters_list) {
 		list_del_init(&r->res_masters_list);
 		dlm_put_rsb(r);
 	}
-	write_unlock(&ls->ls_masters_lock);
+	write_unlock_bh(&ls->ls_masters_lock);
 }

 static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list)
···
 {
 	int error = -EINTR;

-	write_lock(&ls->ls_recv_active);
+	write_lock_bh(&ls->ls_recv_active);

-	spin_lock(&ls->ls_recover_lock);
+	spin_lock_bh(&ls->ls_recover_lock);
 	if (ls->ls_recover_seq == seq) {
 		set_bit(LSFL_RUNNING, &ls->ls_flags);
 		/* unblocks processes waiting to enter the dlm */
···
 		clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
 		error = 0;
 	}
-	spin_unlock(&ls->ls_recover_lock);
+	spin_unlock_bh(&ls->ls_recover_lock);

-	write_unlock(&ls->ls_recv_active);
+	write_unlock_bh(&ls->ls_recv_active);
 	return error;
 }
···
 	struct dlm_recover *rv = NULL;
 	int error;

-	spin_lock(&ls->ls_recover_lock);
+	spin_lock_bh(&ls->ls_recover_lock);
 	rv = ls->ls_recover_args;
 	ls->ls_recover_args = NULL;
 	if (rv && ls->ls_recover_seq == rv->seq)
 		clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
-	spin_unlock(&ls->ls_recover_lock);
+	spin_unlock_bh(&ls->ls_recover_lock);

 	if (rv) {
 		error = ls_recover(ls, rv);
+6 -6
fs/dlm/requestqueue.c
···
 	struct dlm_message *ms;
 	int error = 0;

-	write_lock(&ls->ls_requestqueue_lock);
+	write_lock_bh(&ls->ls_requestqueue_lock);
 	for (;;) {
 		if (list_empty(&ls->ls_requestqueue)) {
 			clear_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags);
···
 			error = -EINTR;
 			break;
 		}
-		write_unlock(&ls->ls_requestqueue_lock);
+		write_unlock_bh(&ls->ls_requestqueue_lock);
 		schedule();
-		write_lock(&ls->ls_requestqueue_lock);
+		write_lock_bh(&ls->ls_requestqueue_lock);
 	}
-	write_unlock(&ls->ls_requestqueue_lock);
+	write_unlock_bh(&ls->ls_requestqueue_lock);

 	return error;
 }
···
 	struct dlm_message *ms;
 	struct rq_entry *e, *safe;

-	write_lock(&ls->ls_requestqueue_lock);
+	write_lock_bh(&ls->ls_requestqueue_lock);
 	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
 		ms = &e->request;

···
 			kfree(e);
 		}
 	}
-	write_unlock(&ls->ls_requestqueue_lock);
+	write_unlock_bh(&ls->ls_requestqueue_lock);
 }

+16 -16
fs/dlm/user.c
···
 		return;

 	ls = lkb->lkb_resource->res_ls;
-	spin_lock(&ls->ls_clear_proc_locks);
+	spin_lock_bh(&ls->ls_clear_proc_locks);

 	/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
 	   can't be delivered.  For ORPHAN's, dlm_clear_proc_locks() freed
···
 	if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
 		set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags);

-	spin_lock(&proc->asts_spin);
+	spin_lock_bh(&proc->asts_spin);

 	rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags, &cb);
 	switch (rv) {
···
 	case DLM_ENQUEUE_CALLBACK_FAILURE:
 		fallthrough;
 	default:
-		spin_unlock(&proc->asts_spin);
+		spin_unlock_bh(&proc->asts_spin);
 		WARN_ON_ONCE(1);
 		goto out;
 	}
-	spin_unlock(&proc->asts_spin);
+	spin_unlock_bh(&proc->asts_spin);

 	if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) {
 		/* N.B. spin_lock locks_spin, not asts_spin */
-		spin_lock(&proc->locks_spin);
+		spin_lock_bh(&proc->locks_spin);
 		if (!list_empty(&lkb->lkb_ownqueue)) {
 			list_del_init(&lkb->lkb_ownqueue);
 			dlm_put_lkb(lkb);
 		}
-		spin_unlock(&proc->locks_spin);
+		spin_unlock_bh(&proc->locks_spin);
 	}
 out:
-	spin_unlock(&ls->ls_clear_proc_locks);
+	spin_unlock_bh(&ls->ls_clear_proc_locks);
 }

 static int device_user_lock(struct dlm_user_proc *proc,
···
 	if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
 		return -EINVAL;

-	spin_lock(&proc->asts_spin);
+	spin_lock_bh(&proc->asts_spin);
 	if (list_empty(&proc->asts)) {
 		if (file->f_flags & O_NONBLOCK) {
-			spin_unlock(&proc->asts_spin);
+			spin_unlock_bh(&proc->asts_spin);
 			return -EAGAIN;
 		}

···
 	repeat:
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (list_empty(&proc->asts) && !signal_pending(current)) {
-			spin_unlock(&proc->asts_spin);
+			spin_unlock_bh(&proc->asts_spin);
 			schedule();
-			spin_lock(&proc->asts_spin);
+			spin_lock_bh(&proc->asts_spin);
 			goto repeat;
 		}
 		set_current_state(TASK_RUNNING);
 		remove_wait_queue(&proc->wait, &wait);

 		if (signal_pending(current)) {
-			spin_unlock(&proc->asts_spin);
+			spin_unlock_bh(&proc->asts_spin);
 			return -ERESTARTSYS;
 		}
 	}
···

 	cb = list_first_entry(&proc->asts, struct dlm_callback, list);
 	list_del(&cb->list);
-	spin_unlock(&proc->asts_spin);
+	spin_unlock_bh(&proc->asts_spin);

 	if (cb->flags & DLM_CB_BAST) {
 		trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
···

 	poll_wait(file, &proc->wait, wait);

-	spin_lock(&proc->asts_spin);
+	spin_lock_bh(&proc->asts_spin);
 	if (!list_empty(&proc->asts)) {
-		spin_unlock(&proc->asts_spin);
+		spin_unlock_bh(&proc->asts_spin);
 		return EPOLLIN | EPOLLRDNORM;
 	}
-	spin_unlock(&proc->asts_spin);
+	spin_unlock_bh(&proc->asts_spin);
 	return 0;
 }
