Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dlm: move rsb root_list to ls_recover() stack

Move the rsb root_list from the lockspace to a stack variable since
it is now only used by the ls_recover() function.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>

Authored by Alexander Aring
Committed by David Teigland
3a747f4a aff46e0f

+47 -70
+2 -4
fs/dlm/dir.c
··· 47 47 return r->res_dir_nodeid; 48 48 } 49 49 50 - void dlm_recover_dir_nodeid(struct dlm_ls *ls) 50 + void dlm_recover_dir_nodeid(struct dlm_ls *ls, const struct list_head *root_list) 51 51 { 52 52 struct dlm_rsb *r; 53 53 54 - down_read(&ls->ls_root_sem); 55 - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { 54 + list_for_each_entry(r, root_list, res_root_list) { 56 55 r->res_dir_nodeid = dlm_hash2nodeid(ls, r->res_hash); 57 56 } 58 - up_read(&ls->ls_root_sem); 59 57 } 60 58 61 59 int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq)
+2 -1
fs/dlm/dir.h
··· 14 14 15 15 int dlm_dir_nodeid(struct dlm_rsb *rsb); 16 16 int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash); 17 - void dlm_recover_dir_nodeid(struct dlm_ls *ls); 17 + void dlm_recover_dir_nodeid(struct dlm_ls *ls, 18 + const struct list_head *root_list); 18 19 int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq); 19 20 void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, 20 21 char *outbuf, int outlen, int nodeid);
+2 -4
fs/dlm/dlm_internal.h
··· 674 674 wait_queue_head_t ls_recover_lock_wait; 675 675 spinlock_t ls_clear_proc_locks; 676 676 677 - struct list_head ls_root_list; /* root resources */ 678 - struct rw_semaphore ls_root_sem; /* protect root_list */ 679 - struct list_head ls_masters_list; /* root resources */ 680 - rwlock_t ls_masters_lock; /* protect root_list */ 677 + struct list_head ls_masters_list; /* root resources */ 678 + rwlock_t ls_masters_lock; /* protect root_list */ 681 679 682 680 const struct dlm_lockspace_ops *ls_ops; 683 681 void *ls_ops_arg;
+2 -4
fs/dlm/lock.c
··· 5227 5227 5228 5228 /* Get rid of locks held by nodes that are gone. */ 5229 5229 5230 - void dlm_recover_purge(struct dlm_ls *ls) 5230 + void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list) 5231 5231 { 5232 5232 struct dlm_rsb *r; 5233 5233 struct dlm_member *memb; ··· 5246 5246 if (!nodes_count) 5247 5247 return; 5248 5248 5249 - down_write(&ls->ls_root_sem); 5250 - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { 5249 + list_for_each_entry(r, root_list, res_root_list) { 5251 5250 hold_rsb(r); 5252 5251 lock_rsb(r); 5253 5252 if (is_master(r)) { ··· 5261 5262 unhold_rsb(r); 5262 5263 cond_resched(); 5263 5264 } 5264 - up_write(&ls->ls_root_sem); 5265 5265 5266 5266 if (lkb_count) 5267 5267 log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
+1 -1
fs/dlm/lock.h
··· 31 31 int dlm_search_rsb_tree(struct rb_root *tree, const void *name, int len, 32 32 struct dlm_rsb **r_ret); 33 33 34 - void dlm_recover_purge(struct dlm_ls *ls); 34 + void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list); 35 35 void dlm_purge_mstcpy_locks(struct dlm_rsb *r); 36 36 void dlm_recover_grant(struct dlm_ls *ls); 37 37 int dlm_recover_waiters_post(struct dlm_ls *ls);
-2
fs/dlm/lockspace.c
··· 580 580 ls->ls_recover_list_count = 0; 581 581 ls->ls_local_handle = ls; 582 582 init_waitqueue_head(&ls->ls_wait_general); 583 - INIT_LIST_HEAD(&ls->ls_root_list); 584 - init_rwsem(&ls->ls_root_sem); 585 583 INIT_LIST_HEAD(&ls->ls_masters_list); 586 584 rwlock_init(&ls->ls_masters_lock); 587 585
+10 -20
fs/dlm/recover.c
··· 519 519 * the correct dir node. 520 520 */ 521 521 522 - int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq) 522 + int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq, 523 + const struct list_head *root_list) 523 524 { 524 525 struct dlm_rsb *r; 525 526 unsigned int total = 0; ··· 530 529 531 530 log_rinfo(ls, "dlm_recover_masters"); 532 531 533 - down_read(&ls->ls_root_sem); 534 - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { 532 + list_for_each_entry(r, root_list, res_root_list) { 535 533 if (dlm_recovery_stopped(ls)) { 536 - up_read(&ls->ls_root_sem); 537 534 error = -EINTR; 538 535 goto out; 539 536 } ··· 545 546 cond_resched(); 546 547 total++; 547 548 548 - if (error) { 549 - up_read(&ls->ls_root_sem); 549 + if (error) 550 550 goto out; 551 - } 552 551 } 553 - up_read(&ls->ls_root_sem); 554 552 555 553 log_rinfo(ls, "dlm_recover_masters %u of %u", count, total); 556 554 ··· 652 656 return error; 653 657 } 654 658 655 - int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq) 659 + int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq, 660 + const struct list_head *root_list) 656 661 { 657 662 struct dlm_rsb *r; 658 663 int error, count = 0; 659 664 660 - down_read(&ls->ls_root_sem); 661 - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { 665 + list_for_each_entry(r, root_list, res_root_list) { 662 666 if (is_master(r)) { 663 667 rsb_clear_flag(r, RSB_NEW_MASTER); 664 668 continue; ··· 669 673 670 674 if (dlm_recovery_stopped(ls)) { 671 675 error = -EINTR; 672 - up_read(&ls->ls_root_sem); 673 676 goto out; 674 677 } 675 678 676 679 error = recover_locks(r, seq); 677 - if (error) { 678 - up_read(&ls->ls_root_sem); 680 + if (error) 679 681 goto out; 680 - } 681 682 682 683 count += r->res_recover_locks_count; 683 684 } 684 - up_read(&ls->ls_root_sem); 685 685 686 686 log_rinfo(ls, "dlm_recover_locks %d out", count); 687 687 ··· 846 854 rsb_set_flag(r, RSB_RECOVER_GRANT); 847 855 } 848 856
849 - void dlm_recover_rsbs(struct dlm_ls *ls) 857 + void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list) 850 858 { 851 859 struct dlm_rsb *r; 852 860 unsigned int count = 0; 853 861 854 - down_read(&ls->ls_root_sem); 855 - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { 862 + list_for_each_entry(r, root_list, res_root_list) { 856 863 lock_rsb(r); 857 864 if (is_master(r)) { 858 865 if (rsb_flag(r, RSB_RECOVER_CONVERT)) ··· 872 881 rsb_clear_flag(r, RSB_NEW_MASTER2); 873 882 unlock_rsb(r); 874 883 } 875 - up_read(&ls->ls_root_sem); 876 884 877 885 if (count) 878 886 log_rinfo(ls, "dlm_recover_rsbs %d done", count);
+5 -3
fs/dlm/recover.h
··· 19 19 int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq); 20 20 int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq); 21 21 int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq); 22 - int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq); 22 + int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq, 23 + const struct list_head *root_list); 23 24 int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc); 24 - int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq); 25 + int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq, 26 + const struct list_head *root_list); 25 27 void dlm_recovered_lock(struct dlm_rsb *r); 26 28 void dlm_clear_toss(struct dlm_ls *ls); 27 - void dlm_recover_rsbs(struct dlm_ls *ls); 29 + void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list); 28 30 29 31 #endif /* __RECOVER_DOT_H__ */ 30 32
+23 -31
fs/dlm/recoverd.c
··· 62 62 write_unlock(&ls->ls_masters_lock); 63 63 } 64 64 65 - static void dlm_create_root_list(struct dlm_ls *ls) 65 + static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list) 66 66 { 67 67 struct rb_node *n; 68 68 struct dlm_rsb *r; 69 69 int i; 70 70 71 - down_write(&ls->ls_root_sem); 72 - if (!list_empty(&ls->ls_root_list)) { 73 - log_error(ls, "root list not empty"); 74 - goto out; 75 - } 76 - 77 71 for (i = 0; i < ls->ls_rsbtbl_size; i++) { 78 72 spin_lock_bh(&ls->ls_rsbtbl[i].lock); 79 73 for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { 80 74 r = rb_entry(n, struct dlm_rsb, res_hashnode); 81 - list_add(&r->res_root_list, &ls->ls_root_list); 75 + list_add(&r->res_root_list, root_list); 82 76 dlm_hold_rsb(r); 83 77 } 84 78 ··· 80 86 log_error(ls, "%s toss not empty", __func__); 81 87 spin_unlock_bh(&ls->ls_rsbtbl[i].lock); 82 88 } 83 - out: 84 - up_write(&ls->ls_root_sem); 85 89 } 86 90 87 - static void dlm_release_root_list(struct dlm_ls *ls) 91 + static void dlm_release_root_list(struct list_head *root_list) 88 92 { 89 93 struct dlm_rsb *r, *safe; 90 94 91 - down_write(&ls->ls_root_sem); 92 - list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) { 95 + list_for_each_entry_safe(r, safe, root_list, res_root_list) { 93 96 list_del_init(&r->res_root_list); 94 97 dlm_put_rsb(r); 95 98 } 96 - up_write(&ls->ls_root_sem); 97 99 } 98 100 99 101 /* If the start for which we're re-enabling locking (seq) has been superseded ··· 121 131 122 132 static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) 123 133 { 134 + LIST_HEAD(root_list); 124 135 unsigned long start; 125 136 int error, neg = 0; 126 137 ··· 138 147 * routines. 139 148 */ 140 149 141 - dlm_create_root_list(ls); 150 + dlm_create_root_list(ls, &root_list); 142 151 143 152 /* 144 153 * Add or remove nodes from the lockspace's ls_nodes list. 
··· 154 163 goto fail; 155 164 } 156 165 157 - dlm_recover_dir_nodeid(ls); 166 + dlm_recover_dir_nodeid(ls, &root_list); 158 167 159 168 /* Create a snapshot of all active rsbs were we are the master of. 160 169 * During the barrier between dlm_recover_members_wait() and ··· 170 179 error = dlm_create_masters_list(ls); 171 180 if (error) { 172 181 log_rinfo(ls, "dlm_create_masters_list error %d", error); 173 - goto fail; 182 + goto fail_root_list; 174 183 } 175 184 176 185 ls->ls_recover_dir_sent_res = 0; ··· 183 192 if (error) { 184 193 log_rinfo(ls, "dlm_recover_members_wait error %d", error); 185 194 dlm_release_masters_list(ls); 186 - goto fail; 195 + goto fail_root_list; 187 196 } 188 197 189 198 start = jiffies; ··· 197 206 if (error) { 198 207 log_rinfo(ls, "dlm_recover_directory error %d", error); 199 208 dlm_release_masters_list(ls); 200 - goto fail; 209 + goto fail_root_list; 201 210 } 202 211 203 212 dlm_set_recover_status(ls, DLM_RS_DIR); ··· 206 215 if (error) { 207 216 log_rinfo(ls, "dlm_recover_directory_wait error %d", error); 208 217 dlm_release_masters_list(ls); 209 - goto fail; 218 + goto fail_root_list; 210 219 } 211 220 212 221 dlm_release_masters_list(ls); ··· 224 233 225 234 if (dlm_recovery_stopped(ls)) { 226 235 error = -EINTR; 227 - goto fail; 236 + goto fail_root_list; 228 237 } 229 238 230 239 if (neg || dlm_no_directory(ls)) { ··· 232 241 * Clear lkb's for departed nodes. 233 242 */ 234 243 235 - dlm_recover_purge(ls); 244 + dlm_recover_purge(ls, &root_list); 236 245 237 246 /* 238 247 * Get new master nodeid's for rsb's that were mastered on 239 248 * departed nodes. 240 249 */ 241 250 242 - error = dlm_recover_masters(ls, rv->seq); 251 + error = dlm_recover_masters(ls, rv->seq, &root_list); 243 252 if (error) { 244 253 log_rinfo(ls, "dlm_recover_masters error %d", error); 245 - goto fail; 254 + goto fail_root_list; 246 255 } 247 256 248 257 /* 249 258 * Send our locks on remastered rsb's to the new masters. 
250 259 */ 251 260 252 - error = dlm_recover_locks(ls, rv->seq); 261 + error = dlm_recover_locks(ls, rv->seq, &root_list); 253 262 if (error) { 254 263 log_rinfo(ls, "dlm_recover_locks error %d", error); 255 - goto fail; 264 + goto fail_root_list; 256 265 } 257 266 258 267 dlm_set_recover_status(ls, DLM_RS_LOCKS); ··· 260 269 error = dlm_recover_locks_wait(ls, rv->seq); 261 270 if (error) { 262 271 log_rinfo(ls, "dlm_recover_locks_wait error %d", error); 263 - goto fail; 272 + goto fail_root_list; 264 273 } 265 274 266 275 log_rinfo(ls, "dlm_recover_locks %u in", ··· 272 281 * settings. 273 282 */ 274 283 275 - dlm_recover_rsbs(ls); 284 + dlm_recover_rsbs(ls, &root_list); 276 285 } else { 277 286 /* 278 287 * Other lockspace members may be going through the "neg" steps ··· 284 293 error = dlm_recover_locks_wait(ls, rv->seq); 285 294 if (error) { 286 295 log_rinfo(ls, "dlm_recover_locks_wait error %d", error); 287 - goto fail; 296 + goto fail_root_list; 288 297 } 289 298 } 290 299 291 - dlm_release_root_list(ls); 300 + dlm_release_root_list(&root_list); 292 301 293 302 /* 294 303 * Purge directory-related requests that are saved in requestqueue. ··· 337 346 338 347 return 0; 339 348 349 + fail_root_list: 350 + dlm_release_root_list(&root_list); 340 351 fail: 341 - dlm_release_root_list(ls); 342 352 mutex_unlock(&ls->ls_recoverd_active); 343 353 344 354 return error;