Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/dlm

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/dlm:
dlm: add __init and __exit marks to init and exit functions
dlm: eliminate astparam type casting
dlm: proper types for asts and basts
dlm: dlm/user.c input validation fixes
dlm: fix dlm_dir_lookup() handling of too long names
dlm: fix overflows when copying from ->m_extra to lvb
dlm: make find_rsb() fail gracefully when namelen is too large
dlm: receive_rcom_lock_args() overflow check
dlm: verify that places expecting rcom_lock have packet long enough
dlm: validate data in dlm_recover_directory()
dlm: missing length check in check_config()
dlm: use proper type for ->ls_recover_buf
dlm: do not byteswap rcom_config
dlm: do not byteswap rcom_lock
dlm: dlm_process_incoming_buffer() fixes
dlm: use proper C for dlm/requestqueue stuff (and fix alignment bug)

+235 -225
+4 -5
fs/dlm/ast.c
··· 39 dlm_user_add_ast(lkb, type); 40 return; 41 } 42 - DLM_ASSERT(lkb->lkb_astaddr != DLM_FAKE_USER_AST, dlm_print_lkb(lkb);); 43 44 spin_lock(&ast_queue_lock); 45 if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) { ··· 57 struct dlm_ls *ls = NULL; 58 struct dlm_rsb *r = NULL; 59 struct dlm_lkb *lkb; 60 - void (*cast) (long param); 61 - void (*bast) (long param, int mode); 62 int type = 0, found, bmode; 63 64 for (;;) { ··· 82 if (!found) 83 break; 84 85 - cast = lkb->lkb_astaddr; 86 - bast = lkb->lkb_bastaddr; 87 bmode = lkb->lkb_bastmode; 88 89 if ((type & AST_COMP) && cast)
··· 39 dlm_user_add_ast(lkb, type); 40 return; 41 } 42 43 spin_lock(&ast_queue_lock); 44 if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) { ··· 58 struct dlm_ls *ls = NULL; 59 struct dlm_rsb *r = NULL; 60 struct dlm_lkb *lkb; 61 + void (*cast) (void *astparam); 62 + void (*bast) (void *astparam, int mode); 63 int type = 0, found, bmode; 64 65 for (;;) { ··· 83 if (!found) 84 break; 85 86 + cast = lkb->lkb_astfn; 87 + bast = lkb->lkb_bastfn; 88 bmode = lkb->lkb_bastmode; 89 90 if ((type & AST_COMP) && cast)
+1 -1
fs/dlm/config.c
··· 604 }, 605 }; 606 607 - int dlm_config_init(void) 608 { 609 config_group_init(&clusters_root.subsys.su_group); 610 mutex_init(&clusters_root.subsys.su_mutex);
··· 604 }, 605 }; 606 607 + int __init dlm_config_init(void) 608 { 609 config_group_init(&clusters_root.subsys.su_group); 610 mutex_init(&clusters_root.subsys.su_mutex);
+3 -5
fs/dlm/debug_fs.c
··· 162 163 static void print_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb *r) 164 { 165 - struct dlm_user_args *ua; 166 unsigned int waiting = 0; 167 uint64_t xid = 0; 168 169 if (lkb->lkb_flags & DLM_IFL_USER) { 170 - ua = (struct dlm_user_args *) lkb->lkb_astparam; 171 - if (ua) 172 - xid = ua->xid; 173 } 174 175 if (lkb->lkb_timestamp) ··· 541 debugfs_remove(ls->ls_debug_locks_dentry); 542 } 543 544 - int dlm_register_debugfs(void) 545 { 546 mutex_init(&debug_buf_lock); 547 dlm_root = debugfs_create_dir("dlm", NULL);
··· 162 163 static void print_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb *r) 164 { 165 unsigned int waiting = 0; 166 uint64_t xid = 0; 167 168 if (lkb->lkb_flags & DLM_IFL_USER) { 169 + if (lkb->lkb_ua) 170 + xid = lkb->lkb_ua->xid; 171 } 172 173 if (lkb->lkb_timestamp) ··· 543 debugfs_remove(ls->ls_debug_locks_dentry); 544 } 545 546 + int __init dlm_register_debugfs(void) 547 { 548 mutex_init(&debug_buf_lock); 549 dlm_root = debugfs_create_dir("dlm", NULL);
+24 -4
fs/dlm/dir.c
··· 220 last_len = 0; 221 222 for (;;) { 223 error = dlm_recovery_stopped(ls); 224 if (error) 225 goto out_free; ··· 236 * pick namelen/name pairs out of received buffer 237 */ 238 239 - b = ls->ls_recover_buf + sizeof(struct dlm_rcom); 240 241 for (;;) { 242 - memcpy(&namelen, b, sizeof(uint16_t)); 243 - namelen = be16_to_cpu(namelen); 244 - b += sizeof(uint16_t); 245 246 /* namelen of 0xFFFFF marks end of names for 247 this node; namelen of 0 marks end of the ··· 260 goto done; 261 if (!namelen) 262 break; 263 264 error = -ENOMEM; 265 de = get_free_de(ls, namelen); ··· 278 memcpy(de->name, b, namelen); 279 memcpy(last_name, b, namelen); 280 b += namelen; 281 282 add_entry_to_hash(ls, de); 283 count++; ··· 318 } 319 320 write_unlock(&ls->ls_dirtbl[bucket].lock); 321 322 de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_KERNEL); 323 if (!de)
··· 220 last_len = 0; 221 222 for (;;) { 223 + int left; 224 error = dlm_recovery_stopped(ls); 225 if (error) 226 goto out_free; ··· 235 * pick namelen/name pairs out of received buffer 236 */ 237 238 + b = ls->ls_recover_buf->rc_buf; 239 + left = ls->ls_recover_buf->rc_header.h_length; 240 + left -= sizeof(struct dlm_rcom); 241 242 for (;;) { 243 + __be16 v; 244 + 245 + error = -EINVAL; 246 + if (left < sizeof(__be16)) 247 + goto out_free; 248 + 249 + memcpy(&v, b, sizeof(__be16)); 250 + namelen = be16_to_cpu(v); 251 + b += sizeof(__be16); 252 + left -= sizeof(__be16); 253 254 /* namelen of 0xFFFFF marks end of names for 255 this node; namelen of 0 marks end of the ··· 250 goto done; 251 if (!namelen) 252 break; 253 + 254 + if (namelen > left) 255 + goto out_free; 256 + 257 + if (namelen > DLM_RESNAME_MAXLEN) 258 + goto out_free; 259 260 error = -ENOMEM; 261 de = get_free_de(ls, namelen); ··· 262 memcpy(de->name, b, namelen); 263 memcpy(last_name, b, namelen); 264 b += namelen; 265 + left -= namelen; 266 267 add_entry_to_hash(ls, de); 268 count++; ··· 301 } 302 303 write_unlock(&ls->ls_dirtbl[bucket].lock); 304 + 305 + if (namelen > DLM_RESNAME_MAXLEN) 306 + return -EINVAL; 307 308 de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_KERNEL); 309 if (!de)
+30 -23
fs/dlm/dlm_internal.h
··· 92 } \ 93 } 94 95 - #define DLM_FAKE_USER_AST ERR_PTR(-EINVAL) 96 - 97 98 struct dlm_direntry { 99 struct list_head list; ··· 144 145 struct dlm_args { 146 uint32_t flags; 147 - void *astaddr; 148 - long astparam; 149 - void *bastaddr; 150 int mode; 151 struct dlm_lksb *lksb; 152 unsigned long timeout; ··· 251 252 char *lkb_lvbptr; 253 struct dlm_lksb *lkb_lksb; /* caller's status block */ 254 - void *lkb_astaddr; /* caller's ast function */ 255 - void *lkb_bastaddr; /* caller's bast function */ 256 - long lkb_astparam; /* caller's ast arg */ 257 }; 258 259 ··· 404 char rc_buf[0]; 405 }; 406 407 struct rcom_config { 408 - uint32_t rf_lvblen; 409 - uint32_t rf_lsflags; 410 - uint64_t rf_unused; 411 }; 412 413 struct rcom_lock { 414 - uint32_t rl_ownpid; 415 - uint32_t rl_lkid; 416 - uint32_t rl_remid; 417 - uint32_t rl_parent_lkid; 418 - uint32_t rl_parent_remid; 419 - uint32_t rl_exflags; 420 - uint32_t rl_flags; 421 - uint32_t rl_lvbseq; 422 - int rl_result; 423 int8_t rl_rqmode; 424 int8_t rl_grmode; 425 int8_t rl_status; 426 int8_t rl_asts; 427 - uint16_t rl_wait_type; 428 - uint16_t rl_namelen; 429 char rl_name[DLM_RESNAME_MAXLEN]; 430 char rl_lvb[0]; 431 }; ··· 501 struct rw_semaphore ls_recv_active; /* block dlm_recv */ 502 struct list_head ls_requestqueue;/* queue remote requests */ 503 struct mutex ls_requestqueue_mutex; 504 - char *ls_recover_buf; 505 int ls_recover_nodeid; /* for debugging */ 506 uint64_t ls_rcom_seq; 507 spinlock_t ls_rcom_spin;
··· 92 } \ 93 } 94 95 96 struct dlm_direntry { 97 struct list_head list; ··· 146 147 struct dlm_args { 148 uint32_t flags; 149 + void (*astfn) (void *astparam); 150 + void *astparam; 151 + void (*bastfn) (void *astparam, int mode); 152 int mode; 153 struct dlm_lksb *lksb; 154 unsigned long timeout; ··· 253 254 char *lkb_lvbptr; 255 struct dlm_lksb *lkb_lksb; /* caller's status block */ 256 + void (*lkb_astfn) (void *astparam); 257 + void (*lkb_bastfn) (void *astparam, int mode); 258 + union { 259 + void *lkb_astparam; /* caller's ast arg */ 260 + struct dlm_user_args *lkb_ua; 261 + }; 262 }; 263 264 ··· 403 char rc_buf[0]; 404 }; 405 406 + union dlm_packet { 407 + struct dlm_header header; /* common to other two */ 408 + struct dlm_message message; 409 + struct dlm_rcom rcom; 410 + }; 411 + 412 struct rcom_config { 413 + __le32 rf_lvblen; 414 + __le32 rf_lsflags; 415 + __le64 rf_unused; 416 }; 417 418 struct rcom_lock { 419 + __le32 rl_ownpid; 420 + __le32 rl_lkid; 421 + __le32 rl_remid; 422 + __le32 rl_parent_lkid; 423 + __le32 rl_parent_remid; 424 + __le32 rl_exflags; 425 + __le32 rl_flags; 426 + __le32 rl_lvbseq; 427 + __le32 rl_result; 428 int8_t rl_rqmode; 429 int8_t rl_grmode; 430 int8_t rl_status; 431 int8_t rl_asts; 432 + __le16 rl_wait_type; 433 + __le16 rl_namelen; 434 char rl_name[DLM_RESNAME_MAXLEN]; 435 char rl_lvb[0]; 436 }; ··· 494 struct rw_semaphore ls_recv_active; /* block dlm_recv */ 495 struct list_head ls_requestqueue;/* queue remote requests */ 496 struct mutex ls_requestqueue_mutex; 497 + struct dlm_rcom *ls_recover_buf; 498 int ls_recover_nodeid; /* for debugging */ 499 uint64_t ls_rcom_seq; 500 spinlock_t ls_rcom_spin;
+83 -56
fs/dlm/lock.c
··· 436 { 437 struct dlm_rsb *r, *tmp; 438 uint32_t hash, bucket; 439 - int error = 0; 440 441 if (dlm_no_directory(ls)) 442 flags |= R_CREATE; 443 444 hash = jhash(name, namelen, 0); 445 bucket = hash & (ls->ls_rsbtbl_size - 1); 446 ··· 1226 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; 1227 if (b == 1) { 1228 int len = receive_extralen(ms); 1229 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); 1230 lkb->lkb_lvbseq = ms->m_lvbseq; 1231 } ··· 1781 */ 1782 1783 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) { 1784 - if (lkb->lkb_bastaddr && lock_requires_bast(lkb, high, cw)) { 1785 if (cw && high == DLM_LOCK_PR) 1786 queue_bast(r, lkb, DLM_LOCK_CW); 1787 else ··· 1811 struct dlm_lkb *gr; 1812 1813 list_for_each_entry(gr, head, lkb_statequeue) { 1814 - if (gr->lkb_bastaddr && modes_require_bast(gr, lkb)) { 1815 queue_bast(r, gr, lkb->lkb_rqmode); 1816 gr->lkb_highbast = lkb->lkb_rqmode; 1817 } ··· 1966 } 1967 1968 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags, 1969 - int namelen, unsigned long timeout_cs, void *ast, 1970 - void *astarg, void *bast, struct dlm_args *args) 1971 { 1972 int rv = -EINVAL; 1973 ··· 2020 an active lkb cannot be modified before locking the rsb */ 2021 2022 args->flags = flags; 2023 - args->astaddr = ast; 2024 - args->astparam = (long) astarg; 2025 - args->bastaddr = bast; 2026 args->timeout = timeout_cs; 2027 args->mode = mode; 2028 args->lksb = lksb; ··· 2041 return -EINVAL; 2042 2043 args->flags = flags; 2044 - args->astparam = (long) astarg; 2045 return 0; 2046 } 2047 ··· 2071 2072 lkb->lkb_exflags = args->flags; 2073 lkb->lkb_sbflags = 0; 2074 - lkb->lkb_astaddr = args->astaddr; 2075 lkb->lkb_astparam = args->astparam; 2076 - lkb->lkb_bastaddr = args->bastaddr; 2077 lkb->lkb_rqmode = args->mode; 2078 lkb->lkb_lksb = args->lksb; 2079 lkb->lkb_lvbptr = args->lksb->sb_lvbptr; ··· 2720 /* m_result and m_bastmode are set from function args, 2721 not from lkb fields */ 2722 2723 - 
if (lkb->lkb_bastaddr) 2724 ms->m_asts |= AST_BAST; 2725 - if (lkb->lkb_astaddr) 2726 ms->m_asts |= AST_COMP; 2727 2728 /* compare with switch in create_message; send_remove() doesn't ··· 2998 if (!lkb->lkb_lvbptr) 2999 return -ENOMEM; 3000 len = receive_extralen(ms); 3001 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); 3002 } 3003 return 0; 3004 } 3005 3006 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb, ··· 3023 lkb->lkb_remid = ms->m_lkid; 3024 lkb->lkb_grmode = DLM_LOCK_IV; 3025 lkb->lkb_rqmode = ms->m_rqmode; 3026 - lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST); 3027 - lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP); 3028 3029 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { 3030 /* lkb was just created so there won't be an lvb yet */ ··· 3824 int nodeid) 3825 { 3826 if (dlm_locking_stopped(ls)) { 3827 - dlm_add_requestqueue(ls, nodeid, (struct dlm_header *) ms); 3828 } else { 3829 dlm_wait_requestqueue(ls); 3830 _receive_message(ls, ms); ··· 3844 standard locking activity) or an RCOM (recovery message sent as part of 3845 lockspace recovery). 
*/ 3846 3847 - void dlm_receive_buffer(struct dlm_header *hd, int nodeid) 3848 { 3849 - struct dlm_message *ms = (struct dlm_message *) hd; 3850 - struct dlm_rcom *rc = (struct dlm_rcom *) hd; 3851 struct dlm_ls *ls; 3852 int type = 0; 3853 3854 switch (hd->h_cmd) { 3855 case DLM_MSG: 3856 - dlm_message_in(ms); 3857 - type = ms->m_type; 3858 break; 3859 case DLM_RCOM: 3860 - dlm_rcom_in(rc); 3861 - type = rc->rc_type; 3862 break; 3863 default: 3864 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid); ··· 3877 hd->h_lockspace, nodeid, hd->h_cmd, type); 3878 3879 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) 3880 - dlm_send_ls_not_ready(nodeid, rc); 3881 return; 3882 } 3883 ··· 3886 3887 down_read(&ls->ls_recv_active); 3888 if (hd->h_cmd == DLM_MSG) 3889 - dlm_receive_message(ls, ms, nodeid); 3890 else 3891 - dlm_receive_rcom(ls, rc, nodeid); 3892 up_read(&ls->ls_recv_active); 3893 3894 dlm_put_lockspace(ls); ··· 4288 return NULL; 4289 } 4290 4291 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, 4292 struct dlm_rsb *r, struct dlm_rcom *rc) 4293 { 4294 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; 4295 - int lvblen; 4296 4297 lkb->lkb_nodeid = rc->rc_header.h_nodeid; 4298 - lkb->lkb_ownpid = rl->rl_ownpid; 4299 - lkb->lkb_remid = rl->rl_lkid; 4300 - lkb->lkb_exflags = rl->rl_exflags; 4301 - lkb->lkb_flags = rl->rl_flags & 0x0000FFFF; 4302 lkb->lkb_flags |= DLM_IFL_MSTCPY; 4303 - lkb->lkb_lvbseq = rl->rl_lvbseq; 4304 lkb->lkb_rqmode = rl->rl_rqmode; 4305 lkb->lkb_grmode = rl->rl_grmode; 4306 /* don't set lkb_status because add_lkb wants to itself */ 4307 4308 - lkb->lkb_bastaddr = (void *) (long) (rl->rl_asts & AST_BAST); 4309 - lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP); 4310 4311 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { 4312 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); 4313 if (!lkb->lkb_lvbptr) 4314 return -ENOMEM; 4315 - lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) - 4316 - sizeof(struct 
rcom_lock); 4317 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen); 4318 } 4319 ··· 4323 The real granted mode of these converting locks cannot be determined 4324 until all locks have been rebuilt on the rsb (recover_conversion) */ 4325 4326 - if (rl->rl_wait_type == DLM_MSG_CONVERT && middle_conversion(lkb)) { 4327 rl->rl_status = DLM_LKSTS_CONVERT; 4328 lkb->lkb_grmode = DLM_LOCK_IV; 4329 rsb_set_flag(r, RSB_RECOVER_CONVERT); ··· 4339 the given values and send back our lkid. We send back our lkid by sending 4340 back the rcom_lock struct we got but with the remid field filled in. */ 4341 4342 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) 4343 { 4344 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; ··· 4352 goto out; 4353 } 4354 4355 - error = find_rsb(ls, rl->rl_name, rl->rl_namelen, R_MASTER, &r); 4356 if (error) 4357 goto out; 4358 4359 lock_rsb(r); 4360 4361 - lkb = search_remid(r, rc->rc_header.h_nodeid, rl->rl_lkid); 4362 if (lkb) { 4363 error = -EEXIST; 4364 goto out_remid; ··· 4382 out_remid: 4383 /* this is the new value returned to the lock holder for 4384 saving in its process-copy lkb */ 4385 - rl->rl_remid = lkb->lkb_id; 4386 4387 out_unlock: 4388 unlock_rsb(r); 4389 put_rsb(r); 4390 out: 4391 if (error) 4392 - log_debug(ls, "recover_master_copy %d %x", error, rl->rl_lkid); 4393 - rl->rl_result = error; 4394 return error; 4395 } 4396 4397 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) 4398 { 4399 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; ··· 4403 struct dlm_lkb *lkb; 4404 int error; 4405 4406 - error = find_lkb(ls, rl->rl_lkid, &lkb); 4407 if (error) { 4408 - log_error(ls, "recover_process_copy no lkid %x", rl->rl_lkid); 4409 return error; 4410 } 4411 4412 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb);); 4413 4414 - error = rl->rl_result; 4415 4416 r = lkb->lkb_resource; 4417 hold_rsb(r); ··· 4431 log_debug(ls, "master copy exists %x", lkb->lkb_id); 4432 /* fall through */ 4433 case 0: 
4434 - lkb->lkb_remid = rl->rl_remid; 4435 break; 4436 default: 4437 log_error(ls, "dlm_recover_process_copy unknown error %d %x", ··· 4480 lock and that lkb_astparam is the dlm_user_args structure. */ 4481 4482 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs, 4483 - DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args); 4484 lkb->lkb_flags |= DLM_IFL_USER; 4485 ua->old_mode = DLM_LOCK_IV; 4486 ··· 4533 /* user can change the params on its lock when it converts it, or 4534 add an lvb that didn't exist before */ 4535 4536 - ua = (struct dlm_user_args *)lkb->lkb_astparam; 4537 4538 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) { 4539 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL); ··· 4554 ua->old_mode = lkb->lkb_grmode; 4555 4556 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs, 4557 - DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args); 4558 if (error) 4559 goto out_put; 4560 ··· 4584 if (error) 4585 goto out; 4586 4587 - ua = (struct dlm_user_args *)lkb->lkb_astparam; 4588 4589 if (lvb_in && ua->lksb.sb_lvbptr) 4590 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); ··· 4633 if (error) 4634 goto out; 4635 4636 - ua = (struct dlm_user_args *)lkb->lkb_astparam; 4637 if (ua_tmp->castparam) 4638 ua->castparam = ua_tmp->castparam; 4639 ua->user_lksb = ua_tmp->user_lksb; ··· 4671 if (error) 4672 goto out; 4673 4674 - ua = (struct dlm_user_args *)lkb->lkb_astparam; 4675 4676 error = set_unlock_args(flags, ua, &args); 4677 if (error) ··· 4710 4711 static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) 4712 { 4713 - struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam; 4714 struct dlm_args args; 4715 int error; 4716 ··· 4718 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans); 4719 mutex_unlock(&ls->ls_orphans_mutex); 4720 4721 - set_unlock_args(0, ua, &args); 4722 4723 error = cancel_lock(ls, lkb, &args); 4724 if (error == -DLM_ECANCEL) ··· 4731 4732 static int unlock_proc_lock(struct dlm_ls *ls, struct 
dlm_lkb *lkb) 4733 { 4734 - struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam; 4735 struct dlm_args args; 4736 int error; 4737 4738 - set_unlock_args(DLM_LKF_FORCEUNLOCK, ua, &args); 4739 4740 error = unlock_lock(ls, lkb, &args); 4741 if (error == -DLM_EUNLOCK)
··· 436 { 437 struct dlm_rsb *r, *tmp; 438 uint32_t hash, bucket; 439 + int error = -EINVAL; 440 + 441 + if (namelen > DLM_RESNAME_MAXLEN) 442 + goto out; 443 444 if (dlm_no_directory(ls)) 445 flags |= R_CREATE; 446 447 + error = 0; 448 hash = jhash(name, namelen, 0); 449 bucket = hash & (ls->ls_rsbtbl_size - 1); 450 ··· 1222 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; 1223 if (b == 1) { 1224 int len = receive_extralen(ms); 1225 + if (len > DLM_RESNAME_MAXLEN) 1226 + len = DLM_RESNAME_MAXLEN; 1227 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); 1228 lkb->lkb_lvbseq = ms->m_lvbseq; 1229 } ··· 1775 */ 1776 1777 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) { 1778 + if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) { 1779 if (cw && high == DLM_LOCK_PR) 1780 queue_bast(r, lkb, DLM_LOCK_CW); 1781 else ··· 1805 struct dlm_lkb *gr; 1806 1807 list_for_each_entry(gr, head, lkb_statequeue) { 1808 + if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) { 1809 queue_bast(r, gr, lkb->lkb_rqmode); 1810 gr->lkb_highbast = lkb->lkb_rqmode; 1811 } ··· 1960 } 1961 1962 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags, 1963 + int namelen, unsigned long timeout_cs, 1964 + void (*ast) (void *astparam), 1965 + void *astparam, 1966 + void (*bast) (void *astparam, int mode), 1967 + struct dlm_args *args) 1968 { 1969 int rv = -EINVAL; 1970 ··· 2011 an active lkb cannot be modified before locking the rsb */ 2012 2013 args->flags = flags; 2014 + args->astfn = ast; 2015 + args->astparam = astparam; 2016 + args->bastfn = bast; 2017 args->timeout = timeout_cs; 2018 args->mode = mode; 2019 args->lksb = lksb; ··· 2032 return -EINVAL; 2033 2034 args->flags = flags; 2035 + args->astparam = astarg; 2036 return 0; 2037 } 2038 ··· 2062 2063 lkb->lkb_exflags = args->flags; 2064 lkb->lkb_sbflags = 0; 2065 + lkb->lkb_astfn = args->astfn; 2066 lkb->lkb_astparam = args->astparam; 2067 + lkb->lkb_bastfn = args->bastfn; 2068 lkb->lkb_rqmode 
= args->mode; 2069 lkb->lkb_lksb = args->lksb; 2070 lkb->lkb_lvbptr = args->lksb->sb_lvbptr; ··· 2711 /* m_result and m_bastmode are set from function args, 2712 not from lkb fields */ 2713 2714 + if (lkb->lkb_bastfn) 2715 ms->m_asts |= AST_BAST; 2716 + if (lkb->lkb_astfn) 2717 ms->m_asts |= AST_COMP; 2718 2719 /* compare with switch in create_message; send_remove() doesn't ··· 2989 if (!lkb->lkb_lvbptr) 2990 return -ENOMEM; 2991 len = receive_extralen(ms); 2992 + if (len > DLM_RESNAME_MAXLEN) 2993 + len = DLM_RESNAME_MAXLEN; 2994 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); 2995 } 2996 return 0; 2997 + } 2998 + 2999 + static void fake_bastfn(void *astparam, int mode) 3000 + { 3001 + log_print("fake_bastfn should not be called"); 3002 + } 3003 + 3004 + static void fake_astfn(void *astparam) 3005 + { 3006 + log_print("fake_astfn should not be called"); 3007 } 3008 3009 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb, ··· 3002 lkb->lkb_remid = ms->m_lkid; 3003 lkb->lkb_grmode = DLM_LOCK_IV; 3004 lkb->lkb_rqmode = ms->m_rqmode; 3005 + 3006 + lkb->lkb_bastfn = (ms->m_asts & AST_BAST) ? &fake_bastfn : NULL; 3007 + lkb->lkb_astfn = (ms->m_asts & AST_COMP) ? &fake_astfn : NULL; 3008 3009 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { 3010 /* lkb was just created so there won't be an lvb yet */ ··· 3802 int nodeid) 3803 { 3804 if (dlm_locking_stopped(ls)) { 3805 + dlm_add_requestqueue(ls, nodeid, ms); 3806 } else { 3807 dlm_wait_requestqueue(ls); 3808 _receive_message(ls, ms); ··· 3822 standard locking activity) or an RCOM (recovery message sent as part of 3823 lockspace recovery). 
*/ 3824 3825 + void dlm_receive_buffer(union dlm_packet *p, int nodeid) 3826 { 3827 + struct dlm_header *hd = &p->header; 3828 struct dlm_ls *ls; 3829 int type = 0; 3830 3831 switch (hd->h_cmd) { 3832 case DLM_MSG: 3833 + dlm_message_in(&p->message); 3834 + type = p->message.m_type; 3835 break; 3836 case DLM_RCOM: 3837 + dlm_rcom_in(&p->rcom); 3838 + type = p->rcom.rc_type; 3839 break; 3840 default: 3841 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid); ··· 3856 hd->h_lockspace, nodeid, hd->h_cmd, type); 3857 3858 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) 3859 + dlm_send_ls_not_ready(nodeid, &p->rcom); 3860 return; 3861 } 3862 ··· 3865 3866 down_read(&ls->ls_recv_active); 3867 if (hd->h_cmd == DLM_MSG) 3868 + dlm_receive_message(ls, &p->message, nodeid); 3869 else 3870 + dlm_receive_rcom(ls, &p->rcom, nodeid); 3871 up_read(&ls->ls_recv_active); 3872 3873 dlm_put_lockspace(ls); ··· 4267 return NULL; 4268 } 4269 4270 + /* needs at least dlm_rcom + rcom_lock */ 4271 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, 4272 struct dlm_rsb *r, struct dlm_rcom *rc) 4273 { 4274 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; 4275 4276 lkb->lkb_nodeid = rc->rc_header.h_nodeid; 4277 + lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid); 4278 + lkb->lkb_remid = le32_to_cpu(rl->rl_lkid); 4279 + lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags); 4280 + lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF; 4281 lkb->lkb_flags |= DLM_IFL_MSTCPY; 4282 + lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq); 4283 lkb->lkb_rqmode = rl->rl_rqmode; 4284 lkb->lkb_grmode = rl->rl_grmode; 4285 /* don't set lkb_status because add_lkb wants to itself */ 4286 4287 + lkb->lkb_bastfn = (rl->rl_asts & AST_BAST) ? &fake_bastfn : NULL; 4288 + lkb->lkb_astfn = (rl->rl_asts & AST_COMP) ? 
&fake_astfn : NULL; 4289 4290 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { 4291 + int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) - 4292 + sizeof(struct rcom_lock); 4293 + if (lvblen > ls->ls_lvblen) 4294 + return -EINVAL; 4295 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); 4296 if (!lkb->lkb_lvbptr) 4297 return -ENOMEM; 4298 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen); 4299 } 4300 ··· 4300 The real granted mode of these converting locks cannot be determined 4301 until all locks have been rebuilt on the rsb (recover_conversion) */ 4302 4303 + if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) && 4304 + middle_conversion(lkb)) { 4305 rl->rl_status = DLM_LKSTS_CONVERT; 4306 lkb->lkb_grmode = DLM_LOCK_IV; 4307 rsb_set_flag(r, RSB_RECOVER_CONVERT); ··· 4315 the given values and send back our lkid. We send back our lkid by sending 4316 back the rcom_lock struct we got but with the remid field filled in. */ 4317 4318 + /* needs at least dlm_rcom + rcom_lock */ 4319 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) 4320 { 4321 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; ··· 4327 goto out; 4328 } 4329 4330 + error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen), 4331 + R_MASTER, &r); 4332 if (error) 4333 goto out; 4334 4335 lock_rsb(r); 4336 4337 + lkb = search_remid(r, rc->rc_header.h_nodeid, le32_to_cpu(rl->rl_lkid)); 4338 if (lkb) { 4339 error = -EEXIST; 4340 goto out_remid; ··· 4356 out_remid: 4357 /* this is the new value returned to the lock holder for 4358 saving in its process-copy lkb */ 4359 + rl->rl_remid = cpu_to_le32(lkb->lkb_id); 4360 4361 out_unlock: 4362 unlock_rsb(r); 4363 put_rsb(r); 4364 out: 4365 if (error) 4366 + log_debug(ls, "recover_master_copy %d %x", error, 4367 + le32_to_cpu(rl->rl_lkid)); 4368 + rl->rl_result = cpu_to_le32(error); 4369 return error; 4370 } 4371 4372 + /* needs at least dlm_rcom + rcom_lock */ 4373 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) 4374 { 4375 struct 
rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; ··· 4375 struct dlm_lkb *lkb; 4376 int error; 4377 4378 + error = find_lkb(ls, le32_to_cpu(rl->rl_lkid), &lkb); 4379 if (error) { 4380 + log_error(ls, "recover_process_copy no lkid %x", 4381 + le32_to_cpu(rl->rl_lkid)); 4382 return error; 4383 } 4384 4385 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb);); 4386 4387 + error = le32_to_cpu(rl->rl_result); 4388 4389 r = lkb->lkb_resource; 4390 hold_rsb(r); ··· 4402 log_debug(ls, "master copy exists %x", lkb->lkb_id); 4403 /* fall through */ 4404 case 0: 4405 + lkb->lkb_remid = le32_to_cpu(rl->rl_remid); 4406 break; 4407 default: 4408 log_error(ls, "dlm_recover_process_copy unknown error %d %x", ··· 4451 lock and that lkb_astparam is the dlm_user_args structure. */ 4452 4453 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs, 4454 + fake_astfn, ua, fake_bastfn, &args); 4455 lkb->lkb_flags |= DLM_IFL_USER; 4456 ua->old_mode = DLM_LOCK_IV; 4457 ··· 4504 /* user can change the params on its lock when it converts it, or 4505 add an lvb that didn't exist before */ 4506 4507 + ua = lkb->lkb_ua; 4508 4509 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) { 4510 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL); ··· 4525 ua->old_mode = lkb->lkb_grmode; 4526 4527 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs, 4528 + fake_astfn, ua, fake_bastfn, &args); 4529 if (error) 4530 goto out_put; 4531 ··· 4555 if (error) 4556 goto out; 4557 4558 + ua = lkb->lkb_ua; 4559 4560 if (lvb_in && ua->lksb.sb_lvbptr) 4561 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); ··· 4604 if (error) 4605 goto out; 4606 4607 + ua = lkb->lkb_ua; 4608 if (ua_tmp->castparam) 4609 ua->castparam = ua_tmp->castparam; 4610 ua->user_lksb = ua_tmp->user_lksb; ··· 4642 if (error) 4643 goto out; 4644 4645 + ua = lkb->lkb_ua; 4646 4647 error = set_unlock_args(flags, ua, &args); 4648 if (error) ··· 4681 4682 static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) 
4683 { 4684 struct dlm_args args; 4685 int error; 4686 ··· 4690 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans); 4691 mutex_unlock(&ls->ls_orphans_mutex); 4692 4693 + set_unlock_args(0, lkb->lkb_ua, &args); 4694 4695 error = cancel_lock(ls, lkb, &args); 4696 if (error == -DLM_ECANCEL) ··· 4703 4704 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) 4705 { 4706 struct dlm_args args; 4707 int error; 4708 4709 + set_unlock_args(DLM_LKF_FORCEUNLOCK, lkb->lkb_ua, &args); 4710 4711 error = unlock_lock(ls, lkb, &args); 4712 if (error == -DLM_EUNLOCK)
+1 -1
fs/dlm/lock.h
··· 17 void dlm_dump_rsb(struct dlm_rsb *r); 18 void dlm_print_lkb(struct dlm_lkb *lkb); 19 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms); 20 - void dlm_receive_buffer(struct dlm_header *hd, int nodeid); 21 int dlm_modes_compat(int mode1, int mode2); 22 void dlm_put_rsb(struct dlm_rsb *r); 23 void dlm_hold_rsb(struct dlm_rsb *r);
··· 17 void dlm_dump_rsb(struct dlm_rsb *r); 18 void dlm_print_lkb(struct dlm_lkb *lkb); 19 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms); 20 + void dlm_receive_buffer(union dlm_packet *p, int nodeid); 21 int dlm_modes_compat(int mode1, int mode2); 22 void dlm_put_rsb(struct dlm_rsb *r); 23 void dlm_hold_rsb(struct dlm_rsb *r);
+1 -1
fs/dlm/lockspace.c
··· 191 } 192 193 194 - int dlm_lockspace_init(void) 195 { 196 ls_count = 0; 197 mutex_init(&ls_lock);
··· 191 } 192 193 194 + int __init dlm_lockspace_init(void) 195 { 196 ls_count = 0; 197 mutex_init(&ls_lock);
+2 -2
fs/dlm/memory.c
··· 18 static struct kmem_cache *lkb_cache; 19 20 21 - int dlm_memory_init(void) 22 { 23 int ret = 0; 24 ··· 80 { 81 if (lkb->lkb_flags & DLM_IFL_USER) { 82 struct dlm_user_args *ua; 83 - ua = (struct dlm_user_args *)lkb->lkb_astparam; 84 if (ua) { 85 if (ua->lksb.sb_lvbptr) 86 kfree(ua->lksb.sb_lvbptr);
··· 18 static struct kmem_cache *lkb_cache; 19 20 21 + int __init dlm_memory_init(void) 22 { 23 int ret = 0; 24 ··· 80 { 81 if (lkb->lkb_flags & DLM_IFL_USER) { 82 struct dlm_user_args *ua; 83 + ua = lkb->lkb_ua; 84 if (ua) { 85 if (ua->lksb.sb_lvbptr) 86 kfree(ua->lksb.sb_lvbptr);
+20 -13
fs/dlm/midcomms.c
··· 61 union { 62 unsigned char __buf[DLM_INBUF_LEN]; 63 /* this is to force proper alignment on some arches */ 64 - struct dlm_header dlm; 65 } __tmp; 66 - struct dlm_header *msg = &__tmp.dlm; 67 int ret = 0; 68 int err = 0; 69 uint16_t msglen; ··· 75 message may wrap around the end of the buffer back to the 76 start, so we need to use a temp buffer and copy_from_cb. */ 77 78 - copy_from_cb(msg, base, offset, sizeof(struct dlm_header), 79 limit); 80 81 - msglen = le16_to_cpu(msg->h_length); 82 - lockspace = msg->h_lockspace; 83 84 err = -EINVAL; 85 if (msglen < sizeof(struct dlm_header)) 86 break; 87 err = -E2BIG; 88 if (msglen > dlm_config.ci_buffer_size) { 89 log_print("message size %d from %d too big, buf len %d", ··· 111 in the buffer on the stack (which should work for most 112 ordinary messages). */ 113 114 - if (msglen > DLM_INBUF_LEN && msg == &__tmp.dlm) { 115 - msg = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL); 116 - if (msg == NULL) 117 return ret; 118 } 119 120 - copy_from_cb(msg, base, offset, msglen, limit); 121 122 - BUG_ON(lockspace != msg->h_lockspace); 123 124 ret += msglen; 125 offset += msglen; 126 offset &= (limit - 1); 127 len -= msglen; 128 129 - dlm_receive_buffer(msg, nodeid); 130 } 131 132 - if (msg != &__tmp.dlm) 133 - kfree(msg); 134 135 return err ? err : ret; 136 }
··· 61 union { 62 unsigned char __buf[DLM_INBUF_LEN]; 63 /* this is to force proper alignment on some arches */ 64 + union dlm_packet p; 65 } __tmp; 66 + union dlm_packet *p = &__tmp.p; 67 int ret = 0; 68 int err = 0; 69 uint16_t msglen; ··· 75 message may wrap around the end of the buffer back to the 76 start, so we need to use a temp buffer and copy_from_cb. */ 77 78 + copy_from_cb(p, base, offset, sizeof(struct dlm_header), 79 limit); 80 81 + msglen = le16_to_cpu(p->header.h_length); 82 + lockspace = p->header.h_lockspace; 83 84 err = -EINVAL; 85 if (msglen < sizeof(struct dlm_header)) 86 break; 87 + if (p->header.h_cmd == DLM_MSG) { 88 + if (msglen < sizeof(struct dlm_message)) 89 + break; 90 + } else { 91 + if (msglen < sizeof(struct dlm_rcom)) 92 + break; 93 + } 94 err = -E2BIG; 95 if (msglen > dlm_config.ci_buffer_size) { 96 log_print("message size %d from %d too big, buf len %d", ··· 104 in the buffer on the stack (which should work for most 105 ordinary messages). */ 106 107 + if (msglen > sizeof(__tmp) && p == &__tmp.p) { 108 + p = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL); 109 + if (p == NULL) 110 return ret; 111 } 112 113 + copy_from_cb(p, base, offset, msglen, limit); 114 115 + BUG_ON(lockspace != p->header.h_lockspace); 116 117 ret += msglen; 118 offset += msglen; 119 offset &= (limit - 1); 120 len -= msglen; 121 122 + dlm_receive_buffer(p, nodeid); 123 } 124 125 + if (p != &__tmp.p) 126 + kfree(p); 127 128 return err ? err : ret; 129 }
+4 -5
fs/dlm/netlink.c
··· 78 .doit = user_cmd, 79 }; 80 81 - int dlm_netlink_init(void) 82 { 83 int rv; 84 ··· 95 return rv; 96 } 97 98 - void dlm_netlink_exit(void) 99 { 100 genl_unregister_ops(&family, &dlm_nl_ops); 101 genl_unregister_family(&family); ··· 104 static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb) 105 { 106 struct dlm_rsb *r = lkb->lkb_resource; 107 - struct dlm_user_args *ua = (struct dlm_user_args *) lkb->lkb_astparam; 108 109 memset(data, 0, sizeof(struct dlm_lock_data)); 110 ··· 116 data->grmode = lkb->lkb_grmode; 117 data->rqmode = lkb->lkb_rqmode; 118 data->timestamp = lkb->lkb_timestamp; 119 - if (ua) 120 - data->xid = ua->xid; 121 if (r) { 122 data->lockspace_id = r->res_ls->ls_global_id; 123 data->resource_namelen = r->res_length;
··· 78 .doit = user_cmd, 79 }; 80 81 + int __init dlm_netlink_init(void) 82 { 83 int rv; 84 ··· 95 return rv; 96 } 97 98 + void __exit dlm_netlink_exit(void) 99 { 100 genl_unregister_ops(&family, &dlm_nl_ops); 101 genl_unregister_family(&family); ··· 104 static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb) 105 { 106 struct dlm_rsb *r = lkb->lkb_resource; 107 108 memset(data, 0, sizeof(struct dlm_lock_data)); 109 ··· 117 data->grmode = lkb->lkb_grmode; 118 data->rqmode = lkb->lkb_rqmode; 119 data->timestamp = lkb->lkb_timestamp; 120 + if (lkb->lkb_ua) 121 + data->xid = lkb->lkb_ua->xid; 122 if (r) { 123 data->lockspace_id = r->res_ls->ls_global_id; 124 data->resource_namelen = r->res_length;
+41 -22
fs/dlm/rcom.c
··· 78 
79 static void make_config(struct dlm_ls *ls, struct rcom_config *rf) 
80 { 
81 - rf->rf_lvblen = ls->ls_lvblen; 
82 - rf->rf_lsflags = ls->ls_exflags; 
83 } 
84 
85 static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) 
86 { 
87 struct rcom_config *rf = (struct rcom_config *) rc->rc_buf; 
88 
89 if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) { 
90 log_error(ls, "version mismatch: %x nodeid %d: %x", 
··· 94 return -EPROTO; 
95 } 
96 
97 - if (rf->rf_lvblen != ls->ls_lvblen || 
98 - rf->rf_lsflags != ls->ls_exflags) { 
99 log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x", 
100 - ls->ls_lvblen, ls->ls_exflags, 
101 - nodeid, rf->rf_lvblen, rf->rf_lsflags); 
102 return -EPROTO; 
103 } 
104 return 0; 
··· 136 ls->ls_recover_nodeid = nodeid; 
137 
138 if (nodeid == dlm_our_nodeid()) { 
139 - rc = (struct dlm_rcom *) ls->ls_recover_buf; 
140 rc->rc_result = dlm_recover_status(ls); 
141 goto out; 
142 } 
··· 155 if (error) 
156 goto out; 
157 
158 - rc = (struct dlm_rcom *) ls->ls_recover_buf; 
159 
160 if (rc->rc_result == -ESRCH) { 
161 /* we pretend the remote lockspace exists with 0 status */ 
··· 209 { 
210 struct dlm_rcom *rc; 
211 struct dlm_mhandle *mh; 
212 - int error = 0, len = sizeof(struct dlm_rcom); 
213 
214 ls->ls_recover_nodeid = nodeid; 
215 
216 if (nodeid == dlm_our_nodeid()) { 
217 dlm_copy_master_names(ls, last_name, last_len, 
218 - ls->ls_recover_buf + len, 
219 - dlm_config.ci_buffer_size - len, nodeid); 
220 goto out; 
221 } 
222 
··· 308 { 
309 memset(rl, 0, sizeof(*rl)); 
310 
311 - rl->rl_ownpid = lkb->lkb_ownpid; 
312 - rl->rl_lkid = lkb->lkb_id; 
313 - rl->rl_exflags = lkb->lkb_exflags; 
314 - rl->rl_flags = lkb->lkb_flags; 
315 - rl->rl_lvbseq = lkb->lkb_lvbseq; 
316 rl->rl_rqmode = lkb->lkb_rqmode; 
317 rl->rl_grmode = lkb->lkb_grmode; 
318 rl->rl_status = lkb->lkb_status; 
319 - rl->rl_wait_type = lkb->lkb_wait_type; 
320 
321 - if (lkb->lkb_bastaddr) 
322 rl->rl_asts |= AST_BAST; 
323 - if (lkb->lkb_astaddr) 
324 rl->rl_asts |= AST_COMP; 
325 
326 - rl->rl_namelen = r->res_length; 
327 memcpy(rl->rl_name, r->res_name, r->res_length); 
328 
329 /* FIXME: might we have an lvb without DLM_LKF_VALBLK set ? 
··· 357 return error; 
358 } 
359 
360 static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in) 
361 { 
362 struct dlm_rcom *rc; 
··· 411 rc->rc_result = -ESRCH; 
412 
413 rf = (struct rcom_config *) rc->rc_buf; 
414 - rf->rf_lvblen = -1; 
415 
416 dlm_rcom_out(rc); 
417 dlm_lowcomms_commit_buffer(mh); 
··· 449 
450 void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) 
451 { 
452 if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) { 
453 log_debug(ls, "ignoring recovery message %x from %d", 
454 rc->rc_type, nodeid); 
··· 474 break; 
475 
476 case DLM_RCOM_LOCK: 
477 receive_rcom_lock(ls, rc); 
478 break; 
479 
··· 492 break; 
493 
494 case DLM_RCOM_LOCK_REPLY: 
495 dlm_recover_process_copy(ls, rc); 
496 break; 
497 
498 default: 
499 log_error(ls, "receive_rcom bad type %d", rc->rc_type); 
500 } 
501 - out: 
502 return; 
503 } 
504 
··· 78 
79 static void make_config(struct dlm_ls *ls, struct rcom_config *rf) 
80 { 
81 + rf->rf_lvblen = cpu_to_le32(ls->ls_lvblen); 
82 + rf->rf_lsflags = cpu_to_le32(ls->ls_exflags); 
83 } 
84 
85 static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) 
86 { 
87 struct rcom_config *rf = (struct rcom_config *) rc->rc_buf; 
88 + size_t conf_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_config); 
89 
90 if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) { 
91 log_error(ls, "version mismatch: %x nodeid %d: %x", 
··· 93 return -EPROTO; 
94 } 
95 
96 + if (rc->rc_header.h_length < conf_size) { 
97 + log_error(ls, "config too short: %d nodeid %d", 
98 + rc->rc_header.h_length, nodeid); 
99 + return -EPROTO; 
100 + } 
101 + 
102 + if (le32_to_cpu(rf->rf_lvblen) != ls->ls_lvblen || 
103 + le32_to_cpu(rf->rf_lsflags) != ls->ls_exflags) { 
104 log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x", 
105 + ls->ls_lvblen, ls->ls_exflags, nodeid, 
106 + le32_to_cpu(rf->rf_lvblen), 
107 + le32_to_cpu(rf->rf_lsflags)); 
108 return -EPROTO; 
109 } 
110 return 0; 
··· 128 ls->ls_recover_nodeid = nodeid; 
129 
130 if (nodeid == dlm_our_nodeid()) { 
131 + rc = ls->ls_recover_buf; 
132 rc->rc_result = dlm_recover_status(ls); 
133 goto out; 
134 } 
··· 147 if (error) 
148 goto out; 
149 
150 + rc = ls->ls_recover_buf; 
151 
152 if (rc->rc_result == -ESRCH) { 
153 /* we pretend the remote lockspace exists with 0 status */ 
··· 201 { 
202 struct dlm_rcom *rc; 
203 struct dlm_mhandle *mh; 
204 + int error = 0; 
205 + int max_size = dlm_config.ci_buffer_size - sizeof(struct dlm_rcom); 
206 
207 ls->ls_recover_nodeid = nodeid; 
208 
209 if (nodeid == dlm_our_nodeid()) { 
210 dlm_copy_master_names(ls, last_name, last_len, 
211 + ls->ls_recover_buf->rc_buf, 
212 + max_size, nodeid); 
213 goto out; 
214 } 
215 
··· 299 { 
300 memset(rl, 0, sizeof(*rl)); 
301 
302 + rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid); 
303 + rl->rl_lkid = cpu_to_le32(lkb->lkb_id); 
304 + rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags); 
305 + rl->rl_flags = cpu_to_le32(lkb->lkb_flags); 
306 + rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); 
307 rl->rl_rqmode = lkb->lkb_rqmode; 
308 rl->rl_grmode = lkb->lkb_grmode; 
309 rl->rl_status = lkb->lkb_status; 
310 + rl->rl_wait_type = cpu_to_le16(lkb->lkb_wait_type); 
311 
312 + if (lkb->lkb_bastfn) 
313 rl->rl_asts |= AST_BAST; 
314 + if (lkb->lkb_astfn) 
315 rl->rl_asts |= AST_COMP; 
316 
317 + rl->rl_namelen = cpu_to_le16(r->res_length); 
318 memcpy(rl->rl_name, r->res_name, r->res_length); 
319 
320 /* FIXME: might we have an lvb without DLM_LKF_VALBLK set ? 
··· 348 return error; 
349 } 
350 
351 + /* needs at least dlm_rcom + rcom_lock */ 
352 static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in) 
353 { 
354 struct dlm_rcom *rc; 
··· 401 rc->rc_result = -ESRCH; 
402 
403 rf = (struct rcom_config *) rc->rc_buf; 
404 + rf->rf_lvblen = cpu_to_le32(~0U); 
405 
406 dlm_rcom_out(rc); 
407 dlm_lowcomms_commit_buffer(mh); 
··· 439 
440 void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) 
441 { 
442 + int lock_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_lock); 
443 + 
444 if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) { 
445 log_debug(ls, "ignoring recovery message %x from %d", 
446 rc->rc_type, nodeid); 
··· 462 break; 
463 
464 case DLM_RCOM_LOCK: 
465 + if (rc->rc_header.h_length < lock_size) 
466 + goto Eshort; 
467 receive_rcom_lock(ls, rc); 
468 break; 
469 
··· 478 break; 
479 
480 case DLM_RCOM_LOCK_REPLY: 
481 + if (rc->rc_header.h_length < lock_size) 
482 + goto Eshort; 
483 dlm_recover_process_copy(ls, rc); 
484 break; 
485 
486 default: 
487 log_error(ls, "receive_rcom bad type %d", rc->rc_type); 
488 } 
489 + out: 
490 return; 
491 + Eshort: 
492 + log_error(ls, "recovery message %x from %d is too short", 
493 + rc->rc_type, nodeid); 
494 } 
495 
+2 -2
fs/dlm/recover.c
··· 94 95 static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status) 96 { 97 - struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf; 98 struct dlm_member *memb; 99 int error = 0, delay; 100 ··· 123 124 static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status) 125 { 126 - struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf; 127 int error = 0, delay = 0, nodeid = ls->ls_low_nodeid; 128 129 for (;;) {
··· 94 95 static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status) 96 { 97 + struct dlm_rcom *rc = ls->ls_recover_buf; 98 struct dlm_member *memb; 99 int error = 0, delay; 100 ··· 123 124 static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status) 125 { 126 + struct dlm_rcom *rc = ls->ls_recover_buf; 127 int error = 0, delay = 0, nodeid = ls->ls_low_nodeid; 128 129 for (;;) {
+6 -6
fs/dlm/requestqueue.c
··· 20 struct rq_entry { 21 struct list_head list; 22 int nodeid; 23 - char request[0]; 24 }; 25 26 /* ··· 30 * lockspace is enabled on some while still suspended on others. 31 */ 32 33 - void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd) 34 { 35 struct rq_entry *e; 36 - int length = hd->h_length; 37 38 e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL); 39 if (!e) { ··· 42 } 43 44 e->nodeid = nodeid; 45 - memcpy(e->request, hd, length); 46 47 mutex_lock(&ls->ls_requestqueue_mutex); 48 list_add_tail(&e->list, &ls->ls_requestqueue); ··· 76 e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); 77 mutex_unlock(&ls->ls_requestqueue_mutex); 78 79 - dlm_receive_message_saved(ls, (struct dlm_message *)e->request); 80 81 mutex_lock(&ls->ls_requestqueue_mutex); 82 list_del(&e->list); ··· 176 177 mutex_lock(&ls->ls_requestqueue_mutex); 178 list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { 179 - ms = (struct dlm_message *) e->request; 180 181 if (purge_request(ls, ms, e->nodeid)) { 182 list_del(&e->list);
··· 20 struct rq_entry { 21 struct list_head list; 22 int nodeid; 23 + struct dlm_message request; 24 }; 25 26 /* ··· 30 * lockspace is enabled on some while still suspended on others. 31 */ 32 33 + void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms) 34 { 35 struct rq_entry *e; 36 + int length = ms->m_header.h_length - sizeof(struct dlm_message); 37 38 e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL); 39 if (!e) { ··· 42 } 43 44 e->nodeid = nodeid; 45 + memcpy(&e->request, ms, ms->m_header.h_length); 46 47 mutex_lock(&ls->ls_requestqueue_mutex); 48 list_add_tail(&e->list, &ls->ls_requestqueue); ··· 76 e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); 77 mutex_unlock(&ls->ls_requestqueue_mutex); 78 79 + dlm_receive_message_saved(ls, &e->request); 80 81 mutex_lock(&ls->ls_requestqueue_mutex); 82 list_del(&e->list); ··· 176 177 mutex_lock(&ls->ls_requestqueue_mutex); 178 list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { 179 + ms = &e->request; 180 181 if (purge_request(ls, ms, e->nodeid)) { 182 list_del(&e->list);
+1 -1
fs/dlm/requestqueue.h
··· 13 #ifndef __REQUESTQUEUE_DOT_H__ 14 #define __REQUESTQUEUE_DOT_H__ 15 16 - void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd); 17 int dlm_process_requestqueue(struct dlm_ls *ls); 18 void dlm_wait_requestqueue(struct dlm_ls *ls); 19 void dlm_purge_requestqueue(struct dlm_ls *ls);
··· 13 #ifndef __REQUESTQUEUE_DOT_H__ 14 #define __REQUESTQUEUE_DOT_H__ 15 16 + void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms); 17 int dlm_process_requestqueue(struct dlm_ls *ls); 18 void dlm_wait_requestqueue(struct dlm_ls *ls); 19 void dlm_purge_requestqueue(struct dlm_ls *ls);
+12 -17
fs/dlm/user.c
··· 82 
83 static void compat_input(struct dlm_write_request *kb, 
84 struct dlm_write_request32 *kb32, 
85 - int max_namelen) 
86 { 
87 kb->version[0] = kb32->version[0]; 
88 kb->version[1] = kb32->version[1]; 
··· 94 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) { 
95 kb->i.lspace.flags = kb32->i.lspace.flags; 
96 kb->i.lspace.minor = kb32->i.lspace.minor; 
97 - strcpy(kb->i.lspace.name, kb32->i.lspace.name); 
98 } else if (kb->cmd == DLM_USER_PURGE) { 
99 kb->i.purge.nodeid = kb32->i.purge.nodeid; 
100 kb->i.purge.pid = kb32->i.purge.pid; 
··· 113 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr; 
114 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb; 
115 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN); 
116 - if (kb->i.lock.namelen <= max_namelen) 
117 - memcpy(kb->i.lock.name, kb32->i.lock.name, 
118 - kb->i.lock.namelen); 
119 - else 
120 - kb->i.lock.namelen = max_namelen; 
121 } 
122 } 
123 
··· 195 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) 
196 goto out; 
197 
198 - DLM_ASSERT(lkb->lkb_astparam, dlm_print_lkb(lkb);); 
199 - ua = (struct dlm_user_args *)lkb->lkb_astparam; 
200 proc = ua->proc; 
201 
202 if (type == AST_BAST && ua->bastaddr == NULL) 
··· 506 #endif 
507 return -EINVAL; 
508 
509 - kbuf = kmalloc(count, GFP_KERNEL); 
510 if (!kbuf) 
511 return -ENOMEM; 
512 
··· 524 if (!kbuf->is64bit) { 
525 struct dlm_write_request32 *k32buf; 
526 k32buf = (struct dlm_write_request32 *)kbuf; 
527 - kbuf = kmalloc(count + (sizeof(struct dlm_write_request) - 
528 sizeof(struct dlm_write_request32)), GFP_KERNEL); 
529 if (!kbuf) 
530 return -ENOMEM; 
531 
532 if (proc) 
533 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags); 
534 - compat_input(kbuf, k32buf, 
535 - count - sizeof(struct dlm_write_request32)); 
536 kfree(k32buf); 
537 } 
538 #endif 
··· 771 { 
772 struct dlm_user_proc *proc = file->private_data; 
773 struct dlm_lkb *lkb; 
774 - struct dlm_user_args *ua; 
775 DECLARE_WAITQUEUE(wait, current); 
776 int error, type=0, bmode=0, removed = 0; 
777 
··· 841 } 
842 spin_unlock(&proc->asts_spin); 
843 
844 - ua = (struct dlm_user_args *)lkb->lkb_astparam; 
845 - error = copy_result_to_user(ua, 
846 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), 
847 type, bmode, buf, count); 
848 
··· 902 .minor = MISC_DYNAMIC_MINOR, 
903 }; 
904 
905 - int dlm_user_init(void) 
906 { 
907 int error; 
908 
··· 82 
83 static void compat_input(struct dlm_write_request *kb, 
84 struct dlm_write_request32 *kb32, 
85 + size_t count) 
86 { 
87 kb->version[0] = kb32->version[0]; 
88 kb->version[1] = kb32->version[1]; 
··· 94 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) { 
95 kb->i.lspace.flags = kb32->i.lspace.flags; 
96 kb->i.lspace.minor = kb32->i.lspace.minor; 
97 + memcpy(kb->i.lspace.name, kb32->i.lspace.name, count - 
98 + offsetof(struct dlm_write_request32, i.lspace.name)); 
99 } else if (kb->cmd == DLM_USER_PURGE) { 
100 kb->i.purge.nodeid = kb32->i.purge.nodeid; 
101 kb->i.purge.pid = kb32->i.purge.pid; 
··· 112 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr; 
113 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb; 
114 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN); 
115 + memcpy(kb->i.lock.name, kb32->i.lock.name, count - 
116 + offsetof(struct dlm_write_request32, i.lock.name)); 
117 } 
118 } 
119 
··· 197 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) 
198 goto out; 
199 
200 + DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb);); 
201 + ua = lkb->lkb_ua; 
202 proc = ua->proc; 
203 
204 if (type == AST_BAST && ua->bastaddr == NULL) 
··· 508 #endif 
509 return -EINVAL; 
510 
511 + kbuf = kzalloc(count + 1, GFP_KERNEL); 
512 if (!kbuf) 
513 return -ENOMEM; 
514 
··· 526 if (!kbuf->is64bit) { 
527 struct dlm_write_request32 *k32buf; 
528 k32buf = (struct dlm_write_request32 *)kbuf; 
529 + kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) - 
530 sizeof(struct dlm_write_request32)), GFP_KERNEL); 
531 if (!kbuf) 
532 return -ENOMEM; 
533 
534 if (proc) 
535 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags); 
536 + compat_input(kbuf, k32buf, count + 1); 
537 kfree(k32buf); 
538 } 
539 #endif 
··· 774 { 
775 struct dlm_user_proc *proc = file->private_data; 
776 struct dlm_lkb *lkb; 
777 DECLARE_WAITQUEUE(wait, current); 
778 int error, type=0, bmode=0, removed = 0; 
779 
··· 845 } 
846 spin_unlock(&proc->asts_spin); 
847 
848 + error = copy_result_to_user(lkb->lkb_ua, 
849 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), 
850 type, bmode, buf, count); 
851 
··· 907 .minor = MISC_DYNAMIC_MINOR, 
908 }; 
909 
910 + int __init dlm_user_init(void) 
911 { 
912 int error; 
913 
-61
fs/dlm/util.c
··· 131 ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result)); 
132 } 
133 
134 - static void rcom_lock_out(struct rcom_lock *rl) 
135 - { 
136 - rl->rl_ownpid = cpu_to_le32(rl->rl_ownpid); 
137 - rl->rl_lkid = cpu_to_le32(rl->rl_lkid); 
138 - rl->rl_remid = cpu_to_le32(rl->rl_remid); 
139 - rl->rl_parent_lkid = cpu_to_le32(rl->rl_parent_lkid); 
140 - rl->rl_parent_remid = cpu_to_le32(rl->rl_parent_remid); 
141 - rl->rl_exflags = cpu_to_le32(rl->rl_exflags); 
142 - rl->rl_flags = cpu_to_le32(rl->rl_flags); 
143 - rl->rl_lvbseq = cpu_to_le32(rl->rl_lvbseq); 
144 - rl->rl_result = cpu_to_le32(rl->rl_result); 
145 - rl->rl_wait_type = cpu_to_le16(rl->rl_wait_type); 
146 - rl->rl_namelen = cpu_to_le16(rl->rl_namelen); 
147 - } 
148 - 
149 - static void rcom_lock_in(struct rcom_lock *rl) 
150 - { 
151 - rl->rl_ownpid = le32_to_cpu(rl->rl_ownpid); 
152 - rl->rl_lkid = le32_to_cpu(rl->rl_lkid); 
153 - rl->rl_remid = le32_to_cpu(rl->rl_remid); 
154 - rl->rl_parent_lkid = le32_to_cpu(rl->rl_parent_lkid); 
155 - rl->rl_parent_remid = le32_to_cpu(rl->rl_parent_remid); 
156 - rl->rl_exflags = le32_to_cpu(rl->rl_exflags); 
157 - rl->rl_flags = le32_to_cpu(rl->rl_flags); 
158 - rl->rl_lvbseq = le32_to_cpu(rl->rl_lvbseq); 
159 - rl->rl_result = le32_to_cpu(rl->rl_result); 
160 - rl->rl_wait_type = le16_to_cpu(rl->rl_wait_type); 
161 - rl->rl_namelen = le16_to_cpu(rl->rl_namelen); 
162 - } 
163 - 
164 - static void rcom_config_out(struct rcom_config *rf) 
165 - { 
166 - rf->rf_lvblen = cpu_to_le32(rf->rf_lvblen); 
167 - rf->rf_lsflags = cpu_to_le32(rf->rf_lsflags); 
168 - } 
169 - 
170 - static void rcom_config_in(struct rcom_config *rf) 
171 - { 
172 - rf->rf_lvblen = le32_to_cpu(rf->rf_lvblen); 
173 - rf->rf_lsflags = le32_to_cpu(rf->rf_lsflags); 
174 - } 
175 - 
176 void dlm_rcom_out(struct dlm_rcom *rc) 
177 { 
178 - int type = rc->rc_type; 
179 - 
180 header_out(&rc->rc_header); 
181 
182 rc->rc_type = cpu_to_le32(rc->rc_type); 
··· 140 rc->rc_id = cpu_to_le64(rc->rc_id); 
141 rc->rc_seq = cpu_to_le64(rc->rc_seq); 
142 rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply); 
143 - 
144 - if ((type == DLM_RCOM_LOCK) || (type == DLM_RCOM_LOCK_REPLY)) 
145 - rcom_lock_out((struct rcom_lock *) rc->rc_buf); 
146 - 
147 - else if (type == DLM_RCOM_STATUS_REPLY) 
148 - rcom_config_out((struct rcom_config *) rc->rc_buf); 
149 } 
150 
151 void dlm_rcom_in(struct dlm_rcom *rc) 
152 { 
153 - int type; 
154 - 
155 header_in(&rc->rc_header); 
156 
157 rc->rc_type = le32_to_cpu(rc->rc_type); 
··· 151 rc->rc_id = le64_to_cpu(rc->rc_id); 
152 rc->rc_seq = le64_to_cpu(rc->rc_seq); 
153 rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply); 
154 - 
155 - type = rc->rc_type; 
156 - 
157 - if ((type == DLM_RCOM_LOCK) || (type == DLM_RCOM_LOCK_REPLY)) 
158 - rcom_lock_in((struct rcom_lock *) rc->rc_buf); 
159 - 
160 - else if (type == DLM_RCOM_STATUS_REPLY) 
161 - rcom_config_in((struct rcom_config *) rc->rc_buf); 
162 } 
163 -
··· 131 ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result)); 132 } 133 134 void dlm_rcom_out(struct dlm_rcom *rc) 135 { 136 header_out(&rc->rc_header); 137 138 rc->rc_type = cpu_to_le32(rc->rc_type); ··· 184 rc->rc_id = cpu_to_le64(rc->rc_id); 185 rc->rc_seq = cpu_to_le64(rc->rc_seq); 186 rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply); 187 } 188 189 void dlm_rcom_in(struct dlm_rcom *rc) 190 { 191 header_in(&rc->rc_header); 192 193 rc->rc_type = le32_to_cpu(rc->rc_type); ··· 203 rc->rc_id = le64_to_cpu(rc->rc_id); 204 rc->rc_seq = le64_to_cpu(rc->rc_seq); 205 rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply); 206 }