Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw:
GFS2: Fix recovery stuck bug (try #2)
GFS2: Fix typo in stuffed file data copy handling
Revert "GFS2: recovery stuck on transaction lock"
GFS2: Make "try" lock not try quite so hard
GFS2: remove dependency on __GFP_NOFAIL
GFS2: Simplify gfs2_write_alloc_required
GFS2: Wait for journal id on mount if not specified on mount command line
GFS2: Use nobh_writepage

+169 -88
+2 -7
fs/gfs2/aops.c
··· 136 if (ret <= 0) 137 return ret; 138 139 - ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc); 140 - if (ret == -EAGAIN) 141 - ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc); 142 - return ret; 143 } 144 145 /** ··· 634 } 635 } 636 637 - error = gfs2_write_alloc_required(ip, pos, len, &alloc_required); 638 - if (error) 639 - goto out_unlock; 640 641 if (alloc_required || gfs2_is_jdata(ip)) 642 gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
··· 136 if (ret <= 0) 137 return ret; 138 139 + return nobh_writepage(page, gfs2_get_block_noalloc, wbc); 140 } 141 142 /** ··· 637 } 638 } 639 640 + alloc_required = gfs2_write_alloc_required(ip, pos, len); 641 642 if (alloc_required || gfs2_is_jdata(ip)) 643 gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
+6 -11
fs/gfs2/bmap.c
··· 1040 goto out; 1041 1042 if (gfs2_is_stuffed(ip)) { 1043 - u64 dsize = size + sizeof(struct gfs2_inode); 1044 ip->i_disksize = size; 1045 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; 1046 gfs2_trans_add_bh(ip->i_gl, dibh, 1); ··· 1244 * @ip: the file being written to 1245 * @offset: the offset to write to 1246 * @len: the number of bytes being written 1247 - * @alloc_required: set to 1 if an alloc is required, 0 otherwise 1248 * 1249 - * Returns: errno 1250 */ 1251 1252 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, 1253 - unsigned int len, int *alloc_required) 1254 { 1255 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1256 struct buffer_head bh; ··· 1257 u64 lblock, lblock_stop, size; 1258 u64 end_of_file; 1259 1260 - *alloc_required = 0; 1261 - 1262 if (!len) 1263 return 0; 1264 1265 if (gfs2_is_stuffed(ip)) { 1266 if (offset + len > 1267 sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) 1268 - *alloc_required = 1; 1269 return 0; 1270 } 1271 1272 - *alloc_required = 1; 1273 shift = sdp->sd_sb.sb_bsize_shift; 1274 BUG_ON(gfs2_is_dir(ip)); 1275 end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift; 1276 lblock = offset >> shift; 1277 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; 1278 if (lblock_stop > end_of_file) 1279 - return 0; 1280 1281 size = (lblock_stop - lblock) << shift; 1282 do { ··· 1281 bh.b_size = size; 1282 gfs2_block_map(&ip->i_inode, lblock, &bh, 0); 1283 if (!buffer_mapped(&bh)) 1284 - return 0; 1285 size -= bh.b_size; 1286 lblock += (bh.b_size >> ip->i_inode.i_blkbits); 1287 } while(size > 0); 1288 1289 - *alloc_required = 0; 1290 return 0; 1291 } 1292
··· 1040 goto out; 1041 1042 if (gfs2_is_stuffed(ip)) { 1043 + u64 dsize = size + sizeof(struct gfs2_dinode); 1044 ip->i_disksize = size; 1045 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; 1046 gfs2_trans_add_bh(ip->i_gl, dibh, 1); ··· 1244 * @ip: the file being written to 1245 * @offset: the offset to write to 1246 * @len: the number of bytes being written 1247 * 1248 + * Returns: 1 if an alloc is required, 0 otherwise 1249 */ 1250 1251 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, 1252 + unsigned int len) 1253 { 1254 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1255 struct buffer_head bh; ··· 1258 u64 lblock, lblock_stop, size; 1259 u64 end_of_file; 1260 1261 if (!len) 1262 return 0; 1263 1264 if (gfs2_is_stuffed(ip)) { 1265 if (offset + len > 1266 sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) 1267 + return 1; 1268 return 0; 1269 } 1270 1271 shift = sdp->sd_sb.sb_bsize_shift; 1272 BUG_ON(gfs2_is_dir(ip)); 1273 end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift; 1274 lblock = offset >> shift; 1275 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; 1276 if (lblock_stop > end_of_file) 1277 + return 1; 1278 1279 size = (lblock_stop - lblock) << shift; 1280 do { ··· 1285 bh.b_size = size; 1286 gfs2_block_map(&ip->i_inode, lblock, &bh, 0); 1287 if (!buffer_mapped(&bh)) 1288 + return 1; 1289 size -= bh.b_size; 1290 lblock += (bh.b_size >> ip->i_inode.i_blkbits); 1291 } while(size > 0); 1292 1293 return 0; 1294 } 1295
+1 -1
fs/gfs2/bmap.h
··· 52 int gfs2_truncatei_resume(struct gfs2_inode *ip); 53 int gfs2_file_dealloc(struct gfs2_inode *ip); 54 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, 55 - unsigned int len, int *alloc_required); 56 57 #endif /* __BMAP_DOT_H__ */
··· 52 int gfs2_truncatei_resume(struct gfs2_inode *ip); 53 int gfs2_file_dealloc(struct gfs2_inode *ip); 54 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, 55 + unsigned int len); 56 57 #endif /* __BMAP_DOT_H__ */
+9 -2
fs/gfs2/dir.c
··· 955 /* Change the pointers. 956 Don't bother distinguishing stuffed from non-stuffed. 957 This code is complicated enough already. */ 958 - lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS | __GFP_NOFAIL); 959 /* Change the pointers */ 960 for (x = 0; x < half_len; x++) 961 lp[x] = cpu_to_be64(bn); ··· 1068 1069 /* Allocate both the "from" and "to" buffers in one big chunk */ 1070 1071 - buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS | __GFP_NOFAIL); 1072 1073 for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) { 1074 error = gfs2_dir_read_data(dip, (char *)buf,
··· 955 /* Change the pointers. 956 Don't bother distinguishing stuffed from non-stuffed. 957 This code is complicated enough already. */ 958 + lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS); 959 + if (!lp) { 960 + error = -ENOMEM; 961 + goto fail_brelse; 962 + } 963 + 964 /* Change the pointers */ 965 for (x = 0; x < half_len; x++) 966 lp[x] = cpu_to_be64(bn); ··· 1063 1064 /* Allocate both the "from" and "to" buffers in one big chunk */ 1065 1066 + buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS); 1067 + if (!buf) 1068 + return -ENOMEM; 1069 1070 for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) { 1071 error = gfs2_dir_read_data(dip, (char *)buf,
+1 -3
fs/gfs2/file.c
··· 351 unsigned long last_index; 352 u64 pos = page->index << PAGE_CACHE_SHIFT; 353 unsigned int data_blocks, ind_blocks, rblocks; 354 - int alloc_required = 0; 355 struct gfs2_holder gh; 356 struct gfs2_alloc *al; 357 int ret; ··· 363 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); 364 set_bit(GIF_SW_PAGED, &ip->i_flags); 365 366 - ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required); 367 - if (ret || !alloc_required) 368 goto out_unlock; 369 ret = -ENOMEM; 370 al = gfs2_alloc_get(ip);
··· 351 unsigned long last_index; 352 u64 pos = page->index << PAGE_CACHE_SHIFT; 353 unsigned int data_blocks, ind_blocks, rblocks; 354 struct gfs2_holder gh; 355 struct gfs2_alloc *al; 356 int ret; ··· 364 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); 365 set_bit(GIF_SW_PAGED, &ip->i_flags); 366 367 + if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) 368 goto out_unlock; 369 ret = -ENOMEM; 370 al = gfs2_alloc_get(ip);
+64 -41
fs/gfs2/glock.c
··· 328 } 329 330 /** 331 * do_promote - promote as many requests as possible on the current queue 332 * @gl: The glock 333 * ··· 399 } 400 if (gh->gh_list.prev == &gl->gl_holders) 401 return 1; 402 break; 403 } 404 return 0; 405 - } 406 - 407 - /** 408 - * do_error - Something unexpected has happened during a lock request 409 - * 410 - */ 411 - 412 - static inline void do_error(struct gfs2_glock *gl, const int ret) 413 - { 414 - struct gfs2_holder *gh, *tmp; 415 - 416 - list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 417 - if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 418 - continue; 419 - if (ret & LM_OUT_ERROR) 420 - gh->gh_error = -EIO; 421 - else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) 422 - gh->gh_error = GLR_TRYFAILED; 423 - else 424 - continue; 425 - list_del_init(&gh->gh_list); 426 - trace_gfs2_glock_queue(gh, 0); 427 - gfs2_holder_wake(gh); 428 - } 429 } 430 431 /** ··· 707 { 708 unsigned long delay = 0; 709 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 710 - struct gfs2_holder *gh; 711 int drop_ref = 0; 712 - 713 - if (unlikely(test_bit(GLF_FROZEN, &gl->gl_flags))) { 714 - spin_lock(&gl->gl_spin); 715 - gh = find_first_waiter(gl); 716 - if (gh && (gh->gh_flags & LM_FLAG_NOEXP) && 717 - test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) 718 - set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 719 - spin_unlock(&gl->gl_spin); 720 - } 721 722 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { 723 finish_xmote(gl, gl->gl_reply); ··· 1063 1064 spin_lock(&gl->gl_spin); 1065 add_to_queue(gh); 1066 run_queue(gl, 1); 1067 spin_unlock(&gl->gl_spin); 1068 ··· 1323 } 1324 1325 /** 1326 * gfs2_glock_complete - Callback used by locking 1327 * @gl: Pointer to the glock 1328 * @ret: The return value from the dlm ··· 1362 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1363 { 1364 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 1365 gl->gl_reply = ret; 1366 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) { 1367 - struct gfs2_holder *gh; 1368 spin_lock(&gl->gl_spin); 1369 - gh = find_first_waiter(gl); 1370 - if ((!(gh && (gh->gh_flags & LM_FLAG_NOEXP)) && 1371 - (gl->gl_target != LM_ST_UNLOCKED)) || 1372 - ((ret & ~LM_OUT_ST_MASK) != 0)) 1373 set_bit(GLF_FROZEN, &gl->gl_flags); 1374 - spin_unlock(&gl->gl_spin); 1375 - if (test_bit(GLF_FROZEN, &gl->gl_flags)) 1376 return; 1377 } 1378 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1379 gfs2_glock_hold(gl);
··· 328 } 329 330 /** 331 + * do_error - Something unexpected has happened during a lock request 332 + * 333 + */ 334 + 335 + static inline void do_error(struct gfs2_glock *gl, const int ret) 336 + { 337 + struct gfs2_holder *gh, *tmp; 338 + 339 + list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 340 + if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 341 + continue; 342 + if (ret & LM_OUT_ERROR) 343 + gh->gh_error = -EIO; 344 + else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) 345 + gh->gh_error = GLR_TRYFAILED; 346 + else 347 + continue; 348 + list_del_init(&gh->gh_list); 349 + trace_gfs2_glock_queue(gh, 0); 350 + gfs2_holder_wake(gh); 351 + } 352 + } 353 + 354 + /** 355 + * do_promote - promote as many requests as possible on the current queue 356 * @gl: The glock 357 * ··· 375 } 376 if (gh->gh_list.prev == &gl->gl_holders) 377 return 1; 378 + do_error(gl, 0); 379 break; 380 } 381 return 0; 382 } 383 384 /** ··· 706 { 707 unsigned long delay = 0; 708 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 709 int drop_ref = 0; 710 711 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { 712 finish_xmote(gl, gl->gl_reply); ··· 1072 1073 spin_lock(&gl->gl_spin); 1074 add_to_queue(gh); 1075 + if ((LM_FLAG_NOEXP & gh->gh_flags) && 1076 + test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) 1077 + set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1078 run_queue(gl, 1); 1079 spin_unlock(&gl->gl_spin); 1080 ··· 1329 } 1330 1331 /** 1332 + * gfs2_should_freeze - Figure out if glock should be frozen 1333 + * @gl: The glock in question 1334 + * 1335 + * Glocks are not frozen if (a) the result of the dlm operation is 1336 + * an error, (b) the locking operation was an unlock operation or 1337 + * (c) if there is a "noexp" flagged request anywhere in the queue 1338 + * 1339 + * Returns: 1 if freezing should occur, 0 otherwise 1340 + */ 1341 + 1342 + static int gfs2_should_freeze(const struct gfs2_glock *gl) 1343 + { 1344 + const struct gfs2_holder *gh; 1345 + 1346 + if (gl->gl_reply & ~LM_OUT_ST_MASK) 1347 + return 0; 1348 + if (gl->gl_target == LM_ST_UNLOCKED) 1349 + return 0; 1350 + 1351 + list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1352 + if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 1353 + continue; 1354 + if (LM_FLAG_NOEXP & gh->gh_flags) 1355 + return 0; 1356 + } 1357 + 1358 + return 1; 1359 + } 1360 + 1361 + /** 1362 * gfs2_glock_complete - Callback used by locking 1363 * @gl: Pointer to the glock 1364 * @ret: The return value from the dlm ··· 1338 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1339 { 1340 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 1341 + 1342 gl->gl_reply = ret; 1343 + 1344 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) { 1345 spin_lock(&gl->gl_spin); 1346 + if (gfs2_should_freeze(gl)) { 1347 + set_bit(GLF_FROZEN, &gl->gl_flags); 1348 + spin_unlock(&gl->gl_spin); 1349 return; 1350 + } 1351 + spin_unlock(&gl->gl_spin); 1352 } 1353 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1354 gfs2_glock_hold(gl);
+1
fs/gfs2/incore.h
··· 460 SDF_NOBARRIERS = 3, 461 SDF_NORECOVERY = 4, 462 SDF_DEMOTE = 5, 463 }; 464 465 #define GFS2_FSNAME_LEN 256
··· 460 SDF_NOBARRIERS = 3, 461 SDF_NORECOVERY = 4, 462 SDF_DEMOTE = 5, 463 + SDF_NOJOURNALID = 6, 464 }; 465 466 #define GFS2_FSNAME_LEN 256
+25 -2
fs/gfs2/ops_fstype.c
··· 76 77 sb->s_fs_info = sdp; 78 sdp->sd_vfs = sb; 79 - 80 gfs2_tune_init(&sdp->sd_tune); 81 82 init_waitqueue_head(&sdp->sd_glock_wait); ··· 1050 ret = match_int(&tmp[0], &option); 1051 if (ret || option < 0) 1052 goto hostdata_error; 1053 - ls->ls_jid = option; 1054 break; 1055 case Opt_id: 1056 /* Obsolete, but left for backward compat purposes */ ··· 1101 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) && 1102 lm->lm_unmount) 1103 lm->lm_unmount(sdp); 1104 } 1105 1106 void gfs2_online_uevent(struct gfs2_sbd *sdp) ··· 1212 error = init_sb(sdp, silent); 1213 if (error) 1214 goto fail_locking; 1215 1216 error = init_inodes(sdp, DO); 1217 if (error)
··· 76 77 sb->s_fs_info = sdp; 78 sdp->sd_vfs = sb; 79 + set_bit(SDF_NOJOURNALID, &sdp->sd_flags); 80 gfs2_tune_init(&sdp->sd_tune); 81 82 init_waitqueue_head(&sdp->sd_glock_wait); ··· 1050 ret = match_int(&tmp[0], &option); 1051 if (ret || option < 0) 1052 goto hostdata_error; 1053 + if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags)) 1054 + ls->ls_jid = option; 1055 break; 1056 case Opt_id: 1057 /* Obsolete, but left for backward compat purposes */ ··· 1100 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) && 1101 lm->lm_unmount) 1102 lm->lm_unmount(sdp); 1103 + } 1104 + 1105 + static int gfs2_journalid_wait(void *word) 1106 + { 1107 + if (signal_pending(current)) 1108 + return -EINTR; 1109 + schedule(); 1110 + return 0; 1111 + } 1112 + 1113 + static int wait_on_journal(struct gfs2_sbd *sdp) 1114 + { 1115 + if (sdp->sd_args.ar_spectator) 1116 + return 0; 1117 + if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) 1118 + return 0; 1119 + 1120 + return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, gfs2_journalid_wait, TASK_INTERRUPTIBLE); 1121 } 1122 1123 void gfs2_online_uevent(struct gfs2_sbd *sdp) ··· 1193 error = init_sb(sdp, silent); 1194 if (error) 1195 goto fail_locking; 1196 + 1197 + error = wait_on_journal(sdp); 1198 + if (error) 1199 + goto fail_sb; 1200 1201 error = init_inodes(sdp, DO); 1202 if (error)
+3 -12
fs/gfs2/quota.c
··· 787 goto out; 788 789 for (x = 0; x < num_qd; x++) { 790 - int alloc_required; 791 - 792 offset = qd2offset(qda[x]); 793 - error = gfs2_write_alloc_required(ip, offset, 794 - sizeof(struct gfs2_quota), 795 - &alloc_required); 796 - if (error) 797 - goto out_gunlock; 798 - if (alloc_required) 799 nalloc++; 800 } 801 ··· 1578 goto out_i; 1579 1580 offset = qd2offset(qd); 1581 - error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota), 1582 - &alloc_required); 1583 - if (error) 1584 - goto out_i; 1585 if (alloc_required) { 1586 al = gfs2_alloc_get(ip); 1587 if (al == NULL)
··· 787 goto out; 788 789 for (x = 0; x < num_qd; x++) { 790 offset = qd2offset(qda[x]); 791 + if (gfs2_write_alloc_required(ip, offset, 792 + sizeof(struct gfs2_quota))) 793 nalloc++; 794 } 795 ··· 1584 goto out_i; 1585 1586 offset = qd2offset(qd); 1587 + alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota)); 1588 if (alloc_required) { 1589 al = gfs2_alloc_get(ip); 1590 if (al == NULL)
+3 -6
fs/gfs2/super.c
··· 342 { 343 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); 344 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); 345 - int ar; 346 - int error; 347 348 if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) || 349 (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) { ··· 350 } 351 jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; 352 353 - error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar); 354 - if (!error && ar) { 355 gfs2_consist_inode(ip); 356 - error = -EIO; 357 } 358 359 - return error; 360 } 361 362 /**
··· 342 { 343 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); 344 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); 345 346 if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) || 347 (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) { ··· 352 } 353 jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; 354 355 + if (gfs2_write_alloc_required(ip, 0, ip->i_disksize)) { 356 gfs2_consist_inode(ip); 357 + return -EIO; 358 } 359 360 + return 0; 361 } 362 363 /**
+54 -3
fs/gfs2/sys.c
··· 325 return sprintf(buf, "%d\n", ls->ls_first); 326 } 327 328 static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf) 329 { 330 struct lm_lockstruct *ls = &sdp->sd_lockstruct; ··· 401 return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid); 402 } 403 404 #define GDLM_ATTR(_name,_mode,_show,_store) \ 405 static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store) 406 407 GDLM_ATTR(proto_name, 0444, proto_name_show, NULL); 408 GDLM_ATTR(block, 0644, block_show, block_store); 409 GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store); 410 - GDLM_ATTR(jid, 0444, jid_show, NULL); 411 - GDLM_ATTR(first, 0444, lkfirst_show, NULL); 412 GDLM_ATTR(first_done, 0444, first_done_show, NULL); 413 GDLM_ATTR(recover, 0600, NULL, recover_store); 414 GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); ··· 615 616 add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name); 617 add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); 618 - if (!sdp->sd_args.ar_spectator) 619 add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid); 620 if (gfs2_uuid_valid(uuid)) 621 add_uevent_var(env, "UUID=%pUB", uuid);
··· 325 return sprintf(buf, "%d\n", ls->ls_first); 326 } 327 328 + static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 329 + { 330 + unsigned first; 331 + int rv; 332 + 333 + rv = sscanf(buf, "%u", &first); 334 + if (rv != 1 || first > 1) 335 + return -EINVAL; 336 + spin_lock(&sdp->sd_jindex_spin); 337 + rv = -EBUSY; 338 + if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) 339 + goto out; 340 + rv = -EINVAL; 341 + if (sdp->sd_args.ar_spectator) 342 + goto out; 343 + if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) 344 + goto out; 345 + sdp->sd_lockstruct.ls_first = first; 346 + rv = 0; 347 + out: 348 + spin_unlock(&sdp->sd_jindex_spin); 349 + return rv ? rv : len; 350 + } 351 + 352 static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf) 353 { 354 struct lm_lockstruct *ls = &sdp->sd_lockstruct; ··· 377 return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid); 378 } 379 380 + static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 381 + { 382 + unsigned jid; 383 + int rv; 384 + 385 + rv = sscanf(buf, "%u", &jid); 386 + if (rv != 1) 387 + return -EINVAL; 388 + 389 + spin_lock(&sdp->sd_jindex_spin); 390 + rv = -EINVAL; 391 + if (sdp->sd_args.ar_spectator) 392 + goto out; 393 + if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) 394 + goto out; 395 + rv = -EBUSY; 396 + if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) 397 + goto out; 398 + sdp->sd_lockstruct.ls_jid = jid; 399 + smp_mb__after_clear_bit(); 400 + wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); 401 + rv = 0; 402 + out: 403 + spin_unlock(&sdp->sd_jindex_spin); 404 + return rv ? rv : len; 405 + } 406 + 407 #define GDLM_ATTR(_name,_mode,_show,_store) \ 408 static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store) 409 410 GDLM_ATTR(proto_name, 0444, proto_name_show, NULL); 411 GDLM_ATTR(block, 0644, block_show, block_store); 412 GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store); 413 + GDLM_ATTR(jid, 0644, jid_show, jid_store); 414 + GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store); 415 GDLM_ATTR(first_done, 0444, first_done_show, NULL); 416 GDLM_ATTR(recover, 0600, NULL, recover_store); 417 GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); ··· 564 565 add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name); 566 add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); 567 + if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) 568 add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid); 569 if (gfs2_uuid_valid(uuid)) 570 add_uevent_var(env, "UUID=%pUB", uuid);