
Merge tag 'gfs2-for-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

- Log space and revoke accounting rework to fix some failed asserts.

- Local resource group glock sharing for better local performance (a
  short usage sketch follows this list).

- Add support for version 1802 filesystems: trusted xattr support and
'-o rgrplvb' mounts by default.

- Actually synchronize on the inode glock's FREEING bit during withdraw
("gfs2: fix glock confusion in function signal_our_withdraw").

- Fix parallel recovery of multiple journals ("gfs2: keep bios separate
for each journal").

- Various other bug fixes.
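
The LM_FLAG_NODE_SCOPE sharing mentioned above is exposed as a new glock
holder flag. As a hedged sketch (illustrative only, not taken verbatim from
the patch set), a call site such as the rgrp code in the diffs below opts in
like this:

	struct gfs2_holder rgd_gh;
	int error;

	/*
	 * Take the rgrp glock in EX, but allow other holders on this node
	 * that also pass LM_FLAG_NODE_SCOPE to be granted concurrently.
	 * Cluster-wide, DLM still sees a single exclusive lock; rgrp-local
	 * state is serialized separately (rd_mutex in this series).
	 */
	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &rgd_gh);
	if (error)
		return error;
	/* ... operate on the resource group ... */
	gfs2_glock_dq_uninit(&rgd_gh);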

* tag 'gfs2-for-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (49 commits)
gfs2: Don't get stuck with I/O plugged in gfs2_ail1_flush
gfs2: Per-revoke accounting in transactions
gfs2: Rework the log space allocation logic
gfs2: Minor calc_reserved cleanup
gfs2: Use resource group glock sharing
gfs2: Allow node-wide exclusive glock sharing
gfs2: Add local resource group locking
gfs2: Add per-reservation reserved block accounting
gfs2: Rename rs_{free -> requested} and rd_{reserved -> requested}
gfs2: Check for active reservation in gfs2_release
gfs2: Don't search for unreserved space twice
gfs2: Only pass reservation down to gfs2_rbm_find
gfs2: Also reflect single-block allocations in rgd->rd_extfail_pt
gfs2: Recursive gfs2_quota_hold in gfs2_iomap_end
gfs2: Add trusted xattr support
gfs2: Enable rgrplvb for sb_fs_format 1802
gfs2: Don't skip dlm unlock if glock has an lvb
gfs2: Lock imbalance on error path in gfs2_recover_one
gfs2: Move function gfs2_ail_empty_tr
gfs2: Get rid of current_tail()
...

+970 -665
+5 -5
fs/gfs2/bmap.c
···
 
 	gfs2_inplace_release(ip);
 
+	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
+		gfs2_quota_unlock(ip);
+
 	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
 		/* Deallocate blocks that were just allocated. */
 		loff_t blockmask = i_blocksize(inode) - 1;
···
 			punch_hole(ip, pos, end - pos);
 		}
 	}
-
-	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
-		gfs2_quota_unlock(ip);
 
 	if (unlikely(!written))
 		goto out_unlock;
···
 			goto out;
 		}
 		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
-					 0, rd_gh);
+					 LM_FLAG_NODE_SCOPE, rd_gh);
 		if (ret)
 			goto out;
 
 		/* Must be done with the rgrp glock held: */
 		if (gfs2_rs_active(&ip->i_res) &&
-		    rgd == ip->i_res.rs_rbm.rgd)
+		    rgd == ip->i_res.rs_rgd)
 			gfs2_rs_deltree(&ip->i_res);
 	}
 
+4 -4
fs/gfs2/file.c
···
 	kfree(file->private_data);
 	file->private_data = NULL;
 
-	if (file->f_mode & FMODE_WRITE) {
+	if (gfs2_rs_active(&ip->i_res))
 		gfs2_rs_delete(ip, &inode->i_writecount);
+	if (file->f_mode & FMODE_WRITE)
 		gfs2_qa_put(ip);
-	}
 	return 0;
 }
···
 		goto out_qunlock;
 
 	/* check if the selected rgrp limits our max_blks further */
-	if (ap.allowed && ap.allowed < max_blks)
-		max_blks = ap.allowed;
+	if (ip->i_res.rs_reserved < max_blks)
+		max_blks = ip->i_res.rs_reserved;
 
 	/* Almost done. Calculate bytes that can be written using
 	 * max_blks. We also recompute max_bytes, data_blocks and
+19 -3
fs/gfs2/glock.c
···
 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
 {
 	const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);
-	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
-	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
-		return 0;
+
+	if (gh != gh_head) {
+		/**
+		 * Here we make a special exception to grant holders who agree
+		 * to share the EX lock with other holders who also have the
+		 * bit set. If the original holder has the LM_FLAG_NODE_SCOPE bit
+		 * is set, we grant more holders with the bit set.
+		 */
+		if (gh_head->gh_state == LM_ST_EXCLUSIVE &&
+		    (gh_head->gh_flags & LM_FLAG_NODE_SCOPE) &&
+		    gh->gh_state == LM_ST_EXCLUSIVE &&
+		    (gh->gh_flags & LM_FLAG_NODE_SCOPE))
+			return 1;
+		if ((gh->gh_state == LM_ST_EXCLUSIVE ||
+		     gh_head->gh_state == LM_ST_EXCLUSIVE))
+			return 0;
+	}
 	if (gl->gl_state == gh->gh_state)
 		return 1;
 	if (gh->gh_flags & GL_EXACT)
···
 		*p++ = 'A';
 	if (flags & LM_FLAG_PRIORITY)
 		*p++ = 'p';
+	if (flags & LM_FLAG_NODE_SCOPE)
+		*p++ = 'n';
 	if (flags & GL_ASYNC)
 		*p++ = 'a';
 	if (flags & GL_EXACT)
+6
fs/gfs2/glock.h
···
  * request and directly join the other shared lock. A shared lock request
  * without the priority flag might be forced to wait until the deferred
  * requested had acquired and released the lock.
+ *
+ * LM_FLAG_NODE_SCOPE
+ * This holder agrees to share the lock within this node. In other words,
+ * the glock is held in EX mode according to DLM, but local holders on the
+ * same node can share it.
  */
 
 #define LM_FLAG_TRY		0x0001
···
 #define LM_FLAG_NOEXP		0x0004
 #define LM_FLAG_ANY		0x0008
 #define LM_FLAG_PRIORITY	0x0010
+#define LM_FLAG_NODE_SCOPE	0x0020
 #define GL_ASYNC		0x0040
 #define GL_EXACT		0x0080
 #define GL_SKIP			0x0100
+12 -26
fs/gfs2/glops.c
···
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_trans tr;
+	unsigned int revokes;
 	int ret;
 
-	memset(&tr, 0, sizeof(tr));
-	INIT_LIST_HEAD(&tr.tr_buf);
-	INIT_LIST_HEAD(&tr.tr_databuf);
-	INIT_LIST_HEAD(&tr.tr_ail1_list);
-	INIT_LIST_HEAD(&tr.tr_ail2_list);
-	tr.tr_revokes = atomic_read(&gl->gl_ail_count);
+	revokes = atomic_read(&gl->gl_ail_count);
 
-	if (!tr.tr_revokes) {
+	if (!revokes) {
 		bool have_revokes;
 		bool log_in_flight;
···
 		return 0;
 	}
 
-	/* A shortened, inline version of gfs2_trans_begin()
-	 * tr->alloced is not set since the transaction structure is
-	 * on the stack */
-	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
-	tr.tr_ip = _RET_IP_;
-	ret = gfs2_log_reserve(sdp, tr.tr_reserved);
-	if (ret < 0)
-		return ret;
-	WARN_ON_ONCE(current->journal_info);
-	current->journal_info = &tr;
-
-	__gfs2_ail_flush(gl, 0, tr.tr_revokes);
-
+	memset(&tr, 0, sizeof(tr));
+	set_bit(TR_ONSTACK, &tr.tr_flags);
+	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
+	if (ret)
+		goto flush;
+	__gfs2_ail_flush(gl, 0, revokes);
 	gfs2_trans_end(sdp);
+
 flush:
 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 		       GFS2_LFC_AIL_EMPTY_GL);
···
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	unsigned int revokes = atomic_read(&gl->gl_ail_count);
-	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
 	int ret;
 
 	if (!revokes)
 		return;
 
-	while (revokes > max_revokes)
-		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
-
-	ret = gfs2_trans_begin(sdp, 0, max_revokes);
+	ret = gfs2_trans_begin(sdp, 0, revokes);
 	if (ret)
 		return;
-	__gfs2_ail_flush(gl, fsync, max_revokes);
+	__gfs2_ail_flush(gl, fsync, revokes);
 	gfs2_trans_end(sdp);
 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 		       GFS2_LFC_AIL_FLUSH);
+16 -38
fs/gfs2/incore.h
···
 #include <linux/percpu.h>
 #include <linux/lockref.h>
 #include <linux/rhashtable.h>
+#include <linux/mutex.h>
 
 #define DIO_WAIT	0x00000010
 #define DIO_METADATA	0x00000020
···
 	u32 rd_data;		/* num of data blocks in rgrp */
 	u32 rd_bitbytes;	/* number of bytes in data bitmaps */
 	u32 rd_free;
-	u32 rd_reserved;	/* number of blocks reserved */
+	u32 rd_requested;	/* number of blocks in rd_rstree */
+	u32 rd_reserved;	/* number of reserved blocks */
 	u32 rd_free_clone;
 	u32 rd_dinodes;
 	u64 rd_igeneration;
···
 #define GFS2_RDF_PREFERRED	0x80000000 /* This rgrp is preferred */
 #define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
 	spinlock_t rd_rsspin;	/* protects reservation related vars */
+	struct mutex rd_mutex;
 	struct rb_root rd_rstree;	/* multi-block reservation tree */
 };
-
-struct gfs2_rbm {
-	struct gfs2_rgrpd *rgd;
-	u32 offset;	/* The offset is bitmap relative */
-	int bii;	/* Bitmap index */
-};
-
-static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
-{
-	return rbm->rgd->rd_bits + rbm->bii;
-}
-
-static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
-{
-	BUG_ON(rbm->offset >= rbm->rgd->rd_data);
-	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
-		rbm->offset;
-}
-
-static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
-			       const struct gfs2_rbm *rbm2)
-{
-	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
-	       (rbm1->offset == rbm2->offset);
-}
 
 enum gfs2_state_bits {
 	BH_Pinned = BH_PrivateStart,
···
  */
 
 struct gfs2_blkreserv {
-	struct rb_node rs_node;	/* link to other block reservations */
-	struct gfs2_rbm rs_rbm;	/* Start of reservation */
-	u32 rs_free;		/* how many blocks are still free */
+	struct rb_node rs_node;	/* node within rd_rstree */
+	struct gfs2_rgrpd *rs_rgd;
+	u64 rs_start;
+	u32 rs_requested;
+	u32 rs_reserved;	/* number of reserved blocks */
 };
 
 /*
···
 enum {
 	TR_TOUCHED = 1,
 	TR_ATTACHED = 2,
-	TR_ALLOCED = 3,
+	TR_ONSTACK = 3,
 };
 
 struct gfs2_trans {
···
 	unsigned int tr_num_buf_rm;
 	unsigned int tr_num_databuf_rm;
 	unsigned int tr_num_revoke;
-	unsigned int tr_num_revoke_rm;
 
 	struct list_head tr_list;
 	struct list_head tr_databuf;
···
 	unsigned int nr_extents;
 	struct work_struct jd_work;
 	struct inode *jd_inode;
+	struct bio *jd_log_bio;
 	unsigned long jd_flags;
 #define JDF_RECOVERY 1
 	unsigned int jd_jid;
···
 	unsigned int ar_errors:2;	/* errors=withdraw | panic */
 	unsigned int ar_nobarrier:1;	/* do not send barriers */
 	unsigned int ar_rgrplvb:1;	/* use lvbs for rgrp info */
+	unsigned int ar_got_rgrplvb:1;	/* Was the rgrplvb opt given? */
 	unsigned int ar_loccookie:1;	/* use location based readdir
 					   cookies */
 	s32 ar_commit;			/* Commit interval */
···
 	struct gfs2_trans *sd_log_tr;
 	unsigned int sd_log_blks_reserved;
-	int sd_log_committed_revoke;
 
 	atomic_t sd_log_pinned;
 	unsigned int sd_log_num_revoke;
···
 	atomic_t sd_log_thresh2;
 	atomic_t sd_log_blks_free;
 	atomic_t sd_log_blks_needed;
+	atomic_t sd_log_revokes_available;
 	wait_queue_head_t sd_log_waitq;
 	wait_queue_head_t sd_logd_waitq;
 
 	u64 sd_log_sequence;
-	unsigned int sd_log_head;
-	unsigned int sd_log_tail;
 	int sd_log_idle;
 
 	struct rw_semaphore sd_log_flush_lock;
 	atomic_t sd_log_in_flight;
-	struct bio *sd_log_bio;
 	wait_queue_head_t sd_log_flush_wait;
 	int sd_log_error; /* First log error */
 	wait_queue_head_t sd_withdraw_wait;
 
-	atomic_t sd_reserving_log;
-	wait_queue_head_t sd_reserving_log_wait;
-
+	unsigned int sd_log_tail;
+	unsigned int sd_log_flush_tail;
+	unsigned int sd_log_head;
 	unsigned int sd_log_flush_head;
 
 	spinlock_t sd_ail_lock;
+3 -3
fs/gfs2/inode.c
···
 	if (!rgd)
 		goto out_inodes;
 
-	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
+	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE, ghs + 2);
 
 
 	error = gfs2_glock_nq(ghs); /* parent */
···
 		error = -ENOENT;
 		goto out_gunlock;
 	}
-	error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0,
-				   &rd_gh);
+	error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE,
+				   LM_FLAG_NODE_SCOPE, &rd_gh);
 	if (error)
 		goto out_gunlock;
 }
+2 -6
fs/gfs2/lock_dlm.c
···
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	int lvb_needs_unlock = 0;
 	int error;
 
 	if (gl->gl_lksb.sb_lkid == 0) {
···
 	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
 	gfs2_update_request_times(gl);
 
-	/* don't want to skip dlm_unlock writing the lvb when lock is ex */
-
-	if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
-		lvb_needs_unlock = 1;
+	/* don't want to skip dlm_unlock writing the lvb when lock has one */
 
 	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
-	    !lvb_needs_unlock) {
+	    !gl->gl_lksb.sb_lvbptr) {
 		gfs2_glock_free(gl);
 		return;
 	}
+303 -228
fs/gfs2/log.c
···
 	unsigned int blks;
 	unsigned int first, second;
 
+	/* The initial struct gfs2_log_descriptor block */
 	blks = 1;
 	first = sdp->sd_ldptrs;
 
 	if (nstruct > first) {
+		/* Subsequent struct gfs2_meta_header blocks */
 		second = sdp->sd_inptrs;
 		blks += DIV_ROUND_UP(nstruct - first, second);
 	}
···
 static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
 			       struct writeback_control *wbc,
-			       struct gfs2_trans *tr)
+			       struct gfs2_trans *tr, struct blk_plug *plug)
 __releases(&sdp->sd_ail_lock)
 __acquires(&sdp->sd_ail_lock)
 {
···
 			continue;
 		spin_unlock(&sdp->sd_ail_lock);
 		ret = generic_writepages(mapping, wbc);
+		if (need_resched()) {
+			blk_finish_plug(plug);
+			cond_resched();
+			blk_start_plug(plug);
+		}
 		spin_lock(&sdp->sd_ail_lock);
 		if (ret == -ENODATA) /* if a jdata write into a new hole */
 			ret = 0; /* ignore it */
···
 	list_for_each_entry_reverse(tr, head, tr_list) {
 		if (wbc->nr_to_write <= 0)
 			break;
-		ret = gfs2_ail1_start_one(sdp, wbc, tr);
+		ret = gfs2_ail1_start_one(sdp, wbc, tr, &plug);
 		if (ret) {
 			if (ret == -EBUSY)
 				goto restart;
···
 	};
 
 	return gfs2_ail1_flush(sdp, &wbc);
+}
+
+static void gfs2_log_update_flush_tail(struct gfs2_sbd *sdp)
+{
+	unsigned int new_flush_tail = sdp->sd_log_head;
+	struct gfs2_trans *tr;
+
+	if (!list_empty(&sdp->sd_ail1_list)) {
+		tr = list_last_entry(&sdp->sd_ail1_list,
+				     struct gfs2_trans, tr_list);
+		new_flush_tail = tr->tr_first;
+	}
+	sdp->sd_log_flush_tail = new_flush_tail;
+}
+
+static void gfs2_log_update_head(struct gfs2_sbd *sdp)
+{
+	unsigned int new_head = sdp->sd_log_flush_head;
+
+	if (sdp->sd_log_flush_tail == sdp->sd_log_head)
+		sdp->sd_log_flush_tail = new_head;
+	sdp->sd_log_head = new_head;
+}
+
+/**
+ * gfs2_ail_empty_tr - empty one of the ail lists of a transaction
+ */
+
+static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+			      struct list_head *head)
+{
+	struct gfs2_bufdata *bd;
+
+	while (!list_empty(head)) {
+		bd = list_first_entry(head, struct gfs2_bufdata,
+				      bd_ail_st_list);
+		gfs2_assert(sdp, bd->bd_tr == tr);
+		gfs2_remove_from_ail(bd);
+	}
 }
 
 /**
···
 		else
 			oldest_tr = 0;
 	}
+	gfs2_log_update_flush_tail(sdp);
 	ret = list_empty(&sdp->sd_ail1_list);
 	spin_unlock(&sdp->sd_ail_lock);
 
···
 	spin_unlock(&sdp->sd_ail_lock);
 }
 
-/**
- * gfs2_ail_empty_tr - empty one of the ail lists for a transaction
- */
-
-static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
-			      struct list_head *head)
+static void __ail2_empty(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
-	struct gfs2_bufdata *bd;
-
-	while (!list_empty(head)) {
-		bd = list_first_entry(head, struct gfs2_bufdata,
-				      bd_ail_st_list);
-		gfs2_assert(sdp, bd->bd_tr == tr);
-		gfs2_remove_from_ail(bd);
-	}
+	gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
+	list_del(&tr->tr_list);
+	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
+	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
+	gfs2_trans_free(sdp, tr);
 }
 
 static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
 {
-	struct gfs2_trans *tr, *safe;
+	struct list_head *ail2_list = &sdp->sd_ail2_list;
 	unsigned int old_tail = sdp->sd_log_tail;
-	int wrap = (new_tail < old_tail);
-	int a, b, rm;
+	struct gfs2_trans *tr, *safe;
 
 	spin_lock(&sdp->sd_ail_lock);
-
-	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
-		a = (old_tail <= tr->tr_first);
-		b = (tr->tr_first < new_tail);
-		rm = (wrap) ? (a || b) : (a && b);
-		if (!rm)
-			continue;
-
-		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
-		list_del(&tr->tr_list);
-		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
-		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
-		gfs2_trans_free(sdp, tr);
+	if (old_tail <= new_tail) {
+		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
+			if (old_tail <= tr->tr_first && tr->tr_first < new_tail)
+				__ail2_empty(sdp, tr);
+		}
+	} else {
+		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
+			if (old_tail <= tr->tr_first || tr->tr_first < new_tail)
+				__ail2_empty(sdp, tr);
+		}
 	}
-
 	spin_unlock(&sdp->sd_ail_lock);
+}
+
+/**
+ * gfs2_log_is_empty - Check if the log is empty
+ * @sdp: The GFS2 superblock
+ */
+
+bool gfs2_log_is_empty(struct gfs2_sbd *sdp) {
+	return atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks;
+}
+
+static bool __gfs2_log_try_reserve_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
+{
+	unsigned int available;
+
+	available = atomic_read(&sdp->sd_log_revokes_available);
+	while (available >= revokes) {
+		if (atomic_try_cmpxchg(&sdp->sd_log_revokes_available,
+				       &available, available - revokes))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * gfs2_log_release_revokes - Release a given number of revokes
+ * @sdp: The GFS2 superblock
+ * @revokes: The number of revokes to release
+ *
+ * sdp->sd_log_flush_lock must be held.
+ */
+void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
+{
+	if (revokes)
+		atomic_add(revokes, &sdp->sd_log_revokes_available);
 }
 
 /**
···
 
 void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
 {
-
 	atomic_add(blks, &sdp->sd_log_blks_free);
 	trace_gfs2_log_blocks(sdp, blks);
 	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
 				  sdp->sd_jdesc->jd_blocks);
-	up_read(&sdp->sd_log_flush_lock);
+	if (atomic_read(&sdp->sd_log_blks_needed))
+		wake_up(&sdp->sd_log_waitq);
 }
 
 /**
- * gfs2_log_reserve - Make a log reservation
+ * __gfs2_log_try_reserve - Try to make a log reservation
  * @sdp: The GFS2 superblock
  * @blks: The number of blocks to reserve
+ * @taboo_blks: The number of blocks to leave free
  *
- * Note that we never give out the last few blocks of the journal. Thats
- * due to the fact that there is a small number of header blocks
- * associated with each log flush. The exact number can't be known until
- * flush time, so we ensure that we have just enough free blocks at all
- * times to avoid running out during a log flush.
+ * Try to do the same as __gfs2_log_reserve(), but fail if no more log
+ * space is immediately available.
+ */
+static bool __gfs2_log_try_reserve(struct gfs2_sbd *sdp, unsigned int blks,
+				   unsigned int taboo_blks)
+{
+	unsigned wanted = blks + taboo_blks;
+	unsigned int free_blocks;
+
+	free_blocks = atomic_read(&sdp->sd_log_blks_free);
+	while (free_blocks >= wanted) {
+		if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, &free_blocks,
+				       free_blocks - blks)) {
+			trace_gfs2_log_blocks(sdp, -blks);
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * __gfs2_log_reserve - Make a log reservation
+ * @sdp: The GFS2 superblock
+ * @blks: The number of blocks to reserve
+ * @taboo_blks: The number of blocks to leave free
+ *
+ * @taboo_blks is set to 0 for logd, and to GFS2_LOG_FLUSH_MIN_BLOCKS
+ * for all other processes. This ensures that when the log is almost full,
+ * logd will still be able to call gfs2_log_flush one more time without
+ * blocking, which will advance the tail and make some more log space
+ * available.
  *
  * We no longer flush the log here, instead we wake up logd to do that
  * for us. To avoid the thundering herd and to ensure that we deal fairly
  * with queued waiters, we use an exclusive wait. This means that when we
  * get woken with enough journal space to get our reservation, we need to
  * wake the next waiter on the list.
- *
- * Returns: errno
  */
 
-int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
+static void __gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks,
+			       unsigned int taboo_blks)
 {
-	int ret = 0;
-	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
-	unsigned wanted = blks + reserved_blks;
-	DEFINE_WAIT(wait);
-	int did_wait = 0;
+	unsigned wanted = blks + taboo_blks;
 	unsigned int free_blocks;
 
-	if (gfs2_assert_warn(sdp, blks) ||
-	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
-		return -EINVAL;
 	atomic_add(blks, &sdp->sd_log_blks_needed);
-retry:
-	free_blocks = atomic_read(&sdp->sd_log_blks_free);
-	if (unlikely(free_blocks <= wanted)) {
-		do {
-			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
-					TASK_UNINTERRUPTIBLE);
+	for (;;) {
+		if (current != sdp->sd_logd_process)
 			wake_up(&sdp->sd_logd_waitq);
-			did_wait = 1;
-			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
-				io_schedule();
-			free_blocks = atomic_read(&sdp->sd_log_blks_free);
-		} while(free_blocks <= wanted);
-		finish_wait(&sdp->sd_log_waitq, &wait);
+		io_wait_event(sdp->sd_log_waitq,
+			(free_blocks = atomic_read(&sdp->sd_log_blks_free),
+			 free_blocks >= wanted));
+		do {
+			if (atomic_try_cmpxchg(&sdp->sd_log_blks_free,
+					       &free_blocks,
+					       free_blocks - blks))
+				goto reserved;
+		} while (free_blocks >= wanted);
 	}
-	atomic_inc(&sdp->sd_reserving_log);
-	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
-			   free_blocks - blks) != free_blocks) {
-		if (atomic_dec_and_test(&sdp->sd_reserving_log))
-			wake_up(&sdp->sd_reserving_log_wait);
-		goto retry;
-	}
-	atomic_sub(blks, &sdp->sd_log_blks_needed);
+
+reserved:
 	trace_gfs2_log_blocks(sdp, -blks);
-
-	/*
-	 * If we waited, then so might others, wake them up _after_ we get
-	 * our share of the log.
-	 */
-	if (unlikely(did_wait))
+	if (atomic_sub_return(blks, &sdp->sd_log_blks_needed))
 		wake_up(&sdp->sd_log_waitq);
+}
 
-	down_read(&sdp->sd_log_flush_lock);
-	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
-		gfs2_log_release(sdp, blks);
-		ret = -EROFS;
+/**
+ * gfs2_log_try_reserve - Try to make a log reservation
+ * @sdp: The GFS2 superblock
+ * @tr: The transaction
+ * @extra_revokes: The number of additional revokes reserved (output)
+ *
+ * This is similar to gfs2_log_reserve, but sdp->sd_log_flush_lock must be
+ * held for correct revoke accounting.
+ */
+
+bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+			  unsigned int *extra_revokes)
+{
+	unsigned int blks = tr->tr_reserved;
+	unsigned int revokes = tr->tr_revokes;
+	unsigned int revoke_blks = 0;
+
+	*extra_revokes = 0;
+	if (revokes && !__gfs2_log_try_reserve_revokes(sdp, revokes)) {
+		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
+		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
+		blks += revoke_blks;
 	}
-	if (atomic_dec_and_test(&sdp->sd_reserving_log))
-		wake_up(&sdp->sd_reserving_log_wait);
-	return ret;
+	if (!blks)
+		return true;
+	if (__gfs2_log_try_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS))
+		return true;
+	if (!revoke_blks)
+		gfs2_log_release_revokes(sdp, revokes);
+	return false;
+}
+
+/**
+ * gfs2_log_reserve - Make a log reservation
+ * @sdp: The GFS2 superblock
+ * @tr: The transaction
+ * @extra_revokes: The number of additional revokes reserved (output)
+ *
+ * sdp->sd_log_flush_lock must not be held.
+ */
+
+void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+		      unsigned int *extra_revokes)
+{
+	unsigned int blks = tr->tr_reserved;
+	unsigned int revokes = tr->tr_revokes;
+	unsigned int revoke_blks = 0;
+
+	*extra_revokes = 0;
+	if (revokes) {
+		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
+		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
+		blks += revoke_blks;
+	}
+	__gfs2_log_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS);
 }
 
 /**
···
 }
 
 /**
- * calc_reserved - Calculate the number of blocks to reserve when
- *                 refunding a transaction's unused buffers.
+ * calc_reserved - Calculate the number of blocks to keep reserved
  * @sdp: The GFS2 superblock
  *
  * This is complex. We need to reserve room for all our currently used
- * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
- * all our journaled data buffers for journaled files (e.g. files in the
+ * metadata blocks (e.g. normal file I/O rewriting file time stamps) and
+ * all our journaled data blocks for journaled files (e.g. files in the
  * meta_fs like rindex, or files for which chattr +j was done.)
- * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
- * will count it as free space (sd_log_blks_free) and corruption will follow.
+ * If we don't reserve enough space, corruption will follow.
  *
- * We can have metadata bufs and jdata bufs in the same journal. So each
- * type gets its own log header, for which we need to reserve a block.
- * In fact, each type has the potential for needing more than one header
- * in cases where we have more buffers than will fit on a journal page.
+ * We can have metadata blocks and jdata blocks in the same journal. Each
+ * type gets its own log descriptor, for which we need to reserve a block.
+ * In fact, each type has the potential for needing more than one log descriptor
+ * in cases where we have more blocks than will fit in a log descriptor.
  * Metadata journal entries take up half the space of journaled buffer entries.
- * Thus, metadata entries have buf_limit (502) and journaled buffers have
- * databuf_limit (251) before they cause a wrap around.
  *
  * Also, we need to reserve blocks for revoke journal entries and one for an
  * overall header for the lot.
···
  */
 static unsigned int calc_reserved(struct gfs2_sbd *sdp)
 {
-	unsigned int reserved = 0;
-	unsigned int mbuf;
-	unsigned int dbuf;
+	unsigned int reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
+	unsigned int blocks;
 	struct gfs2_trans *tr = sdp->sd_log_tr;
 
 	if (tr) {
-		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
-		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
-		reserved = mbuf + dbuf;
-		/* Account for header blocks */
-		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
-		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
+		blocks = tr->tr_num_buf_new - tr->tr_num_buf_rm;
+		reserved += blocks + DIV_ROUND_UP(blocks, buf_limit(sdp));
+		blocks = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
+		reserved += blocks + DIV_ROUND_UP(blocks, databuf_limit(sdp));
 	}
-
-	if (sdp->sd_log_committed_revoke > 0)
-		reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke);
-	/* One for the overall header */
-	if (reserved)
-		reserved++;
 	return reserved;
 }
 
-static unsigned int current_tail(struct gfs2_sbd *sdp)
+static void log_pull_tail(struct gfs2_sbd *sdp)
 {
-	struct gfs2_trans *tr;
-	unsigned int tail;
+	unsigned int new_tail = sdp->sd_log_flush_tail;
+	unsigned int dist;
 
-	spin_lock(&sdp->sd_ail_lock);
-
-	if (list_empty(&sdp->sd_ail1_list)) {
-		tail = sdp->sd_log_head;
-	} else {
-		tr = list_last_entry(&sdp->sd_ail1_list, struct gfs2_trans,
-				tr_list);
-		tail = tr->tr_first;
-	}
-
-	spin_unlock(&sdp->sd_ail_lock);
-
-	return tail;
-}
-
-static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
-{
-	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
-
+	if (new_tail == sdp->sd_log_tail)
+		return;
+	dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
 	ail2_empty(sdp, new_tail);
-
-	atomic_add(dist, &sdp->sd_log_blks_free);
-	trace_gfs2_log_blocks(sdp, dist);
-	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
-			     sdp->sd_jdesc->jd_blocks);
-
+	gfs2_log_release(sdp, dist);
 	sdp->sd_log_tail = new_tail;
 }
 
···
 }
 
 /**
- * gfs2_write_revokes - Add as many revokes to the system transaction as we can
+ * gfs2_flush_revokes - Add as many revokes to the system transaction as we can
  * @sdp: The GFS2 superblock
  *
  * Our usual strategy is to defer writing revokes as much as we can in the hope
···
 * been written back.  This will basically come at no cost now, and will save
 * us from having to keep track of those blocks on the AIL2 list later.
 */
-void gfs2_write_revokes(struct gfs2_sbd *sdp)
+void gfs2_flush_revokes(struct gfs2_sbd *sdp)
 {
 	/* number of revokes we still have room for */
-	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
+	unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available);
 
 	gfs2_log_lock(sdp);
-	while (sdp->sd_log_num_revoke > max_revokes)
-		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
-	max_revokes -= sdp->sd_log_num_revoke;
-	if (!sdp->sd_log_num_revoke) {
-		atomic_dec(&sdp->sd_log_blks_free);
-		/* If no blocks have been reserved, we need to also
-		 * reserve a block for the header */
-		if (!sdp->sd_log_blks_reserved) {
-			atomic_dec(&sdp->sd_log_blks_free);
-			trace_gfs2_log_blocks(sdp, -2);
-		} else {
-			trace_gfs2_log_blocks(sdp, -1);
-		}
-	}
 	gfs2_ail1_empty(sdp, max_revokes);
 	gfs2_log_unlock(sdp);
-
-	if (!sdp->sd_log_num_revoke) {
-		atomic_inc(&sdp->sd_log_blks_free);
-		if (!sdp->sd_log_blks_reserved) {
-			atomic_inc(&sdp->sd_log_blks_free);
-			trace_gfs2_log_blocks(sdp, 2);
-		} else {
-			trace_gfs2_log_blocks(sdp, 1);
-		}
-	}
 }
 
 /**
···
 	u64 dblock;
 
 	if (gfs2_withdrawn(sdp))
-		goto out;
+		return;
 
 	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 	lh = page_address(page);
···
 			  sb->s_blocksize - LH_V1_SIZE - 4);
 	lh->lh_crc = cpu_to_be32(crc);
 
-	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
-	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
-out:
-	log_flush_wait(sdp);
+	gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock);
+	gfs2_log_submit_bio(&jd->jd_log_bio, REQ_OP_WRITE | op_flags);
 }
 
 /**
···
 
 static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 {
-	unsigned int tail;
 	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 
 	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
-	tail = current_tail(sdp);
 
 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
 		gfs2_ordered_wait(sdp);
 		log_flush_wait(sdp);
 		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
 	}
-	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
-	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
-			      sdp->sd_log_flush_head, flags, op_flags);
+	sdp->sd_log_idle = (sdp->sd_log_flush_tail == sdp->sd_log_flush_head);
+	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++,
+			      sdp->sd_log_flush_tail, sdp->sd_log_flush_head,
+			      flags, op_flags);
 	gfs2_log_incr_head(sdp);
-
-	if (sdp->sd_log_tail != tail)
-		log_pull_tail(sdp, tail);
+	log_flush_wait(sdp);
+	log_pull_tail(sdp);
+	gfs2_log_update_head(sdp);
 }
 
 /**
···
 void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 {
 	struct gfs2_trans *tr = NULL;
+	unsigned int reserved_blocks = 0, used_blocks = 0;
 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+	unsigned int first_log_head;
+	unsigned int reserved_revokes = 0;
 
 	down_write(&sdp->sd_log_flush_lock);
+	trace_gfs2_log_flush(sdp, 1, flags);
 
+repeat:
 	/*
 	 * Do this check while holding the log_flush_lock to prevent new
 	 * buffers from being added to the ail via gfs2_pin()
···
 	/* Log might have been flushed while we waited for the flush lock */
 	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
 		goto out;
-	trace_gfs2_log_flush(sdp, 1, flags);
+
+	first_log_head = sdp->sd_log_head;
+	sdp->sd_log_flush_head = first_log_head;
+
+	tr = sdp->sd_log_tr;
+	if (tr || sdp->sd_log_num_revoke) {
+		if (reserved_blocks)
+			gfs2_log_release(sdp, reserved_blocks);
+		reserved_blocks = sdp->sd_log_blks_reserved;
+		reserved_revokes = sdp->sd_log_num_revoke;
+		if (tr) {
+			sdp->sd_log_tr = NULL;
+			tr->tr_first = first_log_head;
+			if (unlikely (state == SFS_FROZEN)) {
+				if (gfs2_assert_withdraw_delayed(sdp,
+				       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
+					goto out_withdraw;
+			}
+		}
+	} else if (!reserved_blocks) {
+		unsigned int taboo_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
+
+		reserved_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
+		if (current == sdp->sd_logd_process)
+			taboo_blocks = 0;
+
+		if (!__gfs2_log_try_reserve(sdp, reserved_blocks, taboo_blocks)) {
+			up_write(&sdp->sd_log_flush_lock);
+			__gfs2_log_reserve(sdp, reserved_blocks, taboo_blocks);
+			down_write(&sdp->sd_log_flush_lock);
+			goto repeat;
+		}
+		BUG_ON(sdp->sd_log_num_revoke);
+	}
 
 	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
 		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 
-	sdp->sd_log_flush_head = sdp->sd_log_head;
-	tr = sdp->sd_log_tr;
-	if (tr) {
-		sdp->sd_log_tr = NULL;
-		tr->tr_first = sdp->sd_log_flush_head;
-		if (unlikely (state == SFS_FROZEN))
-			if (gfs2_assert_withdraw_delayed(sdp,
-			       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
-				goto out_withdraw;
-	}
-
 	if (unlikely(state == SFS_FROZEN))
-		if (gfs2_assert_withdraw_delayed(sdp, !sdp->sd_log_num_revoke))
+		if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
 			goto out_withdraw;
-	if (gfs2_assert_withdraw_delayed(sdp,
-			sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke))
-		goto out_withdraw;
 
 	gfs2_ordered_write(sdp);
 	if (gfs2_withdrawn(sdp))
···
 	lops_before_commit(sdp, tr);
 	if (gfs2_withdrawn(sdp))
 		goto out_withdraw;
-	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
+	gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
 	if (gfs2_withdrawn(sdp))
 		goto out_withdraw;
 
 	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
-		log_flush_wait(sdp);
 		log_write_header(sdp, flags);
-	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
-		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
-		trace_gfs2_log_blocks(sdp, -1);
+	} else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
 		log_write_header(sdp, flags);
 	}
 	if (gfs2_withdrawn(sdp))
···
 	lops_after_commit(sdp, tr);
 
 	gfs2_log_lock(sdp);
-	sdp->sd_log_head = sdp->sd_log_flush_head;
 	sdp->sd_log_blks_reserved = 0;
-	sdp->sd_log_committed_revoke = 0;
 
 	spin_lock(&sdp->sd_ail_lock);
 	if (tr && !list_empty(&tr->tr_ail1_list)) {
···
 		empty_ail1_list(sdp);
 		if (gfs2_withdrawn(sdp))
 			goto out_withdraw;
-		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
-		trace_gfs2_log_blocks(sdp, -1);
 		log_write_header(sdp, flags);
-		sdp->sd_log_head = sdp->sd_log_flush_head;
 	}
 	if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
 		     GFS2_LOG_HEAD_FLUSH_FREEZE))
···
 	}
 
 out_end:
-	trace_gfs2_log_flush(sdp, 0, flags);
+	used_blocks = log_distance(sdp, sdp->sd_log_flush_head, first_log_head);
+	reserved_revokes += atomic_read(&sdp->sd_log_revokes_available);
+	atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
+	gfs2_assert_withdraw(sdp, reserved_revokes % sdp->sd_inptrs == sdp->sd_ldptrs);
+	if (reserved_revokes > sdp->sd_ldptrs)
+		reserved_blocks += (reserved_revokes - sdp->sd_ldptrs) / sdp->sd_inptrs;
 out:
+	if (used_blocks != reserved_blocks) {
+		gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
+		gfs2_log_release(sdp, reserved_blocks - used_blocks);
+	}
 	up_write(&sdp->sd_log_flush_lock);
 	gfs2_trans_free(sdp, tr);
 	if (gfs2_withdrawing(sdp))
 		gfs2_withdraw(sdp);
+	trace_gfs2_log_flush(sdp, 0, flags);
 	return;
 
 out_withdraw:
···
 	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
 	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
 	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
+	old->tr_revokes		+= new->tr_revokes;
 	old->tr_num_revoke	+= new->tr_num_revoke;
-	old->tr_num_revoke_rm	+= new->tr_num_revoke_rm;
 
 	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
 	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
···
 	if (sdp->sd_log_tr) {
 		gfs2_merge_trans(sdp, tr);
 	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
-		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
+		gfs2_assert_withdraw(sdp, !test_bit(TR_ONSTACK, &tr->tr_flags));
 		sdp->sd_log_tr = tr;
 		set_bit(TR_ATTACHED, &tr->tr_flags);
 	}
 
-	sdp->sd_log_committed_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
 	reserved = calc_reserved(sdp);
 	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
 	gfs2_assert_withdraw(sdp, maxres >= reserved);
 	unused = maxres - reserved;
-	atomic_add(unused, &sdp->sd_log_blks_free);
-	trace_gfs2_log_blocks(sdp, unused);
-	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
-			     sdp->sd_jdesc->jd_blocks);
+	if (unused)
+		gfs2_log_release(sdp, unused);
 	sdp->sd_log_blks_reserved = reserved;
 
 	gfs2_log_unlock(sdp);
···
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
 
-	sdp->sd_log_flush_head = sdp->sd_log_head;
-
 	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
+	log_pull_tail(sdp);
 
 	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
 	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
-
-	sdp->sd_log_head = sdp->sd_log_flush_head;
-	sdp->sd_log_tail = sdp->sd_log_head;
 }
 
 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
···
 	struct gfs2_sbd *sdp = data;
 	unsigned long t = 1;
 	DEFINE_WAIT(wait);
-	bool did_flush;
 
 	while (!kthread_should_stop()) {
 
···
 			continue;
 		}
 
-		did_flush = false;
 		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
 			gfs2_ail1_empty(sdp, 0);
 			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
-						  GFS2_LFC_LOGD_JFLUSH_REQD);
-			did_flush = true;
+				       GFS2_LFC_LOGD_JFLUSH_REQD);
 		}
 
 		if (gfs2_ail_flush_reqd(sdp)) {
···
 			gfs2_ail1_wait(sdp);
 			gfs2_ail1_empty(sdp, 0);
 			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
-						  GFS2_LFC_LOGD_AIL_FLUSH_REQD);
-			did_flush = true;
+				       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
 		}
-
-		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
-			wake_up(&sdp->sd_log_waitq);
 
 		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
 
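
As a concrete illustration of the revoke accounting introduced above, here is
a hedged sketch (the helper name is hypothetical; it assumes, as the diff
does, that one revoke entry is a single u64 block number and that sd_inptrs
is the number of entries that fit per continuation block):

	/* Mirror of the rounding in gfs2_log_reserve()/gfs2_log_try_reserve():
	 * a request for `revokes` entries reserves whole blocks and reports
	 * the surplus entries so they are not wasted. */
	static unsigned int revoke_blocks(unsigned int revokes,
					  unsigned int sd_inptrs,
					  unsigned int *extra_revokes)
	{
		unsigned int blks = DIV_ROUND_UP(revokes, sd_inptrs);

		/* e.g. revokes = 600, sd_inptrs = 509: 2 blocks, 418 extra */
		*extra_revokes = blks * sd_inptrs - revokes;
		return blks;
	}

The surplus is handed back through *extra_revokes so the transaction layer
(not shown in this diff) can return it to the sd_log_revokes_available pool.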
+17 -3
fs/gfs2/log.h
···
 #include "incore.h"
 #include "inode.h"
 
+/*
+ * The minimum amount of log space required for a log flush is one block for
+ * revokes and one block for the log header. Log flushes other than
+ * GFS2_LOG_HEAD_FLUSH_NORMAL may write one or two more log headers.
+ */
+#define GFS2_LOG_FLUSH_MIN_BLOCKS 4
+
 /**
  * gfs2_log_lock - acquire the right to mess with the log manager
  * @sdp: the filesystem
···
 	if (++value == sdp->sd_jdesc->jd_blocks) {
 		value = 0;
 	}
-	sdp->sd_log_head = sdp->sd_log_tail = value;
+	sdp->sd_log_tail = value;
+	sdp->sd_log_flush_tail = value;
+	sdp->sd_log_head = value;
 }
 
 static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
···
 extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
 extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
 extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
+extern bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
+extern void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
 extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
-extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
+extern bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+				 unsigned int *extra_revokes);
+extern void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+			     unsigned int *extra_revokes);
 extern void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 				  u64 seq, u32 tail, u32 lblock, u32 flags,
 				  int op_flags);
···
 extern int gfs2_logd(void *data);
 extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
 extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
-extern void gfs2_write_revokes(struct gfs2_sbd *sdp);
+extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
 
 #endif /* __LOG_DOT_H__ */
+16 -10
fs/gfs2/lops.c
···
 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
 
+	rgrp_lock_local(rgd);
 	if (bi->bi_clone == NULL)
-		return;
+		goto out;
 	if (sdp->sd_args.ar_discard)
 		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
 	memcpy(bi->bi_clone + bi->bi_offset,
 	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
 	clear_bit(GBF_FULL, &bi->bi_flags);
 	rgd->rd_free_clone = rgd->rd_free;
+	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
 	rgd->rd_extfail_pt = rgd->rd_free;
+
+out:
+	rgrp_unlock_local(rgd);
 }
 
 /**
···
  * then add the page segment to that.
  */
 
-void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
-		    unsigned size, unsigned offset, u64 blkno)
+void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+		    struct page *page, unsigned size, unsigned offset,
+		    u64 blkno)
 {
 	struct bio *bio;
 	int ret;
 
-	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
+	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
 			       gfs2_end_log_write, false);
 	ret = bio_add_page(bio, page, size, offset);
 	if (ret == 0) {
-		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
+		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
 				       REQ_OP_WRITE, gfs2_end_log_write, true);
 		ret = bio_add_page(bio, page, size, offset);
 		WARN_ON(ret == 0);
···
 
 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
 	gfs2_log_incr_head(sdp);
-	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
+	gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
+		       bh_offset(bh), dblock);
 }
 
 /**
···
  * the page may be freed at any time.
  */
 
-void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
+static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
 {
 	struct super_block *sb = sdp->sd_vfs;
 	u64 dblock;
 
 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
 	gfs2_log_incr_head(sdp);
-	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
+	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
 }
 
 /**
···
 	struct page *page;
 	unsigned int length;
 
-	gfs2_write_revokes(sdp);
+	gfs2_flush_revokes(sdp);
 	if (!sdp->sd_log_num_revoke)
 		return;
 
···
 		sdp->sd_log_num_revoke--;
 
 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
-
 			gfs2_log_write_page(sdp, page);
 			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 			mh = page_address(page);
+5 -18
fs/gfs2/lops.h
···
 #include <linux/list.h>
 #include "incore.h"
 
-#define BUF_OFFSET \
-	((sizeof(struct gfs2_log_descriptor) + sizeof(__be64) - 1) & \
-	 ~(sizeof(__be64) - 1))
-#define DATABUF_OFFSET \
-	((sizeof(struct gfs2_log_descriptor) + (2 * sizeof(__be64) - 1)) & \
-	 ~(2 * sizeof(__be64) - 1))
-
 extern const struct gfs2_log_operations *gfs2_log_ops[];
 extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
 extern u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
-extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
-			   unsigned size, unsigned offset, u64 blkno);
-extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
+extern void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+			   struct page *page, unsigned size, unsigned offset,
+			   u64 blkno);
 extern void gfs2_log_submit_bio(struct bio **biop, int opf);
 extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
 extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
 			   struct gfs2_log_header_host *head, bool keep_cache);
 static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
 {
-	unsigned int limit;
-
-	limit = (sdp->sd_sb.sb_bsize - BUF_OFFSET) / sizeof(__be64);
-	return limit;
+	return sdp->sd_ldptrs;
 }
 
 static inline unsigned int databuf_limit(struct gfs2_sbd *sdp)
 {
-	unsigned int limit;
-
-	limit = (sdp->sd_sb.sb_bsize - DATABUF_OFFSET) / (2 * sizeof(__be64));
-	return limit;
+	return sdp->sd_ldptrs / 2;
 }
 
 static inline void lops_before_commit(struct gfs2_sbd *sdp,
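
For context, a hedged sketch of where these simplified limits come from (the
sd_ldptrs computation lives in ops_fstype.c, outside this hunk, and the
variable names below are illustrative):

	/* One gfs2_log_descriptor block holds sd_ldptrs __be64 entries.
	 * A metadata entry uses one __be64; a journaled-data entry uses
	 * two (block number plus escape flag), hence the halved limit. */
	unsigned int ldptrs = (sb_bsize - sizeof(struct gfs2_log_descriptor)) /
			      sizeof(__be64);
	unsigned int buf_limit = ldptrs;
	unsigned int databuf_limit = ldptrs / 2;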
+2 -2
fs/gfs2/main.c
···
 	error = -ENOMEM;
 	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
 					      sizeof(struct gfs2_glock),
-					      0, 0,
+					      0, SLAB_RECLAIM_ACCOUNT,
 					      gfs2_init_glock_once);
 	if (!gfs2_glock_cachep)
 		goto fail_cachep1;
···
 
 	gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
 					       sizeof(struct gfs2_quota_data),
-					       0, 0, NULL);
+					       0, SLAB_RECLAIM_ACCOUNT, NULL);
 	if (!gfs2_quotad_cachep)
 		goto fail_cachep6;
 
+48 -23
fs/gfs2/ops_fstype.c
···
 
 	init_rwsem(&sdp->sd_log_flush_lock);
 	atomic_set(&sdp->sd_log_in_flight, 0);
-	atomic_set(&sdp->sd_reserving_log, 0);
-	init_waitqueue_head(&sdp->sd_reserving_log_wait);
 	init_waitqueue_head(&sdp->sd_log_flush_wait);
 	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
 	mutex_init(&sdp->sd_freeze_mutex);
···
 		return -EINVAL;
 	}
 
-	if (sb->sb_fs_format != GFS2_FORMAT_FS ||
+	if (sb->sb_fs_format < GFS2_FS_FORMAT_MIN ||
+	    sb->sb_fs_format > GFS2_FS_FORMAT_MAX ||
 	    sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
 		fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
 		return -EINVAL;
 	}
 
 	if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE ||
 	    (sb->sb_bsize & (sb->sb_bsize - 1))) {
-		pr_warn("Invalid superblock size\n");
+		pr_warn("Invalid block size\n");
 		return -EINVAL;
 	}
 
···
 	sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
 				     sizeof(struct gfs2_meta_header))
 				    * GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */
+
+	/*
+	 * We always keep at least one block reserved for revokes in
+	 * transactions.  This greatly simplifies allocating additional
+	 * revoke blocks.
+	 */
+	atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
 
 	/* Compute maximum reservation required to add a entry to a directory */
 
···
 	if (ret) {
 		fs_err(sdp, "can't read superblock: %d\n", ret);
 		goto out;
 	}
+
+	switch(sdp->sd_sb.sb_fs_format) {
+	case GFS2_FS_FORMAT_MAX:
+		sb->s_xattr = gfs2_xattr_handlers_max;
+		break;
+
+	case GFS2_FS_FORMAT_MIN:
+		sb->s_xattr = gfs2_xattr_handlers_min;
+		break;
+
+	default:
+		BUG();
+	}
 
 	/* Set up the buffer cache and SB for real */
···
 	}
 
 	if (lm->lm_mount == NULL) {
-		fs_info(sdp, "Now mounting FS...\n");
+		fs_info(sdp, "Now mounting FS (format %u)...\n", sdp->sd_sb.sb_fs_format);
 		complete_all(&sdp->sd_locking_init);
 		return 0;
 	}
 	ret = lm->lm_mount(sdp, table);
 	if (ret == 0)
-		fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+		fs_info(sdp, "Joined cluster. Now mounting FS (format %u)...\n",
+			sdp->sd_sb.sb_fs_format);
 	complete_all(&sdp->sd_locking_init);
 	return ret;
 }
···
 	int silent = fc->sb_flags & SB_SILENT;
 	struct gfs2_sbd *sdp;
 	struct gfs2_holder mount_gh;
+	struct gfs2_holder freeze_gh;
 	int error;
 
 	sdp = init_sbd(sb);
···
 	sb->s_op = &gfs2_super_ops;
 	sb->s_d_op = &gfs2_dops;
 	sb->s_export_op = &gfs2_export_ops;
-	sb->s_xattr = gfs2_xattr_handlers;
 	sb->s_qcop = &gfs2_quotactl_ops;
 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
···
 	if (error)
 		goto fail_locking;
 
+	/* Turn rgrplvb on by default if fs format is recent enough */
+	if (!sdp->sd_args.ar_got_rgrplvb && sdp->sd_sb.sb_fs_format > 1801)
+		sdp->sd_args.ar_rgrplvb = 1;
+
 	error = wait_on_journal(sdp);
 	if (error)
 		goto fail_sb;
···
 		goto fail_per_node;
 	}
 
-	if (sb_rdonly(sb)) {
-		struct gfs2_holder freeze_gh;
+	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+	if (error)
+		goto fail_per_node;
 
-		error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-					   LM_FLAG_NOEXP | GL_EXACT,
-					   &freeze_gh);
-		if (error) {
-			fs_err(sdp, "can't make FS RO: %d\n", error);
-			goto fail_per_node;
-		}
-		gfs2_glock_dq_uninit(&freeze_gh);
-	} else {
+	if (!sb_rdonly(sb))
 		error = gfs2_make_fs_rw(sdp);
-		if (error) {
-			fs_err(sdp, "can't make FS RW: %d\n", error);
-			goto fail_per_node;
-		}
-	}
 
+	gfs2_freeze_unlock(&freeze_gh);
+	if (error) {
+		fs_err(sdp, "can't make FS RW: %d\n", error);
+		goto fail_per_node;
+	}
 	gfs2_glock_dq_uninit(&mount_gh);
 	gfs2_online_uevent(sdp);
 	return 0;
···
 		break;
 	case Opt_rgrplvb:
 		args->ar_rgrplvb = result.boolean;
+		args->ar_got_rgrplvb = 1;
 		break;
 	case Opt_loccookie:
 		args->ar_loccookie = result.boolean;
···
 		fc->sb_flags |= SB_RDONLY;
 
 	if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
+		struct gfs2_holder freeze_gh;
+
+		error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+		if (error)
+			return -EINVAL;
+
 		if (fc->sb_flags & SB_RDONLY) {
 			error = gfs2_make_fs_ro(sdp);
 			if (error)
···
 			if (error)
 				errorfc(fc, "unable to remount read-write");
 		}
+		gfs2_freeze_unlock(&freeze_gh);
 	}
 	sdp->sd_args = *newargs;
 
+7 -7
fs/gfs2/recovery.c
···
 
 	/* Acquire a shared hold on the freeze lock */
 
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-				   LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
-				   GL_EXACT, &thaw_gh);
+	error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY);
 	if (error)
 		goto fail_gunlock_ji;
 
···
 
 		/* We take the sd_log_flush_lock here primarily to prevent log
 		 * flushes and simultaneous journal replays from stomping on
-		 * each other wrt sd_log_bio. */
+		 * each other wrt jd_log_bio. */
 		down_read(&sdp->sd_log_flush_lock);
 		for (pass = 0; pass < 2; pass++) {
 			lops_before_scan(jd, &head, pass);
 			error = foreach_descriptor(jd, head.lh_tail,
 						   head.lh_blkno, pass);
 			lops_after_scan(jd, error, pass);
-			if (error)
+			if (error) {
+				up_read(&sdp->sd_log_flush_lock);
 				goto fail_gunlock_thaw;
+			}
 		}
 
 		recover_local_statfs(jd, &head);
 		clean_journal(jd, &head);
 		up_read(&sdp->sd_log_flush_lock);
 
-		gfs2_glock_dq_uninit(&thaw_gh);
+		gfs2_freeze_unlock(&thaw_gh);
 		t_rep = ktime_get();
 		fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
 			"jhead:%lldms, tlck:%lldms, replay:%lldms]\n",
···
 	goto done;
 
 fail_gunlock_thaw:
-	gfs2_glock_dq_uninit(&thaw_gh);
+	gfs2_freeze_unlock(&thaw_gh);
 fail_gunlock_ji:
 	if (jlocked) {
 		gfs2_glock_dq_uninit(&ji_gh);
+278 -164
fs/gfs2/rgrp.c
···
 #define BFITNOENT ((u32)~0)
 #define NO_BLOCK ((u64)~0)
 
+struct gfs2_rbm {
+	struct gfs2_rgrpd *rgd;
+	u32 offset;		/* The offset is bitmap relative */
+	int bii;		/* Bitmap index */
+};
+
+static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
+{
+	return rbm->rgd->rd_bits + rbm->bii;
+}
+
+static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
+{
+	BUG_ON(rbm->offset >= rbm->rgd->rd_data);
+	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
+	       rbm->offset;
+}
+
 /*
  * These routines are used by the resource group routines (rgrp.c)
  * to keep track of block allocation. Each block is represented by two
···
 };
 
 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
-			 const struct gfs2_inode *ip, bool nowrap);
+			 struct gfs2_blkreserv *rs, bool nowrap);
 
 
 /**
···
 /**
  * rs_cmp - multi-block reservation range compare
- * @blk: absolute file system block number of the new reservation
+ * @start: start of the new reservation
  * @len: number of blocks in the new reservation
  * @rs: existing reservation to compare against
  *
···
  *         -1 if the block range is before the start of the reservation
  *          0 if the block range overlaps with the reservation
  */
-static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
+static inline int rs_cmp(u64 start, u32 len, struct gfs2_blkreserv *rs)
 {
-	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
-
-	if (blk >= startblk + rs->rs_free)
+	if (start >= rs->rs_start + rs->rs_requested)
 		return 1;
-	if (blk + len - 1 < startblk)
+	if (rs->rs_start >= start + len)
 		return -1;
 	return 0;
 }
···
 /**
- * gfs2_rbm_incr - increment an rbm structure
+ * gfs2_rbm_add - add a number of blocks to an rbm
  * @rbm: The rbm with rgd already set correctly
+ * @blocks: The number of blocks to add to rpm
  *
- * This function takes an existing rbm structure and increments it to the next
- * viable block offset.
+ * This function takes an existing rbm structure and adds a number of blocks to
+ * it.
  *
- * Returns: If incrementing the offset would cause the rbm to go past the
- *          end of the rgrp, true is returned, otherwise false.
- *
+ * Returns: True if the new rbm would point past the end of the rgrp.
  */
 
-static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
+static bool gfs2_rbm_add(struct gfs2_rbm *rbm, u32 blocks)
 {
-	if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
-		rbm->offset++;
+	struct gfs2_rgrpd *rgd = rbm->rgd;
+	struct gfs2_bitmap *bi = rgd->rd_bits + rbm->bii;
+
+	if (rbm->offset + blocks < bi->bi_blocks) {
+		rbm->offset += blocks;
 		return false;
 	}
-	if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
-		return true;
+	blocks -= bi->bi_blocks - rbm->offset;
 
-	rbm->offset = 0;
-	rbm->bii++;
-	return false;
+	for(;;) {
+		bi++;
+		if (bi == rgd->rd_bits + rgd->rd_length)
+			return true;
+		if (blocks < bi->bi_blocks) {
+			rbm->offset = blocks;
+			rbm->bii = bi - rgd->rd_bits;
+			return false;
+		}
+		blocks -= bi->bi_blocks;
+	}
 }
···
  * @n_unaligned: Number of unaligned blocks to check
  * @len: Decremented for each block found (terminate on zero)
  *
- * Returns: true if a non-free block is encountered
+ * Returns: true if a non-free block is encountered or the end of the resource
+ *	    group is reached.
  */
···
 		(*len)--;
 		if (*len == 0)
 			return true;
-		if (gfs2_rbm_incr(rbm))
+		if (gfs2_rbm_add(rbm, 1))
 			return true;
 	}
 
···
 {
 	struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res);
 
-	gfs2_print_dbg(seq, "%s  B: n:%llu s:%llu b:%u f:%u\n", fs_id_buf,
+	gfs2_print_dbg(seq, "%s  B: n:%llu s:%llu f:%u\n",
+		       fs_id_buf,
 		       (unsigned long long)ip->i_no_addr,
-		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
-		       rs->rs_rbm.offset, rs->rs_free);
+		       (unsigned long long)rs->rs_start,
+		       rs->rs_requested);
 }
···
 	if (!gfs2_rs_active(rs))
 		return;
 
-	rgd = rs->rs_rbm.rgd;
+	rgd = rs->rs_rgd;
 	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
 	rb_erase(&rs->rs_node, &rgd->rd_rstree);
 	RB_CLEAR_NODE(&rs->rs_node);
 
-	if (rs->rs_free) {
-		u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
-				 rs->rs_free - 1;
-		struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
-		struct gfs2_bitmap *start, *last;
+	if (rs->rs_requested) {
+		/* return requested blocks to the rgrp */
+		BUG_ON(rs->rs_rgd->rd_requested < rs->rs_requested);
+		rs->rs_rgd->rd_requested -= rs->rs_requested;
 
-		/* return reserved blocks to the rgrp */
-		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
-		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
 		/* The rgrp extent failure point is likely not to increase;
 		   it will only do so if the freed blocks are somehow
 		   contiguous with a span of free blocks that follows. Still,
 		   it will force the number to be recalculated later. */
-		rgd->rd_extfail_pt += rs->rs_free;
-		rs->rs_free = 0;
-		if (gfs2_rbm_from_block(&last_rbm, last_block))
-			return;
-		start = rbm_bi(&rs->rs_rbm);
-		last = rbm_bi(&last_rbm);
-		do
-			clear_bit(GBF_FULL, &start->bi_flags);
-		while (start++ != last);
+		rgd->rd_extfail_pt += rs->rs_requested;
+		rs->rs_requested = 0;
 	}
 }
···
 {
 	struct gfs2_rgrpd *rgd;
 
-	rgd = rs->rs_rbm.rgd;
+	rgd = rs->rs_rgd;
 	if (rgd) {
 		spin_lock(&rgd->rd_rsspin);
 		__rs_deltree(rs);
-		BUG_ON(rs->rs_free);
+		BUG_ON(rs->rs_requested);
 		spin_unlock(&rgd->rd_rsspin);
 	}
 }
···
 	rgd->rd_data = be32_to_cpu(buf.ri_data);
 	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
 	spin_lock_init(&rgd->rd_rsspin);
+	mutex_init(&rgd->rd_mutex);
 
 	error = compute_bitstructs(rgd);
 	if (error)
···
 	return count;
 }
 
+static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd)
+{
+	struct gfs2_bitmap *bi;
+	int x;
+
+	if (rgd->rd_free) {
+		for (x = 0; x < rgd->rd_length; x++) {
+			bi = rgd->rd_bits + x;
+			clear_bit(GBF_FULL, &bi->bi_flags);
+		}
+	} else {
+		for (x = 0; x < rgd->rd_length; x++) {
+			bi = rgd->rd_bits + x;
+			set_bit(GBF_FULL, &bi->bi_flags);
+		}
+	}
+}
 
 /**
  * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
···
 	}
 
 	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
-		for (x = 0; x < length; x++)
-			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
 		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
+		rgrp_set_bitmap_flags(rgd);
 		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
 		rgd->rd_free_clone = rgd->rd_free;
+		BUG_ON(rgd->rd_reserved);
 		/* max out the rgrp allocation failure point */
 		rgd->rd_extfail_pt = rgd->rd_free;
 	}
···
 	if (rgd->rd_rgl->rl_unlinked == 0)
 		rgd->rd_flags &= ~GFS2_RDF_CHECK;
 	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
+	rgrp_set_bitmap_flags(rgd);
 	rgd->rd_free_clone = rgd->rd_free;
+	BUG_ON(rgd->rd_reserved);
+	/* max out the rgrp allocation failure point */
+	rgd->rd_extfail_pt = rgd->rd_free;
 	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
 	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
 	return 0;
···
 	while (1) {
 
-		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
+		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+					 LM_FLAG_NODE_SCOPE, &gh);
 		if (ret)
 			goto out;
 
···
 		/* Trim each bitmap in the rgrp */
 		for (x = 0; x < rgd->rd_length; x++) {
 			struct gfs2_bitmap *bi = rgd->rd_bits + x;
+			rgrp_lock_local(rgd);
 			ret = gfs2_rgrp_send_discards(sdp,
 					rgd->rd_data0, NULL, bi, minlen,
 					&amt);
+			rgrp_unlock_local(rgd);
 			if (ret) {
 				gfs2_glock_dq_uninit(&gh);
 				goto out;
···
 		ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
 		if (ret == 0) {
 			bh = rgd->rd_bits[0].bi_bh;
+			rgrp_lock_local(rgd);
 			rgd->rd_flags |= GFS2_RGF_TRIMMED;
 			gfs2_trans_add_meta(rgd->rd_gl, bh);
 			gfs2_rgrp_out(rgd, bh->b_data);
+			rgrp_unlock_local(rgd);
gfs2_trans_end(sdp); 1474 1435 } 1475 1436 } ··· 1501 1458 struct rb_node **newn, *parent = NULL; 1502 1459 int rc; 1503 1460 struct gfs2_blkreserv *rs = &ip->i_res; 1504 - struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd; 1505 - u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm); 1461 + struct gfs2_rgrpd *rgd = rs->rs_rgd; 1506 1462 1507 1463 BUG_ON(gfs2_rs_active(rs)); 1508 1464 ··· 1512 1470 rb_entry(*newn, struct gfs2_blkreserv, rs_node); 1513 1471 1514 1472 parent = *newn; 1515 - rc = rs_cmp(fsblock, rs->rs_free, cur); 1473 + rc = rs_cmp(rs->rs_start, rs->rs_requested, cur); 1516 1474 if (rc > 0) 1517 1475 newn = &((*newn)->rb_right); 1518 1476 else if (rc < 0) ··· 1528 1486 rb_insert_color(&rs->rs_node, &rgd->rd_rstree); 1529 1487 1530 1488 /* Do our rgrp accounting for the reservation */ 1531 - rgd->rd_reserved += rs->rs_free; /* blocks reserved */ 1489 + rgd->rd_requested += rs->rs_requested; /* blocks requested */ 1532 1490 spin_unlock(&rgd->rd_rsspin); 1533 1491 trace_gfs2_rs(rs, TRACE_RS_INSERT); 1534 1492 } ··· 1549 1507 { 1550 1508 u32 tot_reserved, tot_free; 1551 1509 1552 - if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free)) 1510 + if (WARN_ON_ONCE(rgd->rd_requested < rs->rs_requested)) 1553 1511 return 0; 1554 - tot_reserved = rgd->rd_reserved - rs->rs_free; 1512 + tot_reserved = rgd->rd_requested - rs->rs_requested; 1555 1513 1556 1514 if (rgd->rd_free_clone < tot_reserved) 1557 1515 tot_reserved = 0; ··· 1576 1534 u64 goal; 1577 1535 struct gfs2_blkreserv *rs = &ip->i_res; 1578 1536 u32 extlen; 1579 - u32 free_blocks = rgd_free(rgd, rs); 1537 + u32 free_blocks, blocks_available; 1580 1538 int ret; 1581 1539 struct inode *inode = &ip->i_inode; 1540 + 1541 + spin_lock(&rgd->rd_rsspin); 1542 + free_blocks = rgd_free(rgd, rs); 1543 + if (rgd->rd_free_clone < rgd->rd_requested) 1544 + free_blocks = 0; 1545 + blocks_available = rgd->rd_free_clone - rgd->rd_reserved; 1546 + if (rgd == rs->rs_rgd) 1547 + blocks_available += rs->rs_reserved; 1548 + spin_unlock(&rgd->rd_rsspin); 1582 1549 1583 1550 if (S_ISDIR(inode->i_mode)) 1584 1551 extlen = 1; ··· 1595 1544 extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target); 1596 1545 extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks); 1597 1546 } 1598 - if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen)) 1547 + if (free_blocks < extlen || blocks_available < extlen) 1599 1548 return; 1600 1549 1601 1550 /* Find bitmap block that contains bits for goal block */ ··· 1607 1556 if (WARN_ON(gfs2_rbm_from_block(&rbm, goal))) 1608 1557 return; 1609 1558 1610 - ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true); 1559 + ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, &ip->i_res, true); 1611 1560 if (ret == 0) { 1612 - rs->rs_rbm = rbm; 1613 - rs->rs_free = extlen; 1561 + rs->rs_start = gfs2_rbm_to_block(&rbm); 1562 + rs->rs_requested = extlen; 1614 1563 rs_insert(ip); 1615 1564 } else { 1616 1565 if (goal == rgd->rd_last_alloc + rgd->rd_data0) ··· 1623 1572 * @rgd: The resource group 1624 1573 * @block: The starting block 1625 1574 * @length: The required length 1626 - * @ip: Ignore any reservations for this inode 1575 + * @ignore_rs: Reservation to ignore 1627 1576 * 1628 1577 * If the block does not appear in any reservation, then return the 1629 1578 * block number unchanged. 
If it does appear in the reservation, then ··· 1633 1582 1634 1583 static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block, 1635 1584 u32 length, 1636 - const struct gfs2_inode *ip) 1585 + struct gfs2_blkreserv *ignore_rs) 1637 1586 { 1638 1587 struct gfs2_blkreserv *rs; 1639 1588 struct rb_node *n; ··· 1653 1602 } 1654 1603 1655 1604 if (n) { 1656 - while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) { 1657 - block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free; 1605 + while (rs_cmp(block, length, rs) == 0 && rs != ignore_rs) { 1606 + block = rs->rs_start + rs->rs_requested; 1658 1607 n = n->rb_right; 1659 1608 if (n == NULL) 1660 1609 break; ··· 1669 1618 /** 1670 1619 * gfs2_reservation_check_and_update - Check for reservations during block alloc 1671 1620 * @rbm: The current position in the resource group 1672 - * @ip: The inode for which we are searching for blocks 1621 + * @rs: Our own reservation 1673 1622 * @minext: The minimum extent length 1674 1623 * @maxext: A pointer to the maximum extent structure 1675 1624 * ··· 1683 1632 */ 1684 1633 1685 1634 static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm, 1686 - const struct gfs2_inode *ip, 1635 + struct gfs2_blkreserv *rs, 1687 1636 u32 minext, 1688 1637 struct gfs2_extent *maxext) 1689 1638 { 1690 1639 u64 block = gfs2_rbm_to_block(rbm); 1691 1640 u32 extlen = 1; 1692 1641 u64 nblock; 1693 - int ret; 1694 1642 1695 1643 /* 1696 1644 * If we have a minimum extent length, then skip over any extent 1697 1645 * which is less than the min extent length in size. 1698 1646 */ 1699 - if (minext) { 1647 + if (minext > 1) { 1700 1648 extlen = gfs2_free_extlen(rbm, minext); 1701 1649 if (extlen <= maxext->len) 1702 1650 goto fail; ··· 1705 1655 * Check the extent which has been found against the reservations 1706 1656 * and skip if parts of it are already reserved 1707 1657 */ 1708 - nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip); 1658 + nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, rs); 1709 1659 if (nblock == block) { 1710 1660 if (!minext || extlen >= minext) 1711 1661 return 0; ··· 1714 1664 maxext->len = extlen; 1715 1665 maxext->rbm = *rbm; 1716 1666 } 1717 - fail: 1718 - nblock = block + extlen; 1667 + } else { 1668 + u64 len = nblock - block; 1669 + if (len >= (u64)1 << 32) 1670 + return -E2BIG; 1671 + extlen = len; 1719 1672 } 1720 - ret = gfs2_rbm_from_block(rbm, nblock); 1721 - if (ret < 0) 1722 - return ret; 1673 + fail: 1674 + if (gfs2_rbm_add(rbm, extlen)) 1675 + return -E2BIG; 1723 1676 return 1; 1724 1677 } 1725 1678 ··· 1730 1677 * gfs2_rbm_find - Look for blocks of a particular state 1731 1678 * @rbm: Value/result starting position and final position 1732 1679 * @state: The state which we want to find 1733 - * @minext: Pointer to the requested extent length (NULL for a single block) 1680 + * @minext: Pointer to the requested extent length 1734 1681 * This is updated to be the actual reservation size. 1735 - * @ip: If set, check for reservations 1682 + * @rs: Our own reservation (NULL to skip checking for reservations) 1736 1683 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping 1737 1684 * around until we've reached the starting point. 
1738 1685 * ··· 1746 1693 */ 1747 1694 1748 1695 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, 1749 - const struct gfs2_inode *ip, bool nowrap) 1696 + struct gfs2_blkreserv *rs, bool nowrap) 1750 1697 { 1751 1698 bool scan_from_start = rbm->bii == 0 && rbm->offset == 0; 1752 1699 struct buffer_head *bh; ··· 1767 1714 1768 1715 while(1) { 1769 1716 bi = rbm_bi(rbm); 1770 - if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) && 1771 - test_bit(GBF_FULL, &bi->bi_flags) && 1717 + if (test_bit(GBF_FULL, &bi->bi_flags) && 1772 1718 (state == GFS2_BLKST_FREE)) 1773 1719 goto next_bitmap; 1774 1720 ··· 1783 1731 goto next_bitmap; 1784 1732 } 1785 1733 rbm->offset = offset; 1786 - if (ip == NULL) 1734 + if (!rs) 1787 1735 return 0; 1788 1736 1789 - ret = gfs2_reservation_check_and_update(rbm, ip, 1790 - minext ? *minext : 0, 1737 + ret = gfs2_reservation_check_and_update(rbm, rs, *minext, 1791 1738 &maxext); 1792 1739 if (ret == 0) 1793 1740 return 0; ··· 1818 1767 break; 1819 1768 } 1820 1769 1821 - if (minext == NULL || state != GFS2_BLKST_FREE) 1770 + if (state != GFS2_BLKST_FREE) 1822 1771 return -ENOSPC; 1823 1772 1824 1773 /* If the extent was too small, and it's smaller than the smallest ··· 1826 1775 useless to search this rgrp again for this amount or more. */ 1827 1776 if (wrapped && (scan_from_start || rbm->bii > last_bii) && 1828 1777 *minext < rbm->rgd->rd_extfail_pt) 1829 - rbm->rgd->rd_extfail_pt = *minext; 1778 + rbm->rgd->rd_extfail_pt = *minext - 1; 1830 1779 1831 1780 /* If the maximum extent we found is big enough to fulfill the 1832 1781 minimum requirements, use it anyway. */ ··· 1989 1938 u64 tdiff; 1990 1939 1991 1940 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(), 1992 - rs->rs_rbm.rgd->rd_gl->gl_dstamp)); 1941 + rs->rs_rgd->rd_gl->gl_dstamp)); 1993 1942 1994 1943 return tdiff > (msecs * 1000 * 1000); 1995 1944 } ··· 2044 1993 * We try our best to find an rgrp that has at least ap->target blocks 2045 1994 * available. After a couple of passes (loops == 2), the prospects of finding 2046 1995 * such an rgrp diminish. At this stage, we return the first rgrp that has 2047 - * at least ap->min_target blocks available. Either way, we set ap->allowed to 2048 - * the number of blocks available in the chosen rgrp. 1996 + * at least ap->min_target blocks available. 
2049 1997 * 2050 1998 * Returns: 0 on success, 2051 1999 * -ENOMEM if a suitable rgrp can't be found ··· 2056 2006 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 2057 2007 struct gfs2_rgrpd *begin = NULL; 2058 2008 struct gfs2_blkreserv *rs = &ip->i_res; 2059 - int error = 0, rg_locked, flags = 0; 2009 + int error = 0, flags = LM_FLAG_NODE_SCOPE; 2010 + bool rg_locked; 2060 2011 u64 last_unlinked = NO_BLOCK; 2012 + u32 target = ap->target; 2061 2013 int loops = 0; 2062 - u32 free_blocks, skip = 0; 2014 + u32 free_blocks, blocks_available, skip = 0; 2015 + 2016 + BUG_ON(rs->rs_reserved); 2063 2017 2064 2018 if (sdp->sd_args.ar_rgrplvb) 2065 2019 flags |= GL_SKIP; 2066 - if (gfs2_assert_warn(sdp, ap->target)) 2020 + if (gfs2_assert_warn(sdp, target)) 2067 2021 return -EINVAL; 2068 2022 if (gfs2_rs_active(rs)) { 2069 - begin = rs->rs_rbm.rgd; 2070 - } else if (rs->rs_rbm.rgd && 2071 - rgrp_contains_block(rs->rs_rbm.rgd, ip->i_goal)) { 2072 - begin = rs->rs_rbm.rgd; 2023 + begin = rs->rs_rgd; 2024 + } else if (rs->rs_rgd && 2025 + rgrp_contains_block(rs->rs_rgd, ip->i_goal)) { 2026 + begin = rs->rs_rgd; 2073 2027 } else { 2074 2028 check_and_update_goal(ip); 2075 - rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1); 2029 + rs->rs_rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1); 2076 2030 } 2077 2031 if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV)) 2078 2032 skip = gfs2_orlov_skip(ip); 2079 - if (rs->rs_rbm.rgd == NULL) 2033 + if (rs->rs_rgd == NULL) 2080 2034 return -EBADSLT; 2081 2035 2082 2036 while (loops < 3) { 2083 - rg_locked = 1; 2037 + struct gfs2_rgrpd *rgd; 2084 2038 2085 - if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) { 2086 - rg_locked = 0; 2039 + rg_locked = gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl); 2040 + if (rg_locked) { 2041 + rgrp_lock_local(rs->rs_rgd); 2042 + } else { 2087 2043 if (skip && skip--) 2088 2044 goto next_rgrp; 2089 2045 if (!gfs2_rs_active(rs)) { 2090 2046 if (loops == 0 && 2091 - !fast_to_acquire(rs->rs_rbm.rgd)) 2047 + !fast_to_acquire(rs->rs_rgd)) 2092 2048 goto next_rgrp; 2093 2049 if ((loops < 2) && 2094 2050 gfs2_rgrp_used_recently(rs, 1000) && 2095 - gfs2_rgrp_congested(rs->rs_rbm.rgd, loops)) 2051 + gfs2_rgrp_congested(rs->rs_rgd, loops)) 2096 2052 goto next_rgrp; 2097 2053 } 2098 - error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl, 2054 + error = gfs2_glock_nq_init(rs->rs_rgd->rd_gl, 2099 2055 LM_ST_EXCLUSIVE, flags, 2100 2056 &ip->i_rgd_gh); 2101 2057 if (unlikely(error)) 2102 2058 return error; 2059 + rgrp_lock_local(rs->rs_rgd); 2103 2060 if (!gfs2_rs_active(rs) && (loops < 2) && 2104 - gfs2_rgrp_congested(rs->rs_rbm.rgd, loops)) 2061 + gfs2_rgrp_congested(rs->rs_rgd, loops)) 2105 2062 goto skip_rgrp; 2106 2063 if (sdp->sd_args.ar_rgrplvb) { 2107 - error = update_rgrp_lvb(rs->rs_rbm.rgd); 2064 + error = update_rgrp_lvb(rs->rs_rgd); 2108 2065 if (unlikely(error)) { 2066 + rgrp_unlock_local(rs->rs_rgd); 2109 2067 gfs2_glock_dq_uninit(&ip->i_rgd_gh); 2110 2068 return error; 2111 2069 } ··· 2121 2063 } 2122 2064 2123 2065 /* Skip unusable resource groups */ 2124 - if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | 2066 + if ((rs->rs_rgd->rd_flags & (GFS2_RGF_NOALLOC | 2125 2067 GFS2_RDF_ERROR)) || 2126 - (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt)) 2068 + (loops == 0 && target > rs->rs_rgd->rd_extfail_pt)) 2127 2069 goto skip_rgrp; 2128 2070 2129 2071 if (sdp->sd_args.ar_rgrplvb) 2130 - gfs2_rgrp_bh_get(rs->rs_rbm.rgd); 2072 + gfs2_rgrp_bh_get(rs->rs_rgd); 2131 2073 2132 2074 /* Get a reservation if 
we don't already have one */ 2133 2075 if (!gfs2_rs_active(rs)) 2134 - rg_mblk_search(rs->rs_rbm.rgd, ip, ap); 2076 + rg_mblk_search(rs->rs_rgd, ip, ap); 2135 2077 2136 2078 /* Skip rgrps when we can't get a reservation on first pass */ 2137 2079 if (!gfs2_rs_active(rs) && (loops < 1)) 2138 2080 goto check_rgrp; 2139 2081 2140 2082 /* If rgrp has enough free space, use it */ 2141 - free_blocks = rgd_free(rs->rs_rbm.rgd, rs); 2142 - if (free_blocks >= ap->target || 2143 - (loops == 2 && ap->min_target && 2144 - free_blocks >= ap->min_target)) { 2145 - ap->allowed = free_blocks; 2146 - return 0; 2083 + rgd = rs->rs_rgd; 2084 + spin_lock(&rgd->rd_rsspin); 2085 + free_blocks = rgd_free(rgd, rs); 2086 + blocks_available = rgd->rd_free_clone - rgd->rd_reserved; 2087 + if (free_blocks < target || blocks_available < target) { 2088 + spin_unlock(&rgd->rd_rsspin); 2089 + goto check_rgrp; 2147 2090 } 2091 + rs->rs_reserved = ap->target; 2092 + if (rs->rs_reserved > blocks_available) 2093 + rs->rs_reserved = blocks_available; 2094 + rgd->rd_reserved += rs->rs_reserved; 2095 + spin_unlock(&rgd->rd_rsspin); 2096 + rgrp_unlock_local(rs->rs_rgd); 2097 + return 0; 2148 2098 check_rgrp: 2149 2099 /* Check for unlinked inodes which can be reclaimed */ 2150 - if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK) 2151 - try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked, 2100 + if (rs->rs_rgd->rd_flags & GFS2_RDF_CHECK) 2101 + try_rgrp_unlink(rs->rs_rgd, &last_unlinked, 2152 2102 ip->i_no_addr); 2153 2103 skip_rgrp: 2104 + rgrp_unlock_local(rs->rs_rgd); 2105 + 2154 2106 /* Drop reservation, if we couldn't use reserved rgrp */ 2155 2107 if (gfs2_rs_active(rs)) 2156 2108 gfs2_rs_deltree(rs); ··· 2170 2102 gfs2_glock_dq_uninit(&ip->i_rgd_gh); 2171 2103 next_rgrp: 2172 2104 /* Find the next rgrp, and continue looking */ 2173 - if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin)) 2105 + if (gfs2_select_rgrp(&rs->rs_rgd, begin)) 2174 2106 continue; 2175 2107 if (skip) 2176 2108 continue; ··· 2187 2119 return error; 2188 2120 } 2189 2121 /* Flushing the log may release space */ 2190 - if (loops == 2) 2122 + if (loops == 2) { 2123 + if (ap->min_target) 2124 + target = ap->min_target; 2191 2125 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | 2192 2126 GFS2_LFC_INPLACE_RESERVE); 2127 + } 2193 2128 } 2194 2129 2195 2130 return -ENOSPC; ··· 2207 2136 2208 2137 void gfs2_inplace_release(struct gfs2_inode *ip) 2209 2138 { 2139 + struct gfs2_blkreserv *rs = &ip->i_res; 2140 + 2141 + if (rs->rs_reserved) { 2142 + struct gfs2_rgrpd *rgd = rs->rs_rgd; 2143 + 2144 + spin_lock(&rgd->rd_rsspin); 2145 + BUG_ON(rgd->rd_reserved < rs->rs_reserved); 2146 + rgd->rd_reserved -= rs->rs_reserved; 2147 + spin_unlock(&rgd->rd_rsspin); 2148 + rs->rs_reserved = 0; 2149 + } 2210 2150 if (gfs2_holder_initialized(&ip->i_rgd_gh)) 2211 2151 gfs2_glock_dq_uninit(&ip->i_rgd_gh); 2212 2152 } ··· 2287 2205 bi_prev = bi; 2288 2206 } 2289 2207 gfs2_setbit(&rbm, false, new_state); 2290 - gfs2_rbm_incr(&rbm); 2208 + gfs2_rbm_add(&rbm, 1); 2291 2209 } 2292 2210 } 2293 2211 ··· 2305 2223 struct gfs2_blkreserv *trs; 2306 2224 const struct rb_node *n; 2307 2225 2308 - gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n", 2226 + spin_lock(&rgd->rd_rsspin); 2227 + gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u q:%u r:%u e:%u\n", 2309 2228 fs_id_buf, 2310 2229 (unsigned long long)rgd->rd_addr, rgd->rd_flags, 2311 2230 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes, 2312 - rgd->rd_reserved, rgd->rd_extfail_pt); 2231 + rgd->rd_requested, 
rgd->rd_reserved, rgd->rd_extfail_pt); 2313 2232 if (rgd->rd_sbd->sd_args.ar_rgrplvb) { 2314 2233 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl; 2315 2234 ··· 2319 2236 be32_to_cpu(rgl->rl_free), 2320 2237 be32_to_cpu(rgl->rl_dinodes)); 2321 2238 } 2322 - spin_lock(&rgd->rd_rsspin); 2323 2239 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) { 2324 2240 trs = rb_entry(n, struct gfs2_blkreserv, rs_node); 2325 2241 dump_rs(seq, trs, fs_id_buf); ··· 2355 2273 { 2356 2274 struct gfs2_blkreserv *rs = &ip->i_res; 2357 2275 struct gfs2_rgrpd *rgd = rbm->rgd; 2358 - unsigned rlen; 2359 - u64 block; 2360 - int ret; 2361 2276 2362 - spin_lock(&rgd->rd_rsspin); 2277 + BUG_ON(rs->rs_reserved < len); 2278 + rs->rs_reserved -= len; 2363 2279 if (gfs2_rs_active(rs)) { 2364 - if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) { 2365 - block = gfs2_rbm_to_block(rbm); 2366 - ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len); 2367 - rlen = min(rs->rs_free, len); 2368 - rs->rs_free -= rlen; 2369 - rgd->rd_reserved -= rlen; 2280 + u64 start = gfs2_rbm_to_block(rbm); 2281 + 2282 + if (rs->rs_start == start) { 2283 + unsigned int rlen; 2284 + 2285 + rs->rs_start += len; 2286 + rlen = min(rs->rs_requested, len); 2287 + rs->rs_requested -= rlen; 2288 + rgd->rd_requested -= rlen; 2370 2289 trace_gfs2_rs(rs, TRACE_RS_CLAIM); 2371 - if (rs->rs_free && !ret) 2372 - goto out; 2290 + if (rs->rs_start < rgd->rd_data0 + rgd->rd_data && 2291 + rs->rs_requested) 2292 + return; 2373 2293 /* We used up our block reservation, so we should 2374 2294 reserve more blocks next time. */ 2375 2295 atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint); 2376 2296 } 2377 2297 __rs_deltree(rs); 2378 2298 } 2379 - out: 2380 - spin_unlock(&rgd->rd_rsspin); 2381 2299 } 2382 2300 2383 2301 /** ··· 2397 2315 u64 goal; 2398 2316 2399 2317 if (gfs2_rs_active(&ip->i_res)) { 2400 - *rbm = ip->i_res.rs_rbm; 2401 - return; 2318 + goal = ip->i_res.rs_start; 2319 + } else { 2320 + if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal)) 2321 + goal = ip->i_goal; 2322 + else 2323 + goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0; 2402 2324 } 2403 - 2404 - if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal)) 2405 - goal = ip->i_goal; 2406 - else 2407 - goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0; 2408 - 2409 2325 if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) { 2410 2326 rbm->bii = 0; 2411 2327 rbm->offset = 0; ··· 2426 2346 { 2427 2347 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 2428 2348 struct buffer_head *dibh; 2429 - struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rbm.rgd, }; 2430 - unsigned int ndata; 2349 + struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rgd, }; 2431 2350 u64 block; /* block, within the file system scope */ 2432 - int error; 2351 + u32 minext = 1; 2352 + int error = -ENOSPC; 2433 2353 2434 - gfs2_set_alloc_start(&rbm, ip, dinode); 2435 - error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false); 2354 + BUG_ON(ip->i_res.rs_reserved < *nblocks); 2436 2355 2356 + rgrp_lock_local(rbm.rgd); 2357 + if (gfs2_rs_active(&ip->i_res)) { 2358 + gfs2_set_alloc_start(&rbm, ip, dinode); 2359 + error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, &ip->i_res, false); 2360 + } 2437 2361 if (error == -ENOSPC) { 2438 2362 gfs2_set_alloc_start(&rbm, ip, dinode); 2439 - error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false); 2363 + error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, NULL, false); 2440 2364 } 2441 2365 2442 2366 /* Since all blocks are reserved in advance, this shouldn't happen */ ··· 2455 2371 
gfs2_alloc_extent(&rbm, dinode, nblocks); 2456 2372 block = gfs2_rbm_to_block(&rbm); 2457 2373 rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0; 2458 - if (gfs2_rs_active(&ip->i_res)) 2459 - gfs2_adjust_reservation(ip, &rbm, *nblocks); 2460 - ndata = *nblocks; 2461 - if (dinode) 2462 - ndata--; 2463 - 2464 2374 if (!dinode) { 2465 - ip->i_goal = block + ndata - 1; 2375 + ip->i_goal = block + *nblocks - 1; 2466 2376 error = gfs2_meta_inode_buffer(ip, &dibh); 2467 2377 if (error == 0) { 2468 2378 struct gfs2_dinode *di = ··· 2467 2389 brelse(dibh); 2468 2390 } 2469 2391 } 2470 - if (rbm.rgd->rd_free < *nblocks) { 2392 + spin_lock(&rbm.rgd->rd_rsspin); 2393 + gfs2_adjust_reservation(ip, &rbm, *nblocks); 2394 + if (rbm.rgd->rd_free < *nblocks || rbm.rgd->rd_reserved < *nblocks) { 2471 2395 fs_warn(sdp, "nblocks=%u\n", *nblocks); 2396 + spin_unlock(&rbm.rgd->rd_rsspin); 2472 2397 goto rgrp_error; 2473 2398 } 2474 - 2399 + BUG_ON(rbm.rgd->rd_reserved < *nblocks); 2400 + BUG_ON(rbm.rgd->rd_free_clone < *nblocks); 2401 + BUG_ON(rbm.rgd->rd_free < *nblocks); 2402 + rbm.rgd->rd_reserved -= *nblocks; 2403 + rbm.rgd->rd_free_clone -= *nblocks; 2475 2404 rbm.rgd->rd_free -= *nblocks; 2405 + spin_unlock(&rbm.rgd->rd_rsspin); 2476 2406 if (dinode) { 2477 2407 rbm.rgd->rd_dinodes++; 2478 2408 *generation = rbm.rgd->rd_igeneration++; ··· 2490 2404 2491 2405 gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh); 2492 2406 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data); 2407 + rgrp_unlock_local(rbm.rgd); 2493 2408 2494 2409 gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0); 2495 2410 if (dinode) ··· 2498 2411 2499 2412 gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid); 2500 2413 2501 - rbm.rgd->rd_free_clone -= *nblocks; 2502 2414 trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks, 2503 2415 dinode ? 
GFS2_BLKST_DINODE : GFS2_BLKST_USED); 2504 2416 *bn = block; 2505 2417 return 0; 2506 2418 2507 2419 rgrp_error: 2420 + rgrp_unlock_local(rbm.rgd); 2508 2421 gfs2_rgrp_error(rbm.rgd); 2509 2422 return -EIO; 2510 2423 } ··· 2524 2437 { 2525 2438 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 2526 2439 2440 + rgrp_lock_local(rgd); 2527 2441 rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE); 2528 2442 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE); 2529 2443 rgd->rd_free += blen; 2530 2444 rgd->rd_flags &= ~GFS2_RGF_TRIMMED; 2531 2445 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh); 2532 2446 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); 2447 + rgrp_unlock_local(rgd); 2533 2448 2534 2449 /* Directories keep their data in the metadata address space */ 2535 2450 if (meta || ip->i_depth || gfs2_is_jdata(ip)) ··· 2567 2478 rgd = gfs2_blk2rgrpd(sdp, blkno, true); 2568 2479 if (!rgd) 2569 2480 return; 2481 + rgrp_lock_local(rgd); 2570 2482 rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED); 2571 2483 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED); 2572 2484 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh); 2573 2485 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); 2574 2486 be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1); 2487 + rgrp_unlock_local(rgd); 2575 2488 } 2576 2489 2577 2490 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) 2578 2491 { 2579 2492 struct gfs2_sbd *sdp = rgd->rd_sbd; 2580 2493 2494 + rgrp_lock_local(rgd); 2581 2495 rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE); 2582 2496 if (!rgd->rd_dinodes) 2583 2497 gfs2_consist_rgrpd(rgd); ··· 2589 2497 2590 2498 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh); 2591 2499 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); 2500 + rgrp_unlock_local(rgd); 2592 2501 be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1); 2593 2502 2594 2503 gfs2_statfs_change(sdp, 0, +1, -1); ··· 2603 2510 * @sdp: The superblock 2604 2511 * @no_addr: The block number to check 2605 2512 * @type: The block type we are looking for 2513 + * 2514 + * The inode glock of @no_addr must be held. The @type to check for is either 2515 + * GFS2_BLKST_DINODE or GFS2_BLKST_UNLINKED; checking for type GFS2_BLKST_FREE 2516 + * or GFS2_BLKST_USED would make no sense. 2606 2517 * 2607 2518 * Returns: 0 if the block type matches the expected type 2608 2519 * -ESTALE if it doesn't match ··· 2631 2534 rbm.rgd = rgd; 2632 2535 error = gfs2_rbm_from_block(&rbm, no_addr); 2633 2536 if (!WARN_ON_ONCE(error)) { 2537 + /* 2538 + * No need to take the local resource group lock here; the 2539 + * inode glock of @no_addr provides the necessary 2540 + * synchronization in case the block is an inode. (In case 2541 + * the block is not an inode, the block type will not match 2542 + * the @type we are looking for.) 
2543 + */ 2634 2544 if (gfs2_testbit(&rbm, false) != type) 2635 2545 error = -ESTALE; 2636 2546 } ··· 2682 2578 return; 2683 2579 rgd = gfs2_blk2rgrpd(sdp, block, 1); 2684 2580 } else { 2685 - rgd = ip->i_res.rs_rbm.rgd; 2581 + rgd = ip->i_res.rs_rgd; 2686 2582 if (!rgd || !rgrp_contains_block(rgd, block)) 2687 2583 rgd = gfs2_blk2rgrpd(sdp, block, 1); 2688 2584 } ··· 2737 2633 sizeof(struct gfs2_holder), 2738 2634 GFP_NOFS | __GFP_NOFAIL); 2739 2635 for (x = 0; x < rlist->rl_rgrps; x++) 2740 - gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, 2741 - LM_ST_EXCLUSIVE, 0, 2742 - &rlist->rl_ghs[x]); 2636 + gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, LM_ST_EXCLUSIVE, 2637 + LM_FLAG_NODE_SCOPE, &rlist->rl_ghs[x]); 2743 2638 } 2744 2639 2745 2640 /** ··· 2761 2658 } 2762 2659 } 2763 2660 2661 + void rgrp_lock_local(struct gfs2_rgrpd *rgd) 2662 + { 2663 + BUG_ON(!gfs2_glock_is_held_excl(rgd->rd_gl) && 2664 + !test_bit(SDF_NORECOVERY, &rgd->rd_sbd->sd_flags)); 2665 + mutex_lock(&rgd->rd_mutex); 2666 + } 2667 + 2668 + void rgrp_unlock_local(struct gfs2_rgrpd *rgd) 2669 + { 2670 + mutex_unlock(&rgd->rd_mutex); 2671 + }
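
The centerpiece of the rgrp.c changes above is gfs2_rbm_add(), which generalizes the old single-step gfs2_rbm_incr() to advance a (bitmap index, offset) position by an arbitrary number of blocks across the resource group's bitmaps. A self-contained userspace model of that arithmetic, with simplified stand-in types in place of gfs2_rbm and gfs2_bitmap:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct bitmap { unsigned int blocks; };
    struct rbm { int bii; unsigned int offset; };

    #define NR_BITMAPS 3
    static const struct bitmap bits[NR_BITMAPS] = { {100}, {50}, {25} };

    /* Returns true if the new position would point past the last bitmap,
     * mirroring the gfs2_rbm_add() contract above. */
    static bool rbm_add(struct rbm *rbm, unsigned int blocks)
    {
        const struct bitmap *bi = &bits[rbm->bii];

        if (rbm->offset + blocks < bi->blocks) {
            rbm->offset += blocks;
            return false;
        }
        blocks -= bi->blocks - rbm->offset;
        for (;;) {
            bi++;
            if (bi == bits + NR_BITMAPS)
                return true;
            if (blocks < bi->blocks) {
                rbm->offset = blocks;
                rbm->bii = bi - bits;
                return false;
            }
            blocks -= bi->blocks;
        }
    }

    int main(void)
    {
        struct rbm rbm = { .bii = 0, .offset = 90 };

        assert(!rbm_add(&rbm, 40));     /* block 130 = bitmap 1, offset 30 */
        printf("bii=%d offset=%u\n", rbm.bii, rbm.offset);
        assert(rbm_add(&rbm, 1000));    /* walks off the end of the rgrp */
        return 0;
    }

A count that lands exactly on a bitmap boundary rolls over to offset 0 of the next bitmap (the first comparison is strict), and walking past the last bitmap reports true, matching the contract documented in the hunk.
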
+5 -1
fs/gfs2/rgrp.h
··· 77 77 /* This is how to tell if a reservation is in the rgrp tree: */ 78 78 static inline bool gfs2_rs_active(const struct gfs2_blkreserv *rs) 79 79 { 80 - return rs && !RB_EMPTY_NODE(&rs->rs_node); 80 + return !RB_EMPTY_NODE(&rs->rs_node); 81 81 } 82 82 83 83 static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block) ··· 88 88 } 89 89 90 90 extern void check_and_update_goal(struct gfs2_inode *ip); 91 + 92 + extern void rgrp_lock_local(struct gfs2_rgrpd *rgd); 93 + extern void rgrp_unlock_local(struct gfs2_rgrpd *rgd); 94 + 91 95 #endif /* __RGRP_DOT_H__ */
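
rgrp_lock_local() and rgrp_unlock_local(), declared here, pair the new per-rgrp rd_mutex with a glock that is now taken with LM_FLAG_NODE_SCOPE: the glock keeps other cluster nodes out while local holders share it, and the mutex serializes those local holders. A rough pthread analogy of the two-level scheme (a rwlock is only an approximation of a node-scope glock, and the kernel version also asserts the glock is held):

    #include <pthread.h>

    struct rgrpd {
        pthread_rwlock_t glock;     /* stand-in for the node-scope glock */
        pthread_mutex_t rd_mutex;   /* stand-in for the new rd_mutex */
        unsigned int rd_free;
    };

    static void rgrp_lock_local(struct rgrpd *rgd)
    {
        /* callers are expected to already hold rgd->glock */
        pthread_mutex_lock(&rgd->rd_mutex);
    }

    static void rgrp_unlock_local(struct rgrpd *rgd)
    {
        pthread_mutex_unlock(&rgd->rd_mutex);
    }

    int main(void)
    {
        struct rgrpd rgd = {
            .glock = PTHREAD_RWLOCK_INITIALIZER,
            .rd_mutex = PTHREAD_MUTEX_INITIALIZER,
            .rd_free = 100,
        };

        pthread_rwlock_rdlock(&rgd.glock);  /* shared with other local users */
        rgrp_lock_local(&rgd);
        rgd.rd_free--;                      /* serialized by rd_mutex */
        rgrp_unlock_local(&rgd);
        pthread_rwlock_unlock(&rgd.glock);
        return 0;
    }
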
+15 -58
fs/gfs2/super.c
··· 81 81 static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid) 82 82 { 83 83 struct gfs2_jdesc *jd; 84 - int found = 0; 85 84 86 85 list_for_each_entry(jd, head, jd_list) { 87 - if (jd->jd_jid == jid) { 88 - found = 1; 89 - break; 90 - } 86 + if (jd->jd_jid == jid) 87 + return jd; 91 88 } 92 - 93 - if (!found) 94 - jd = NULL; 95 - 96 - return jd; 89 + return NULL; 97 90 } 98 91 99 92 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid) ··· 158 165 { 159 166 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); 160 167 struct gfs2_glock *j_gl = ip->i_gl; 161 - struct gfs2_holder freeze_gh; 162 168 struct gfs2_log_header_host head; 163 169 int error; 164 170 165 171 error = init_threads(sdp); 166 172 if (error) 167 173 return error; 168 - 169 - error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 170 - LM_FLAG_NOEXP | GL_EXACT, 171 - &freeze_gh); 172 - if (error) 173 - goto fail_threads; 174 174 175 175 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); 176 176 if (gfs2_withdrawn(sdp)) { ··· 191 205 192 206 set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); 193 207 194 - gfs2_glock_dq_uninit(&freeze_gh); 195 - 196 208 return 0; 197 209 198 210 fail: 199 - gfs2_glock_dq_uninit(&freeze_gh); 200 - fail_threads: 201 211 if (sdp->sd_quotad_process) 202 212 kthread_stop(sdp->sd_quotad_process); 203 213 sdp->sd_quotad_process = NULL; ··· 434 452 } 435 453 436 454 if (error) 437 - gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); 455 + gfs2_freeze_unlock(&sdp->sd_freeze_gh); 438 456 439 457 out: 440 458 while (!list_empty(&list)) { ··· 589 607 590 608 int gfs2_make_fs_ro(struct gfs2_sbd *sdp) 591 609 { 592 - struct gfs2_holder freeze_gh; 593 610 int error = 0; 594 611 int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); 595 - 596 - gfs2_holder_mark_uninitialized(&freeze_gh); 597 - if (sdp->sd_freeze_gl && 598 - !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) { 599 - if (!log_write_allowed) { 600 - error = gfs2_glock_nq_init(sdp->sd_freeze_gl, 601 - LM_ST_SHARED, LM_FLAG_TRY | 602 - LM_FLAG_NOEXP | GL_EXACT, 603 - &freeze_gh); 604 - if (error == GLR_TRYFAILED) 605 - error = 0; 606 - } else { 607 - error = gfs2_glock_nq_init(sdp->sd_freeze_gl, 608 - LM_ST_SHARED, 609 - LM_FLAG_NOEXP | GL_EXACT, 610 - &freeze_gh); 611 - if (error && !gfs2_withdrawn(sdp)) 612 - return error; 613 - } 614 - } 615 612 616 613 gfs2_flush_delete_work(sdp); 617 614 if (!log_write_allowed && current == sdp->sd_quotad_process) ··· 611 650 612 651 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN | 613 652 GFS2_LFC_MAKE_FS_RO); 614 - wait_event(sdp->sd_reserving_log_wait, 615 - atomic_read(&sdp->sd_reserving_log) == 0); 616 - gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == 617 - sdp->sd_jdesc->jd_blocks); 653 + wait_event_timeout(sdp->sd_log_waitq, 654 + gfs2_log_is_empty(sdp), 655 + HZ * 5); 656 + gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp)); 618 657 } else { 619 - wait_event_timeout(sdp->sd_reserving_log_wait, 620 - atomic_read(&sdp->sd_reserving_log) == 0, 658 + wait_event_timeout(sdp->sd_log_waitq, 659 + gfs2_log_is_empty(sdp), 621 660 HZ * 5); 622 661 } 623 - if (gfs2_holder_initialized(&freeze_gh)) 624 - gfs2_glock_dq_uninit(&freeze_gh); 625 - 626 662 gfs2_quota_cleanup(sdp); 627 663 628 664 if (!log_write_allowed) ··· 728 770 struct super_block *sb = sdp->sd_vfs; 729 771 730 772 atomic_inc(&sb->s_active); 731 - error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 732 - LM_FLAG_NOEXP | GL_EXACT, &freeze_gh); 773 + error = gfs2_freeze_lock(sdp, 
&freeze_gh, 0); 733 774 if (error) { 734 - fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error); 735 775 gfs2_assert_withdraw(sdp, 0); 736 776 } else { 737 777 atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN); ··· 739 783 error); 740 784 gfs2_assert_withdraw(sdp, 0); 741 785 } 742 - gfs2_glock_dq_uninit(&freeze_gh); 786 + gfs2_freeze_unlock(&freeze_gh); 743 787 } 744 788 deactivate_super(sb); 745 789 clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags); ··· 807 851 return 0; 808 852 } 809 853 810 - gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); 854 + gfs2_freeze_unlock(&sdp->sd_freeze_gh); 811 855 mutex_unlock(&sdp->sd_freeze_mutex); 812 856 return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE); 813 857 } ··· 1183 1227 goto out_qs; 1184 1228 } 1185 1229 1186 - error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh); 1230 + error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 1231 + LM_FLAG_NODE_SCOPE, &gh); 1187 1232 if (error) 1188 1233 goto out_qs; 1189 1234
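
Among the super.c changes, the jdesc_find_i() hunk is a pure style fix: the found-flag-plus-break dance becomes a direct return from the search loop. The same idiom on a plain array, standing in here for the kernel's list_for_each_entry() walk:

    #include <stdio.h>

    struct jdesc { unsigned int jid; };

    static struct jdesc *jdesc_find(struct jdesc *jds, int n, unsigned int jid)
    {
        for (int i = 0; i < n; i++) {
            if (jds[i].jid == jid)
                return &jds[i];     /* return directly; no found flag */
        }
        return NULL;
    }

    int main(void)
    {
        struct jdesc jds[] = { { 0 }, { 1 }, { 2 } };
        struct jdesc *jd = jdesc_find(jds, 3, 1);

        printf("found jid=%d\n", jd ? (int)jd->jid : -1);
        return 0;
    }
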
+7 -1
fs/gfs2/super.h
··· 11 11 #include <linux/dcache.h> 12 12 #include "incore.h" 13 13 14 + /* Supported fs format version range */ 15 + #define GFS2_FS_FORMAT_MIN (1801) 16 + #define GFS2_FS_FORMAT_MAX (1802) 17 + 14 18 extern void gfs2_lm_unmount(struct gfs2_sbd *sdp); 15 19 16 20 static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp) ··· 58 54 extern const struct export_operations gfs2_export_ops; 59 55 extern const struct super_operations gfs2_super_ops; 60 56 extern const struct dentry_operations gfs2_dops; 61 - extern const struct xattr_handler *gfs2_xattr_handlers[]; 57 + 58 + extern const struct xattr_handler *gfs2_xattr_handlers_max[]; 59 + extern const struct xattr_handler **gfs2_xattr_handlers_min; 62 60 63 61 #endif /* __SUPER_DOT_H__ */ 64 62
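
With the MIN/MAX pair above, a superblock's sb_fs_format can be validated as a closed range instead of a single value; where the kernel applies the check is not shown in this hunk, so the following is only a sketch of the obvious range test:

    #include <stdbool.h>
    #include <stdio.h>

    #define GFS2_FS_FORMAT_MIN (1801)
    #define GFS2_FS_FORMAT_MAX (1802)

    static bool fs_format_supported(unsigned int sb_fs_format)
    {
        return sb_fs_format >= GFS2_FS_FORMAT_MIN &&
               sb_fs_format <= GFS2_FS_FORMAT_MAX;
    }

    int main(void)
    {
        for (unsigned int fmt = 1800; fmt <= 1803; fmt++)
            printf("sb_fs_format %u: %s\n", fmt,
                   fs_format_supported(fmt) ? "supported" : "rejected");
        return 0;
    }
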
+24 -13
fs/gfs2/trace_gfs2.h
··· 560 560 __field( u8, block_state ) 561 561 __field( u64, rd_addr ) 562 562 __field( u32, rd_free_clone ) 563 + __field( u32, rd_requested ) 563 564 __field( u32, rd_reserved ) 564 565 ), 565 566 ··· 572 571 __entry->block_state = block_state; 573 572 __entry->rd_addr = rgd->rd_addr; 574 573 __entry->rd_free_clone = rgd->rd_free_clone; 574 + __entry->rd_requested = rgd->rd_requested; 575 575 __entry->rd_reserved = rgd->rd_reserved; 576 576 ), 577 577 578 - TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rr:%lu", 578 + TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rq:%u rr:%u", 579 579 MAJOR(__entry->dev), MINOR(__entry->dev), 580 580 (unsigned long long)__entry->inum, 581 581 (unsigned long long)__entry->start, 582 582 (unsigned long)__entry->len, 583 583 block_state_name(__entry->block_state), 584 584 (unsigned long long)__entry->rd_addr, 585 - __entry->rd_free_clone, (unsigned long)__entry->rd_reserved) 585 + __entry->rd_free_clone, 586 + __entry->rd_requested, 587 + __entry->rd_reserved) 586 588 ); 587 589 588 590 /* Keep track of multi-block reservations as they are allocated/freed */ ··· 599 595 __field( dev_t, dev ) 600 596 __field( u64, rd_addr ) 601 597 __field( u32, rd_free_clone ) 598 + __field( u32, rd_requested ) 602 599 __field( u32, rd_reserved ) 603 600 __field( u64, inum ) 604 601 __field( u64, start ) 605 - __field( u32, free ) 602 + __field( u32, requested ) 603 + __field( u32, reserved ) 606 604 __field( u8, func ) 607 605 ), 608 606 609 607 TP_fast_assign( 610 - __entry->dev = rs->rs_rbm.rgd->rd_sbd->sd_vfs->s_dev; 611 - __entry->rd_addr = rs->rs_rbm.rgd->rd_addr; 612 - __entry->rd_free_clone = rs->rs_rbm.rgd->rd_free_clone; 613 - __entry->rd_reserved = rs->rs_rbm.rgd->rd_reserved; 608 + __entry->dev = rs->rs_rgd->rd_sbd->sd_vfs->s_dev; 609 + __entry->rd_addr = rs->rs_rgd->rd_addr; 610 + __entry->rd_free_clone = rs->rs_rgd->rd_free_clone; 611 + __entry->rd_requested = rs->rs_rgd->rd_requested; 612 + __entry->rd_reserved = rs->rs_rgd->rd_reserved; 614 613 __entry->inum = container_of(rs, struct gfs2_inode, 615 614 i_res)->i_no_addr; 616 - __entry->start = gfs2_rbm_to_block(&rs->rs_rbm); 617 - __entry->free = rs->rs_free; 615 + __entry->start = rs->rs_start; 616 + __entry->requested = rs->rs_requested; 617 + __entry->reserved = rs->rs_reserved; 618 618 __entry->func = func; 619 619 ), 620 620 621 - TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s f:%lu", 621 + TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%u rq:%u rr:%u %s q:%u r:%u", 622 622 MAJOR(__entry->dev), MINOR(__entry->dev), 623 623 (unsigned long long)__entry->inum, 624 624 (unsigned long long)__entry->start, 625 625 (unsigned long long)__entry->rd_addr, 626 - (unsigned long)__entry->rd_free_clone, 627 - (unsigned long)__entry->rd_reserved, 628 - rs_func_name(__entry->func), (unsigned long)__entry->free) 626 + __entry->rd_free_clone, 627 + __entry->rd_requested, 628 + __entry->rd_reserved, 629 + rs_func_name(__entry->func), 630 + __entry->requested, 631 + __entry->reserved) 629 632 ); 630 633 631 634 #endif /* _TRACE_GFS2_H */
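
The tracepoint changes follow the accounting split: rd_requested (q:/rq:, blocks spoken for by multi-block reservations) is now reported separately from rd_reserved (r:/rr:, blocks firmly claimed by allocations in flight). The allocator combines them roughly as below, echoing the blocks_available computation in the gfs2_inplace_reserve hunk earlier in this diff (the numbers are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned int rd_free_clone = 5000;  /* free blocks (clone count) */
        unsigned int rd_reserved = 32;      /* firmly claimed, in flight */
        unsigned int target = 64;           /* blocks this allocation wants */
        unsigned int blocks_available = rd_free_clone - rd_reserved;

        printf("rf:%u rr:%u -> available=%u -> %s\n",
               rd_free_clone, rd_reserved, blocks_available,
               blocks_available >= target ? "allocate here" : "try next rgrp");
        return 0;
    }
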
+68 -34
fs/gfs2/trans.c
··· 31 31 fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n", 32 32 tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, 33 33 test_bit(TR_TOUCHED, &tr->tr_flags)); 34 - fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n", 34 + fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u\n", 35 35 tr->tr_num_buf_new, tr->tr_num_buf_rm, 36 36 tr->tr_num_databuf_new, tr->tr_num_databuf_rm, 37 - tr->tr_num_revoke, tr->tr_num_revoke_rm); 37 + tr->tr_num_revoke); 38 38 } 39 39 40 - int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, 41 - unsigned int revokes) 40 + int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp, 41 + unsigned int blocks, unsigned int revokes, 42 + unsigned long ip) 42 43 { 43 - struct gfs2_trans *tr; 44 - int error; 44 + unsigned int extra_revokes; 45 45 46 46 if (current->journal_info) { 47 47 gfs2_print_trans(sdp, current->journal_info); ··· 52 52 if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) 53 53 return -EROFS; 54 54 55 - tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS); 56 - if (!tr) 57 - return -ENOMEM; 58 - 59 - tr->tr_ip = _RET_IP_; 55 + tr->tr_ip = ip; 60 56 tr->tr_blocks = blocks; 61 57 tr->tr_revokes = revokes; 62 - tr->tr_reserved = 1; 63 - set_bit(TR_ALLOCED, &tr->tr_flags); 64 - if (blocks) 65 - tr->tr_reserved += 6 + blocks; 66 - if (revokes) 67 - tr->tr_reserved += gfs2_struct2blk(sdp, revokes); 58 + tr->tr_reserved = GFS2_LOG_FLUSH_MIN_BLOCKS; 59 + if (blocks) { 60 + /* 61 + * The reserved blocks are either used for data or metadata. 62 + * We can have mixed data and metadata, each with its own log 63 + * descriptor block; see calc_reserved(). 64 + */ 65 + tr->tr_reserved += blocks + 1 + DIV_ROUND_UP(blocks - 1, databuf_limit(sdp)); 66 + } 68 67 INIT_LIST_HEAD(&tr->tr_databuf); 69 68 INIT_LIST_HEAD(&tr->tr_buf); 70 69 INIT_LIST_HEAD(&tr->tr_list); 71 70 INIT_LIST_HEAD(&tr->tr_ail1_list); 72 71 INIT_LIST_HEAD(&tr->tr_ail2_list); 73 72 73 + if (gfs2_assert_warn(sdp, tr->tr_reserved <= sdp->sd_jdesc->jd_blocks)) 74 + return -EINVAL; 75 + 74 76 sb_start_intwrite(sdp->sd_vfs); 75 77 76 - error = gfs2_log_reserve(sdp, tr->tr_reserved); 77 - if (error) 78 - goto fail; 78 + /* 79 + * Try the reservations under sd_log_flush_lock to prevent log flushes 80 + * from creating inconsistencies between the number of allocated and 81 + * reserved revokes. If that fails, do a full-block allocation outside 82 + * of the lock to avoid stalling log flushes. Then, allot the 83 + * appropriate number of blocks to revokes, use as many revokes locally 84 + * as needed, and "release" the surplus into the revokes pool. 
85 + */ 86 + 87 + down_read(&sdp->sd_log_flush_lock); 88 + if (gfs2_log_try_reserve(sdp, tr, &extra_revokes)) 89 + goto reserved; 90 + up_read(&sdp->sd_log_flush_lock); 91 + gfs2_log_reserve(sdp, tr, &extra_revokes); 92 + down_read(&sdp->sd_log_flush_lock); 93 + 94 + reserved: 95 + gfs2_log_release_revokes(sdp, extra_revokes); 96 + if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) { 97 + gfs2_log_release_revokes(sdp, tr->tr_revokes); 98 + up_read(&sdp->sd_log_flush_lock); 99 + gfs2_log_release(sdp, tr->tr_reserved); 100 + sb_end_intwrite(sdp->sd_vfs); 101 + return -EROFS; 102 + } 79 103 80 104 current->journal_info = tr; 81 105 82 106 return 0; 107 + } 83 108 84 - fail: 85 - sb_end_intwrite(sdp->sd_vfs); 86 - kmem_cache_free(gfs2_trans_cachep, tr); 109 + int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, 110 + unsigned int revokes) 111 + { 112 + struct gfs2_trans *tr; 113 + int error; 87 114 115 + tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS); 116 + if (!tr) 117 + return -ENOMEM; 118 + error = __gfs2_trans_begin(tr, sdp, blocks, revokes, _RET_IP_); 119 + if (error) 120 + kmem_cache_free(gfs2_trans_cachep, tr); 88 121 return error; 89 122 } 90 123 ··· 125 92 { 126 93 struct gfs2_trans *tr = current->journal_info; 127 94 s64 nbuf; 128 - int alloced = test_bit(TR_ALLOCED, &tr->tr_flags); 129 95 130 96 current->journal_info = NULL; 131 97 132 98 if (!test_bit(TR_TOUCHED, &tr->tr_flags)) { 99 + gfs2_log_release_revokes(sdp, tr->tr_revokes); 100 + up_read(&sdp->sd_log_flush_lock); 133 101 gfs2_log_release(sdp, tr->tr_reserved); 134 - if (alloced) { 102 + if (!test_bit(TR_ONSTACK, &tr->tr_flags)) 135 103 gfs2_trans_free(sdp, tr); 136 - sb_end_intwrite(sdp->sd_vfs); 137 - } 104 + sb_end_intwrite(sdp->sd_vfs); 138 105 return; 139 106 } 107 + 108 + gfs2_log_release_revokes(sdp, tr->tr_revokes - tr->tr_num_revoke); 140 109 141 110 nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new; 142 111 nbuf -= tr->tr_num_buf_rm; 143 112 nbuf -= tr->tr_num_databuf_rm; 144 113 145 - if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) && 146 - (tr->tr_num_revoke <= tr->tr_revokes))) 114 + if (gfs2_assert_withdraw(sdp, nbuf <= tr->tr_blocks) || 115 + gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes)) 147 116 gfs2_print_trans(sdp, tr); 148 117 149 118 gfs2_log_commit(sdp, tr); 150 - if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags)) 119 + if (!test_bit(TR_ONSTACK, &tr->tr_flags) && 120 + !test_bit(TR_ATTACHED, &tr->tr_flags)) 151 121 gfs2_trans_free(sdp, tr); 152 122 up_read(&sdp->sd_log_flush_lock); 153 123 154 124 if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS) 155 125 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | 156 126 GFS2_LFC_TRANS_END); 157 - if (alloced) 158 - sb_end_intwrite(sdp->sd_vfs); 127 + sb_end_intwrite(sdp->sd_vfs); 159 128 } 160 129 161 130 static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl, ··· 297 262 void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) 298 263 { 299 264 struct gfs2_bufdata *bd, *tmp; 300 - struct gfs2_trans *tr = current->journal_info; 301 265 unsigned int n = len; 302 266 303 267 gfs2_log_lock(sdp); ··· 308 274 if (bd->bd_gl) 309 275 gfs2_glock_remove_revoke(bd->bd_gl); 310 276 kmem_cache_free(gfs2_bufdata_cachep, bd); 311 - tr->tr_num_revoke_rm++; 277 + gfs2_log_release_revokes(sdp, 1); 312 278 if (--n == 0) 313 279 break; 314 280 }
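
__gfs2_trans_begin() above sizes the log reservation as a fixed flush minimum plus the requested blocks, one descriptor block, and one further descriptor per databuf_limit() blocks. Evaluating that formula standalone; note that LOG_FLUSH_MIN_BLOCKS and DATABUF_LIMIT are made-up placeholders, since the real GFS2_LOG_FLUSH_MIN_BLOCKS and databuf_limit() values depend on the filesystem geometry:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
    #define LOG_FLUSH_MIN_BLOCKS    4       /* placeholder value */
    #define DATABUF_LIMIT           251     /* placeholder per-descriptor limit */

    static unsigned int tr_reserved(unsigned int blocks)
    {
        unsigned int reserved = LOG_FLUSH_MIN_BLOCKS;

        /* blocks, plus one descriptor, plus an extra descriptor per
         * DATABUF_LIMIT further blocks, as in __gfs2_trans_begin() */
        if (blocks)
            reserved += blocks + 1 + DIV_ROUND_UP(blocks - 1, DATABUF_LIMIT);
        return reserved;
    }

    int main(void)
    {
        unsigned int n[] = { 0, 1, 251, 252, 1000 };

        for (int i = 0; i < 5; i++)
            printf("blocks=%4u -> reserved=%u\n", n[i], tr_reserved(n[i]));
        return 0;
    }
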
+4 -1
fs/gfs2/trans.h
··· 27 27 * block, or all of the blocks in the rg, whichever is smaller */ 28 28 static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned requested) 29 29 { 30 - struct gfs2_rgrpd *rgd = ip->i_res.rs_rbm.rgd; 30 + struct gfs2_rgrpd *rgd = ip->i_res.rs_rgd; 31 31 32 32 if (requested < rgd->rd_length) 33 33 return requested + 1; 34 34 return rgd->rd_length; 35 35 } 36 36 37 + extern int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp, 38 + unsigned int blocks, unsigned int revokes, 39 + unsigned long ip); 37 40 extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, 38 41 unsigned int revokes); 39 42
+50 -9
fs/gfs2/util.c
··· 91 91 return error; 92 92 } 93 93 94 + /** 95 + * gfs2_freeze_lock - hold the freeze glock 96 + * @sdp: the superblock 97 + * @freeze_gh: pointer to the requested holder 98 + * @caller_flags: any additional flags needed by the caller 99 + */ 100 + int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh, 101 + int caller_flags) 102 + { 103 + int flags = LM_FLAG_NOEXP | GL_EXACT | caller_flags; 104 + int error; 105 + 106 + error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags, 107 + freeze_gh); 108 + if (error && error != GLR_TRYFAILED) 109 + fs_err(sdp, "can't lock the freeze lock: %d\n", error); 110 + return error; 111 + } 112 + 113 + void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh) 114 + { 115 + if (gfs2_holder_initialized(freeze_gh)) 116 + gfs2_glock_dq_uninit(freeze_gh); 117 + } 118 + 94 119 static void signal_our_withdraw(struct gfs2_sbd *sdp) 95 120 { 96 - struct gfs2_glock *gl = sdp->sd_live_gh.gh_gl; 121 + struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl; 97 122 struct inode *inode = sdp->sd_jdesc->jd_inode; 98 123 struct gfs2_inode *ip = GFS2_I(inode); 124 + struct gfs2_glock *i_gl = ip->i_gl; 99 125 u64 no_formal_ino = ip->i_no_formal_ino; 126 + int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); 100 127 int ret = 0; 101 128 int tries; 102 129 ··· 144 117 * therefore we need to clear SDF_JOURNAL_LIVE manually. 145 118 */ 146 119 clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); 147 - if (!sb_rdonly(sdp->sd_vfs)) 148 - ret = gfs2_make_fs_ro(sdp); 120 + if (!sb_rdonly(sdp->sd_vfs)) { 121 + struct gfs2_holder freeze_gh; 122 + 123 + gfs2_holder_mark_uninitialized(&freeze_gh); 124 + if (sdp->sd_freeze_gl && 125 + !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) { 126 + ret = gfs2_freeze_lock(sdp, &freeze_gh, 127 + log_write_allowed ? 0 : LM_FLAG_TRY); 128 + if (ret == GLR_TRYFAILED) 129 + ret = 0; 130 + } 131 + if (!ret) 132 + ret = gfs2_make_fs_ro(sdp); 133 + gfs2_freeze_unlock(&freeze_gh); 134 + } 149 135 150 136 if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */ 151 137 if (!ret) ··· 181 141 atomic_set(&sdp->sd_freeze_state, SFS_FROZEN); 182 142 thaw_super(sdp->sd_vfs); 183 143 } else { 184 - wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); 144 + wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE, 145 + TASK_UNINTERRUPTIBLE); 185 146 } 186 147 187 148 /* ··· 202 161 * on other nodes to be successful, otherwise we remain the owner of 203 162 * the glock as far as dlm is concerned. 204 163 */ 205 - if (gl->gl_ops->go_free) { 206 - set_bit(GLF_FREEING, &gl->gl_flags); 207 - wait_on_bit(&gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE); 164 + if (i_gl->gl_ops->go_free) { 165 + set_bit(GLF_FREEING, &i_gl->gl_flags); 166 + wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE); 208 167 } 209 168 210 169 /* 211 170 * Dequeue the "live" glock, but keep a reference so it's never freed. 212 171 */ 213 - gfs2_glock_hold(gl); 172 + gfs2_glock_hold(live_gl); 214 173 gfs2_glock_dq_wait(&sdp->sd_live_gh); 215 174 /* 216 175 * We enqueue the "live" glock in EX so that all other nodes ··· 249 208 gfs2_glock_nq(&sdp->sd_live_gh); 250 209 } 251 210 252 - gfs2_glock_queue_put(gl); /* drop the extra reference we acquired */ 211 + gfs2_glock_queue_put(live_gl); /* drop extra reference we acquired */ 253 212 clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags); 254 213 255 214 /*
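
gfs2_freeze_lock() folds the common LM_FLAG_NOEXP | GL_EXACT flags into one place and deliberately stays quiet about GLR_TRYFAILED, leaving TRY-failure policy to callers such as signal_our_withdraw() above. A userspace sketch of that wrapper pattern; the flag and error values are stand-ins, not the kernel's:

    #include <stdio.h>

    #define LM_FLAG_TRY     0x1
    #define GLR_TRYFAILED   1       /* stand-in "didn't get the lock" code */

    static int glock_nq(int flags)
    {
        /* pretend the lock is contended, so a TRY request fails softly */
        return (flags & LM_FLAG_TRY) ? GLR_TRYFAILED : 0;
    }

    static int freeze_lock(int caller_flags)
    {
        int error = glock_nq(caller_flags);

        if (error && error != GLR_TRYFAILED)
            fprintf(stderr, "can't lock the freeze lock: %d\n", error);
        return error;
    }

    int main(void)
    {
        int ret = freeze_lock(LM_FLAG_TRY);

        if (ret == GLR_TRYFAILED)   /* caller treats try-failure as benign */
            ret = 0;
        return ret;
    }
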
+3
fs/gfs2/util.h
··· 149 149 150 150 extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, 151 151 bool verbose); 152 + extern int gfs2_freeze_lock(struct gfs2_sbd *sdp, 153 + struct gfs2_holder *freeze_gh, int caller_flags); 154 + extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh); 152 155 153 156 #define gfs2_io_error(sdp) \ 154 157 gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
+48 -6
fs/gfs2/xattr.c
··· 70 70 return 0; 71 71 } 72 72 73 + static bool gfs2_eatype_valid(struct gfs2_sbd *sdp, u8 type) 74 + { 75 + switch(sdp->sd_sb.sb_fs_format) { 76 + case GFS2_FS_FORMAT_MAX: 77 + return true; 78 + 79 + case GFS2_FS_FORMAT_MIN: 80 + return type <= GFS2_EATYPE_SECURITY; 81 + 82 + default: 83 + return false; 84 + } 85 + } 86 + 73 87 typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh, 74 88 struct gfs2_ea_header *ea, 75 89 struct gfs2_ea_header *prev, void *private); ··· 91 77 static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh, 92 78 ea_call_t ea_call, void *data) 93 79 { 80 + struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 94 81 struct gfs2_ea_header *ea, *prev = NULL; 95 82 int error = 0; 96 83 ··· 104 89 if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <= 105 90 bh->b_data + bh->b_size)) 106 91 goto fail; 107 - if (!GFS2_EATYPE_VALID(ea->ea_type)) 92 + if (!gfs2_eatype_valid(sdp, ea->ea_type)) 108 93 goto fail; 109 - 110 94 error = ea_call(ip, bh, ea, prev, data); 111 95 if (error) 112 96 return error; ··· 273 259 return -EIO; 274 260 } 275 261 276 - error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh); 262 + error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 263 + LM_FLAG_NODE_SCOPE, &rg_gh); 277 264 if (error) 278 265 return error; 279 266 ··· 359 344 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev, 360 345 void *private) 361 346 { 347 + struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 362 348 struct ea_list *ei = private; 363 349 struct gfs2_ea_request *er = ei->ei_er; 364 350 unsigned int ea_size; ··· 369 353 if (ea->ea_type == GFS2_EATYPE_UNUSED) 370 354 return 0; 371 355 356 + BUG_ON(ea->ea_type > GFS2_EATYPE_SECURITY && 357 + sdp->sd_sb.sb_fs_format == GFS2_FS_FORMAT_MIN); 372 358 switch (ea->ea_type) { 373 359 case GFS2_EATYPE_USR: 374 360 prefix = "user."; ··· 384 366 prefix = "security."; 385 367 l = 9; 386 368 break; 369 + case GFS2_EATYPE_TRUSTED: 370 + prefix = "trusted."; 371 + l = 8; 372 + break; 387 373 default: 388 - BUG(); 374 + return 0; 389 375 } 390 376 391 377 ea_size = l + ea->ea_name_len + 1; ··· 1408 1386 return -EIO; 1409 1387 } 1410 1388 1411 - error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh); 1389 + error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 1390 + LM_FLAG_NODE_SCOPE, &gh); 1412 1391 if (error) 1413 1392 return error; 1414 1393 ··· 1487 1464 .set = gfs2_xattr_set, 1488 1465 }; 1489 1466 1490 - const struct xattr_handler *gfs2_xattr_handlers[] = { 1467 + static bool 1468 + gfs2_xattr_trusted_list(struct dentry *dentry) 1469 + { 1470 + return capable(CAP_SYS_ADMIN); 1471 + } 1472 + 1473 + static const struct xattr_handler gfs2_xattr_trusted_handler = { 1474 + .prefix = XATTR_TRUSTED_PREFIX, 1475 + .flags = GFS2_EATYPE_TRUSTED, 1476 + .list = gfs2_xattr_trusted_list, 1477 + .get = gfs2_xattr_get, 1478 + .set = gfs2_xattr_set, 1479 + }; 1480 + 1481 + const struct xattr_handler *gfs2_xattr_handlers_max[] = { 1482 + /* GFS2_FS_FORMAT_MAX */ 1483 + &gfs2_xattr_trusted_handler, 1484 + 1485 + /* GFS2_FS_FORMAT_MIN */ 1491 1486 &gfs2_xattr_user_handler, 1492 1487 &gfs2_xattr_security_handler, 1493 1488 &posix_acl_access_xattr_handler, ··· 1513 1472 NULL, 1514 1473 }; 1515 1474 1475 + const struct xattr_handler **gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;
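
The closing gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1 assignment exposes two views of one NULL-terminated table: the full table for format-1802 filesystems and, by skipping the leading trusted handler, the reduced table for format-1801 ones. A minimal model of that pointer trick (handler names stand in for the real xattr_handler structs):

    #include <stdio.h>

    static const char *handlers_max[] = {
        "trusted",                      /* first entry: format 1802 only */
        "user", "security", "system",   /* remaining entries: both formats */
        NULL,
    };
    static const char **handlers_min = handlers_max + 1;

    static void list_handlers(const char **h)
    {
        for (; *h; h++)
            printf("  %s\n", *h);
    }

    int main(void)
    {
        printf("format 1802 sees:\n");
        list_handlers(handlers_max);
        printf("format 1801 sees:\n");
        list_handlers(handlers_min);
        return 0;
    }
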
+3 -2
include/uapi/linux/gfs2_ondisk.h
··· 47 47 #define GFS2_FORMAT_DE 1200 48 48 #define GFS2_FORMAT_QU 1500 49 49 /* These are part of the superblock */ 50 - #define GFS2_FORMAT_FS 1801 50 + #define GFS2_FORMAT_FS 1802 51 51 #define GFS2_FORMAT_MULTI 1900 52 52 53 53 /* ··· 389 389 #define GFS2_EATYPE_USR 1 390 390 #define GFS2_EATYPE_SYS 2 391 391 #define GFS2_EATYPE_SECURITY 3 392 + #define GFS2_EATYPE_TRUSTED 4 392 393 393 - #define GFS2_EATYPE_LAST 3 394 + #define GFS2_EATYPE_LAST 4 394 395 #define GFS2_EATYPE_VALID(x) ((x) <= GFS2_EATYPE_LAST) 395 396 396 397 #define GFS2_EAFLAG_LAST 0x01 /* last ea in block */
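
Tying the new on-disk constants back to the gfs2_eatype_valid() hunk in xattr.c above: type 4 (trusted) is only accepted on format-1802 filesystems, while format 1801 still stops at security. A standalone rendering of that check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GFS2_EATYPE_SECURITY    3
    #define GFS2_EATYPE_TRUSTED     4
    #define GFS2_FS_FORMAT_MIN      1801
    #define GFS2_FS_FORMAT_MAX      1802

    static bool eatype_valid(unsigned int sb_fs_format, uint8_t type)
    {
        switch (sb_fs_format) {
        case GFS2_FS_FORMAT_MAX:
            return true;
        case GFS2_FS_FORMAT_MIN:
            return type <= GFS2_EATYPE_SECURITY;
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("1801 + trusted: %svalid\n",
               eatype_valid(GFS2_FS_FORMAT_MIN, GFS2_EATYPE_TRUSTED) ? "" : "in");
        printf("1802 + trusted: %svalid\n",
               eatype_valid(GFS2_FS_FORMAT_MAX, GFS2_EATYPE_TRUSTED) ? "" : "in");
        return 0;
    }
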