Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'gfs2-v6.5-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

- Fix a glock state (non-)transition bug when a dlm request times out
and is canceled, and we have locking requests that can now be granted
immediately

- Various fixes and cleanups in how the logd and quotad daemons are
woken up and terminated

- Fix several bugs in the quota data reference counting and shrinking.
Free quota data objects synchronously in put_super() instead of
letting call_rcu() run wild

- Make sure not to deallocate quota data during a withdraw; rather,
defer quota data deallocation to put_super(). Withdraws can happen in
contexts in which callers on the stack are holding quota data
references

- Many minor quota fixes and cleanups by Bob

- Update the mailing list address for gfs2 and dlm. (It's the same
list for both and we are moving it to gfs2@lists.linux.dev)

- Various other minor cleanups

* tag 'gfs2-v6.5-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (51 commits)
MAINTAINERS: Update dlm mailing list
MAINTAINERS: Update gfs2 mailing list
gfs2: change qd_slot_count to qd_slot_ref
gfs2: check for no eligible quota changes
gfs2: Remove useless assignment
gfs2: simplify slot_get
gfs2: Simplify qd2offset
gfs2: introduce qd_bh_get_or_undo
gfs2: Remove quota allocation info from quota file
gfs2: use constant for array size
gfs2: Set qd_sync_gen in do_sync
gfs2: Remove useless err set
gfs2: Small gfs2_quota_lock cleanup
gfs2: move qdsb_put and reduce redundancy
gfs2: improvements to sysfs status
gfs2: Don't try to sync non-changes
gfs2: Simplify function need_sync
gfs2: remove unneeded pg_oflow variable
gfs2: remove unneeded variable done
gfs2: pass sdp to gfs2_write_buf_to_page
...

+348 -331
+1 -2
Documentation/filesystems/gfs2-glocks.rst
··· 20 20 just the holders) associated with the glock. If there are any 21 21 held locks, then they will be contiguous entries at the head 22 22 of the list. Locks are granted in strictly the order that they 23 - are queued, except for those marked LM_FLAG_PRIORITY which are 24 - used only during recovery, and even then only for journal locks. 23 + are queued. 25 24 26 25 There are three lock states that users of the glock layer can request, 27 26 namely shared (SH), deferred (DF) and exclusive (EX). Those translate
+2 -2
MAINTAINERS
··· 6118 6118 DISTRIBUTED LOCK MANAGER (DLM) 6119 6119 M: Christine Caulfield <ccaulfie@redhat.com> 6120 6120 M: David Teigland <teigland@redhat.com> 6121 - L: cluster-devel@redhat.com 6121 + L: gfs2@lists.linux.dev 6122 6122 S: Supported 6123 6123 W: http://sources.redhat.com/cluster/ 6124 6124 T: git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm.git ··· 8774 8774 GFS2 FILE SYSTEM 8775 8775 M: Bob Peterson <rpeterso@redhat.com> 8776 8776 M: Andreas Gruenbacher <agruenba@redhat.com> 8777 - L: cluster-devel@redhat.com 8777 + L: gfs2@lists.linux.dev 8778 8778 S: Supported 8779 8779 B: https://bugzilla.kernel.org/enter_bug.cgi?product=File%20System&component=gfs2 8780 8780 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git
+3 -4
fs/gfs2/aops.c
··· 183 183 int ret; 184 184 185 185 /* 186 - * Even if we didn't write any pages here, we might still be holding 186 + * Even if we didn't write enough pages here, we might still be holding 187 187 * dirty pages in the ail. We forcibly flush the ail because we don't 188 188 * want balance_dirty_pages() to loop indefinitely trying to write out 189 189 * pages held in the ail that it can't find. 190 190 */ 191 191 ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops); 192 - if (ret == 0) 192 + if (ret == 0 && wbc->nr_to_write > 0) 193 193 set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags); 194 194 return ret; 195 195 } ··· 272 272 * not be suitable for data integrity 273 273 * writeout). 274 274 */ 275 - *done_index = folio->index + 276 - folio_nr_pages(folio); 275 + *done_index = folio_next_index(folio); 277 276 ret = 1; 278 277 break; 279 278 }
+1 -1
fs/gfs2/bmap.c
··· 161 161 int error; 162 162 163 163 down_write(&ip->i_rw_mutex); 164 - page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); 164 + page = grab_cache_page(inode->i_mapping, 0); 165 165 error = -ENOMEM; 166 166 if (!page) 167 167 goto out;
+19 -28
fs/gfs2/glock.c
··· 176 176 wake_up_glock(gl); 177 177 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); 178 178 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 179 - wake_up(&sdp->sd_glock_wait); 179 + wake_up(&sdp->sd_kill_wait); 180 180 } 181 181 182 182 /** ··· 468 468 * do_promote - promote as many requests as possible on the current queue 469 469 * @gl: The glock 470 470 * 471 - * Returns: 1 if there is a blocked holder at the head of the list 471 + * Returns true on success (i.e., progress was made or there are no waiters). 472 472 */ 473 473 474 - static int do_promote(struct gfs2_glock *gl) 474 + static bool do_promote(struct gfs2_glock *gl) 475 475 { 476 476 struct gfs2_holder *gh, *current_gh; 477 477 ··· 484 484 * If we get here, it means we may not grant this 485 485 * holder for some reason. If this holder is at the 486 486 * head of the list, it means we have a blocked holder 487 - * at the head, so return 1. 487 + * at the head, so return false. 488 488 */ 489 489 if (list_is_first(&gh->gh_list, &gl->gl_holders)) 490 - return 1; 490 + return false; 491 491 do_error(gl, 0); 492 492 break; 493 493 } ··· 497 497 if (!current_gh) 498 498 current_gh = gh; 499 499 } 500 - return 0; 500 + return true; 501 501 } 502 502 503 503 /** ··· 591 591 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { 592 592 /* move to back of queue and try next entry */ 593 593 if (ret & LM_OUT_CANCELED) { 594 - if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) 595 - list_move_tail(&gh->gh_list, &gl->gl_holders); 594 + list_move_tail(&gh->gh_list, &gl->gl_holders); 596 595 gh = find_first_waiter(gl); 597 596 gl->gl_target = gh->gh_state; 597 + if (do_promote(gl)) 598 + goto out; 598 599 goto retry; 599 600 } 600 601 /* Some error or failed "try lock" - report it */ ··· 680 679 gh && !(gh->gh_flags & LM_FLAG_NOEXP)) 681 680 goto skip_inval; 682 681 683 - lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | 684 - LM_FLAG_PRIORITY); 682 + lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | 
LM_FLAG_NOEXP); 685 683 GLOCK_BUG_ON(gl, gl->gl_state == target); 686 684 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); 687 685 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) && ··· 834 834 } else { 835 835 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) 836 836 gfs2_demote_wake(gl); 837 - if (do_promote(gl) == 0) 837 + if (do_promote(gl)) 838 838 goto out_unlock; 839 839 gh = find_first_waiter(gl); 840 840 gl->gl_target = gh->gh_state; ··· 1022 1022 * step entirely. 1023 1023 */ 1024 1024 if (gfs2_try_evict(gl)) { 1025 - if (test_bit(SDF_DEACTIVATING, &sdp->sd_flags)) 1025 + if (test_bit(SDF_KILL, &sdp->sd_flags)) 1026 1026 goto out; 1027 1027 if (gfs2_queue_verify_evict(gl)) 1028 1028 return; ··· 1035 1035 GFS2_BLKST_UNLINKED); 1036 1036 if (IS_ERR(inode)) { 1037 1037 if (PTR_ERR(inode) == -EAGAIN && 1038 - !test_bit(SDF_DEACTIVATING, &sdp->sd_flags) && 1038 + !test_bit(SDF_KILL, &sdp->sd_flags) && 1039 1039 gfs2_queue_verify_evict(gl)) 1040 1040 return; 1041 1041 } else { ··· 1231 1231 out_free: 1232 1232 gfs2_glock_dealloc(&gl->gl_rcu); 1233 1233 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 1234 - wake_up(&sdp->sd_glock_wait); 1234 + wake_up(&sdp->sd_kill_wait); 1235 1235 1236 1236 out: 1237 1237 return ret; ··· 1515 1515 } 1516 1516 if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) 1517 1517 continue; 1518 - if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) 1519 - insert_pt = &gh2->gh_list; 1520 1518 } 1521 1519 trace_gfs2_glock_queue(gh, 1); 1522 1520 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); 1523 1521 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); 1524 1522 if (likely(insert_pt == NULL)) { 1525 1523 list_add_tail(&gh->gh_list, &gl->gl_holders); 1526 - if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) 1527 - goto do_cancel; 1528 1524 return; 1529 1525 } 1530 1526 list_add_tail(&gh->gh_list, insert_pt); 1531 - do_cancel: 1532 1527 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 1533 - if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { 1534 - 
spin_unlock(&gl->gl_lockref.lock); 1535 - if (sdp->sd_lockstruct.ls_ops->lm_cancel) 1536 - sdp->sd_lockstruct.ls_ops->lm_cancel(gl); 1537 - spin_lock(&gl->gl_lockref.lock); 1538 - } 1528 + spin_unlock(&gl->gl_lockref.lock); 1529 + if (sdp->sd_lockstruct.ls_ops->lm_cancel) 1530 + sdp->sd_lockstruct.ls_ops->lm_cancel(gl); 1531 + spin_lock(&gl->gl_lockref.lock); 1539 1532 return; 1540 1533 1541 1534 trap_recursive: ··· 2188 2195 flush_workqueue(glock_workqueue); 2189 2196 glock_hash_walk(clear_glock, sdp); 2190 2197 flush_workqueue(glock_workqueue); 2191 - wait_event_timeout(sdp->sd_glock_wait, 2198 + wait_event_timeout(sdp->sd_kill_wait, 2192 2199 atomic_read(&sdp->sd_glock_disposal) == 0, 2193 2200 HZ * 600); 2194 2201 glock_hash_walk(dump_glock_func, sdp); ··· 2220 2227 *p++ = 'e'; 2221 2228 if (flags & LM_FLAG_ANY) 2222 2229 *p++ = 'A'; 2223 - if (flags & LM_FLAG_PRIORITY) 2224 - *p++ = 'p'; 2225 2230 if (flags & LM_FLAG_NODE_SCOPE) 2226 2231 *p++ = 'n'; 2227 2232 if (flags & GL_ASYNC)
-9
fs/gfs2/glock.h
··· 68 68 * also be granted in SHARED. The preferred state is whichever is compatible 69 69 * with other granted locks, or the specified state if no other locks exist. 70 70 * 71 - * LM_FLAG_PRIORITY 72 - * Override fairness considerations. Suppose a lock is held in a shared state 73 - * and there is a pending request for the deferred state. A shared lock 74 - * request with the priority flag would be allowed to bypass the deferred 75 - * request and directly join the other shared lock. A shared lock request 76 - * without the priority flag might be forced to wait until the deferred 77 - * requested had acquired and released the lock. 78 - * 79 71 * LM_FLAG_NODE_SCOPE 80 72 * This holder agrees to share the lock within this node. In other words, 81 73 * the glock is held in EX mode according to DLM, but local holders on the ··· 78 86 #define LM_FLAG_TRY_1CB 0x0002 79 87 #define LM_FLAG_NOEXP 0x0004 80 88 #define LM_FLAG_ANY 0x0008 81 - #define LM_FLAG_PRIORITY 0x0010 82 89 #define LM_FLAG_NODE_SCOPE 0x0020 83 90 #define GL_ASYNC 0x0040 84 91 #define GL_EXACT 0x0080
+1 -1
fs/gfs2/glops.c
··· 637 637 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 638 638 639 639 if (!remote || sb_rdonly(sdp->sd_vfs) || 640 - test_bit(SDF_DEACTIVATING, &sdp->sd_flags)) 640 + test_bit(SDF_KILL, &sdp->sd_flags)) 641 641 return; 642 642 643 643 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
+4 -3
fs/gfs2/incore.h
··· 452 452 s64 qd_change_sync; 453 453 454 454 unsigned int qd_slot; 455 - unsigned int qd_slot_count; 455 + unsigned int qd_slot_ref; 456 456 457 457 struct buffer_head *qd_bh; 458 458 struct gfs2_quota_change *qd_bh_qc; ··· 537 537 #define GFS2_QUOTA_OFF 0 538 538 #define GFS2_QUOTA_ACCOUNT 1 539 539 #define GFS2_QUOTA_ON 2 540 + #define GFS2_QUOTA_QUIET 3 /* on but not complaining */ 540 541 541 542 #define GFS2_DATA_DEFAULT GFS2_DATA_ORDERED 542 543 #define GFS2_DATA_WRITEBACK 1 ··· 607 606 SDF_REMOTE_WITHDRAW = 13, /* Performing remote recovery */ 608 607 SDF_WITHDRAW_RECOVERY = 14, /* Wait for journal recovery when we are 609 608 withdrawing */ 610 - SDF_DEACTIVATING = 15, 609 + SDF_KILL = 15, 611 610 SDF_EVICTING = 16, 612 611 SDF_FROZEN = 17, 613 612 }; ··· 717 716 struct gfs2_glock *sd_rename_gl; 718 717 struct gfs2_glock *sd_freeze_gl; 719 718 struct work_struct sd_freeze_work; 720 - wait_queue_head_t sd_glock_wait; 719 + wait_queue_head_t sd_kill_wait; 721 720 wait_queue_head_t sd_async_glock_wait; 722 721 atomic_t sd_glock_disposal; 723 722 struct completion sd_locking_init;
+10 -4
fs/gfs2/inode.c
··· 276 276 * gfs2_lookup_simple callers expect ENOENT 277 277 * and do not check for NULL. 278 278 */ 279 - if (inode == NULL) 280 - return ERR_PTR(-ENOENT); 281 - else 282 - return inode; 279 + if (IS_ERR_OR_NULL(inode)) 280 + return inode ? inode : ERR_PTR(-ENOENT); 281 + 282 + /* 283 + * Must not call back into the filesystem when allocating 284 + * pages in the metadata inode's address space. 285 + */ 286 + mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); 287 + 288 + return inode; 283 289 } 284 290 285 291
-5
fs/gfs2/lock_dlm.c
··· 222 222 lkf |= DLM_LKF_NOQUEUEBAST; 223 223 } 224 224 225 - if (gfs_flags & LM_FLAG_PRIORITY) { 226 - lkf |= DLM_LKF_NOORDER; 227 - lkf |= DLM_LKF_HEADQUE; 228 - } 229 - 230 225 if (gfs_flags & LM_FLAG_ANY) { 231 226 if (req == DLM_LOCK_PR) 232 227 lkf |= DLM_LKF_ALTCW;
+30 -39
fs/gfs2/log.c
··· 1227 1227 gfs2_log_unlock(sdp); 1228 1228 } 1229 1229 1230 + static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) 1231 + { 1232 + return atomic_read(&sdp->sd_log_pinned) + 1233 + atomic_read(&sdp->sd_log_blks_needed) >= 1234 + atomic_read(&sdp->sd_log_thresh1); 1235 + } 1236 + 1237 + static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp) 1238 + { 1239 + return sdp->sd_jdesc->jd_blocks - 1240 + atomic_read(&sdp->sd_log_blks_free) + 1241 + atomic_read(&sdp->sd_log_blks_needed) >= 1242 + atomic_read(&sdp->sd_log_thresh2); 1243 + } 1244 + 1230 1245 /** 1231 1246 * gfs2_log_commit - Commit a transaction to the log 1232 1247 * @sdp: the filesystem ··· 1261 1246 { 1262 1247 log_refund(sdp, tr); 1263 1248 1264 - if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) || 1265 - ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) > 1266 - atomic_read(&sdp->sd_log_thresh2))) 1249 + if (gfs2_ail_flush_reqd(sdp) || gfs2_jrnl_flush_reqd(sdp)) 1267 1250 wake_up(&sdp->sd_logd_waitq); 1268 1251 } 1269 1252 ··· 1284 1271 gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list)); 1285 1272 } 1286 1273 1287 - static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) 1288 - { 1289 - return (atomic_read(&sdp->sd_log_pinned) + 1290 - atomic_read(&sdp->sd_log_blks_needed) >= 1291 - atomic_read(&sdp->sd_log_thresh1)); 1292 - } 1293 - 1294 - static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp) 1295 - { 1296 - unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free); 1297 - 1298 - if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags)) 1299 - return 1; 1300 - 1301 - return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >= 1302 - atomic_read(&sdp->sd_log_thresh2); 1303 - } 1304 - 1305 1274 /** 1306 1275 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks 1307 1276 * @data: Pointer to GFS2 superblock ··· 1296 1301 { 1297 1302 struct gfs2_sbd *sdp = data; 1298 1303 unsigned 
long t = 1; 1299 - DEFINE_WAIT(wait); 1300 1304 1301 1305 while (!kthread_should_stop()) { 1306 + if (gfs2_withdrawn(sdp)) 1307 + break; 1302 1308 1303 - if (gfs2_withdrawn(sdp)) { 1304 - msleep_interruptible(HZ); 1305 - continue; 1306 - } 1307 1309 /* Check for errors writing to the journal */ 1308 1310 if (sdp->sd_log_error) { 1309 1311 gfs2_lm(sdp, ··· 1309 1317 "prevent further damage.\n", 1310 1318 sdp->sd_fsname, sdp->sd_log_error); 1311 1319 gfs2_withdraw(sdp); 1312 - continue; 1320 + break; 1313 1321 } 1314 1322 1315 1323 if (gfs2_jrnl_flush_reqd(sdp) || t == 0) { ··· 1318 1326 GFS2_LFC_LOGD_JFLUSH_REQD); 1319 1327 } 1320 1328 1321 - if (gfs2_ail_flush_reqd(sdp)) { 1329 + if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) || 1330 + gfs2_ail_flush_reqd(sdp)) { 1331 + clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags); 1322 1332 gfs2_ail1_start(sdp); 1323 1333 gfs2_ail1_wait(sdp); 1324 1334 gfs2_ail1_empty(sdp, 0); ··· 1332 1338 1333 1339 try_to_freeze(); 1334 1340 1335 - do { 1336 - prepare_to_wait(&sdp->sd_logd_waitq, &wait, 1337 - TASK_INTERRUPTIBLE); 1338 - if (!gfs2_ail_flush_reqd(sdp) && 1339 - !gfs2_jrnl_flush_reqd(sdp) && 1340 - !kthread_should_stop()) 1341 - t = schedule_timeout(t); 1342 - } while(t && !gfs2_ail_flush_reqd(sdp) && 1343 - !gfs2_jrnl_flush_reqd(sdp) && 1344 - !kthread_should_stop()); 1345 - finish_wait(&sdp->sd_logd_waitq, &wait); 1341 + t = wait_event_interruptible_timeout(sdp->sd_logd_waitq, 1342 + test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) || 1343 + gfs2_ail_flush_reqd(sdp) || 1344 + gfs2_jrnl_flush_reqd(sdp) || 1345 + sdp->sd_log_error || 1346 + gfs2_withdrawn(sdp) || 1347 + kthread_should_stop(), 1348 + t); 1346 1349 } 1347 1350 1348 1351 return 0;
+3 -4
fs/gfs2/lops.c
··· 456 456 * Find the folio with 'index' in the journal's mapping. Search the folio for 457 457 * the journal head if requested (cleanup == false). Release refs on the 458 458 * folio so the page cache can reclaim it. We grabbed a 459 - * reference on this folio twice, first when we did a find_or_create_page() 459 + * reference on this folio twice, first when we did a grab_cache_page() 460 460 * to obtain the folio to add it to the bio and second when we do a 461 461 * filemap_get_folio() here to get the folio to wait on while I/O on it is being 462 462 * completed. ··· 481 481 if (!*done) 482 482 *done = gfs2_jhead_pg_srch(jd, head, &folio->page); 483 483 484 - /* filemap_get_folio() and the earlier find_or_create_page() */ 484 + /* filemap_get_folio() and the earlier grab_cache_page() */ 485 485 folio_put_refs(folio, 2); 486 486 } 487 487 ··· 535 535 536 536 for (; block < je->lblock + je->blocks; block++, dblock++) { 537 537 if (!page) { 538 - page = find_or_create_page(mapping, 539 - block >> shift, GFP_NOFS); 538 + page = grab_cache_page(mapping, block >> shift); 540 539 if (!page) { 541 540 ret = -ENOMEM; 542 541 done = true;
+5 -5
fs/gfs2/main.c
··· 152 152 goto fail_shrinker; 153 153 154 154 error = -ENOMEM; 155 - gfs_recovery_wq = alloc_workqueue("gfs_recovery", 155 + gfs2_recovery_wq = alloc_workqueue("gfs2_recovery", 156 156 WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); 157 - if (!gfs_recovery_wq) 157 + if (!gfs2_recovery_wq) 158 158 goto fail_wq1; 159 159 160 160 gfs2_control_wq = alloc_workqueue("gfs2_control", ··· 162 162 if (!gfs2_control_wq) 163 163 goto fail_wq2; 164 164 165 - gfs2_freeze_wq = alloc_workqueue("freeze_workqueue", 0, 0); 165 + gfs2_freeze_wq = alloc_workqueue("gfs2_freeze", 0, 0); 166 166 167 167 if (!gfs2_freeze_wq) 168 168 goto fail_wq3; ··· 194 194 fail_wq3: 195 195 destroy_workqueue(gfs2_control_wq); 196 196 fail_wq2: 197 - destroy_workqueue(gfs_recovery_wq); 197 + destroy_workqueue(gfs2_recovery_wq); 198 198 fail_wq1: 199 199 unregister_shrinker(&gfs2_qd_shrinker); 200 200 fail_shrinker: ··· 234 234 gfs2_unregister_debugfs(); 235 235 unregister_filesystem(&gfs2_fs_type); 236 236 unregister_filesystem(&gfs2meta_fs_type); 237 - destroy_workqueue(gfs_recovery_wq); 237 + destroy_workqueue(gfs2_recovery_wq); 238 238 destroy_workqueue(gfs2_control_wq); 239 239 destroy_workqueue(gfs2_freeze_wq); 240 240 list_lru_destroy(&gfs2_qd_lru);
+29 -13
fs/gfs2/ops_fstype.c
··· 87 87 set_bit(SDF_NOJOURNALID, &sdp->sd_flags); 88 88 gfs2_tune_init(&sdp->sd_tune); 89 89 90 - init_waitqueue_head(&sdp->sd_glock_wait); 90 + init_waitqueue_head(&sdp->sd_kill_wait); 91 91 init_waitqueue_head(&sdp->sd_async_glock_wait); 92 92 atomic_set(&sdp->sd_glock_disposal, 0); 93 93 init_completion(&sdp->sd_locking_init); ··· 1103 1103 struct task_struct *p; 1104 1104 int error = 0; 1105 1105 1106 - p = kthread_run(gfs2_logd, sdp, "gfs2_logd"); 1106 + p = kthread_create(gfs2_logd, sdp, "gfs2_logd/%s", sdp->sd_fsname); 1107 1107 if (IS_ERR(p)) { 1108 1108 error = PTR_ERR(p); 1109 - fs_err(sdp, "can't start logd thread: %d\n", error); 1109 + fs_err(sdp, "can't create logd thread: %d\n", error); 1110 1110 return error; 1111 1111 } 1112 + get_task_struct(p); 1112 1113 sdp->sd_logd_process = p; 1113 1114 1114 - p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad"); 1115 + p = kthread_create(gfs2_quotad, sdp, "gfs2_quotad/%s", sdp->sd_fsname); 1115 1116 if (IS_ERR(p)) { 1116 1117 error = PTR_ERR(p); 1117 - fs_err(sdp, "can't start quotad thread: %d\n", error); 1118 + fs_err(sdp, "can't create quotad thread: %d\n", error); 1118 1119 goto fail; 1119 1120 } 1121 + get_task_struct(p); 1120 1122 sdp->sd_quotad_process = p; 1123 + 1124 + wake_up_process(sdp->sd_logd_process); 1125 + wake_up_process(sdp->sd_quotad_process); 1121 1126 return 0; 1122 1127 1123 1128 fail: 1124 1129 kthread_stop(sdp->sd_logd_process); 1130 + put_task_struct(sdp->sd_logd_process); 1125 1131 sdp->sd_logd_process = NULL; 1126 1132 return error; 1133 + } 1134 + 1135 + void gfs2_destroy_threads(struct gfs2_sbd *sdp) 1136 + { 1137 + if (sdp->sd_logd_process) { 1138 + kthread_stop(sdp->sd_logd_process); 1139 + put_task_struct(sdp->sd_logd_process); 1140 + sdp->sd_logd_process = NULL; 1141 + } 1142 + if (sdp->sd_quotad_process) { 1143 + kthread_stop(sdp->sd_quotad_process); 1144 + put_task_struct(sdp->sd_quotad_process); 1145 + sdp->sd_quotad_process = NULL; 1146 + } 1127 1147 } 1128 1148 1129 1149 
/** ··· 1296 1276 1297 1277 if (error) { 1298 1278 gfs2_freeze_unlock(&sdp->sd_freeze_gh); 1299 - if (sdp->sd_quotad_process) 1300 - kthread_stop(sdp->sd_quotad_process); 1301 - sdp->sd_quotad_process = NULL; 1302 - if (sdp->sd_logd_process) 1303 - kthread_stop(sdp->sd_logd_process); 1304 - sdp->sd_logd_process = NULL; 1279 + gfs2_destroy_threads(sdp); 1305 1280 fs_err(sdp, "can't make FS RW: %d\n", error); 1306 1281 goto fail_per_node; 1307 1282 } ··· 1396 1381 {"off", GFS2_QUOTA_OFF}, 1397 1382 {"account", GFS2_QUOTA_ACCOUNT}, 1398 1383 {"on", GFS2_QUOTA_ON}, 1384 + {"quiet", GFS2_QUOTA_QUIET}, 1399 1385 {} 1400 1386 }; 1401 1387 ··· 1802 1786 /* 1803 1787 * Flush and then drain the delete workqueue here (via 1804 1788 * destroy_workqueue()) to ensure that any delete work that 1805 - * may be running will also see the SDF_DEACTIVATING flag. 1789 + * may be running will also see the SDF_KILL flag. 1806 1790 */ 1807 - set_bit(SDF_DEACTIVATING, &sdp->sd_flags); 1791 + set_bit(SDF_KILL, &sdp->sd_flags); 1808 1792 gfs2_flush_delete_work(sdp); 1809 1793 destroy_workqueue(sdp->sd_delete_wq); 1810 1794
+197 -173
fs/gfs2/quota.c
··· 109 109 static void gfs2_qd_dealloc(struct rcu_head *rcu) 110 110 { 111 111 struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu); 112 + struct gfs2_sbd *sdp = qd->qd_sbd; 113 + 112 114 kmem_cache_free(gfs2_quotad_cachep, qd); 115 + if (atomic_dec_and_test(&sdp->sd_quota_count)) 116 + wake_up(&sdp->sd_kill_wait); 113 117 } 114 118 115 - static void gfs2_qd_dispose(struct list_head *list) 119 + static void gfs2_qd_dispose(struct gfs2_quota_data *qd) 120 + { 121 + struct gfs2_sbd *sdp = qd->qd_sbd; 122 + 123 + spin_lock(&qd_lock); 124 + list_del(&qd->qd_list); 125 + spin_unlock(&qd_lock); 126 + 127 + spin_lock_bucket(qd->qd_hash); 128 + hlist_bl_del_rcu(&qd->qd_hlist); 129 + spin_unlock_bucket(qd->qd_hash); 130 + 131 + if (!gfs2_withdrawn(sdp)) { 132 + gfs2_assert_warn(sdp, !qd->qd_change); 133 + gfs2_assert_warn(sdp, !qd->qd_slot_ref); 134 + gfs2_assert_warn(sdp, !qd->qd_bh_count); 135 + } 136 + 137 + gfs2_glock_put(qd->qd_gl); 138 + call_rcu(&qd->qd_rcu, gfs2_qd_dealloc); 139 + } 140 + 141 + static void gfs2_qd_list_dispose(struct list_head *list) 116 142 { 117 143 struct gfs2_quota_data *qd; 118 - struct gfs2_sbd *sdp; 119 144 120 145 while (!list_empty(list)) { 121 146 qd = list_first_entry(list, struct gfs2_quota_data, qd_lru); 122 - sdp = qd->qd_gl->gl_name.ln_sbd; 123 - 124 147 list_del(&qd->qd_lru); 125 148 126 - /* Free from the filesystem-specific list */ 127 - spin_lock(&qd_lock); 128 - list_del(&qd->qd_list); 129 - spin_unlock(&qd_lock); 130 - 131 - spin_lock_bucket(qd->qd_hash); 132 - hlist_bl_del_rcu(&qd->qd_hlist); 133 - spin_unlock_bucket(qd->qd_hash); 134 - 135 - gfs2_assert_warn(sdp, !qd->qd_change); 136 - gfs2_assert_warn(sdp, !qd->qd_slot_count); 137 - gfs2_assert_warn(sdp, !qd->qd_bh_count); 138 - 139 - gfs2_glock_put(qd->qd_gl); 140 - atomic_dec(&sdp->sd_quota_count); 141 - 142 - /* Delete it from the common reclaim list */ 143 - call_rcu(&qd->qd_rcu, gfs2_qd_dealloc); 149 + gfs2_qd_dispose(qd); 144 150 } 145 151 } 
146 152 ··· 155 149 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) 156 150 { 157 151 struct list_head *dispose = arg; 158 - struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru); 152 + struct gfs2_quota_data *qd = 153 + list_entry(item, struct gfs2_quota_data, qd_lru); 154 + enum lru_status status; 159 155 160 156 if (!spin_trylock(&qd->qd_lockref.lock)) 161 157 return LRU_SKIP; 162 158 159 + status = LRU_SKIP; 163 160 if (qd->qd_lockref.count == 0) { 164 161 lockref_mark_dead(&qd->qd_lockref); 165 162 list_lru_isolate_move(lru, &qd->qd_lru, dispose); 163 + status = LRU_REMOVED; 166 164 } 167 165 168 166 spin_unlock(&qd->qd_lockref.lock); 169 - return LRU_REMOVED; 167 + return status; 170 168 } 171 169 172 170 static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink, ··· 185 175 freed = list_lru_shrink_walk(&gfs2_qd_lru, sc, 186 176 gfs2_qd_isolate, &dispose); 187 177 188 - gfs2_qd_dispose(&dispose); 178 + gfs2_qd_list_dispose(&dispose); 189 179 190 180 return freed; 191 181 } ··· 213 203 214 204 static u64 qd2offset(struct gfs2_quota_data *qd) 215 205 { 216 - u64 offset; 217 - 218 - offset = qd2index(qd); 219 - offset *= sizeof(struct gfs2_quota); 220 - 221 - return offset; 206 + return qd2index(qd) * sizeof(struct gfs2_quota); 222 207 } 223 208 224 209 static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid) ··· 226 221 return NULL; 227 222 228 223 qd->qd_sbd = sdp; 229 - qd->qd_lockref.count = 1; 224 + qd->qd_lockref.count = 0; 230 225 spin_lock_init(&qd->qd_lockref.lock); 231 226 qd->qd_id = qid; 232 227 qd->qd_slot = -1; ··· 288 283 spin_lock_bucket(hash); 289 284 *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid); 290 285 if (qd == NULL) { 286 + new_qd->qd_lockref.count++; 291 287 *qdp = new_qd; 292 288 list_add(&new_qd->qd_list, &sdp->sd_quota_list); 293 289 hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]); ··· 308 302 309 303 static void qd_hold(struct 
gfs2_quota_data *qd) 310 304 { 311 - struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 305 + struct gfs2_sbd *sdp = qd->qd_sbd; 312 306 gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref)); 313 307 lockref_get(&qd->qd_lockref); 314 308 } 315 309 316 310 static void qd_put(struct gfs2_quota_data *qd) 317 311 { 312 + struct gfs2_sbd *sdp; 313 + 318 314 if (lockref_put_or_lock(&qd->qd_lockref)) 319 315 return; 316 + 317 + BUG_ON(__lockref_is_dead(&qd->qd_lockref)); 318 + sdp = qd->qd_sbd; 319 + if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) { 320 + lockref_mark_dead(&qd->qd_lockref); 321 + spin_unlock(&qd->qd_lockref.lock); 322 + 323 + gfs2_qd_dispose(qd); 324 + return; 325 + } 320 326 321 327 qd->qd_lockref.count = 0; 322 328 list_lru_add(&gfs2_qd_lru, &qd->qd_lru); 323 329 spin_unlock(&qd->qd_lockref.lock); 324 - 325 330 } 326 331 327 332 static int slot_get(struct gfs2_quota_data *qd) ··· 342 325 int error = 0; 343 326 344 327 spin_lock(&sdp->sd_bitmap_lock); 345 - if (qd->qd_slot_count != 0) 346 - goto out; 347 - 348 - error = -ENOSPC; 349 - bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots); 350 - if (bit < sdp->sd_quota_slots) { 328 + if (qd->qd_slot_ref == 0) { 329 + bit = find_first_zero_bit(sdp->sd_quota_bitmap, 330 + sdp->sd_quota_slots); 331 + if (bit >= sdp->sd_quota_slots) { 332 + error = -ENOSPC; 333 + goto out; 334 + } 351 335 set_bit(bit, sdp->sd_quota_bitmap); 352 336 qd->qd_slot = bit; 353 - error = 0; 354 - out: 355 - qd->qd_slot_count++; 356 337 } 338 + qd->qd_slot_ref++; 339 + out: 357 340 spin_unlock(&sdp->sd_bitmap_lock); 358 - 359 341 return error; 360 342 } 361 343 ··· 363 347 struct gfs2_sbd *sdp = qd->qd_sbd; 364 348 365 349 spin_lock(&sdp->sd_bitmap_lock); 366 - gfs2_assert(sdp, qd->qd_slot_count); 367 - qd->qd_slot_count++; 350 + gfs2_assert(sdp, qd->qd_slot_ref); 351 + qd->qd_slot_ref++; 368 352 spin_unlock(&sdp->sd_bitmap_lock); 369 353 } 370 354 ··· 373 357 struct gfs2_sbd *sdp = qd->qd_sbd; 374 358 375 
359 spin_lock(&sdp->sd_bitmap_lock); 376 - gfs2_assert(sdp, qd->qd_slot_count); 377 - if (!--qd->qd_slot_count) { 360 + gfs2_assert(sdp, qd->qd_slot_ref); 361 + if (!--qd->qd_slot_ref) { 378 362 BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap)); 379 363 qd->qd_slot = -1; 380 364 } ··· 383 367 384 368 static int bh_get(struct gfs2_quota_data *qd) 385 369 { 386 - struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 370 + struct gfs2_sbd *sdp = qd->qd_sbd; 387 371 struct inode *inode = sdp->sd_qc_inode; 388 372 struct gfs2_inode *ip = GFS2_I(inode); 389 373 unsigned int block, offset; ··· 437 421 438 422 static void bh_put(struct gfs2_quota_data *qd) 439 423 { 440 - struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 424 + struct gfs2_sbd *sdp = qd->qd_sbd; 441 425 442 426 mutex_lock(&sdp->sd_quota_mutex); 443 427 gfs2_assert(sdp, qd->qd_bh_count); ··· 467 451 return 1; 468 452 } 469 453 454 + static int qd_bh_get_or_undo(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd) 455 + { 456 + int error; 457 + 458 + error = bh_get(qd); 459 + if (!error) 460 + return 0; 461 + 462 + clear_bit(QDF_LOCKED, &qd->qd_flags); 463 + slot_put(qd); 464 + qd_put(qd); 465 + return error; 466 + } 467 + 470 468 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp) 471 469 { 472 470 struct gfs2_quota_data *qd = NULL, *iter; ··· 503 473 spin_unlock(&qd_lock); 504 474 505 475 if (qd) { 506 - error = bh_get(qd); 507 - if (error) { 508 - clear_bit(QDF_LOCKED, &qd->qd_flags); 509 - slot_put(qd); 510 - qd_put(qd); 476 + error = qd_bh_get_or_undo(sdp, qd); 477 + if (error) 511 478 return error; 512 - } 479 + *qdp = qd; 513 480 } 514 - 515 - *qdp = qd; 516 481 517 482 return 0; 518 483 } 519 484 520 - static void qd_unlock(struct gfs2_quota_data *qd) 485 + static void qdsb_put(struct gfs2_quota_data *qd) 521 486 { 522 - gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd, 523 - test_bit(QDF_LOCKED, &qd->qd_flags)); 524 - clear_bit(QDF_LOCKED, &qd->qd_flags); 525 487 bh_put(qd); 526 
488 slot_put(qd); 527 489 qd_put(qd); 490 + } 491 + 492 + static void qd_unlock(struct gfs2_quota_data *qd) 493 + { 494 + gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags)); 495 + clear_bit(QDF_LOCKED, &qd->qd_flags); 496 + qdsb_put(qd); 528 497 } 529 498 530 499 static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid, ··· 550 521 fail: 551 522 qd_put(*qdp); 552 523 return error; 553 - } 554 - 555 - static void qdsb_put(struct gfs2_quota_data *qd) 556 - { 557 - bh_put(qd); 558 - slot_put(qd); 559 - qd_put(qd); 560 524 } 561 525 562 526 /** ··· 688 666 689 667 static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type) 690 668 { 691 - struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 669 + struct gfs2_sbd *sdp = qd->qd_sbd; 692 670 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); 693 671 struct gfs2_quota_change *qc = qd->qd_bh_qc; 694 672 s64 x; ··· 730 708 mutex_unlock(&sdp->sd_quota_mutex); 731 709 } 732 710 733 - static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index, 711 + static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index, 734 712 unsigned off, void *buf, unsigned bytes) 735 713 { 714 + struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 736 715 struct inode *inode = &ip->i_inode; 737 - struct gfs2_sbd *sdp = GFS2_SB(inode); 738 716 struct address_space *mapping = inode->i_mapping; 739 717 struct page *page; 740 718 struct buffer_head *bh; 741 719 u64 blk; 742 720 unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0; 743 721 unsigned to_write = bytes, pg_off = off; 744 - int done = 0; 745 722 746 723 blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift); 747 724 boff = off % bsize; 748 725 749 - page = find_or_create_page(mapping, index, GFP_NOFS); 726 + page = grab_cache_page(mapping, index); 750 727 if (!page) 751 728 return -ENOMEM; 752 729 if (!page_has_buffers(page)) 753 730 create_empty_buffers(page, bsize, 0); 754 731 755 732 bh = page_buffers(page); 756 - while (!done) { 
733 + for(;;) { 757 734 /* Find the beginning block within the page */ 758 735 if (pg_off >= ((bnum * bsize) + bsize)) { 759 736 bh = bh->b_this_page; ··· 772 751 set_buffer_uptodate(bh); 773 752 if (bh_read(bh, REQ_META | REQ_PRIO) < 0) 774 753 goto unlock_out; 775 - if (gfs2_is_jdata(ip)) 776 - gfs2_trans_add_data(ip->i_gl, bh); 777 - else 778 - gfs2_ordered_add_inode(ip); 754 + gfs2_trans_add_data(ip->i_gl, bh); 779 755 780 756 /* If we need to write to the next block as well */ 781 757 if (to_write > (bsize - boff)) { ··· 781 763 boff = pg_off % bsize; 782 764 continue; 783 765 } 784 - done = 1; 766 + break; 785 767 } 786 768 787 769 /* Write to the page, now that we have setup the buffer(s) */ ··· 798 780 return -EIO; 799 781 } 800 782 801 - static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp, 783 + static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp, 802 784 loff_t loc) 803 785 { 804 786 unsigned long pg_beg; 805 787 unsigned pg_off, nbytes, overflow = 0; 806 - int pg_oflow = 0, error; 788 + int error; 807 789 void *ptr; 808 790 809 791 nbytes = sizeof(struct gfs2_quota); ··· 812 794 pg_off = offset_in_page(loc); 813 795 814 796 /* If the quota straddles a page boundary, split the write in two */ 815 - if ((pg_off + nbytes) > PAGE_SIZE) { 816 - pg_oflow = 1; 797 + if ((pg_off + nbytes) > PAGE_SIZE) 817 798 overflow = (pg_off + nbytes) - PAGE_SIZE; 818 - } 819 799 820 800 ptr = qp; 821 - error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr, 801 + error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr, 822 802 nbytes - overflow); 823 803 /* If there's an overflow, write the remaining bytes to the next page */ 824 - if (!error && pg_oflow) 825 - error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0, 804 + if (!error && overflow) 805 + error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0, 826 806 ptr + nbytes - overflow, 827 807 overflow); 828 808 return error; ··· 828 812 829 813 /** 830 814 * gfs2_adjust_quota - 
adjust record of current block usage 831 - * @ip: The quota inode 815 + * @sdp: The superblock 832 816 * @loc: Offset of the entry in the quota file 833 817 * @change: The amount of usage change to record 834 818 * @qd: The quota data ··· 840 824 * Returns: 0 or -ve on error 841 825 */ 842 826 843 - static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, 827 + static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc, 844 828 s64 change, struct gfs2_quota_data *qd, 845 829 struct qc_dqblk *fdq) 846 830 { 831 + struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 847 832 struct inode *inode = &ip->i_inode; 848 - struct gfs2_sbd *sdp = GFS2_SB(inode); 849 833 struct gfs2_quota q; 850 834 int err; 851 835 u64 size; ··· 862 846 return err; 863 847 864 848 loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */ 865 - err = -EIO; 866 849 be64_add_cpu(&q.qu_value, change); 867 850 if (((s64)be64_to_cpu(q.qu_value)) < 0) 868 851 q.qu_value = 0; /* Never go negative on quota usage */ ··· 881 866 } 882 867 } 883 868 884 - err = gfs2_write_disk_quota(ip, &q, loc); 869 + err = gfs2_write_disk_quota(sdp, &q, loc); 885 870 if (!err) { 886 871 size = loc + sizeof(struct gfs2_quota); 887 872 if (size > inode->i_size) ··· 896 881 897 882 static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) 898 883 { 899 - struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd; 884 + struct gfs2_sbd *sdp = (*qda)->qd_sbd; 900 885 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 901 886 struct gfs2_alloc_parms ap = { .aflags = 0, }; 902 887 unsigned int data_blocks, ind_blocks; ··· 908 893 unsigned int nalloc = 0, blocks; 909 894 int error; 910 895 911 - error = gfs2_qa_get(ip); 912 - if (error) 913 - return error; 914 - 915 896 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), 916 897 &data_blocks, &ind_blocks); 917 898 918 899 ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS); 919 - if (!ghs) { 920 - error = -ENOMEM; 921 - goto 
out; 922 - } 900 + if (!ghs) 901 + return -ENOMEM; 923 902 924 903 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); 925 904 inode_lock(&ip->i_inode); ··· 962 953 for (x = 0; x < num_qd; x++) { 963 954 qd = qda[x]; 964 955 offset = qd2offset(qd); 965 - error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL); 956 + error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd, 957 + NULL); 966 958 if (error) 967 959 goto out_end_trans; 968 960 969 961 do_qc(qd, -qd->qd_change_sync, QC_SYNC); 970 962 set_bit(QDF_REFRESH, &qd->qd_flags); 971 963 } 972 - 973 - error = 0; 974 964 975 965 out_end_trans: 976 966 gfs2_trans_end(sdp); ··· 984 976 kfree(ghs); 985 977 gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, 986 978 GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC); 987 - out: 988 - gfs2_qa_put(ip); 979 + if (!error) { 980 + for (x = 0; x < num_qd; x++) 981 + qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen; 982 + } 989 983 return error; 990 984 } 991 985 ··· 1019 1009 static int do_glock(struct gfs2_quota_data *qd, int force_refresh, 1020 1010 struct gfs2_holder *q_gh) 1021 1011 { 1022 - struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 1012 + struct gfs2_sbd *sdp = qd->qd_sbd; 1023 1013 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 1024 1014 struct gfs2_holder i_gh; 1025 1015 int error; 1026 1016 1017 + gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd); 1027 1018 restart: 1028 1019 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); 1029 1020 if (error) ··· 1070 1059 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1071 1060 struct gfs2_quota_data *qd; 1072 1061 u32 x; 1073 - int error = 0; 1062 + int error; 1074 1063 1075 - if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON) 1064 + if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON && 1065 + sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) 1076 1066 return 0; 1077 1067 1078 1068 error = gfs2_quota_hold(ip, uid, gid); ··· 1101 1089 return error; 1102 1090 } 1103 1091 1104 - static int 
need_sync(struct gfs2_quota_data *qd) 1092 + static bool need_sync(struct gfs2_quota_data *qd) 1105 1093 { 1106 - struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 1094 + struct gfs2_sbd *sdp = qd->qd_sbd; 1107 1095 struct gfs2_tune *gt = &sdp->sd_tune; 1108 1096 s64 value; 1109 1097 unsigned int num, den; 1110 - int do_sync = 1; 1111 1098 1112 1099 if (!qd->qd_qb.qb_limit) 1113 - return 0; 1100 + return false; 1114 1101 1115 1102 spin_lock(&qd_lock); 1116 1103 value = qd->qd_change; ··· 1120 1109 den = gt->gt_quota_scale_den; 1121 1110 spin_unlock(&gt->gt_spin); 1122 1111 1123 - if (value < 0) 1124 - do_sync = 0; 1112 + if (value <= 0) 1113 + return false; 1125 1114 else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >= 1126 1115 (s64)be64_to_cpu(qd->qd_qb.qb_limit)) 1127 - do_sync = 0; 1116 + return false; 1128 1117 else { 1129 1118 value *= gfs2_jindex_size(sdp) * num; 1130 1119 value = div_s64(value, den); 1131 1120 value += (s64)be64_to_cpu(qd->qd_qb.qb_value); 1132 1121 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit)) 1133 - do_sync = 0; 1122 + return false; 1134 1123 } 1135 1124 1136 - return do_sync; 1125 + return true; 1137 1126 } 1138 1127 1139 1128 void gfs2_quota_unlock(struct gfs2_inode *ip) 1140 1129 { 1141 1130 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1142 - struct gfs2_quota_data *qda[4]; 1131 + struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS]; 1143 1132 unsigned int count = 0; 1144 1133 u32 x; 1145 1134 int found; ··· 1149 1138 1150 1139 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { 1151 1140 struct gfs2_quota_data *qd; 1152 - int sync; 1141 + bool sync; 1153 1142 1154 1143 qd = ip->i_qadata->qa_qd[x]; 1155 1144 sync = need_sync(qd); ··· 1165 1154 if (!found) 1166 1155 continue; 1167 1156 1168 - gfs2_assert_warn(sdp, qd->qd_change_sync); 1169 - if (bh_get(qd)) { 1170 - clear_bit(QDF_LOCKED, &qd->qd_flags); 1171 - slot_put(qd); 1172 - qd_put(qd); 1173 - continue; 1174 - } 1175 - 1176 - qda[count++] = qd; 1157 + if (!qd_bh_get_or_undo(sdp, qd)) 1158 
+ qda[count++] = qd; 1177 1159 } 1178 1160 1179 1161 if (count) { ··· 1182 1178 1183 1179 static int print_message(struct gfs2_quota_data *qd, char *type) 1184 1180 { 1185 - struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 1181 + struct gfs2_sbd *sdp = qd->qd_sbd; 1186 1182 1187 - fs_info(sdp, "quota %s for %s %u\n", 1188 - type, 1189 - (qd->qd_id.type == USRQUOTA) ? "user" : "group", 1190 - from_kqid(&init_user_ns, qd->qd_id)); 1183 + if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) 1184 + fs_info(sdp, "quota %s for %s %u\n", 1185 + type, 1186 + (qd->qd_id.type == USRQUOTA) ? "user" : "group", 1187 + from_kqid(&init_user_ns, qd->qd_id)); 1191 1188 1192 1189 return 0; 1193 1190 } ··· 1274 1269 u32 x; 1275 1270 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1276 1271 1277 - if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON || 1272 + if ((sdp->sd_args.ar_quota != GFS2_QUOTA_ON && 1273 + sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) || 1278 1274 gfs2_assert_warn(sdp, change)) 1279 1275 return; 1280 1276 if (ip->i_diskflags & GFS2_DIF_SYSTEM) ··· 1294 1288 } 1295 1289 } 1296 1290 1291 + static bool qd_changed(struct gfs2_sbd *sdp) 1292 + { 1293 + struct gfs2_quota_data *qd; 1294 + bool changed = false; 1295 + 1296 + spin_lock(&qd_lock); 1297 + list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) { 1298 + if (test_bit(QDF_LOCKED, &qd->qd_flags) || 1299 + !test_bit(QDF_CHANGE, &qd->qd_flags)) 1300 + continue; 1301 + 1302 + changed = true; 1303 + break; 1304 + } 1305 + spin_unlock(&qd_lock); 1306 + return changed; 1307 + } 1308 + 1297 1309 int gfs2_quota_sync(struct super_block *sb, int type) 1298 1310 { 1299 1311 struct gfs2_sbd *sdp = sb->s_fs_info; ··· 1320 1296 unsigned int num_qd; 1321 1297 unsigned int x; 1322 1298 int error = 0; 1299 + 1300 + if (!qd_changed(sdp)) 1301 + return 0; 1323 1302 1324 1303 qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL); 1325 1304 if (!qda) ··· 1345 1318 if (num_qd) { 1346 1319 if (!error) 1347 1320 error = do_sync(num_qd, qda); 
1348 - if (!error) 1349 - for (x = 0; x < num_qd; x++) 1350 - qda[x]->qd_sync_gen = 1351 - sdp->sd_quota_sync_gen; 1352 1321 1353 1322 for (x = 0; x < num_qd; x++) 1354 1323 qd_unlock(qda[x]); ··· 1446 1423 set_bit(QDF_CHANGE, &qd->qd_flags); 1447 1424 qd->qd_change = qc_change; 1448 1425 qd->qd_slot = slot; 1449 - qd->qd_slot_count = 1; 1426 + qd->qd_slot_ref = 1; 1450 1427 1451 1428 spin_lock(&qd_lock); 1452 1429 BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap)); ··· 1478 1455 1479 1456 void gfs2_quota_cleanup(struct gfs2_sbd *sdp) 1480 1457 { 1481 - struct list_head *head = &sdp->sd_quota_list; 1482 1458 struct gfs2_quota_data *qd; 1459 + LIST_HEAD(dispose); 1460 + int count; 1461 + 1462 + BUG_ON(test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)); 1483 1463 1484 1464 spin_lock(&qd_lock); 1485 - while (!list_empty(head)) { 1486 - qd = list_last_entry(head, struct gfs2_quota_data, qd_list); 1465 + list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) { 1466 + spin_lock(&qd->qd_lockref.lock); 1467 + if (qd->qd_lockref.count != 0) { 1468 + spin_unlock(&qd->qd_lockref.lock); 1469 + continue; 1470 + } 1471 + lockref_mark_dead(&qd->qd_lockref); 1472 + spin_unlock(&qd->qd_lockref.lock); 1487 1473 1488 - list_del(&qd->qd_list); 1489 - 1490 - /* Also remove if this qd exists in the reclaim list */ 1491 1474 list_lru_del(&gfs2_qd_lru, &qd->qd_lru); 1492 - atomic_dec(&sdp->sd_quota_count); 1493 - spin_unlock(&qd_lock); 1494 - 1495 - spin_lock_bucket(qd->qd_hash); 1496 - hlist_bl_del_rcu(&qd->qd_hlist); 1497 - spin_unlock_bucket(qd->qd_hash); 1498 - 1499 - gfs2_assert_warn(sdp, !qd->qd_change); 1500 - gfs2_assert_warn(sdp, !qd->qd_slot_count); 1501 - gfs2_assert_warn(sdp, !qd->qd_bh_count); 1502 - 1503 - gfs2_glock_put(qd->qd_gl); 1504 - call_rcu(&qd->qd_rcu, gfs2_qd_dealloc); 1505 - 1506 - spin_lock(&qd_lock); 1475 + list_add(&qd->qd_lru, &dispose); 1507 1476 } 1508 1477 spin_unlock(&qd_lock); 1509 1478 1510 - gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count)); 1479 + 
gfs2_qd_list_dispose(&dispose); 1480 + 1481 + wait_event_timeout(sdp->sd_kill_wait, 1482 + (count = atomic_read(&sdp->sd_quota_count)) == 0, 1483 + HZ * 60); 1484 + 1485 + if (count != 0) 1486 + fs_err(sdp, "%d left-over quota data objects\n", count); 1511 1487 1512 1488 kvfree(sdp->sd_quota_bitmap); 1513 1489 sdp->sd_quota_bitmap = NULL; ··· 1558 1536 unsigned long statfs_timeo = 0; 1559 1537 unsigned long quotad_timeo = 0; 1560 1538 unsigned long t = 0; 1561 - DEFINE_WAIT(wait); 1562 1539 1563 1540 while (!kthread_should_stop()) { 1564 - 1565 1541 if (gfs2_withdrawn(sdp)) 1566 - goto bypass; 1542 + break; 1543 + 1567 1544 /* Update the master statfs file */ 1568 1545 if (sdp->sd_statfs_force_sync) { 1569 1546 int error = gfs2_statfs_sync(sdp->sd_vfs, 0); ··· 1580 1559 1581 1560 try_to_freeze(); 1582 1561 1583 - bypass: 1584 1562 t = min(quotad_timeo, statfs_timeo); 1585 1563 1586 - prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE); 1587 - if (!sdp->sd_statfs_force_sync) 1588 - t -= schedule_timeout(t); 1589 - else 1564 + t = wait_event_interruptible_timeout(sdp->sd_quota_wait, 1565 + sdp->sd_statfs_force_sync || 1566 + gfs2_withdrawn(sdp) || 1567 + kthread_should_stop(), 1568 + t); 1569 + 1570 + if (sdp->sd_statfs_force_sync) 1590 1571 t = 0; 1591 - finish_wait(&sdp->sd_quota_wait, &wait); 1592 1572 } 1593 1573 1594 1574 return 0; ··· 1602 1580 memset(state, 0, sizeof(*state)); 1603 1581 1604 1582 switch (sdp->sd_args.ar_quota) { 1583 + case GFS2_QUOTA_QUIET: 1584 + fallthrough; 1605 1585 case GFS2_QUOTA_ON: 1606 1586 state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED; 1607 1587 state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED; ··· 1750 1726 goto out_release; 1751 1727 1752 1728 /* Apply changes */ 1753 - error = gfs2_adjust_quota(ip, offset, 0, qd, fdq); 1729 + error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq); 1754 1730 if (!error) 1755 1731 clear_bit(QDF_QMSG_QUIET, &qd->qd_flags); 1756 1732
+2 -2
fs/gfs2/recovery.c
··· 27 27 #include "util.h" 28 28 #include "dir.h" 29 29 30 - struct workqueue_struct *gfs_recovery_wq; 30 + struct workqueue_struct *gfs2_recovery_wq; 31 31 32 32 int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk, 33 33 struct buffer_head **bh) ··· 570 570 return -EBUSY; 571 571 572 572 /* we have JDF_RECOVERY, queue should always succeed */ 573 - rv = queue_work(gfs_recovery_wq, &jd->jd_work); 573 + rv = queue_work(gfs2_recovery_wq, &jd->jd_work); 574 574 BUG_ON(!rv); 575 575 576 576 if (wait)
+1 -1
fs/gfs2/recovery.h
··· 9 9 10 10 #include "incore.h" 11 11 12 - extern struct workqueue_struct *gfs_recovery_wq; 12 + extern struct workqueue_struct *gfs2_recovery_wq; 13 13 14 14 static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, u32 *blk) 15 15 {
+9 -19
fs/gfs2/super.c
··· 546 546 { 547 547 int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); 548 548 549 - if (!test_bit(SDF_DEACTIVATING, &sdp->sd_flags)) 549 + if (!test_bit(SDF_KILL, &sdp->sd_flags)) 550 550 gfs2_flush_delete_work(sdp); 551 551 552 - if (!log_write_allowed && current == sdp->sd_quotad_process) 553 - fs_warn(sdp, "The quotad daemon is withdrawing.\n"); 554 - else if (sdp->sd_quotad_process) 555 - kthread_stop(sdp->sd_quotad_process); 556 - sdp->sd_quotad_process = NULL; 557 - 558 - if (!log_write_allowed && current == sdp->sd_logd_process) 559 - fs_warn(sdp, "The logd daemon is withdrawing.\n"); 560 - else if (sdp->sd_logd_process) 561 - kthread_stop(sdp->sd_logd_process); 562 - sdp->sd_logd_process = NULL; 552 + gfs2_destroy_threads(sdp); 563 553 564 554 if (log_write_allowed) { 565 555 gfs2_quota_sync(sdp->sd_vfs, 0); ··· 570 580 gfs2_log_is_empty(sdp), 571 581 HZ * 5); 572 582 gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp)); 573 - } else { 574 - wait_event_timeout(sdp->sd_log_waitq, 575 - gfs2_log_is_empty(sdp), 576 - HZ * 5); 577 583 } 578 584 gfs2_quota_cleanup(sdp); 579 - 580 - if (!log_write_allowed) 581 - sdp->sd_vfs->s_flags |= SB_RDONLY; 582 585 } 583 586 584 587 /** ··· 604 621 605 622 if (!sb_rdonly(sb)) { 606 623 gfs2_make_fs_ro(sdp); 624 + } 625 + if (gfs2_withdrawn(sdp)) { 626 + gfs2_destroy_threads(sdp); 627 + gfs2_quota_cleanup(sdp); 607 628 } 608 629 WARN_ON(gfs2_withdrawing(sdp)); 609 630 ··· 1120 1133 break; 1121 1134 case GFS2_QUOTA_ON: 1122 1135 state = "on"; 1136 + break; 1137 + case GFS2_QUOTA_QUIET: 1138 + state = "quiet"; 1123 1139 break; 1124 1140 default: 1125 1141 state = "unknown";
+1
fs/gfs2/super.h
··· 36 36 extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp); 37 37 extern void gfs2_make_fs_ro(struct gfs2_sbd *sdp); 38 38 extern void gfs2_online_uevent(struct gfs2_sbd *sdp); 39 + extern void gfs2_destroy_threads(struct gfs2_sbd *sdp); 39 40 extern int gfs2_statfs_init(struct gfs2_sbd *sdp); 40 41 extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, 41 42 s64 dinodes);
+9 -3
fs/gfs2/sys.c
··· 98 98 "sd_log_flush_head: %d\n" 99 99 "sd_log_flush_tail: %d\n" 100 100 "sd_log_blks_reserved: %d\n" 101 - "sd_log_revokes_available: %d\n", 101 + "sd_log_revokes_available: %d\n" 102 + "sd_log_pinned: %d\n" 103 + "sd_log_thresh1: %d\n" 104 + "sd_log_thresh2: %d\n", 102 105 test_bit(SDF_JOURNAL_CHECKED, &f), 103 106 test_bit(SDF_JOURNAL_LIVE, &f), 104 107 (sdp->sd_jdesc ? sdp->sd_jdesc->jd_jid : 0), ··· 121 118 test_bit(SDF_WITHDRAW_IN_PROG, &f), 122 119 test_bit(SDF_REMOTE_WITHDRAW, &f), 123 120 test_bit(SDF_WITHDRAW_RECOVERY, &f), 124 - test_bit(SDF_DEACTIVATING, &f), 121 + test_bit(SDF_KILL, &f), 125 122 sdp->sd_log_error, 126 123 rwsem_is_locked(&sdp->sd_log_flush_lock), 127 124 sdp->sd_log_num_revoke, ··· 131 128 sdp->sd_log_flush_head, 132 129 sdp->sd_log_flush_tail, 133 130 sdp->sd_log_blks_reserved, 134 - atomic_read(&sdp->sd_log_revokes_available)); 131 + atomic_read(&sdp->sd_log_revokes_available), 132 + atomic_read(&sdp->sd_log_pinned), 133 + atomic_read(&sdp->sd_log_thresh1), 134 + atomic_read(&sdp->sd_log_thresh2)); 135 135 return s; 136 136 } 137 137
+21 -13
fs/gfs2/util.c
··· 9 9 #include <linux/spinlock.h> 10 10 #include <linux/completion.h> 11 11 #include <linux/buffer_head.h> 12 + #include <linux/kthread.h> 12 13 #include <linux/crc32.h> 13 14 #include <linux/gfs2_ondisk.h> 14 15 #include <linux/delay.h> ··· 151 150 if (!sb_rdonly(sdp->sd_vfs)) { 152 151 bool locked = mutex_trylock(&sdp->sd_freeze_mutex); 153 152 154 - gfs2_make_fs_ro(sdp); 153 + wake_up(&sdp->sd_logd_waitq); 154 + wake_up(&sdp->sd_quota_wait); 155 + 156 + wait_event_timeout(sdp->sd_log_waitq, 157 + gfs2_log_is_empty(sdp), 158 + HZ * 5); 159 + 160 + sdp->sd_vfs->s_flags |= SB_RDONLY; 155 161 156 162 if (locked) 157 163 mutex_unlock(&sdp->sd_freeze_mutex); ··· 323 315 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 324 316 const struct lm_lockops *lm = ls->ls_ops; 325 317 326 - if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW && 327 - test_and_set_bit(SDF_WITHDRAWN, &sdp->sd_flags)) { 328 - if (!test_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags)) 329 - return -1; 330 - 331 - wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_IN_PROG, 332 - TASK_UNINTERRUPTIBLE); 333 - return -1; 334 - } 335 - 336 - set_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags); 337 - 338 318 if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) { 319 + unsigned long old = READ_ONCE(sdp->sd_flags), new; 320 + 321 + do { 322 + if (old & BIT(SDF_WITHDRAWN)) { 323 + wait_on_bit(&sdp->sd_flags, 324 + SDF_WITHDRAW_IN_PROG, 325 + TASK_UNINTERRUPTIBLE); 326 + return -1; 327 + } 328 + new = old | BIT(SDF_WITHDRAWN) | BIT(SDF_WITHDRAW_IN_PROG); 329 + } while (unlikely(!try_cmpxchg(&sdp->sd_flags, &old, new))); 330 + 339 331 fs_err(sdp, "about to withdraw this file system\n"); 340 332 BUG_ON(sdp->sd_args.ar_debug); 341 333