Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xfs: consolidate superblock logging functions

We now have several superblock logging functions that are identical
except for the transaction reservation and whether it should be a
synchronous transaction or not. Consolidate these all into a single
function, a single reservation and a sync flag and call it
xfs_sync_sb().

Also, xfs_mod_sb() is not really a modification function - it's the
operation of logging the superblock buffer. Hence change the name of
it to reflect this.

Note that we have to change the mp->m_update_flags that are passed
around at mount time to a boolean simply to indicate a superblock
update is needed.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>

authored by

Dave Chinner and committed by
Dave Chinner
61e63ecb 4d11a402

+101 -181
+1 -1
fs/xfs/libxfs/xfs_attr_leaf.c
··· 403 403 if (!xfs_sb_version_hasattr2(&mp->m_sb)) { 404 404 xfs_sb_version_addattr2(&mp->m_sb); 405 405 spin_unlock(&mp->m_sb_lock); 406 - xfs_mod_sb(tp); 406 + xfs_log_sb(tp); 407 407 } else 408 408 spin_unlock(&mp->m_sb_lock); 409 409 }
+5 -5
fs/xfs/libxfs/xfs_bmap.c
··· 1221 1221 goto bmap_cancel; 1222 1222 if (!xfs_sb_version_hasattr(&mp->m_sb) || 1223 1223 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) { 1224 - bool mod_sb = false; 1224 + bool log_sb = false; 1225 1225 1226 1226 spin_lock(&mp->m_sb_lock); 1227 1227 if (!xfs_sb_version_hasattr(&mp->m_sb)) { 1228 1228 xfs_sb_version_addattr(&mp->m_sb); 1229 - mod_sb = true; 1229 + log_sb = true; 1230 1230 } 1231 1231 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) { 1232 1232 xfs_sb_version_addattr2(&mp->m_sb); 1233 - mod_sb = true; 1233 + log_sb = true; 1234 1234 } 1235 1235 spin_unlock(&mp->m_sb_lock); 1236 - if (mod_sb) 1237 - xfs_mod_sb(tp); 1236 + if (log_sb) 1237 + xfs_log_sb(tp); 1238 1238 } 1239 1239 1240 1240 error = xfs_bmap_finish(&tp, &flist, &committed);
+37 -6
fs/xfs/libxfs/xfs_sb.c
··· 753 753 } 754 754 755 755 /* 756 - * xfs_mod_sb() can be used to copy arbitrary changes to the 757 - * in-core superblock into the superblock buffer to be logged. 758 - * It does not provide the higher level of locking that is 759 - * needed to protect the in-core superblock from concurrent 760 - * access. 756 + * xfs_log_sb() can be used to copy arbitrary changes to the in-core superblock 757 + * into the superblock buffer to be logged. It does not provide the higher 758 + * level of locking that is needed to protect the in-core superblock from 759 + * concurrent access. 761 760 */ 762 761 void 763 - xfs_mod_sb( 762 + xfs_log_sb( 764 763 struct xfs_trans *tp) 765 764 { 766 765 struct xfs_mount *mp = tp->t_mountp; ··· 768 769 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb); 769 770 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); 770 771 xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb)); 772 + } 773 + 774 + /* 775 + * xfs_sync_sb 776 + * 777 + * Sync the superblock to disk. 778 + * 779 + * Note that the caller is responsible for checking the frozen state of the 780 + * filesystem. This procedure uses the non-blocking transaction allocator and 781 + * thus will allow modifications to a frozen fs. This is required because this 782 + * code can be called during the process of freezing where use of the high-level 783 + * allocator would deadlock. 784 + */ 785 + int 786 + xfs_sync_sb( 787 + struct xfs_mount *mp, 788 + bool wait) 789 + { 790 + struct xfs_trans *tp; 791 + int error; 792 + 793 + tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP); 794 + error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); 795 + if (error) { 796 + xfs_trans_cancel(tp, 0); 797 + return error; 798 + } 799 + 800 + xfs_log_sb(tp); 801 + if (wait) 802 + xfs_trans_set_sync(tp); 803 + return xfs_trans_commit(tp, 0); 771 804 }
+2 -1
fs/xfs/libxfs/xfs_sb.h
··· 28 28 extern int xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t); 29 29 30 30 extern void xfs_sb_calc_crc(struct xfs_buf *bp); 31 - extern void xfs_mod_sb(struct xfs_trans *tp); 31 + extern void xfs_log_sb(struct xfs_trans *tp); 32 + extern int xfs_sync_sb(struct xfs_mount *mp, bool wait); 32 33 extern void xfs_sb_mount_common(struct xfs_mount *mp, struct xfs_sb *sbp); 33 34 extern void xfs_sb_from_disk(struct xfs_sb *to, struct xfs_dsb *from); 34 35 extern void xfs_sb_to_disk(struct xfs_dsb *to, struct xfs_sb *from);
+15 -18
fs/xfs/libxfs/xfs_shared.h
··· 82 82 #define XFS_TRANS_ATTR_RM 23 83 83 #define XFS_TRANS_ATTR_FLAG 24 84 84 #define XFS_TRANS_CLEAR_AGI_BUCKET 25 85 - #define XFS_TRANS_QM_SBCHANGE 26 85 + #define XFS_TRANS_SB_CHANGE 26 86 86 /* 87 87 * Dummy entries since we use the transaction type to index into the 88 88 * trans_type[] in xlog_recover_print_trans_head() ··· 95 95 #define XFS_TRANS_QM_DQCLUSTER 32 96 96 #define XFS_TRANS_QM_QINOCREATE 33 97 97 #define XFS_TRANS_QM_QUOTAOFF_END 34 98 - #define XFS_TRANS_SB_UNIT 35 99 - #define XFS_TRANS_FSYNC_TS 36 100 - #define XFS_TRANS_GROWFSRT_ALLOC 37 101 - #define XFS_TRANS_GROWFSRT_ZERO 38 102 - #define XFS_TRANS_GROWFSRT_FREE 39 103 - #define XFS_TRANS_SWAPEXT 40 104 - #define XFS_TRANS_SB_COUNT 41 105 - #define XFS_TRANS_CHECKPOINT 42 106 - #define XFS_TRANS_ICREATE 43 107 - #define XFS_TRANS_CREATE_TMPFILE 44 108 - #define XFS_TRANS_TYPE_MAX 44 98 + #define XFS_TRANS_FSYNC_TS 35 99 + #define XFS_TRANS_GROWFSRT_ALLOC 36 100 + #define XFS_TRANS_GROWFSRT_ZERO 37 101 + #define XFS_TRANS_GROWFSRT_FREE 38 102 + #define XFS_TRANS_SWAPEXT 39 103 + #define XFS_TRANS_CHECKPOINT 40 104 + #define XFS_TRANS_ICREATE 41 105 + #define XFS_TRANS_CREATE_TMPFILE 42 106 + #define XFS_TRANS_TYPE_MAX 43 109 107 /* new transaction types need to be reflected in xfs_logprint(8) */ 110 108 111 109 #define XFS_TRANS_TYPES \ ··· 111 113 { XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \ 112 114 { XFS_TRANS_INACTIVE, "INACTIVE" }, \ 113 115 { XFS_TRANS_CREATE, "CREATE" }, \ 114 - { XFS_TRANS_CREATE_TMPFILE, "CREATE_TMPFILE" }, \ 115 116 { XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \ 116 117 { XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \ 117 118 { XFS_TRANS_REMOVE, "REMOVE" }, \ ··· 131 134 { XFS_TRANS_ATTR_RM, "ATTR_RM" }, \ 132 135 { XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \ 133 136 { XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \ 134 - { XFS_TRANS_QM_SBCHANGE, "QM_SBCHANGE" }, \ 137 + { XFS_TRANS_SB_CHANGE, "SBCHANGE" }, \ 138 + { XFS_TRANS_DUMMY1, "DUMMY1" }, \ 139 + { 
XFS_TRANS_DUMMY2, "DUMMY2" }, \ 135 140 { XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \ 136 141 { XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \ 137 142 { XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \ 138 143 { XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \ 139 144 { XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \ 140 145 { XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \ 141 - { XFS_TRANS_SB_UNIT, "SB_UNIT" }, \ 142 146 { XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \ 143 147 { XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \ 144 148 { XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \ 145 149 { XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \ 146 150 { XFS_TRANS_SWAPEXT, "SWAPEXT" }, \ 147 - { XFS_TRANS_SB_COUNT, "SB_COUNT" }, \ 148 151 { XFS_TRANS_CHECKPOINT, "CHECKPOINT" }, \ 149 - { XFS_TRANS_DUMMY1, "DUMMY1" }, \ 150 - { XFS_TRANS_DUMMY2, "DUMMY2" }, \ 152 + { XFS_TRANS_ICREATE, "ICREATE" }, \ 153 + { XFS_TRANS_CREATE_TMPFILE, "CREATE_TMPFILE" }, \ 151 154 { XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" } 152 155 153 156 /*
-14
fs/xfs/libxfs/xfs_trans_resv.c
··· 716 716 } 717 717 718 718 /* 719 - * Clearing the quotaflags in the superblock. 720 - * the super block for changing quota flags: sector size 721 - */ 722 - STATIC uint 723 - xfs_calc_qm_sbchange_reservation( 724 - struct xfs_mount *mp) 725 - { 726 - return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize); 727 - } 728 - 729 - /* 730 719 * Adjusting quota limits. 731 720 * the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot) 732 721 */ ··· 853 864 * The following transactions are logged in logical format with 854 865 * a default log count. 855 866 */ 856 - resp->tr_qm_sbchange.tr_logres = xfs_calc_qm_sbchange_reservation(mp); 857 - resp->tr_qm_sbchange.tr_logcount = XFS_DEFAULT_LOG_COUNT; 858 - 859 867 resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp); 860 868 resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT; 861 869
-1
fs/xfs/libxfs/xfs_trans_resv.h
··· 56 56 struct xfs_trans_res tr_growrtalloc; /* grow realtime allocations */ 57 57 struct xfs_trans_res tr_growrtzero; /* grow realtime zeroing */ 58 58 struct xfs_trans_res tr_growrtfree; /* grow realtime freeing */ 59 - struct xfs_trans_res tr_qm_sbchange; /* change quota flags */ 60 59 struct xfs_trans_res tr_qm_setqlim; /* adjust quota limits */ 61 60 struct xfs_trans_res tr_qm_dqalloc; /* allocate quota on disk */ 62 61 struct xfs_trans_res tr_qm_quotaoff; /* turn quota off */
-29
fs/xfs/xfs_fsops.c
··· 756 756 return 0; 757 757 } 758 758 759 - /* 760 - * Dump a transaction into the log that contains no real change. This is needed 761 - * to be able to make the log dirty or stamp the current tail LSN into the log 762 - * during the covering operation. 763 - * 764 - * We cannot use an inode here for this - that will push dirty state back up 765 - * into the VFS and then periodic inode flushing will prevent log covering from 766 - * making progress. Hence we log a field in the superblock instead and use a 767 - * synchronous transaction to ensure the superblock is immediately unpinned 768 - * and can be written back. 769 - */ 770 - int 771 - xfs_fs_log_dummy( 772 - xfs_mount_t *mp) 773 - { 774 - xfs_trans_t *tp; 775 - int error; 776 - 777 - tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP); 778 - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); 779 - if (error) { 780 - xfs_trans_cancel(tp, 0); 781 - return error; 782 - } 783 - xfs_mod_sb(tp); 784 - xfs_trans_set_sync(tp); 785 - return xfs_trans_commit(tp, 0); 786 - } 787 - 788 759 int 789 760 xfs_fs_goingdown( 790 761 xfs_mount_t *mp,
+15 -3
fs/xfs/xfs_log.c
··· 33 33 #include "xfs_fsops.h" 34 34 #include "xfs_cksum.h" 35 35 #include "xfs_sysfs.h" 36 + #include "xfs_sb.h" 36 37 37 38 kmem_zone_t *xfs_log_ticket_zone; 38 39 ··· 1291 1290 struct xfs_mount *mp = log->l_mp; 1292 1291 1293 1292 /* dgc: errors ignored - not fatal and nowhere to report them */ 1294 - if (xfs_log_need_covered(mp)) 1295 - xfs_fs_log_dummy(mp); 1296 - else 1293 + if (xfs_log_need_covered(mp)) { 1294 + /* 1295 + * Dump a transaction into the log that contains no real change. 1296 + * This is needed to stamp the current tail LSN into the log 1297 + * during the covering operation. 1298 + * 1299 + * We cannot use an inode here for this - that will push dirty 1300 + * state back up into the VFS and then periodic inode flushing 1301 + * will prevent log covering from making progress. Hence we 1302 + * synchronously log the superblock instead to ensure the 1303 + * superblock is immediately unpinned and can be written back. 1304 + */ 1305 + xfs_sync_sb(mp, true); 1306 + } else 1297 1307 xfs_log_force(mp, 0); 1298 1308 1299 1309 /* start pushing all the metadata that is currently dirty */
+12 -66
fs/xfs/xfs_mount.c
··· 408 408 if (xfs_sb_version_hasdalign(sbp)) { 409 409 if (sbp->sb_unit != mp->m_dalign) { 410 410 sbp->sb_unit = mp->m_dalign; 411 - mp->m_update_flags |= XFS_SB_UNIT; 411 + mp->m_update_sb = true; 412 412 } 413 413 if (sbp->sb_width != mp->m_swidth) { 414 414 sbp->sb_width = mp->m_swidth; 415 - mp->m_update_flags |= XFS_SB_WIDTH; 415 + mp->m_update_sb = true; 416 416 } 417 417 } else { 418 418 xfs_warn(mp, ··· 583 583 xfs_mount_reset_sbqflags( 584 584 struct xfs_mount *mp) 585 585 { 586 - int error; 587 - struct xfs_trans *tp; 588 - 589 586 mp->m_qflags = 0; 590 587 591 - /* 592 - * It is OK to look at sb_qflags here in mount path, 593 - * without m_sb_lock. 594 - */ 588 + /* It is OK to look at sb_qflags in the mount path without m_sb_lock. */ 595 589 if (mp->m_sb.sb_qflags == 0) 596 590 return 0; 597 591 spin_lock(&mp->m_sb_lock); 598 592 mp->m_sb.sb_qflags = 0; 599 593 spin_unlock(&mp->m_sb_lock); 600 594 601 - /* 602 - * If the fs is readonly, let the incore superblock run 603 - * with quotas off but don't flush the update out to disk 604 - */ 605 - if (mp->m_flags & XFS_MOUNT_RDONLY) 595 + if (!xfs_fs_writable(mp, SB_FREEZE_WRITE)) 606 596 return 0; 607 597 608 - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); 609 - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0); 610 - if (error) { 611 - xfs_trans_cancel(tp, 0); 612 - xfs_alert(mp, "%s: Superblock update failed!", __func__); 613 - return error; 614 - } 615 - 616 - xfs_mod_sb(tp); 617 - return xfs_trans_commit(tp, 0); 598 + return xfs_sync_sb(mp, false); 618 599 } 619 600 620 601 __uint64_t ··· 659 678 xfs_warn(mp, "correcting sb_features alignment problem"); 660 679 sbp->sb_features2 |= sbp->sb_bad_features2; 661 680 sbp->sb_bad_features2 = sbp->sb_features2; 662 - mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2; 681 + mp->m_update_sb = true; 663 682 664 683 /* 665 684 * Re-check for ATTR2 in case it was found in bad_features2 ··· 673 692 if 
(xfs_sb_version_hasattr2(&mp->m_sb) && 674 693 (mp->m_flags & XFS_MOUNT_NOATTR2)) { 675 694 xfs_sb_version_removeattr2(&mp->m_sb); 676 - mp->m_update_flags |= XFS_SB_FEATURES2; 695 + mp->m_update_sb = true; 677 696 678 697 /* update sb_versionnum for the clearing of the morebits */ 679 698 if (!sbp->sb_features2) 680 - mp->m_update_flags |= XFS_SB_VERSIONNUM; 699 + mp->m_update_sb = true; 681 700 } 682 701 683 702 /* always use v2 inodes by default now */ 684 703 if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) { 685 704 mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT; 686 - mp->m_update_flags |= XFS_SB_VERSIONNUM; 705 + mp->m_update_sb = true; 687 706 } 688 707 689 708 /* ··· 876 895 * the next remount into writeable mode. Otherwise we would never 877 896 * perform the update e.g. for the root filesystem. 878 897 */ 879 - if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { 880 - error = xfs_mount_log_sb(mp); 898 + if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) { 899 + error = xfs_sync_sb(mp, false); 881 900 if (error) { 882 901 xfs_warn(mp, "failed to write sb changes"); 883 902 goto out_rtunmount; ··· 1084 1103 int 1085 1104 xfs_log_sbcount(xfs_mount_t *mp) 1086 1105 { 1087 - xfs_trans_t *tp; 1088 - int error; 1089 - 1090 1106 /* allow this to proceed during the freeze sequence... 
*/ 1091 1107 if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE)) 1092 1108 return 0; ··· 1097 1119 if (!xfs_sb_version_haslazysbcount(&mp->m_sb)) 1098 1120 return 0; 1099 1121 1100 - tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP); 1101 - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); 1102 - if (error) { 1103 - xfs_trans_cancel(tp, 0); 1104 - return error; 1105 - } 1106 - 1107 - xfs_mod_sb(tp); 1108 - xfs_trans_set_sync(tp); 1109 - error = xfs_trans_commit(tp, 0); 1110 - return error; 1122 + return xfs_sync_sb(mp, true); 1111 1123 } 1112 1124 1113 1125 /* ··· 1388 1420 xfs_buf_lock(bp); 1389 1421 mp->m_sb_bp = NULL; 1390 1422 xfs_buf_relse(bp); 1391 - } 1392 - 1393 - /* 1394 - * Used to log changes to the superblock unit and width fields which could 1395 - * be altered by the mount options, as well as any potential sb_features2 1396 - * fixup. Only the first superblock is updated. 1397 - */ 1398 - int 1399 - xfs_mount_log_sb( 1400 - struct xfs_mount *mp) 1401 - { 1402 - struct xfs_trans *tp; 1403 - int error; 1404 - 1405 - tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); 1406 - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); 1407 - if (error) { 1408 - xfs_trans_cancel(tp, 0); 1409 - return error; 1410 - } 1411 - xfs_mod_sb(tp); 1412 - return xfs_trans_commit(tp, 0); 1413 1423 } 1414 1424 1415 1425 /*
+1 -2
fs/xfs/xfs_mount.h
··· 162 162 struct delayed_work m_reclaim_work; /* background inode reclaim */ 163 163 struct delayed_work m_eofblocks_work; /* background eof blocks 164 164 trimming */ 165 - __int64_t m_update_flags; /* sb flags we need to update 166 - on the next remount,rw */ 165 + bool m_update_sb; /* sb needs update in mount */ 167 166 int64_t m_low_space[XFS_LOWSP_MAX]; 168 167 /* low free space thresholds */ 169 168 struct xfs_kobj m_kobj;
+2 -25
fs/xfs/xfs_qm.c
··· 792 792 else 793 793 mp->m_sb.sb_pquotino = (*ip)->i_ino; 794 794 spin_unlock(&mp->m_sb_lock); 795 - xfs_mod_sb(tp); 795 + xfs_log_sb(tp); 796 796 797 797 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { 798 798 xfs_alert(mp, "%s failed (error %d)!", __func__, error); ··· 1445 1445 spin_unlock(&mp->m_sb_lock); 1446 1446 1447 1447 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { 1448 - if (xfs_qm_write_sb_changes(mp)) { 1448 + if (xfs_sync_sb(mp, false)) { 1449 1449 /* 1450 1450 * We could only have been turning quotas off. 1451 1451 * We aren't in very good shape actually because ··· 1573 1573 1574 1574 xfs_qm_dqdestroy(dqp); 1575 1575 } 1576 - 1577 - /* 1578 - * Start a transaction and write the incore superblock changes to 1579 - * disk. flags parameter indicates which fields have changed. 1580 - */ 1581 - int 1582 - xfs_qm_write_sb_changes( 1583 - struct xfs_mount *mp) 1584 - { 1585 - xfs_trans_t *tp; 1586 - int error; 1587 - 1588 - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); 1589 - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0); 1590 - if (error) { 1591 - xfs_trans_cancel(tp, 0); 1592 - return error; 1593 - } 1594 - 1595 - xfs_mod_sb(tp); 1596 - return xfs_trans_commit(tp, 0); 1597 - } 1598 - 1599 1576 1600 1577 /* --------------- utility functions for vnodeops ---------------- */ 1601 1578
-1
fs/xfs/xfs_qm.h
··· 157 157 #define XFS_QM_RTBWARNLIMIT 5 158 158 159 159 extern void xfs_qm_destroy_quotainfo(struct xfs_mount *); 160 - extern int xfs_qm_write_sb_changes(struct xfs_mount *); 161 160 162 161 /* dquot stuff */ 163 162 extern void xfs_qm_dqpurge_all(struct xfs_mount *, uint);
+4 -3
fs/xfs/xfs_qm_syscalls.c
··· 92 92 mutex_unlock(&q->qi_quotaofflock); 93 93 94 94 /* XXX what to do if error ? Revert back to old vals incore ? */ 95 - return xfs_qm_write_sb_changes(mp); 95 + return xfs_sync_sb(mp, false); 96 96 } 97 97 98 98 dqtype = 0; ··· 369 369 if ((qf & flags) == flags) 370 370 return -EEXIST; 371 371 372 - if ((error = xfs_qm_write_sb_changes(mp))) 372 + error = xfs_sync_sb(mp, false); 373 + if (error) 373 374 return error; 374 375 /* 375 376 * If we aren't trying to switch on quota enforcement, we are done. ··· 797 796 mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; 798 797 spin_unlock(&mp->m_sb_lock); 799 798 800 - xfs_mod_sb(tp); 799 + xfs_log_sb(tp); 801 800 802 801 /* 803 802 * We have to make sure that the transaction is secure on disk before we
+7 -6
fs/xfs/xfs_super.c
··· 1257 1257 * If this is the first remount to writeable state we 1258 1258 * might have some superblock changes to update. 1259 1259 */ 1260 - if (mp->m_update_flags) { 1261 - error = xfs_mount_log_sb(mp); 1260 + if (mp->m_update_sb) { 1261 + error = xfs_sync_sb(mp, false); 1262 1262 if (error) { 1263 1263 xfs_warn(mp, "failed to write sb changes"); 1264 1264 return error; 1265 1265 } 1266 - mp->m_update_flags = 0; 1266 + mp->m_update_sb = false; 1267 1267 } 1268 1268 1269 1269 /* ··· 1293 1293 1294 1294 /* 1295 1295 * Second stage of a freeze. The data is already frozen so we only 1296 - * need to take care of the metadata. Once that's done write a dummy 1297 - * record to dirty the log in case of a crash while frozen. 1296 + * need to take care of the metadata. Once that's done sync the superblock 1297 + * to the log to dirty it in case of a crash while frozen. This ensures that we 1298 + * will recover the unlinked inode lists on the next mount. 1298 1299 */ 1299 1300 STATIC int 1300 1301 xfs_fs_freeze( ··· 1305 1304 1306 1305 xfs_save_resvblks(mp); 1307 1306 xfs_quiesce_attr(mp); 1308 - return xfs_fs_log_dummy(mp); 1307 + return xfs_sync_sb(mp, true); 1309 1308 } 1310 1309 1311 1310 STATIC int