Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'xfs-sb-logging-rework' into for-next

Conflicts:
fs/xfs/xfs_mount.c

+228 -418
+1 -1
fs/xfs/libxfs/xfs_attr_leaf.c
··· 403 403 if (!xfs_sb_version_hasattr2(&mp->m_sb)) { 404 404 xfs_sb_version_addattr2(&mp->m_sb); 405 405 spin_unlock(&mp->m_sb_lock); 406 - xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2); 406 + xfs_log_sb(tp); 407 407 } else 408 408 spin_unlock(&mp->m_sb_lock); 409 409 }
+6 -8
fs/xfs/libxfs/xfs_bmap.c
··· 1221 1221 goto bmap_cancel; 1222 1222 if (!xfs_sb_version_hasattr(&mp->m_sb) || 1223 1223 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) { 1224 - __int64_t sbfields = 0; 1224 + bool log_sb = false; 1225 1225 1226 1226 spin_lock(&mp->m_sb_lock); 1227 1227 if (!xfs_sb_version_hasattr(&mp->m_sb)) { 1228 1228 xfs_sb_version_addattr(&mp->m_sb); 1229 - sbfields |= XFS_SB_VERSIONNUM; 1229 + log_sb = true; 1230 1230 } 1231 1231 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) { 1232 1232 xfs_sb_version_addattr2(&mp->m_sb); 1233 - sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2); 1233 + log_sb = true; 1234 1234 } 1235 - if (sbfields) { 1236 - spin_unlock(&mp->m_sb_lock); 1237 - xfs_mod_sb(tp, sbfields); 1238 - } else 1239 - spin_unlock(&mp->m_sb_lock); 1235 + spin_unlock(&mp->m_sb_lock); 1236 + if (log_sb) 1237 + xfs_log_sb(tp); 1240 1238 } 1241 1239 1242 1240 error = xfs_bmap_finish(&tp, &flist, &committed);
+7 -7
fs/xfs/libxfs/xfs_format.h
··· 151 151 __uint32_t sb_features2; /* additional feature bits */ 152 152 153 153 /* 154 - * bad features2 field as a result of failing to pad the sb 155 - * structure to 64 bits. Some machines will be using this field 156 - * for features2 bits. Easiest just to mark it bad and not use 157 - * it for anything else. 154 + * bad features2 field as a result of failing to pad the sb structure to 155 + * 64 bits. Some machines will be using this field for features2 bits. 156 + * Easiest just to mark it bad and not use it for anything else. 157 + * 158 + * This is not kept up to date in memory; it is always overwritten by 159 + * the value in sb_features2 when formatting the incore superblock to 160 + * the disk buffer. 158 161 */ 159 162 __uint32_t sb_bad_features2; 160 163 ··· 456 453 { 457 454 sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT; 458 455 sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT; 459 - sbp->sb_bad_features2 |= XFS_SB_VERSION2_ATTR2BIT; 460 456 } 461 457 462 458 static inline void xfs_sb_version_removeattr2(struct xfs_sb *sbp) 463 459 { 464 460 sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT; 465 - sbp->sb_bad_features2 &= ~XFS_SB_VERSION2_ATTR2BIT; 466 461 if (!sbp->sb_features2) 467 462 sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT; 468 463 } ··· 476 475 { 477 476 sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT; 478 477 sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT; 479 - sbp->sb_bad_features2 |= XFS_SB_VERSION2_PROJID32BIT; 480 478 } 481 479 482 480 /*
+138 -186
fs/xfs/libxfs/xfs_sb.c
··· 40 40 * Physical superblock buffer manipulations. Shared with libxfs in userspace. 41 41 */ 42 42 43 - static const struct { 44 - short offset; 45 - short type; /* 0 = integer 46 - * 1 = binary / string (no translation) 47 - */ 48 - } xfs_sb_info[] = { 49 - { offsetof(xfs_sb_t, sb_magicnum), 0 }, 50 - { offsetof(xfs_sb_t, sb_blocksize), 0 }, 51 - { offsetof(xfs_sb_t, sb_dblocks), 0 }, 52 - { offsetof(xfs_sb_t, sb_rblocks), 0 }, 53 - { offsetof(xfs_sb_t, sb_rextents), 0 }, 54 - { offsetof(xfs_sb_t, sb_uuid), 1 }, 55 - { offsetof(xfs_sb_t, sb_logstart), 0 }, 56 - { offsetof(xfs_sb_t, sb_rootino), 0 }, 57 - { offsetof(xfs_sb_t, sb_rbmino), 0 }, 58 - { offsetof(xfs_sb_t, sb_rsumino), 0 }, 59 - { offsetof(xfs_sb_t, sb_rextsize), 0 }, 60 - { offsetof(xfs_sb_t, sb_agblocks), 0 }, 61 - { offsetof(xfs_sb_t, sb_agcount), 0 }, 62 - { offsetof(xfs_sb_t, sb_rbmblocks), 0 }, 63 - { offsetof(xfs_sb_t, sb_logblocks), 0 }, 64 - { offsetof(xfs_sb_t, sb_versionnum), 0 }, 65 - { offsetof(xfs_sb_t, sb_sectsize), 0 }, 66 - { offsetof(xfs_sb_t, sb_inodesize), 0 }, 67 - { offsetof(xfs_sb_t, sb_inopblock), 0 }, 68 - { offsetof(xfs_sb_t, sb_fname[0]), 1 }, 69 - { offsetof(xfs_sb_t, sb_blocklog), 0 }, 70 - { offsetof(xfs_sb_t, sb_sectlog), 0 }, 71 - { offsetof(xfs_sb_t, sb_inodelog), 0 }, 72 - { offsetof(xfs_sb_t, sb_inopblog), 0 }, 73 - { offsetof(xfs_sb_t, sb_agblklog), 0 }, 74 - { offsetof(xfs_sb_t, sb_rextslog), 0 }, 75 - { offsetof(xfs_sb_t, sb_inprogress), 0 }, 76 - { offsetof(xfs_sb_t, sb_imax_pct), 0 }, 77 - { offsetof(xfs_sb_t, sb_icount), 0 }, 78 - { offsetof(xfs_sb_t, sb_ifree), 0 }, 79 - { offsetof(xfs_sb_t, sb_fdblocks), 0 }, 80 - { offsetof(xfs_sb_t, sb_frextents), 0 }, 81 - { offsetof(xfs_sb_t, sb_uquotino), 0 }, 82 - { offsetof(xfs_sb_t, sb_gquotino), 0 }, 83 - { offsetof(xfs_sb_t, sb_qflags), 0 }, 84 - { offsetof(xfs_sb_t, sb_flags), 0 }, 85 - { offsetof(xfs_sb_t, sb_shared_vn), 0 }, 86 - { offsetof(xfs_sb_t, sb_inoalignmt), 0 }, 87 - { offsetof(xfs_sb_t, sb_unit), 0 }, 
88 - { offsetof(xfs_sb_t, sb_width), 0 }, 89 - { offsetof(xfs_sb_t, sb_dirblklog), 0 }, 90 - { offsetof(xfs_sb_t, sb_logsectlog), 0 }, 91 - { offsetof(xfs_sb_t, sb_logsectsize), 0 }, 92 - { offsetof(xfs_sb_t, sb_logsunit), 0 }, 93 - { offsetof(xfs_sb_t, sb_features2), 0 }, 94 - { offsetof(xfs_sb_t, sb_bad_features2), 0 }, 95 - { offsetof(xfs_sb_t, sb_features_compat), 0 }, 96 - { offsetof(xfs_sb_t, sb_features_ro_compat), 0 }, 97 - { offsetof(xfs_sb_t, sb_features_incompat), 0 }, 98 - { offsetof(xfs_sb_t, sb_features_log_incompat), 0 }, 99 - { offsetof(xfs_sb_t, sb_crc), 0 }, 100 - { offsetof(xfs_sb_t, sb_pad), 0 }, 101 - { offsetof(xfs_sb_t, sb_pquotino), 0 }, 102 - { offsetof(xfs_sb_t, sb_lsn), 0 }, 103 - { sizeof(xfs_sb_t), 0 } 104 - }; 105 - 106 43 /* 107 44 * Reference counting access wrappers to the perag structures. 108 45 * Because we never free per-ag structures, the only thing we ··· 398 461 __xfs_sb_from_disk(to, from, true); 399 462 } 400 463 401 - static inline void 464 + static void 402 465 xfs_sb_quota_to_disk( 403 - xfs_dsb_t *to, 404 - xfs_sb_t *from, 405 - __int64_t *fields) 466 + struct xfs_dsb *to, 467 + struct xfs_sb *from) 406 468 { 407 469 __uint16_t qflags = from->sb_qflags; 408 470 409 - /* 410 - * We need to do these manipilations only if we are working 411 - * with an older version of on-disk superblock. 412 - */ 413 - if (xfs_sb_version_has_pquotino(from)) 471 + to->sb_uquotino = cpu_to_be64(from->sb_uquotino); 472 + if (xfs_sb_version_has_pquotino(from)) { 473 + to->sb_qflags = cpu_to_be16(from->sb_qflags); 474 + to->sb_gquotino = cpu_to_be64(from->sb_gquotino); 475 + to->sb_pquotino = cpu_to_be64(from->sb_pquotino); 414 476 return; 415 - 416 - if (*fields & XFS_SB_QFLAGS) { 417 - /* 418 - * The in-core version of sb_qflags do not have 419 - * XFS_OQUOTA_* flags, whereas the on-disk version 420 - * does. So, convert incore XFS_{PG}QUOTA_* flags 421 - * to on-disk XFS_OQUOTA_* flags. 
422 - */ 423 - qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD | 424 - XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD); 425 - 426 - if (from->sb_qflags & 427 - (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD)) 428 - qflags |= XFS_OQUOTA_ENFD; 429 - if (from->sb_qflags & 430 - (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) 431 - qflags |= XFS_OQUOTA_CHKD; 432 - to->sb_qflags = cpu_to_be16(qflags); 433 - *fields &= ~XFS_SB_QFLAGS; 434 477 } 435 478 436 479 /* 437 - * GQUOTINO and PQUOTINO cannot be used together in versions of 438 - * superblock that do not have pquotino. from->sb_flags tells us which 439 - * quota is active and should be copied to disk. If neither are active, 440 - * make sure we write NULLFSINO to the sb_gquotino field as a quota 441 - * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature 442 - * bit is set. 443 - * 444 - * Note that we don't need to handle the sb_uquotino or sb_pquotino here 445 - * as they do not require any translation. Hence the main sb field loop 446 - * will write them appropriately from the in-core superblock. 480 + * The in-core version of sb_qflags do not have XFS_OQUOTA_* 481 + * flags, whereas the on-disk version does. So, convert incore 482 + * XFS_{PG}QUOTA_* flags to on-disk XFS_OQUOTA_* flags. 447 483 */ 448 - if ((*fields & XFS_SB_GQUOTINO) && 449 - (from->sb_qflags & XFS_GQUOTA_ACCT)) 484 + qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD | 485 + XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD); 486 + 487 + if (from->sb_qflags & 488 + (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD)) 489 + qflags |= XFS_OQUOTA_ENFD; 490 + if (from->sb_qflags & 491 + (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) 492 + qflags |= XFS_OQUOTA_CHKD; 493 + to->sb_qflags = cpu_to_be16(qflags); 494 + 495 + /* 496 + * GQUOTINO and PQUOTINO cannot be used together in versions 497 + * of superblock that do not have pquotino. from->sb_flags 498 + * tells us which quota is active and should be copied to 499 + * disk. If neither are active, we should NULL the inode. 
500 + * 501 + * In all cases, the separate pquotino must remain 0 because it 502 + * it beyond the "end" of the valid non-pquotino superblock. 503 + */ 504 + if (from->sb_qflags & XFS_GQUOTA_ACCT) 450 505 to->sb_gquotino = cpu_to_be64(from->sb_gquotino); 451 - else if ((*fields & XFS_SB_PQUOTINO) && 452 - (from->sb_qflags & XFS_PQUOTA_ACCT)) 506 + else if (from->sb_qflags & XFS_PQUOTA_ACCT) 453 507 to->sb_gquotino = cpu_to_be64(from->sb_pquotino); 454 508 else { 455 509 /* ··· 454 526 to->sb_gquotino = cpu_to_be64(NULLFSINO); 455 527 } 456 528 457 - *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO); 529 + to->sb_pquotino = 0; 458 530 } 459 531 460 - /* 461 - * Copy in core superblock to ondisk one. 462 - * 463 - * The fields argument is mask of superblock fields to copy. 464 - */ 465 532 void 466 533 xfs_sb_to_disk( 467 - xfs_dsb_t *to, 468 - xfs_sb_t *from, 469 - __int64_t fields) 534 + struct xfs_dsb *to, 535 + struct xfs_sb *from) 470 536 { 471 - xfs_caddr_t to_ptr = (xfs_caddr_t)to; 472 - xfs_caddr_t from_ptr = (xfs_caddr_t)from; 473 - xfs_sb_field_t f; 474 - int first; 475 - int size; 537 + xfs_sb_quota_to_disk(to, from); 476 538 477 - ASSERT(fields); 478 - if (!fields) 479 - return; 539 + to->sb_magicnum = cpu_to_be32(from->sb_magicnum); 540 + to->sb_blocksize = cpu_to_be32(from->sb_blocksize); 541 + to->sb_dblocks = cpu_to_be64(from->sb_dblocks); 542 + to->sb_rblocks = cpu_to_be64(from->sb_rblocks); 543 + to->sb_rextents = cpu_to_be64(from->sb_rextents); 544 + memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid)); 545 + to->sb_logstart = cpu_to_be64(from->sb_logstart); 546 + to->sb_rootino = cpu_to_be64(from->sb_rootino); 547 + to->sb_rbmino = cpu_to_be64(from->sb_rbmino); 548 + to->sb_rsumino = cpu_to_be64(from->sb_rsumino); 549 + to->sb_rextsize = cpu_to_be32(from->sb_rextsize); 550 + to->sb_agblocks = cpu_to_be32(from->sb_agblocks); 551 + to->sb_agcount = cpu_to_be32(from->sb_agcount); 552 + to->sb_rbmblocks = cpu_to_be32(from->sb_rbmblocks); 553 + 
to->sb_logblocks = cpu_to_be32(from->sb_logblocks); 554 + to->sb_versionnum = cpu_to_be16(from->sb_versionnum); 555 + to->sb_sectsize = cpu_to_be16(from->sb_sectsize); 556 + to->sb_inodesize = cpu_to_be16(from->sb_inodesize); 557 + to->sb_inopblock = cpu_to_be16(from->sb_inopblock); 558 + memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname)); 559 + to->sb_blocklog = from->sb_blocklog; 560 + to->sb_sectlog = from->sb_sectlog; 561 + to->sb_inodelog = from->sb_inodelog; 562 + to->sb_inopblog = from->sb_inopblog; 563 + to->sb_agblklog = from->sb_agblklog; 564 + to->sb_rextslog = from->sb_rextslog; 565 + to->sb_inprogress = from->sb_inprogress; 566 + to->sb_imax_pct = from->sb_imax_pct; 567 + to->sb_icount = cpu_to_be64(from->sb_icount); 568 + to->sb_ifree = cpu_to_be64(from->sb_ifree); 569 + to->sb_fdblocks = cpu_to_be64(from->sb_fdblocks); 570 + to->sb_frextents = cpu_to_be64(from->sb_frextents); 480 571 481 - /* We should never write the crc here, it's updated in the IO path */ 482 - fields &= ~XFS_SB_CRC; 572 + to->sb_flags = from->sb_flags; 573 + to->sb_shared_vn = from->sb_shared_vn; 574 + to->sb_inoalignmt = cpu_to_be32(from->sb_inoalignmt); 575 + to->sb_unit = cpu_to_be32(from->sb_unit); 576 + to->sb_width = cpu_to_be32(from->sb_width); 577 + to->sb_dirblklog = from->sb_dirblklog; 578 + to->sb_logsectlog = from->sb_logsectlog; 579 + to->sb_logsectsize = cpu_to_be16(from->sb_logsectsize); 580 + to->sb_logsunit = cpu_to_be32(from->sb_logsunit); 483 581 484 - xfs_sb_quota_to_disk(to, from, &fields); 485 - while (fields) { 486 - f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); 487 - first = xfs_sb_info[f].offset; 488 - size = xfs_sb_info[f + 1].offset - first; 582 + /* 583 + * We need to ensure that bad_features2 always matches features2. 584 + * Hence we enforce that here rather than having to remember to do it 585 + * everywhere else that updates features2. 
586 + */ 587 + from->sb_bad_features2 = from->sb_features2; 588 + to->sb_features2 = cpu_to_be32(from->sb_features2); 589 + to->sb_bad_features2 = cpu_to_be32(from->sb_bad_features2); 489 590 490 - ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1); 491 - 492 - if (size == 1 || xfs_sb_info[f].type == 1) { 493 - memcpy(to_ptr + first, from_ptr + first, size); 494 - } else { 495 - switch (size) { 496 - case 2: 497 - *(__be16 *)(to_ptr + first) = 498 - cpu_to_be16(*(__u16 *)(from_ptr + first)); 499 - break; 500 - case 4: 501 - *(__be32 *)(to_ptr + first) = 502 - cpu_to_be32(*(__u32 *)(from_ptr + first)); 503 - break; 504 - case 8: 505 - *(__be64 *)(to_ptr + first) = 506 - cpu_to_be64(*(__u64 *)(from_ptr + first)); 507 - break; 508 - default: 509 - ASSERT(0); 510 - } 511 - } 512 - 513 - fields &= ~(1LL << f); 591 + if (xfs_sb_version_hascrc(from)) { 592 + to->sb_features_compat = cpu_to_be32(from->sb_features_compat); 593 + to->sb_features_ro_compat = 594 + cpu_to_be32(from->sb_features_ro_compat); 595 + to->sb_features_incompat = 596 + cpu_to_be32(from->sb_features_incompat); 597 + to->sb_features_log_incompat = 598 + cpu_to_be32(from->sb_features_log_incompat); 599 + to->sb_pad = 0; 600 + to->sb_lsn = cpu_to_be64(from->sb_lsn); 514 601 } 515 602 } 516 603 ··· 759 816 } 760 817 761 818 /* 762 - * xfs_mod_sb() can be used to copy arbitrary changes to the 763 - * in-core superblock into the superblock buffer to be logged. 764 - * It does not provide the higher level of locking that is 765 - * needed to protect the in-core superblock from concurrent 766 - * access. 819 + * xfs_log_sb() can be used to copy arbitrary changes to the in-core superblock 820 + * into the superblock buffer to be logged. It does not provide the higher 821 + * level of locking that is needed to protect the in-core superblock from 822 + * concurrent access. 
767 823 */ 768 824 void 769 - xfs_mod_sb(xfs_trans_t *tp, __int64_t fields) 825 + xfs_log_sb( 826 + struct xfs_trans *tp) 770 827 { 771 - xfs_buf_t *bp; 772 - int first; 773 - int last; 774 - xfs_mount_t *mp; 775 - xfs_sb_field_t f; 828 + struct xfs_mount *mp = tp->t_mountp; 829 + struct xfs_buf *bp = xfs_trans_getsb(tp, mp, 0); 776 830 777 - ASSERT(fields); 778 - if (!fields) 779 - return; 780 - mp = tp->t_mountp; 781 - bp = xfs_trans_getsb(tp, mp, 0); 782 - first = sizeof(xfs_sb_t); 783 - last = 0; 784 - 785 - /* translate/copy */ 786 - 787 - xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields); 788 - 789 - /* find modified range */ 790 - f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields); 791 - ASSERT((1LL << f) & XFS_SB_MOD_BITS); 792 - last = xfs_sb_info[f + 1].offset - 1; 793 - 794 - f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); 795 - ASSERT((1LL << f) & XFS_SB_MOD_BITS); 796 - first = xfs_sb_info[f].offset; 797 - 831 + xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb); 798 832 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); 799 - xfs_trans_log_buf(tp, bp, first, last); 833 + xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb)); 834 + } 835 + 836 + /* 837 + * xfs_sync_sb 838 + * 839 + * Sync the superblock to disk. 840 + * 841 + * Note that the caller is responsible for checking the frozen state of the 842 + * filesystem. This procedure uses the non-blocking transaction allocator and 843 + * thus will allow modifications to a frozen fs. This is required because this 844 + * code can be called during the process of freezing where use of the high-level 845 + * allocator would deadlock. 
846 + */ 847 + int 848 + xfs_sync_sb( 849 + struct xfs_mount *mp, 850 + bool wait) 851 + { 852 + struct xfs_trans *tp; 853 + int error; 854 + 855 + tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP); 856 + error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); 857 + if (error) { 858 + xfs_trans_cancel(tp, 0); 859 + return error; 860 + } 861 + 862 + xfs_log_sb(tp); 863 + if (wait) 864 + xfs_trans_set_sync(tp); 865 + return xfs_trans_commit(tp, 0); 800 866 }
+6 -5
fs/xfs/libxfs/xfs_sb.h
··· 27 27 extern void xfs_perag_put(struct xfs_perag *pag); 28 28 extern int xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t); 29 29 30 - extern void xfs_sb_calc_crc(struct xfs_buf *); 31 - extern void xfs_mod_sb(struct xfs_trans *, __int64_t); 32 - extern void xfs_sb_mount_common(struct xfs_mount *, struct xfs_sb *); 33 - extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *); 34 - extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t); 30 + extern void xfs_sb_calc_crc(struct xfs_buf *bp); 31 + extern void xfs_log_sb(struct xfs_trans *tp); 32 + extern int xfs_sync_sb(struct xfs_mount *mp, bool wait); 33 + extern void xfs_sb_mount_common(struct xfs_mount *mp, struct xfs_sb *sbp); 34 + extern void xfs_sb_from_disk(struct xfs_sb *to, struct xfs_dsb *from); 35 + extern void xfs_sb_to_disk(struct xfs_dsb *to, struct xfs_sb *from); 35 36 extern void xfs_sb_quota_from_disk(struct xfs_sb *sbp); 36 37 37 38 #endif /* __XFS_SB_H__ */
+15 -18
fs/xfs/libxfs/xfs_shared.h
··· 82 82 #define XFS_TRANS_ATTR_RM 23 83 83 #define XFS_TRANS_ATTR_FLAG 24 84 84 #define XFS_TRANS_CLEAR_AGI_BUCKET 25 85 - #define XFS_TRANS_QM_SBCHANGE 26 85 + #define XFS_TRANS_SB_CHANGE 26 86 86 /* 87 87 * Dummy entries since we use the transaction type to index into the 88 88 * trans_type[] in xlog_recover_print_trans_head() ··· 95 95 #define XFS_TRANS_QM_DQCLUSTER 32 96 96 #define XFS_TRANS_QM_QINOCREATE 33 97 97 #define XFS_TRANS_QM_QUOTAOFF_END 34 98 - #define XFS_TRANS_SB_UNIT 35 99 - #define XFS_TRANS_FSYNC_TS 36 100 - #define XFS_TRANS_GROWFSRT_ALLOC 37 101 - #define XFS_TRANS_GROWFSRT_ZERO 38 102 - #define XFS_TRANS_GROWFSRT_FREE 39 103 - #define XFS_TRANS_SWAPEXT 40 104 - #define XFS_TRANS_SB_COUNT 41 105 - #define XFS_TRANS_CHECKPOINT 42 106 - #define XFS_TRANS_ICREATE 43 107 - #define XFS_TRANS_CREATE_TMPFILE 44 108 - #define XFS_TRANS_TYPE_MAX 44 98 + #define XFS_TRANS_FSYNC_TS 35 99 + #define XFS_TRANS_GROWFSRT_ALLOC 36 100 + #define XFS_TRANS_GROWFSRT_ZERO 37 101 + #define XFS_TRANS_GROWFSRT_FREE 38 102 + #define XFS_TRANS_SWAPEXT 39 103 + #define XFS_TRANS_CHECKPOINT 40 104 + #define XFS_TRANS_ICREATE 41 105 + #define XFS_TRANS_CREATE_TMPFILE 42 106 + #define XFS_TRANS_TYPE_MAX 43 109 107 /* new transaction types need to be reflected in xfs_logprint(8) */ 110 108 111 109 #define XFS_TRANS_TYPES \ ··· 111 113 { XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \ 112 114 { XFS_TRANS_INACTIVE, "INACTIVE" }, \ 113 115 { XFS_TRANS_CREATE, "CREATE" }, \ 114 - { XFS_TRANS_CREATE_TMPFILE, "CREATE_TMPFILE" }, \ 115 116 { XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \ 116 117 { XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \ 117 118 { XFS_TRANS_REMOVE, "REMOVE" }, \ ··· 131 134 { XFS_TRANS_ATTR_RM, "ATTR_RM" }, \ 132 135 { XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \ 133 136 { XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \ 134 - { XFS_TRANS_QM_SBCHANGE, "QM_SBCHANGE" }, \ 137 + { XFS_TRANS_SB_CHANGE, "SBCHANGE" }, \ 138 + { XFS_TRANS_DUMMY1, "DUMMY1" }, \ 139 + { 
XFS_TRANS_DUMMY2, "DUMMY2" }, \ 135 140 { XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \ 136 141 { XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \ 137 142 { XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \ 138 143 { XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \ 139 144 { XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \ 140 145 { XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \ 141 - { XFS_TRANS_SB_UNIT, "SB_UNIT" }, \ 142 146 { XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \ 143 147 { XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \ 144 148 { XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \ 145 149 { XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \ 146 150 { XFS_TRANS_SWAPEXT, "SWAPEXT" }, \ 147 - { XFS_TRANS_SB_COUNT, "SB_COUNT" }, \ 148 151 { XFS_TRANS_CHECKPOINT, "CHECKPOINT" }, \ 149 - { XFS_TRANS_DUMMY1, "DUMMY1" }, \ 150 - { XFS_TRANS_DUMMY2, "DUMMY2" }, \ 152 + { XFS_TRANS_ICREATE, "ICREATE" }, \ 153 + { XFS_TRANS_CREATE_TMPFILE, "CREATE_TMPFILE" }, \ 151 154 { XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" } 152 155 153 156 /*
-14
fs/xfs/libxfs/xfs_trans_resv.c
··· 716 716 } 717 717 718 718 /* 719 - * Clearing the quotaflags in the superblock. 720 - * the super block for changing quota flags: sector size 721 - */ 722 - STATIC uint 723 - xfs_calc_qm_sbchange_reservation( 724 - struct xfs_mount *mp) 725 - { 726 - return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize); 727 - } 728 - 729 - /* 730 719 * Adjusting quota limits. 731 720 * the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot) 732 721 */ ··· 853 864 * The following transactions are logged in logical format with 854 865 * a default log count. 855 866 */ 856 - resp->tr_qm_sbchange.tr_logres = xfs_calc_qm_sbchange_reservation(mp); 857 - resp->tr_qm_sbchange.tr_logcount = XFS_DEFAULT_LOG_COUNT; 858 - 859 867 resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp); 860 868 resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT; 861 869
-1
fs/xfs/libxfs/xfs_trans_resv.h
··· 56 56 struct xfs_trans_res tr_growrtalloc; /* grow realtime allocations */ 57 57 struct xfs_trans_res tr_growrtzero; /* grow realtime zeroing */ 58 58 struct xfs_trans_res tr_growrtfree; /* grow realtime freeing */ 59 - struct xfs_trans_res tr_qm_sbchange; /* change quota flags */ 60 59 struct xfs_trans_res tr_qm_setqlim; /* adjust quota limits */ 61 60 struct xfs_trans_res tr_qm_dqalloc; /* allocate quota on disk */ 62 61 struct xfs_trans_res tr_qm_quotaoff; /* turn quota off */
+1 -32
fs/xfs/xfs_fsops.c
··· 541 541 saved_error = error; 542 542 continue; 543 543 } 544 - xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS); 544 + xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb); 545 545 546 546 error = xfs_bwrite(bp); 547 547 xfs_buf_relse(bp); ··· 754 754 goto retry; 755 755 } 756 756 return 0; 757 - } 758 - 759 - /* 760 - * Dump a transaction into the log that contains no real change. This is needed 761 - * to be able to make the log dirty or stamp the current tail LSN into the log 762 - * during the covering operation. 763 - * 764 - * We cannot use an inode here for this - that will push dirty state back up 765 - * into the VFS and then periodic inode flushing will prevent log covering from 766 - * making progress. Hence we log a field in the superblock instead and use a 767 - * synchronous transaction to ensure the superblock is immediately unpinned 768 - * and can be written back. 769 - */ 770 - int 771 - xfs_fs_log_dummy( 772 - xfs_mount_t *mp) 773 - { 774 - xfs_trans_t *tp; 775 - int error; 776 - 777 - tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP); 778 - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); 779 - if (error) { 780 - xfs_trans_cancel(tp, 0); 781 - return error; 782 - } 783 - 784 - /* log the UUID because it is an unchanging field */ 785 - xfs_mod_sb(tp, XFS_SB_UUID); 786 - xfs_trans_set_sync(tp); 787 - return xfs_trans_commit(tp, 0); 788 757 } 789 758 790 759 int
+15 -3
fs/xfs/xfs_log.c
··· 33 33 #include "xfs_fsops.h" 34 34 #include "xfs_cksum.h" 35 35 #include "xfs_sysfs.h" 36 + #include "xfs_sb.h" 36 37 37 38 kmem_zone_t *xfs_log_ticket_zone; 38 39 ··· 1291 1290 struct xfs_mount *mp = log->l_mp; 1292 1291 1293 1292 /* dgc: errors ignored - not fatal and nowhere to report them */ 1294 - if (xfs_log_need_covered(mp)) 1295 - xfs_fs_log_dummy(mp); 1296 - else 1293 + if (xfs_log_need_covered(mp)) { 1294 + /* 1295 + * Dump a transaction into the log that contains no real change. 1296 + * This is needed to stamp the current tail LSN into the log 1297 + * during the covering operation. 1298 + * 1299 + * We cannot use an inode here for this - that will push dirty 1300 + * state back up into the VFS and then periodic inode flushing 1301 + * will prevent log covering from making progress. Hence we 1302 + * synchronously log the superblock instead to ensure the 1303 + * superblock is immediately unpinned and can be written back. 1304 + */ 1305 + xfs_sync_sb(mp, true); 1306 + } else 1297 1307 xfs_log_force(mp, 0); 1298 1308 1299 1309 /* start pushing all the metadata that is currently dirty */
+23 -83
fs/xfs/xfs_mount.c
··· 408 408 if (xfs_sb_version_hasdalign(sbp)) { 409 409 if (sbp->sb_unit != mp->m_dalign) { 410 410 sbp->sb_unit = mp->m_dalign; 411 - mp->m_update_flags |= XFS_SB_UNIT; 411 + mp->m_update_sb = true; 412 412 } 413 413 if (sbp->sb_width != mp->m_swidth) { 414 414 sbp->sb_width = mp->m_swidth; 415 - mp->m_update_flags |= XFS_SB_WIDTH; 415 + mp->m_update_sb = true; 416 416 } 417 417 } else { 418 418 xfs_warn(mp, ··· 583 583 xfs_mount_reset_sbqflags( 584 584 struct xfs_mount *mp) 585 585 { 586 - int error; 587 - struct xfs_trans *tp; 588 - 589 586 mp->m_qflags = 0; 590 587 591 - /* 592 - * It is OK to look at sb_qflags here in mount path, 593 - * without m_sb_lock. 594 - */ 588 + /* It is OK to look at sb_qflags in the mount path without m_sb_lock. */ 595 589 if (mp->m_sb.sb_qflags == 0) 596 590 return 0; 597 591 spin_lock(&mp->m_sb_lock); 598 592 mp->m_sb.sb_qflags = 0; 599 593 spin_unlock(&mp->m_sb_lock); 600 594 601 - /* 602 - * If the fs is readonly, let the incore superblock run 603 - * with quotas off but don't flush the update out to disk 604 - */ 605 - if (mp->m_flags & XFS_MOUNT_RDONLY) 595 + if (!xfs_fs_writable(mp, SB_FREEZE_WRITE)) 606 596 return 0; 607 597 608 - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); 609 - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0); 610 - if (error) { 611 - xfs_trans_cancel(tp, 0); 612 - xfs_alert(mp, "%s: Superblock update failed!", __func__); 613 - return error; 614 - } 615 - 616 - xfs_mod_sb(tp, XFS_SB_QFLAGS); 617 - return xfs_trans_commit(tp, 0); 598 + return xfs_sync_sb(mp, false); 618 599 } 619 600 620 601 __uint64_t ··· 640 659 xfs_sb_mount_common(mp, sbp); 641 660 642 661 /* 643 - * Check for a mismatched features2 values. Older kernels 644 - * read & wrote into the wrong sb offset for sb_features2 645 - * on some platforms due to xfs_sb_t not being 64bit size aligned 646 - * when sb_features2 was added, which made older superblock 647 - * reading/writing routines swap it as a 64-bit value. 
662 + * Check for a mismatched features2 values. Older kernels read & wrote 663 + * into the wrong sb offset for sb_features2 on some platforms due to 664 + * xfs_sb_t not being 64bit size aligned when sb_features2 was added, 665 + * which made older superblock reading/writing routines swap it as a 666 + * 64-bit value. 648 667 * 649 668 * For backwards compatibility, we make both slots equal. 650 669 * 651 - * If we detect a mismatched field, we OR the set bits into the 652 - * existing features2 field in case it has already been modified; we 653 - * don't want to lose any features. We then update the bad location 654 - * with the ORed value so that older kernels will see any features2 655 - * flags, and mark the two fields as needing updates once the 656 - * transaction subsystem is online. 670 + * If we detect a mismatched field, we OR the set bits into the existing 671 + * features2 field in case it has already been modified; we don't want 672 + * to lose any features. We then update the bad location with the ORed 673 + * value so that older kernels will see any features2 flags. The 674 + * superblock writeback code ensures the new sb_features2 is copied to 675 + * sb_bad_features2 before it is logged or written to disk. 
657 676 */ 658 677 if (xfs_sb_has_mismatched_features2(sbp)) { 659 678 xfs_warn(mp, "correcting sb_features alignment problem"); 660 679 sbp->sb_features2 |= sbp->sb_bad_features2; 661 - sbp->sb_bad_features2 = sbp->sb_features2; 662 - mp->m_update_flags |= XFS_SB_FEATURES2; 680 + mp->m_update_sb = true; 663 681 664 682 /* 665 683 * Re-check for ATTR2 in case it was found in bad_features2 ··· 672 692 if (xfs_sb_version_hasattr2(&mp->m_sb) && 673 693 (mp->m_flags & XFS_MOUNT_NOATTR2)) { 674 694 xfs_sb_version_removeattr2(&mp->m_sb); 675 - mp->m_update_flags |= XFS_SB_FEATURES2; 695 + mp->m_update_sb = true; 676 696 677 697 /* update sb_versionnum for the clearing of the morebits */ 678 698 if (!sbp->sb_features2) 679 - mp->m_update_flags |= XFS_SB_VERSIONNUM; 699 + mp->m_update_sb = true; 680 700 } 681 701 682 702 /* always use v2 inodes by default now */ 683 703 if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) { 684 704 mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT; 685 - mp->m_update_flags |= XFS_SB_VERSIONNUM; 705 + mp->m_update_sb = true; 686 706 } 687 707 688 708 /* ··· 875 895 * the next remount into writeable mode. Otherwise we would never 876 896 * perform the update e.g. for the root filesystem. 877 897 */ 878 - if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { 879 - error = xfs_mount_log_sb(mp, mp->m_update_flags); 898 + if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) { 899 + error = xfs_sync_sb(mp, false); 880 900 if (error) { 881 901 xfs_warn(mp, "failed to write sb changes"); 882 902 goto out_rtunmount; ··· 1083 1103 int 1084 1104 xfs_log_sbcount(xfs_mount_t *mp) 1085 1105 { 1086 - xfs_trans_t *tp; 1087 - int error; 1088 - 1089 1106 /* allow this to proceed during the freeze sequence... 
*/ 1090 1107 if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE)) 1091 1108 return 0; ··· 1096 1119 if (!xfs_sb_version_haslazysbcount(&mp->m_sb)) 1097 1120 return 0; 1098 1121 1099 - tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP); 1100 - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); 1101 - if (error) { 1102 - xfs_trans_cancel(tp, 0); 1103 - return error; 1104 - } 1105 - 1106 - xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS); 1107 - xfs_trans_set_sync(tp); 1108 - error = xfs_trans_commit(tp, 0); 1109 - return error; 1122 + return xfs_sync_sb(mp, true); 1110 1123 } 1111 1124 1112 1125 /* ··· 1387 1420 xfs_buf_lock(bp); 1388 1421 mp->m_sb_bp = NULL; 1389 1422 xfs_buf_relse(bp); 1390 - } 1391 - 1392 - /* 1393 - * Used to log changes to the superblock unit and width fields which could 1394 - * be altered by the mount options, as well as any potential sb_features2 1395 - * fixup. Only the first superblock is updated. 1396 - */ 1397 - int 1398 - xfs_mount_log_sb( 1399 - xfs_mount_t *mp, 1400 - __int64_t fields) 1401 - { 1402 - xfs_trans_t *tp; 1403 - int error; 1404 - 1405 - ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID | 1406 - XFS_SB_FEATURES2 | XFS_SB_VERSIONNUM)); 1407 - 1408 - tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); 1409 - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); 1410 - if (error) { 1411 - xfs_trans_cancel(tp, 0); 1412 - return error; 1413 - } 1414 - xfs_mod_sb(tp, fields); 1415 - error = xfs_trans_commit(tp, 0); 1416 - return error; 1417 1423 } 1418 1424 1419 1425 /*
+2 -3
fs/xfs/xfs_mount.h
··· 162 162 struct delayed_work m_reclaim_work; /* background inode reclaim */ 163 163 struct delayed_work m_eofblocks_work; /* background eof blocks 164 164 trimming */ 165 - __int64_t m_update_flags; /* sb flags we need to update 166 - on the next remount,rw */ 165 + bool m_update_sb; /* sb needs update in mount */ 167 166 int64_t m_low_space[XFS_LOWSP_MAX]; 168 167 /* low free space thresholds */ 169 168 struct xfs_kobj m_kobj; ··· 377 378 extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int); 378 379 extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, 379 380 uint, int); 380 - extern int xfs_mount_log_sb(xfs_mount_t *, __int64_t); 381 + extern int xfs_mount_log_sb(xfs_mount_t *); 381 382 extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); 382 383 extern int xfs_readsb(xfs_mount_t *, int); 383 384 extern void xfs_freesb(xfs_mount_t *);
+2 -41
fs/xfs/xfs_qm.c
··· 714 714 xfs_qm_qino_alloc( 715 715 xfs_mount_t *mp, 716 716 xfs_inode_t **ip, 717 - __int64_t sbfields, 718 717 uint flags) 719 718 { 720 719 xfs_trans_t *tp; ··· 776 777 spin_lock(&mp->m_sb_lock); 777 778 if (flags & XFS_QMOPT_SBVERSION) { 778 779 ASSERT(!xfs_sb_version_hasquota(&mp->m_sb)); 779 - ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | 780 - XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) == 781 - (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | 782 - XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | 783 - XFS_SB_QFLAGS)); 784 780 785 781 xfs_sb_version_addquota(&mp->m_sb); 786 782 mp->m_sb.sb_uquotino = NULLFSINO; ··· 792 798 else 793 799 mp->m_sb.sb_pquotino = (*ip)->i_ino; 794 800 spin_unlock(&mp->m_sb_lock); 795 - xfs_mod_sb(tp, sbfields); 801 + xfs_log_sb(tp); 796 802 797 803 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { 798 804 xfs_alert(mp, "%s failed (error %d)!", __func__, error); ··· 1445 1451 spin_unlock(&mp->m_sb_lock); 1446 1452 1447 1453 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { 1448 - if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { 1454 + if (xfs_sync_sb(mp, false)) { 1449 1455 /* 1450 1456 * We could only have been turning quotas off. 
1451 1457 * We aren't in very good shape actually because ··· 1476 1482 struct xfs_inode *gip = NULL; 1477 1483 struct xfs_inode *pip = NULL; 1478 1484 int error; 1479 - __int64_t sbflags = 0; 1480 1485 uint flags = 0; 1481 1486 1482 1487 ASSERT(mp->m_quotainfo); ··· 1510 1517 } 1511 1518 } else { 1512 1519 flags |= XFS_QMOPT_SBVERSION; 1513 - sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | 1514 - XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | 1515 - XFS_SB_QFLAGS); 1516 1520 } 1517 1521 1518 1522 /* ··· 1520 1530 */ 1521 1531 if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { 1522 1532 error = xfs_qm_qino_alloc(mp, &uip, 1523 - sbflags | XFS_SB_UQUOTINO, 1524 1533 flags | XFS_QMOPT_UQUOTA); 1525 1534 if (error) 1526 1535 goto error_rele; ··· 1528 1539 } 1529 1540 if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) { 1530 1541 error = xfs_qm_qino_alloc(mp, &gip, 1531 - sbflags | XFS_SB_GQUOTINO, 1532 1542 flags | XFS_QMOPT_GQUOTA); 1533 1543 if (error) 1534 1544 goto error_rele; ··· 1536 1548 } 1537 1549 if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) { 1538 1550 error = xfs_qm_qino_alloc(mp, &pip, 1539 - sbflags | XFS_SB_PQUOTINO, 1540 1551 flags | XFS_QMOPT_PQUOTA); 1541 1552 if (error) 1542 1553 goto error_rele; ··· 1573 1586 1574 1587 xfs_qm_dqdestroy(dqp); 1575 1588 } 1576 - 1577 - /* 1578 - * Start a transaction and write the incore superblock changes to 1579 - * disk. flags parameter indicates which fields have changed. 
1580 - */ 1581 - int 1582 - xfs_qm_write_sb_changes( 1583 - xfs_mount_t *mp, 1584 - __int64_t flags) 1585 - { 1586 - xfs_trans_t *tp; 1587 - int error; 1588 - 1589 - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); 1590 - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0); 1591 - if (error) { 1592 - xfs_trans_cancel(tp, 0); 1593 - return error; 1594 - } 1595 - 1596 - xfs_mod_sb(tp, flags); 1597 - error = xfs_trans_commit(tp, 0); 1598 - 1599 - return error; 1600 - } 1601 - 1602 1589 1603 1590 /* --------------- utility functions for vnodeops ---------------- */ 1604 1591
-1
fs/xfs/xfs_qm.h
··· 157 157 #define XFS_QM_RTBWARNLIMIT 5 158 158 159 159 extern void xfs_qm_destroy_quotainfo(struct xfs_mount *); 160 - extern int xfs_qm_write_sb_changes(struct xfs_mount *, __int64_t); 161 160 162 161 /* dquot stuff */ 163 162 extern void xfs_qm_dqpurge_all(struct xfs_mount *, uint);
+5 -9
fs/xfs/xfs_qm_syscalls.c
··· 92 92 mutex_unlock(&q->qi_quotaofflock); 93 93 94 94 /* XXX what to do if error ? Revert back to old vals incore ? */ 95 - error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); 96 - return error; 95 + return xfs_sync_sb(mp, false); 97 96 } 98 97 99 98 dqtype = 0; ··· 313 314 { 314 315 int error; 315 316 uint qf; 316 - __int64_t sbflags; 317 317 318 318 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); 319 319 /* 320 320 * Switching on quota accounting must be done at mount time. 321 321 */ 322 322 flags &= ~(XFS_ALL_QUOTA_ACCT); 323 - 324 - sbflags = 0; 325 323 326 324 if (flags == 0) { 327 325 xfs_debug(mp, "%s: zero flags, m_qflags=%x", ··· 366 370 /* 367 371 * There's nothing to change if it's the same. 368 372 */ 369 - if ((qf & flags) == flags && sbflags == 0) 373 + if ((qf & flags) == flags) 370 374 return -EEXIST; 371 - sbflags |= XFS_SB_QFLAGS; 372 375 373 - if ((error = xfs_qm_write_sb_changes(mp, sbflags))) 376 + error = xfs_sync_sb(mp, false); 377 + if (error) 374 378 return error; 375 379 /* 376 380 * If we aren't trying to switch on quota enforcement, we are done. ··· 797 801 mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; 798 802 spin_unlock(&mp->m_sb_lock); 799 803 800 - xfs_mod_sb(tp, XFS_SB_QFLAGS); 804 + xfs_log_sb(tp); 801 805 802 806 /* 803 807 * We have to make sure that the transaction is secure on disk before we
+7 -6
fs/xfs/xfs_super.c
··· 1257 1257 * If this is the first remount to writeable state we 1258 1258 * might have some superblock changes to update. 1259 1259 */ 1260 - if (mp->m_update_flags) { 1261 - error = xfs_mount_log_sb(mp, mp->m_update_flags); 1260 + if (mp->m_update_sb) { 1261 + error = xfs_sync_sb(mp, false); 1262 1262 if (error) { 1263 1263 xfs_warn(mp, "failed to write sb changes"); 1264 1264 return error; 1265 1265 } 1266 - mp->m_update_flags = 0; 1266 + mp->m_update_sb = false; 1267 1267 } 1268 1268 1269 1269 /* ··· 1293 1293 1294 1294 /* 1295 1295 * Second stage of a freeze. The data is already frozen so we only 1296 - * need to take care of the metadata. Once that's done write a dummy 1297 - * record to dirty the log in case of a crash while frozen. 1296 + * need to take care of the metadata. Once that's done sync the superblock 1297 + * to the log to dirty it in case of a crash while frozen. This ensures that we 1298 + * will recover the unlinked inode lists on the next mount. 1298 1299 */ 1299 1300 STATIC int 1300 1301 xfs_fs_freeze( ··· 1305 1304 1306 1305 xfs_save_resvblks(mp); 1307 1306 xfs_quiesce_attr(mp); 1308 - return xfs_fs_log_dummy(mp); 1307 + return xfs_sync_sb(mp, true); 1309 1308 } 1310 1309 1311 1310 STATIC int