Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xfs: convert xfs_sb_version_has checks to use mount features

This is a conversion of the remaining xfs_sb_version_has..(sbp)
checks to use xfs_has_..(mp) feature checks.

This was largely done with a vim replacement macro that did:

:0,$s/xfs_sb_version_has\(.*\)&\(.*\)->m_sb/xfs_has_\1\2/g<CR>

A couple of other variants were also used, and the rest touched up
by hand.

$ size -t fs/xfs/built-in.a
text data bss dec hex filename
before 1127533 311352 484 1439369 15f689 (TOTALS)
after 1125360 311352 484 1437196 15ee0c (TOTALS)

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>

Authored by Dave Chinner and committed by Darrick J. Wong.
ebd9027d 55fafb31

+90 -96
+2 -2
fs/xfs/libxfs/xfs_ag.c
··· 607 607 } 608 608 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) 609 609 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); 610 - if (xfs_sb_version_hasinobtcounts(&mp->m_sb)) { 610 + if (xfs_has_inobtcounts(mp)) { 611 611 agi->agi_iblocks = cpu_to_be32(1); 612 - if (xfs_sb_version_hasfinobt(&mp->m_sb)) 612 + if (xfs_has_finobt(mp)) 613 613 agi->agi_fblocks = cpu_to_be32(1); 614 614 } 615 615 }
+6 -6
fs/xfs/libxfs/xfs_alloc.c
··· 2264 2264 min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1, 2265 2265 mp->m_ag_maxlevels); 2266 2266 /* space needed reverse mapping used space btree */ 2267 - if (xfs_sb_version_hasrmapbt(&mp->m_sb)) 2267 + if (xfs_has_rmapbt(mp)) 2268 2268 min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1, 2269 2269 mp->m_rmap_maxlevels); 2270 2270 ··· 2912 2912 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > mp->m_rmap_maxlevels)) 2913 2913 return __this_address; 2914 2914 2915 - if (xfs_sb_version_hasrmapbt(&mp->m_sb) && 2915 + if (xfs_has_rmapbt(mp) && 2916 2916 be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length)) 2917 2917 return __this_address; 2918 2918 ··· 2925 2925 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno) 2926 2926 return __this_address; 2927 2927 2928 - if (xfs_sb_version_haslazysbcount(&mp->m_sb) && 2928 + if (xfs_has_lazysbcount(mp) && 2929 2929 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length)) 2930 2930 return __this_address; 2931 2931 2932 - if (xfs_sb_version_hasreflink(&mp->m_sb) && 2932 + if (xfs_has_reflink(mp) && 2933 2933 be32_to_cpu(agf->agf_refcount_blocks) > 2934 2934 be32_to_cpu(agf->agf_length)) 2935 2935 return __this_address; 2936 2936 2937 - if (xfs_sb_version_hasreflink(&mp->m_sb) && 2937 + if (xfs_has_reflink(mp) && 2938 2938 (be32_to_cpu(agf->agf_refcount_level) < 1 || 2939 2939 be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels)) 2940 2940 return __this_address; ··· 3073 3073 * counter only tracks non-root blocks. 3074 3074 */ 3075 3075 allocbt_blks = pag->pagf_btreeblks; 3076 - if (xfs_sb_version_hasrmapbt(&mp->m_sb)) 3076 + if (xfs_has_rmapbt(mp)) 3077 3077 allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1; 3078 3078 if (allocbt_blks > 0) 3079 3079 atomic64_add(allocbt_blks, &mp->m_allocbt_blks);
+1 -1
fs/xfs/libxfs/xfs_alloc.h
··· 243 243 xfs_buf_to_agfl_bno( 244 244 struct xfs_buf *bp) 245 245 { 246 - if (xfs_sb_version_hascrc(&bp->b_mount->m_sb)) 246 + if (xfs_has_crc(bp->b_mount)) 247 247 return bp->b_addr + sizeof(struct xfs_agfl); 248 248 return bp->b_addr; 249 249 }
+1 -1
fs/xfs/libxfs/xfs_alloc_btree.c
··· 295 295 if (!xfs_verify_magic(bp, block->bb_magic)) 296 296 return __this_address; 297 297 298 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 298 + if (xfs_has_crc(mp)) { 299 299 fa = xfs_btree_sblock_v5hdr_verify(bp); 300 300 if (fa) 301 301 return fa;
+1 -1
fs/xfs/libxfs/xfs_bmap_btree.c
··· 428 428 if (!xfs_verify_magic(bp, block->bb_magic)) 429 429 return __this_address; 430 430 431 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 431 + if (xfs_has_crc(mp)) { 432 432 /* 433 433 * XXX: need a better way of verifying the owner here. Right now 434 434 * just make sure there has been one set.
+3 -3
fs/xfs/libxfs/xfs_btree.c
··· 273 273 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); 274 274 struct xfs_buf_log_item *bip = bp->b_log_item; 275 275 276 - if (!xfs_sb_version_hascrc(&bp->b_mount->m_sb)) 276 + if (!xfs_has_crc(bp->b_mount)) 277 277 return; 278 278 if (bip) 279 279 block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn); ··· 311 311 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); 312 312 struct xfs_buf_log_item *bip = bp->b_log_item; 313 313 314 - if (!xfs_sb_version_hascrc(&bp->b_mount->m_sb)) 314 + if (!xfs_has_crc(bp->b_mount)) 315 315 return; 316 316 if (bip) 317 317 block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn); ··· 1749 1749 return error; 1750 1750 1751 1751 /* Check the inode owner since the verifiers don't. */ 1752 - if (xfs_sb_version_hascrc(&cur->bc_mp->m_sb) && 1752 + if (xfs_has_crc(cur->bc_mp) && 1753 1753 !(cur->bc_ino.flags & XFS_BTCUR_BMBT_INVALID_OWNER) && 1754 1754 (cur->bc_flags & XFS_BTREE_LONG_PTRS) && 1755 1755 be64_to_cpu((*blkp)->bb_u.l.bb_owner) !=
+3 -3
fs/xfs/libxfs/xfs_da_btree.c
··· 129 129 struct xfs_da3_icnode_hdr *to, 130 130 struct xfs_da_intnode *from) 131 131 { 132 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 132 + if (xfs_has_crc(mp)) { 133 133 struct xfs_da3_intnode *from3 = (struct xfs_da3_intnode *)from; 134 134 135 135 to->forw = be32_to_cpu(from3->hdr.info.hdr.forw); ··· 156 156 struct xfs_da_intnode *to, 157 157 struct xfs_da3_icnode_hdr *from) 158 158 { 159 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 159 + if (xfs_has_crc(mp)) { 160 160 struct xfs_da3_intnode *to3 = (struct xfs_da3_intnode *)to; 161 161 162 162 ASSERT(from->magic == XFS_DA3_NODE_MAGIC); ··· 191 191 if (!xfs_verify_magic16(bp, hdr->magic)) 192 192 return __this_address; 193 193 194 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 194 + if (xfs_has_crc(mp)) { 195 195 if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid)) 196 196 return __this_address; 197 197 if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
+3 -3
fs/xfs/libxfs/xfs_dir2.c
··· 115 115 dageo->fsblog = mp->m_sb.sb_blocklog; 116 116 dageo->blksize = xfs_dir2_dirblock_bytes(&mp->m_sb); 117 117 dageo->fsbcount = 1 << mp->m_sb.sb_dirblklog; 118 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 118 + if (xfs_has_crc(mp)) { 119 119 dageo->node_hdr_size = sizeof(struct xfs_da3_node_hdr); 120 120 dageo->leaf_hdr_size = sizeof(struct xfs_dir3_leaf_hdr); 121 121 dageo->free_hdr_size = sizeof(struct xfs_dir3_free_hdr); ··· 730 730 struct xfs_mount *mp, 731 731 struct xfs_name *name) 732 732 { 733 - if (unlikely(xfs_sb_version_hasasciici(&mp->m_sb))) 733 + if (unlikely(xfs_has_asciici(mp))) 734 734 return xfs_ascii_ci_hashname(name); 735 735 return xfs_da_hashname(name->name, name->len); 736 736 } ··· 741 741 const unsigned char *name, 742 742 int len) 743 743 { 744 - if (unlikely(xfs_sb_version_hasasciici(&args->dp->i_mount->m_sb))) 744 + if (unlikely(xfs_has_asciici(args->dp->i_mount))) 745 745 return xfs_ascii_ci_compname(args, name, len); 746 746 return xfs_da_compname(args, name, len); 747 747 }
+2 -2
fs/xfs/libxfs/xfs_dir2_block.c
··· 53 53 if (!xfs_verify_magic(bp, hdr3->magic)) 54 54 return __this_address; 55 55 56 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 56 + if (xfs_has_crc(mp)) { 57 57 if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid)) 58 58 return __this_address; 59 59 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) ··· 121 121 { 122 122 struct xfs_mount *mp = dp->i_mount; 123 123 124 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 124 + if (xfs_has_crc(mp)) { 125 125 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; 126 126 127 127 if (be64_to_cpu(hdr3->owner) != dp->i_ino)
+5 -5
fs/xfs/libxfs/xfs_dir2_data.c
··· 29 29 struct xfs_mount *mp, 30 30 struct xfs_dir2_data_hdr *hdr) 31 31 { 32 - if (xfs_sb_version_hascrc(&mp->m_sb)) 32 + if (xfs_has_crc(mp)) 33 33 return ((struct xfs_dir3_data_hdr *)hdr)->best_free; 34 34 return hdr->bestfree; 35 35 } ··· 51 51 struct xfs_mount *mp, 52 52 struct xfs_dir2_data_entry *dep) 53 53 { 54 - if (xfs_sb_version_hasftype(&mp->m_sb)) { 54 + if (xfs_has_ftype(mp)) { 55 55 uint8_t ftype = dep->name[dep->namelen]; 56 56 57 57 if (likely(ftype < XFS_DIR3_FT_MAX)) ··· 70 70 ASSERT(ftype < XFS_DIR3_FT_MAX); 71 71 ASSERT(dep->namelen != 0); 72 72 73 - if (xfs_sb_version_hasftype(&mp->m_sb)) 73 + if (xfs_has_ftype(mp)) 74 74 dep->name[dep->namelen] = ftype; 75 75 } 76 76 ··· 297 297 if (!xfs_verify_magic(bp, hdr3->magic)) 298 298 return __this_address; 299 299 300 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 300 + if (xfs_has_crc(mp)) { 301 301 if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid)) 302 302 return __this_address; 303 303 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) ··· 401 401 { 402 402 struct xfs_mount *mp = dp->i_mount; 403 403 404 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 404 + if (xfs_has_crc(mp)) { 405 405 struct xfs_dir3_data_hdr *hdr3 = bp->b_addr; 406 406 407 407 if (be64_to_cpu(hdr3->hdr.owner) != dp->i_ino)
+2 -2
fs/xfs/libxfs/xfs_dir2_leaf.c
··· 37 37 struct xfs_dir3_icleaf_hdr *to, 38 38 struct xfs_dir2_leaf *from) 39 39 { 40 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 40 + if (xfs_has_crc(mp)) { 41 41 struct xfs_dir3_leaf *from3 = (struct xfs_dir3_leaf *)from; 42 42 43 43 to->forw = be32_to_cpu(from3->hdr.info.hdr.forw); ··· 68 68 struct xfs_dir2_leaf *to, 69 69 struct xfs_dir3_icleaf_hdr *from) 70 70 { 71 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 71 + if (xfs_has_crc(mp)) { 72 72 struct xfs_dir3_leaf *to3 = (struct xfs_dir3_leaf *)to; 73 73 74 74 ASSERT(from->magic == XFS_DIR3_LEAF1_MAGIC ||
+2 -2
fs/xfs/libxfs/xfs_dir2_node.c
··· 247 247 struct xfs_dir3_icfree_hdr *to, 248 248 struct xfs_dir2_free *from) 249 249 { 250 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 250 + if (xfs_has_crc(mp)) { 251 251 struct xfs_dir3_free *from3 = (struct xfs_dir3_free *)from; 252 252 253 253 to->magic = be32_to_cpu(from3->hdr.hdr.magic); ··· 274 274 struct xfs_dir2_free *to, 275 275 struct xfs_dir3_icfree_hdr *from) 276 276 { 277 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 277 + if (xfs_has_crc(mp)) { 278 278 struct xfs_dir3_free *to3 = (struct xfs_dir3_free *)to; 279 279 280 280 ASSERT(from->magic == XFS_DIR3_FREE_MAGIC);
+1 -1
fs/xfs/libxfs/xfs_dir2_priv.h
··· 196 196 197 197 len = offsetof(struct xfs_dir2_data_entry, name[0]) + namelen + 198 198 sizeof(xfs_dir2_data_off_t) /* tag */; 199 - if (xfs_sb_version_hasftype(&mp->m_sb)) 199 + if (xfs_has_ftype(mp)) 200 200 len += sizeof(uint8_t); 201 201 return round_up(len, XFS_DIR2_DATA_ALIGN); 202 202 }
+5 -5
fs/xfs/libxfs/xfs_dir2_sf.c
··· 48 48 count += sizeof(struct xfs_dir2_sf_entry); /* namelen + offset */ 49 49 count += hdr->i8count ? XFS_INO64_SIZE : XFS_INO32_SIZE; /* ino # */ 50 50 51 - if (xfs_sb_version_hasftype(&mp->m_sb)) 51 + if (xfs_has_ftype(mp)) 52 52 count += sizeof(uint8_t); 53 53 return count; 54 54 } ··· 76 76 { 77 77 uint8_t *from = sfep->name + sfep->namelen; 78 78 79 - if (xfs_sb_version_hasftype(&mp->m_sb)) 79 + if (xfs_has_ftype(mp)) 80 80 from++; 81 81 82 82 if (!hdr->i8count) ··· 95 95 96 96 ASSERT(ino <= XFS_MAXINUMBER); 97 97 98 - if (xfs_sb_version_hasftype(&mp->m_sb)) 98 + if (xfs_has_ftype(mp)) 99 99 to++; 100 100 101 101 if (hdr->i8count) ··· 135 135 struct xfs_mount *mp, 136 136 struct xfs_dir2_sf_entry *sfep) 137 137 { 138 - if (xfs_sb_version_hasftype(&mp->m_sb)) { 138 + if (xfs_has_ftype(mp)) { 139 139 uint8_t ftype = sfep->name[sfep->namelen]; 140 140 141 141 if (ftype < XFS_DIR3_FT_MAX) ··· 153 153 { 154 154 ASSERT(ftype < XFS_DIR3_FT_MAX); 155 155 156 - if (xfs_sb_version_hasftype(&mp->m_sb)) 156 + if (xfs_has_ftype(mp)) 157 157 sfep->name[sfep->namelen] = ftype; 158 158 } 159 159
+1 -1
fs/xfs/libxfs/xfs_dquot_buf.c
··· 70 70 return __this_address; 71 71 72 72 if ((ddq->d_type & XFS_DQTYPE_BIGTIME) && 73 - !xfs_sb_version_hasbigtime(&mp->m_sb)) 73 + !xfs_has_bigtime(mp)) 74 74 return __this_address; 75 75 76 76 if ((ddq->d_type & XFS_DQTYPE_BIGTIME) && !ddq->d_id)
+15 -15
fs/xfs/libxfs/xfs_ialloc.c
··· 302 302 * That means for v3 inode we log the entire buffer rather than just the 303 303 * inode cores. 304 304 */ 305 - if (xfs_sb_version_has_v3inode(&mp->m_sb)) { 305 + if (xfs_has_v3inodes(mp)) { 306 306 version = 3; 307 307 ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno)); 308 308 ··· 635 635 636 636 #ifdef DEBUG 637 637 /* randomly do sparse inode allocations */ 638 - if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) && 638 + if (xfs_has_sparseinodes(tp->t_mountp) && 639 639 igeo->ialloc_min_blks < igeo->ialloc_blks) 640 640 do_sparse = prandom_u32() & 1; 641 641 #endif ··· 754 754 * Finally, try a sparse allocation if the filesystem supports it and 755 755 * the sparse allocation length is smaller than a full chunk. 756 756 */ 757 - if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) && 757 + if (xfs_has_sparseinodes(args.mp) && 758 758 igeo->ialloc_min_blks < igeo->ialloc_blks && 759 759 args.fsbno == NULLFSBLOCK) { 760 760 sparse_alloc: ··· 856 856 * from the previous call. Set merge false to replace any 857 857 * existing record with this one. 858 858 */ 859 - if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) { 859 + if (xfs_has_finobt(args.mp)) { 860 860 error = xfs_inobt_insert_sprec(args.mp, tp, agbp, pag, 861 861 XFS_BTNUM_FINO, &rec, false); 862 862 if (error) ··· 869 869 if (error) 870 870 return error; 871 871 872 - if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) { 872 + if (xfs_has_finobt(args.mp)) { 873 873 error = xfs_inobt_insert(args.mp, tp, agbp, pag, newino, 874 874 newlen, XFS_BTNUM_FINO); 875 875 if (error) ··· 1448 1448 int offset; 1449 1449 int i; 1450 1450 1451 - if (!xfs_sb_version_hasfinobt(&mp->m_sb)) 1451 + if (!xfs_has_finobt(mp)) 1452 1452 return xfs_dialloc_ag_inobt(tp, agbp, pag, parent, inop); 1453 1453 1454 1454 /* ··· 2187 2187 /* 2188 2188 * Fix up the free inode btree. 
2189 2189 */ 2190 - if (xfs_sb_version_hasfinobt(&mp->m_sb)) { 2190 + if (xfs_has_finobt(mp)) { 2191 2191 error = xfs_difree_finobt(mp, tp, agbp, pag, agino, &rec); 2192 2192 if (error) 2193 2193 goto error0; ··· 2771 2771 uint inodes; 2772 2772 2773 2773 igeo->new_diflags2 = 0; 2774 - if (xfs_sb_version_hasbigtime(&mp->m_sb)) 2774 + if (xfs_has_bigtime(mp)) 2775 2775 igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME; 2776 2776 2777 2777 /* Compute inode btree geometry. */ ··· 2826 2826 * cannot change the behavior. 2827 2827 */ 2828 2828 igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE; 2829 - if (xfs_sb_version_has_v3inode(&mp->m_sb)) { 2829 + if (xfs_has_v3inodes(mp)) { 2830 2830 int new_size = igeo->inode_cluster_size_raw; 2831 2831 2832 2832 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE; ··· 2844 2844 igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster); 2845 2845 2846 2846 /* Calculate inode cluster alignment. */ 2847 - if (xfs_sb_version_hasalign(&mp->m_sb) && 2847 + if (xfs_has_align(mp) && 2848 2848 mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster) 2849 2849 igeo->cluster_align = mp->m_sb.sb_inoalignmt; 2850 2850 else ··· 2892 2892 first_bno += xfs_alloc_min_freelist(mp, NULL); 2893 2893 2894 2894 /* ...the free inode btree root... */ 2895 - if (xfs_sb_version_hasfinobt(&mp->m_sb)) 2895 + if (xfs_has_finobt(mp)) 2896 2896 first_bno++; 2897 2897 2898 2898 /* ...the reverse mapping btree root... */ 2899 - if (xfs_sb_version_hasrmapbt(&mp->m_sb)) 2899 + if (xfs_has_rmapbt(mp)) 2900 2900 first_bno++; 2901 2901 2902 2902 /* ...the reference count btree... */ 2903 - if (xfs_sb_version_hasreflink(&mp->m_sb)) 2903 + if (xfs_has_reflink(mp)) 2904 2904 first_bno++; 2905 2905 2906 2906 /* ··· 2918 2918 * Now round first_bno up to whatever allocation alignment is given 2919 2919 * by the filesystem or was passed in. 
2920 2920 */ 2921 - if (xfs_sb_version_hasdalign(&mp->m_sb) && igeo->ialloc_align > 0) 2921 + if (xfs_has_dalign(mp) && igeo->ialloc_align > 0) 2922 2922 first_bno = roundup(first_bno, sunit); 2923 - else if (xfs_sb_version_hasalign(&mp->m_sb) && 2923 + else if (xfs_has_align(mp) && 2924 2924 mp->m_sb.sb_inoalignmt > 1) 2925 2925 first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt); 2926 2926
+5 -5
fs/xfs/libxfs/xfs_ialloc_btree.c
··· 76 76 struct xfs_buf *agbp = cur->bc_ag.agbp; 77 77 struct xfs_agi *agi = agbp->b_addr; 78 78 79 - if (!xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) 79 + if (!xfs_has_inobtcounts(cur->bc_mp)) 80 80 return; 81 81 82 82 if (cur->bc_btnum == XFS_BTNUM_FINO) ··· 292 292 * but beware of the landmine (i.e. need to check pag->pagi_init) if we 293 293 * ever do. 294 294 */ 295 - if (xfs_sb_version_hascrc(&mp->m_sb)) { 295 + if (xfs_has_crc(mp)) { 296 296 fa = xfs_btree_sblock_v5hdr_verify(bp); 297 297 if (fa) 298 298 return fa; ··· 511 511 fields = XFS_AGI_ROOT | XFS_AGI_LEVEL; 512 512 agi->agi_root = cpu_to_be32(afake->af_root); 513 513 agi->agi_level = cpu_to_be32(afake->af_levels); 514 - if (xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) { 514 + if (xfs_has_inobtcounts(cur->bc_mp)) { 515 515 agi->agi_iblocks = cpu_to_be32(afake->af_blocks); 516 516 fields |= XFS_AGI_IBLOCKS; 517 517 } ··· 521 521 fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL; 522 522 agi->agi_free_root = cpu_to_be32(afake->af_root); 523 523 agi->agi_free_level = cpu_to_be32(afake->af_levels); 524 - if (xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) { 524 + if (xfs_has_inobtcounts(cur->bc_mp)) { 525 525 agi->agi_fblocks = cpu_to_be32(afake->af_blocks); 526 526 fields |= XFS_AGI_IBLOCKS; 527 527 } ··· 740 740 if (!xfs_has_finobt(mp)) 741 741 return 0; 742 742 743 - if (xfs_sb_version_hasinobtcounts(&mp->m_sb)) 743 + if (xfs_has_inobtcounts(mp)) 744 744 error = xfs_finobt_read_blocks(mp, tp, pag, &tree_len); 745 745 else 746 746 error = xfs_inobt_count_blocks(mp, tp, pag, XFS_BTNUM_FINO,
+5 -5
fs/xfs/libxfs/xfs_inode_buf.c
··· 192 192 * inode. If the inode is unused, mode is zero and we shouldn't mess 193 193 * with the uninitialized part of it. 194 194 */ 195 - if (!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) 195 + if (!xfs_has_v3inodes(ip->i_mount)) 196 196 ip->i_flushiter = be16_to_cpu(from->di_flushiter); 197 197 inode->i_generation = be32_to_cpu(from->di_gen); 198 198 inode->i_mode = be16_to_cpu(from->di_mode); ··· 235 235 if (from->di_dmevmask || from->di_dmstate) 236 236 xfs_iflags_set(ip, XFS_IPRESERVE_DM_FIELDS); 237 237 238 - if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) { 238 + if (xfs_has_v3inodes(ip->i_mount)) { 239 239 inode_set_iversion_queried(inode, 240 240 be64_to_cpu(from->di_changecount)); 241 241 ip->i_crtime = xfs_inode_from_disk_ts(from, from->di_crtime); ··· 313 313 to->di_aformat = xfs_ifork_format(ip->i_afp); 314 314 to->di_flags = cpu_to_be16(ip->i_diflags); 315 315 316 - if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) { 316 + if (xfs_has_v3inodes(ip->i_mount)) { 317 317 to->di_version = 3; 318 318 to->di_changecount = cpu_to_be64(inode_peek_iversion(inode)); 319 319 to->di_crtime = xfs_inode_to_disk_ts(ip, ip->i_crtime); ··· 413 413 414 414 /* Verify v3 integrity information first */ 415 415 if (dip->di_version >= 3) { 416 - if (!xfs_sb_version_has_v3inode(&mp->m_sb)) 416 + if (!xfs_has_v3inodes(mp)) 417 417 return __this_address; 418 418 if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize, 419 419 XFS_DINODE_CRC_OFF)) ··· 534 534 535 535 /* bigtime iflag can only happen on bigtime filesystems */ 536 536 if (xfs_dinode_has_bigtime(dip) && 537 - !xfs_sb_version_hasbigtime(&mp->m_sb)) 537 + !xfs_has_bigtime(mp)) 538 538 return __this_address; 539 539 540 540 return NULL;
+1 -1
fs/xfs/libxfs/xfs_log_format.h
··· 434 434 }; 435 435 436 436 #define xfs_log_dinode_size(mp) \ 437 - (xfs_sb_version_has_v3inode(&(mp)->m_sb) ? \ 437 + (xfs_has_v3inodes((mp)) ? \ 438 438 sizeof(struct xfs_log_dinode) : \ 439 439 offsetof(struct xfs_log_dinode, di_next_unlinked)) 440 440
+4 -4
fs/xfs/libxfs/xfs_refcount.c
··· 1253 1253 struct xfs_trans *tp, 1254 1254 struct xfs_bmbt_irec *PREV) 1255 1255 { 1256 - if (!xfs_sb_version_hasreflink(&tp->t_mountp->m_sb)) 1256 + if (!xfs_has_reflink(tp->t_mountp)) 1257 1257 return; 1258 1258 1259 1259 __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE, PREV->br_startblock, ··· 1268 1268 struct xfs_trans *tp, 1269 1269 struct xfs_bmbt_irec *PREV) 1270 1270 { 1271 - if (!xfs_sb_version_hasreflink(&tp->t_mountp->m_sb)) 1271 + if (!xfs_has_reflink(tp->t_mountp)) 1272 1272 return; 1273 1273 1274 1274 __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE, PREV->br_startblock, ··· 1617 1617 { 1618 1618 struct xfs_mount *mp = tp->t_mountp; 1619 1619 1620 - if (!xfs_sb_version_hasreflink(&mp->m_sb)) 1620 + if (!xfs_has_reflink(mp)) 1621 1621 return; 1622 1622 1623 1623 __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len); ··· 1636 1636 { 1637 1637 struct xfs_mount *mp = tp->t_mountp; 1638 1638 1639 - if (!xfs_sb_version_hasreflink(&mp->m_sb)) 1639 + if (!xfs_has_reflink(mp)) 1640 1640 return; 1641 1641 1642 1642 /* Remove rmap entry */
+1 -1
fs/xfs/libxfs/xfs_sb.c
··· 911 911 * unclean shutdown, this will be corrected by log recovery rebuilding 912 912 * the counters from the AGF block counts. 913 913 */ 914 - if (xfs_sb_version_haslazysbcount(&mp->m_sb)) { 914 + if (xfs_has_lazysbcount(mp)) { 915 915 mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount); 916 916 mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree); 917 917 mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
+1 -1
fs/xfs/libxfs/xfs_trans_inode.c
··· 136 136 * to upgrade this inode to bigtime format, do so now. 137 137 */ 138 138 if ((flags & (XFS_ILOG_CORE | XFS_ILOG_TIMESTAMP)) && 139 - xfs_sb_version_hasbigtime(&ip->i_mount->m_sb) && 139 + xfs_has_bigtime(ip->i_mount) && 140 140 !xfs_inode_has_bigtime(ip)) { 141 141 ip->i_diflags2 |= XFS_DIFLAG2_BIGTIME; 142 142 flags |= XFS_ILOG_CORE;
+3 -3
fs/xfs/libxfs/xfs_trans_resv.c
··· 187 187 XFS_FSB_TO_B(mp, 1)); 188 188 if (alloc) { 189 189 /* icreate tx uses ordered buffers */ 190 - if (xfs_sb_version_has_v3inode(&mp->m_sb)) 190 + if (xfs_has_v3inodes(mp)) 191 191 return res; 192 192 size = XFS_FSB_TO_B(mp, 1); 193 193 } ··· 268 268 xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + 269 269 xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); 270 270 271 - if (xfs_sb_version_hasrealtime(&mp->m_sb)) { 271 + if (xfs_has_realtime(mp)) { 272 272 t2 = xfs_calc_inode_res(mp, 1) + 273 273 xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 274 274 blksz) + ··· 317 317 t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + 318 318 xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz); 319 319 320 - if (xfs_sb_version_hasrealtime(&mp->m_sb)) { 320 + if (xfs_has_realtime(mp)) { 321 321 t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + 322 322 xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) + 323 323 xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
+2 -4
fs/xfs/libxfs/xfs_trans_space.h
··· 57 57 XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK) 58 58 #define XFS_IALLOC_SPACE_RES(mp) \ 59 59 (M_IGEO(mp)->ialloc_blks + \ 60 - ((xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1) * \ 61 - M_IGEO(mp)->inobt_maxlevels)) 60 + ((xfs_has_finobt(mp) ? 2 : 1) * M_IGEO(mp)->inobt_maxlevels)) 62 61 63 62 /* 64 63 * Space reservation values for various transactions. ··· 93 94 #define XFS_SYMLINK_SPACE_RES(mp,nl,b) \ 94 95 (XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl) + (b)) 95 96 #define XFS_IFREE_SPACE_RES(mp) \ 96 - (xfs_sb_version_hasfinobt(&mp->m_sb) ? \ 97 - M_IGEO(mp)->inobt_maxlevels : 0) 97 + (xfs_has_finobt(mp) ? M_IGEO(mp)->inobt_maxlevels : 0) 98 98 99 99 100 100 #endif /* __XFS_TRANS_SPACE_H__ */
+3 -3
fs/xfs/scrub/agheader.c
··· 430 430 int error; 431 431 432 432 /* agf_btreeblks didn't exist before lazysbcount */ 433 - if (!xfs_sb_version_haslazysbcount(&sc->mp->m_sb)) 433 + if (!xfs_has_lazysbcount(sc->mp)) 434 434 return; 435 435 436 436 /* Check agf_rmap_blocks; set up for agf_btreeblks check */ ··· 598 598 xchk_block_set_corrupt(sc, sc->sa.agf_bp); 599 599 if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount)) 600 600 xchk_block_set_corrupt(sc, sc->sa.agf_bp); 601 - if (xfs_sb_version_haslazysbcount(&sc->mp->m_sb) && 601 + if (xfs_has_lazysbcount(sc->mp) && 602 602 pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks)) 603 603 xchk_block_set_corrupt(sc, sc->sa.agf_bp); 604 604 ··· 797 797 xfs_agblock_t blocks; 798 798 int error = 0; 799 799 800 - if (!xfs_sb_version_hasinobtcounts(&sc->mp->m_sb)) 800 + if (!xfs_has_inobtcounts(sc->mp)) 801 801 return; 802 802 803 803 if (sc->sa.ino_cur) {
+2 -3
fs/xfs/scrub/agheader_repair.c
··· 816 816 error = xfs_ialloc_count_inodes(cur, &count, &freecount); 817 817 if (error) 818 818 goto err; 819 - if (xfs_sb_version_hasinobtcounts(&mp->m_sb)) { 819 + if (xfs_has_inobtcounts(mp)) { 820 820 xfs_agblock_t blocks; 821 821 822 822 error = xfs_btree_count_blocks(cur, &blocks); ··· 829 829 agi->agi_count = cpu_to_be32(count); 830 830 agi->agi_freecount = cpu_to_be32(freecount); 831 831 832 - if (xfs_sb_version_hasfinobt(&mp->m_sb) && 833 - xfs_sb_version_hasinobtcounts(&mp->m_sb)) { 832 + if (xfs_has_finobt(mp) && xfs_has_inobtcounts(mp)) { 834 833 xfs_agblock_t blocks; 835 834 836 835 cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp,
+1 -2
fs/xfs/scrub/bmap.c
··· 662 662 } 663 663 break; 664 664 case XFS_ATTR_FORK: 665 - if (!xfs_sb_version_hasattr(&mp->m_sb) && 666 - !xfs_sb_version_hasattr2(&mp->m_sb)) 665 + if (!xfs_has_attr(mp) && !xfs_has_attr2(mp)) 667 666 xchk_ino_set_corrupt(sc, sc->ip->i_ino); 668 667 break; 669 668 default:
+3 -3
fs/xfs/scrub/common.c
··· 485 485 } 486 486 487 487 /* Set up a finobt cursor for cross-referencing. */ 488 - if (sa->agi_bp && xfs_sb_version_hasfinobt(&mp->m_sb) && 488 + if (sa->agi_bp && xfs_has_finobt(mp) && 489 489 xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) { 490 490 sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp, 491 491 sa->pag, XFS_BTNUM_FINO); 492 492 } 493 493 494 494 /* Set up a rmapbt cursor for cross-referencing. */ 495 - if (sa->agf_bp && xfs_sb_version_hasrmapbt(&mp->m_sb) && 495 + if (sa->agf_bp && xfs_has_rmapbt(mp) && 496 496 xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_RMAP)) { 497 497 sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp, 498 498 sa->pag); 499 499 } 500 500 501 501 /* Set up a refcountbt cursor for cross-referencing. */ 502 - if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb) && 502 + if (sa->agf_bp && xfs_has_reflink(mp) && 503 503 xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_REFC)) { 504 504 sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp, 505 505 sa->agf_bp, sa->pag);
+1 -1
fs/xfs/scrub/fscounters.c
··· 207 207 /* Add up the free/freelist/bnobt/cntbt blocks */ 208 208 fsc->fdblocks += pag->pagf_freeblks; 209 209 fsc->fdblocks += pag->pagf_flcount; 210 - if (xfs_sb_version_haslazysbcount(&sc->mp->m_sb)) { 210 + if (xfs_has_lazysbcount(sc->mp)) { 211 211 fsc->fdblocks += pag->pagf_btreeblks; 212 212 } else { 213 213 error = xchk_fscount_btreeblks(sc, fsc, agno);
+1 -2
fs/xfs/scrub/inode.c
··· 199 199 goto bad; 200 200 201 201 /* no bigtime iflag without the bigtime feature */ 202 - if (xfs_dinode_has_bigtime(dip) && 203 - !xfs_sb_version_hasbigtime(&mp->m_sb)) 202 + if (xfs_dinode_has_bigtime(dip) && !xfs_has_bigtime(mp)) 204 203 goto bad; 205 204 206 205 return;
+1 -1
fs/xfs/scrub/quota.c
··· 127 127 * a reflink filesystem we're allowed to exceed physical space 128 128 * if there are no quota limits. 129 129 */ 130 - if (xfs_sb_version_hasreflink(&mp->m_sb)) { 130 + if (xfs_has_reflink(mp)) { 131 131 if (mp->m_sb.sb_dblocks < dq->q_blk.count) 132 132 xchk_fblock_set_warning(sc, XFS_DATA_FORK, 133 133 offset);
+1 -1
fs/xfs/xfs_mount.c
··· 1320 1320 { 1321 1321 bool ret = false; 1322 1322 1323 - if (!xfs_sb_version_hascrc(&mp->m_sb) || 1323 + if (!xfs_has_crc(mp) || 1324 1324 !xfs_sb_has_incompat_log_feature(&mp->m_sb, 1325 1325 XFS_SB_FEAT_INCOMPAT_LOG_ALL) || 1326 1326 xfs_is_shutdown(mp))
+1 -2
fs/xfs/xfs_reflink.h
··· 8 8 9 9 static inline bool xfs_is_always_cow_inode(struct xfs_inode *ip) 10 10 { 11 - return ip->i_mount->m_always_cow && 12 - xfs_sb_version_hasreflink(&ip->i_mount->m_sb); 11 + return ip->i_mount->m_always_cow && xfs_has_reflink(ip->i_mount); 13 12 } 14 13 15 14 static inline bool xfs_is_cow_inode(struct xfs_inode *ip)
+1 -1
fs/xfs/xfs_super.c
··· 1496 1496 } 1497 1497 1498 1498 /* Filesystem claims it needs repair, so refuse the mount. */ 1499 - if (xfs_sb_version_needsrepair(&mp->m_sb)) { 1499 + if (xfs_has_needsrepair(mp)) { 1500 1500 xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair."); 1501 1501 error = -EFSCORRUPTED; 1502 1502 goto out_free_sb;