Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xfs: convert buffer verifiers to an ops structure.

To separate the verifiers from iodone functions and associate read
and write verifiers at the same time, introduce a buffer verifier
operations structure to the xfs_buf.

This avoids the need for assigning the write verifier, clearing the
iodone function and re-running ioend processing in the read
verifier, and gets rid of the nasty "b_pre_io" name for the write
verifier function pointer. If we ever need to, it will also be
easier to add further content specific callbacks to a buffer with an
ops structure in place.

We also avoid needing to export verifier functions; instead we
can simply export the ops structures for those that are needed
outside the function they are defined in.

This patch also fixes a directory block readahead verifier issue
that it exposed.

This patch also adds ops callbacks to the inode/alloc btree blocks
initialised by growfs. These will need more work before they will
work with CRCs.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Phil White <pwhite@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>

authored by

Dave Chinner and committed by
Ben Myers
1813dd64 b0f539de

+384 -276
+4
fs/xfs/xfs_ag.h
··· 108 108 extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp, 109 109 xfs_agnumber_t agno, int flags, struct xfs_buf **bpp); 110 110 111 + extern const struct xfs_buf_ops xfs_agf_buf_ops; 112 + 111 113 /* 112 114 * Size of the unlinked inode hash table in the agi. 113 115 */ ··· 162 160 163 161 extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp, 164 162 xfs_agnumber_t agno, struct xfs_buf **bpp); 163 + 164 + extern const struct xfs_buf_ops xfs_agi_buf_ops; 165 165 166 166 /* 167 167 * The third a.g. block contains the a.g. freelist, an array
+16 -12
fs/xfs/xfs_alloc.c
··· 465 465 #endif 466 466 } 467 467 468 - void 468 + static void 469 469 xfs_agfl_write_verify( 470 470 struct xfs_buf *bp) 471 471 { ··· 477 477 struct xfs_buf *bp) 478 478 { 479 479 xfs_agfl_verify(bp); 480 - bp->b_pre_io = xfs_agfl_write_verify; 481 - bp->b_iodone = NULL; 482 - xfs_buf_ioend(bp, 0); 483 480 } 481 + 482 + const struct xfs_buf_ops xfs_agfl_buf_ops = { 483 + .verify_read = xfs_agfl_read_verify, 484 + .verify_write = xfs_agfl_write_verify, 485 + }; 484 486 485 487 /* 486 488 * Read in the allocation group free block array. ··· 501 499 error = xfs_trans_read_buf( 502 500 mp, tp, mp->m_ddev_targp, 503 501 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)), 504 - XFS_FSS_TO_BB(mp, 1), 0, &bp, xfs_agfl_read_verify); 502 + XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops); 505 503 if (error) 506 504 return error; 507 505 ASSERT(!xfs_buf_geterror(bp)); ··· 2183 2181 } 2184 2182 } 2185 2183 2186 - void 2187 - xfs_agf_write_verify( 2184 + static void 2185 + xfs_agf_read_verify( 2188 2186 struct xfs_buf *bp) 2189 2187 { 2190 2188 xfs_agf_verify(bp); 2191 2189 } 2192 2190 2193 2191 static void 2194 - xfs_agf_read_verify( 2192 + xfs_agf_write_verify( 2195 2193 struct xfs_buf *bp) 2196 2194 { 2197 2195 xfs_agf_verify(bp); 2198 - bp->b_pre_io = xfs_agf_write_verify; 2199 - bp->b_iodone = NULL; 2200 - xfs_buf_ioend(bp, 0); 2201 2196 } 2197 + 2198 + const struct xfs_buf_ops xfs_agf_buf_ops = { 2199 + .verify_read = xfs_agf_read_verify, 2200 + .verify_write = xfs_agf_write_verify, 2201 + }; 2202 2202 2203 2203 /* 2204 2204 * Read in the allocation group header (free/alloc section). ··· 2219 2215 error = xfs_trans_read_buf( 2220 2216 mp, tp, mp->m_ddev_targp, 2221 2217 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), 2222 - XFS_FSS_TO_BB(mp, 1), flags, bpp, xfs_agf_read_verify); 2218 + XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops); 2223 2219 if (error) 2224 2220 return error; 2225 2221 if (!*bpp)
+2 -2
fs/xfs/xfs_alloc.h
··· 231 231 xfs_extlen_t *len, /* output: length of extent */ 232 232 int *stat); /* output: success/failure */ 233 233 234 - void xfs_agf_write_verify(struct xfs_buf *bp); 235 - void xfs_agfl_write_verify(struct xfs_buf *bp); 234 + extern const struct xfs_buf_ops xfs_agf_buf_ops; 235 + extern const struct xfs_buf_ops xfs_agfl_buf_ops; 236 236 237 237 #endif /* __XFS_ALLOC_H__ */
+13 -11
fs/xfs/xfs_alloc_btree.c
··· 329 329 } 330 330 331 331 static void 332 + xfs_allocbt_read_verify( 333 + struct xfs_buf *bp) 334 + { 335 + xfs_allocbt_verify(bp); 336 + } 337 + 338 + static void 332 339 xfs_allocbt_write_verify( 333 340 struct xfs_buf *bp) 334 341 { 335 342 xfs_allocbt_verify(bp); 336 343 } 337 344 338 - void 339 - xfs_allocbt_read_verify( 340 - struct xfs_buf *bp) 341 - { 342 - xfs_allocbt_verify(bp); 343 - bp->b_pre_io = xfs_allocbt_write_verify; 344 - bp->b_iodone = NULL; 345 - xfs_buf_ioend(bp, 0); 346 - } 345 + const struct xfs_buf_ops xfs_allocbt_buf_ops = { 346 + .verify_read = xfs_allocbt_read_verify, 347 + .verify_write = xfs_allocbt_write_verify, 348 + }; 349 + 347 350 348 351 #ifdef DEBUG 349 352 STATIC int ··· 403 400 .init_rec_from_cur = xfs_allocbt_init_rec_from_cur, 404 401 .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur, 405 402 .key_diff = xfs_allocbt_key_diff, 406 - .read_verify = xfs_allocbt_read_verify, 407 - .write_verify = xfs_allocbt_write_verify, 403 + .buf_ops = &xfs_allocbt_buf_ops, 408 404 #ifdef DEBUG 409 405 .keys_inorder = xfs_allocbt_keys_inorder, 410 406 .recs_inorder = xfs_allocbt_recs_inorder,
+2
fs/xfs/xfs_alloc_btree.h
··· 93 93 xfs_agnumber_t, xfs_btnum_t); 94 94 extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int); 95 95 96 + extern const struct xfs_buf_ops xfs_allocbt_buf_ops; 97 + 96 98 #endif /* __XFS_ALLOC_BTREE_H__ */
+14 -13
fs/xfs/xfs_attr_leaf.c
··· 104 104 } 105 105 106 106 static void 107 + xfs_attr_leaf_read_verify( 108 + struct xfs_buf *bp) 109 + { 110 + xfs_attr_leaf_verify(bp); 111 + } 112 + 113 + static void 107 114 xfs_attr_leaf_write_verify( 108 115 struct xfs_buf *bp) 109 116 { 110 117 xfs_attr_leaf_verify(bp); 111 118 } 112 119 113 - void 114 - xfs_attr_leaf_read_verify( 115 - struct xfs_buf *bp) 116 - { 117 - xfs_attr_leaf_verify(bp); 118 - bp->b_pre_io = xfs_attr_leaf_write_verify; 119 - bp->b_iodone = NULL; 120 - xfs_buf_ioend(bp, 0); 121 - } 122 - 120 + const struct xfs_buf_ops xfs_attr_leaf_buf_ops = { 121 + .verify_read = xfs_attr_leaf_read_verify, 122 + .verify_write = xfs_attr_leaf_write_verify, 123 + }; 123 124 124 125 int 125 126 xfs_attr_leaf_read( ··· 131 130 struct xfs_buf **bpp) 132 131 { 133 132 return xfs_da_read_buf(tp, dp, bno, mappedbno, bpp, 134 - XFS_ATTR_FORK, xfs_attr_leaf_read_verify); 133 + XFS_ATTR_FORK, &xfs_attr_leaf_buf_ops); 135 134 } 136 135 137 136 /*======================================================================== ··· 925 924 XFS_ATTR_FORK); 926 925 if (error) 927 926 goto out; 928 - bp2->b_pre_io = bp1->b_pre_io; 927 + bp2->b_ops = bp1->b_ops; 929 928 memcpy(bp2->b_addr, bp1->b_addr, XFS_LBSIZE(dp->i_mount)); 930 929 bp1 = NULL; 931 930 xfs_trans_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1); ··· 979 978 XFS_ATTR_FORK); 980 979 if (error) 981 980 return(error); 982 - bp->b_pre_io = xfs_attr_leaf_write_verify; 981 + bp->b_ops = &xfs_attr_leaf_buf_ops; 983 982 leaf = bp->b_addr; 984 983 memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); 985 984 hdr = &leaf->hdr;
+2 -1
fs/xfs/xfs_attr_leaf.h
··· 264 264 int xfs_attr_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp, 265 265 xfs_dablk_t bno, xfs_daddr_t mappedbno, 266 266 struct xfs_buf **bpp); 267 - void xfs_attr_leaf_read_verify(struct xfs_buf *bp); 267 + 268 + extern const struct xfs_buf_ops xfs_attr_leaf_buf_ops; 268 269 269 270 #endif /* __XFS_ATTR_LEAF_H__ */
+11 -11
fs/xfs/xfs_bmap.c
··· 2663 2663 return error; 2664 2664 #endif 2665 2665 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF, 2666 - xfs_bmbt_read_verify); 2666 + &xfs_bmbt_buf_ops); 2667 2667 if (error) 2668 2668 return error; 2669 2669 cblock = XFS_BUF_TO_BLOCK(cbp); ··· 3124 3124 /* 3125 3125 * Fill in the child block. 3126 3126 */ 3127 - abp->b_pre_io = xfs_bmbt_write_verify; 3127 + abp->b_ops = &xfs_bmbt_buf_ops; 3128 3128 ablock = XFS_BUF_TO_BLOCK(abp); 3129 3129 ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); 3130 3130 ablock->bb_level = 0; ··· 3271 3271 ASSERT(args.len == 1); 3272 3272 *firstblock = args.fsbno; 3273 3273 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0); 3274 - bp->b_pre_io = xfs_bmbt_write_verify; 3274 + bp->b_ops = &xfs_bmbt_buf_ops; 3275 3275 memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes); 3276 3276 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); 3277 3277 xfs_bmap_forkoff_reset(args.mp, ip, whichfork); ··· 4082 4082 */ 4083 4083 while (level-- > 0) { 4084 4084 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, 4085 - XFS_BMAP_BTREE_REF, xfs_bmbt_read_verify); 4085 + XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops); 4086 4086 if (error) 4087 4087 return error; 4088 4088 block = XFS_BUF_TO_BLOCK(bp); ··· 4129 4129 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); 4130 4130 if (nextbno != NULLFSBLOCK) 4131 4131 xfs_btree_reada_bufl(mp, nextbno, 1, 4132 - xfs_bmbt_read_verify); 4132 + &xfs_bmbt_buf_ops); 4133 4133 /* 4134 4134 * Copy records into the extent records. 
4135 4135 */ ··· 4162 4162 if (bno == NULLFSBLOCK) 4163 4163 break; 4164 4164 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, 4165 - XFS_BMAP_BTREE_REF, xfs_bmbt_read_verify); 4165 + XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops); 4166 4166 if (error) 4167 4167 return error; 4168 4168 block = XFS_BUF_TO_BLOCK(bp); ··· 5880 5880 bp_release = 1; 5881 5881 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp, 5882 5882 XFS_BMAP_BTREE_REF, 5883 - xfs_bmbt_read_verify); 5883 + &xfs_bmbt_buf_ops); 5884 5884 if (error) 5885 5885 goto error_norelse; 5886 5886 } ··· 5966 5966 bp_release = 1; 5967 5967 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp, 5968 5968 XFS_BMAP_BTREE_REF, 5969 - xfs_bmbt_read_verify); 5969 + &xfs_bmbt_buf_ops); 5970 5970 if (error) 5971 5971 goto error_norelse; 5972 5972 } ··· 6061 6061 int numrecs; 6062 6062 6063 6063 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF, 6064 - xfs_bmbt_read_verify); 6064 + &xfs_bmbt_buf_ops); 6065 6065 if (error) 6066 6066 return error; 6067 6067 *count += 1; ··· 6073 6073 while (nextbno != NULLFSBLOCK) { 6074 6074 error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp, 6075 6075 XFS_BMAP_BTREE_REF, 6076 - xfs_bmbt_read_verify); 6076 + &xfs_bmbt_buf_ops); 6077 6077 if (error) 6078 6078 return error; 6079 6079 *count += 1; ··· 6105 6105 bno = nextbno; 6106 6106 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, 6107 6107 XFS_BMAP_BTREE_REF, 6108 - xfs_bmbt_read_verify); 6108 + &xfs_bmbt_buf_ops); 6109 6109 if (error) 6110 6110 return error; 6111 6111 *count += 1;
+14 -12
fs/xfs/xfs_bmap_btree.c
··· 749 749 } 750 750 } 751 751 752 - void 752 + static void 753 + xfs_bmbt_read_verify( 754 + struct xfs_buf *bp) 755 + { 756 + xfs_bmbt_verify(bp); 757 + } 758 + 759 + static void 753 760 xfs_bmbt_write_verify( 754 761 struct xfs_buf *bp) 755 762 { 756 763 xfs_bmbt_verify(bp); 757 764 } 758 765 759 - void 760 - xfs_bmbt_read_verify( 761 - struct xfs_buf *bp) 762 - { 763 - xfs_bmbt_verify(bp); 764 - bp->b_pre_io = xfs_bmbt_write_verify; 765 - bp->b_iodone = NULL; 766 - xfs_buf_ioend(bp, 0); 767 - } 766 + const struct xfs_buf_ops xfs_bmbt_buf_ops = { 767 + .verify_read = xfs_bmbt_read_verify, 768 + .verify_write = xfs_bmbt_write_verify, 769 + }; 770 + 768 771 769 772 #ifdef DEBUG 770 773 STATIC int ··· 808 805 .init_rec_from_cur = xfs_bmbt_init_rec_from_cur, 809 806 .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur, 810 807 .key_diff = xfs_bmbt_key_diff, 811 - .read_verify = xfs_bmbt_read_verify, 812 - .write_verify = xfs_bmbt_write_verify, 808 + .buf_ops = &xfs_bmbt_buf_ops, 813 809 #ifdef DEBUG 814 810 .keys_inorder = xfs_bmbt_keys_inorder, 815 811 .recs_inorder = xfs_bmbt_recs_inorder,
+1 -2
fs/xfs/xfs_bmap_btree.h
··· 232 232 extern int xfs_bmbt_get_maxrecs(struct xfs_btree_cur *, int level); 233 233 extern int xfs_bmdr_maxrecs(struct xfs_mount *, int blocklen, int leaf); 234 234 extern int xfs_bmbt_maxrecs(struct xfs_mount *, int blocklen, int leaf); 235 - extern void xfs_bmbt_read_verify(struct xfs_buf *bp); 236 - extern void xfs_bmbt_write_verify(struct xfs_buf *bp); 237 235 238 236 extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *, 239 237 struct xfs_trans *, struct xfs_inode *, int); 240 238 239 + extern const struct xfs_buf_ops xfs_bmbt_buf_ops; 241 240 242 241 #endif /* __XFS_BMAP_BTREE_H__ */
+13 -13
fs/xfs/xfs_btree.c
··· 271 271 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, 272 272 XFS_BUF_ADDR(bp), mp->m_bsize, 273 273 0, &bp, 274 - cur->bc_ops->read_verify); 274 + cur->bc_ops->buf_ops); 275 275 if (error) { 276 276 xfs_btree_del_cursor(new, error); 277 277 *ncur = NULL; ··· 621 621 uint lock, /* lock flags for read_buf */ 622 622 struct xfs_buf **bpp, /* buffer for fsbno */ 623 623 int refval, /* ref count value for buffer */ 624 - xfs_buf_iodone_t verify) 624 + const struct xfs_buf_ops *ops) 625 625 { 626 626 struct xfs_buf *bp; /* return value */ 627 627 xfs_daddr_t d; /* real disk block address */ ··· 630 630 ASSERT(fsbno != NULLFSBLOCK); 631 631 d = XFS_FSB_TO_DADDR(mp, fsbno); 632 632 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, 633 - mp->m_bsize, lock, &bp, verify); 633 + mp->m_bsize, lock, &bp, ops); 634 634 if (error) 635 635 return error; 636 636 ASSERT(!xfs_buf_geterror(bp)); ··· 650 650 struct xfs_mount *mp, /* file system mount point */ 651 651 xfs_fsblock_t fsbno, /* file system block number */ 652 652 xfs_extlen_t count, /* count of filesystem blocks */ 653 - xfs_buf_iodone_t verify) 653 + const struct xfs_buf_ops *ops) 654 654 { 655 655 xfs_daddr_t d; 656 656 657 657 ASSERT(fsbno != NULLFSBLOCK); 658 658 d = XFS_FSB_TO_DADDR(mp, fsbno); 659 - xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, verify); 659 + xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops); 660 660 } 661 661 662 662 /* ··· 670 670 xfs_agnumber_t agno, /* allocation group number */ 671 671 xfs_agblock_t agbno, /* allocation group block number */ 672 672 xfs_extlen_t count, /* count of filesystem blocks */ 673 - xfs_buf_iodone_t verify) 673 + const struct xfs_buf_ops *ops) 674 674 { 675 675 xfs_daddr_t d; 676 676 677 677 ASSERT(agno != NULLAGNUMBER); 678 678 ASSERT(agbno != NULLAGBLOCK); 679 679 d = XFS_AGB_TO_DADDR(mp, agno, agbno); 680 - xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, verify); 680 + xfs_buf_readahead(mp->m_ddev_targp, d, 
mp->m_bsize * count, ops); 681 681 } 682 682 683 683 STATIC int ··· 692 692 693 693 if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) { 694 694 xfs_btree_reada_bufl(cur->bc_mp, left, 1, 695 - cur->bc_ops->read_verify); 695 + cur->bc_ops->buf_ops); 696 696 rval++; 697 697 } 698 698 699 699 if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLDFSBNO) { 700 700 xfs_btree_reada_bufl(cur->bc_mp, right, 1, 701 - cur->bc_ops->read_verify); 701 + cur->bc_ops->buf_ops); 702 702 rval++; 703 703 } 704 704 ··· 718 718 719 719 if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) { 720 720 xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, 721 - left, 1, cur->bc_ops->read_verify); 721 + left, 1, cur->bc_ops->buf_ops); 722 722 rval++; 723 723 } 724 724 725 725 if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) { 726 726 xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, 727 - right, 1, cur->bc_ops->read_verify); 727 + right, 1, cur->bc_ops->buf_ops); 728 728 rval++; 729 729 } 730 730 ··· 996 996 if (!*bpp) 997 997 return ENOMEM; 998 998 999 - (*bpp)->b_pre_io = cur->bc_ops->write_verify; 999 + (*bpp)->b_ops = cur->bc_ops->buf_ops; 1000 1000 *block = XFS_BUF_TO_BLOCK(*bpp); 1001 1001 return 0; 1002 1002 } ··· 1024 1024 d = xfs_btree_ptr_to_daddr(cur, ptr); 1025 1025 error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d, 1026 1026 mp->m_bsize, flags, bpp, 1027 - cur->bc_ops->read_verify); 1027 + cur->bc_ops->buf_ops); 1028 1028 if (error) 1029 1029 return error; 1030 1030
+4 -5
fs/xfs/xfs_btree.h
··· 188 188 __int64_t (*key_diff)(struct xfs_btree_cur *cur, 189 189 union xfs_btree_key *key); 190 190 191 - void (*read_verify)(struct xfs_buf *bp); 192 - void (*write_verify)(struct xfs_buf *bp); 191 + const struct xfs_buf_ops *buf_ops; 193 192 194 193 #ifdef DEBUG 195 194 /* check that k1 is lower than k2 */ ··· 358 359 uint lock, /* lock flags for read_buf */ 359 360 struct xfs_buf **bpp, /* buffer for fsbno */ 360 361 int refval, /* ref count value for buffer */ 361 - xfs_buf_iodone_t verify); 362 + const struct xfs_buf_ops *ops); 362 363 363 364 /* 364 365 * Read-ahead the block, don't wait for it, don't return a buffer. ··· 369 370 struct xfs_mount *mp, /* file system mount point */ 370 371 xfs_fsblock_t fsbno, /* file system block number */ 371 372 xfs_extlen_t count, /* count of filesystem blocks */ 372 - xfs_buf_iodone_t verify); 373 + const struct xfs_buf_ops *ops); 373 374 374 375 /* 375 376 * Read-ahead the block, don't wait for it, don't return a buffer. ··· 381 382 xfs_agnumber_t agno, /* allocation group number */ 382 383 xfs_agblock_t agbno, /* allocation group block number */ 383 384 xfs_extlen_t count, /* count of filesystem blocks */ 384 - xfs_buf_iodone_t verify); 385 + const struct xfs_buf_ops *ops); 385 386 386 387 /* 387 388 * Initialise a new btree block header
+37 -26
fs/xfs/xfs_buf.c
··· 571 571 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); 572 572 ASSERT(bp->b_iodone == NULL); 573 573 bp->b_flags &= _XBF_KMEM | _XBF_PAGES; 574 - bp->b_pre_io = NULL; 574 + bp->b_ops = NULL; 575 575 } 576 576 577 577 trace_xfs_buf_find(bp, flags, _RET_IP_); ··· 657 657 struct xfs_buf_map *map, 658 658 int nmaps, 659 659 xfs_buf_flags_t flags, 660 - xfs_buf_iodone_t verify) 660 + const struct xfs_buf_ops *ops) 661 661 { 662 662 struct xfs_buf *bp; 663 663 ··· 669 669 670 670 if (!XFS_BUF_ISDONE(bp)) { 671 671 XFS_STATS_INC(xb_get_read); 672 - bp->b_iodone = verify; 672 + bp->b_ops = ops; 673 673 _xfs_buf_read(bp, flags); 674 674 } else if (flags & XBF_ASYNC) { 675 675 /* ··· 696 696 struct xfs_buftarg *target, 697 697 struct xfs_buf_map *map, 698 698 int nmaps, 699 - xfs_buf_iodone_t verify) 699 + const struct xfs_buf_ops *ops) 700 700 { 701 701 if (bdi_read_congested(target->bt_bdi)) 702 702 return; 703 703 704 704 xfs_buf_read_map(target, map, nmaps, 705 - XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, verify); 705 + XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops); 706 706 } 707 707 708 708 /* ··· 715 715 xfs_daddr_t daddr, 716 716 size_t numblks, 717 717 int flags, 718 - xfs_buf_iodone_t verify) 718 + const struct xfs_buf_ops *ops) 719 719 { 720 720 struct xfs_buf *bp; 721 721 ··· 728 728 bp->b_bn = daddr; 729 729 bp->b_maps[0].bm_bn = daddr; 730 730 bp->b_flags |= XBF_READ; 731 - bp->b_iodone = verify; 731 + bp->b_ops = ops; 732 732 733 733 xfsbdstrat(target->bt_mount, bp); 734 734 xfs_buf_iowait(bp); ··· 1001 1001 xfs_buf_iodone_work( 1002 1002 struct work_struct *work) 1003 1003 { 1004 - xfs_buf_t *bp = 1004 + struct xfs_buf *bp = 1005 1005 container_of(work, xfs_buf_t, b_iodone_work); 1006 + bool read = !!(bp->b_flags & XBF_READ); 1007 + 1008 + bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); 1009 + if (read && bp->b_ops) 1010 + bp->b_ops->verify_read(bp); 1006 1011 1007 1012 if (bp->b_iodone) 1008 1013 (*(bp->b_iodone))(bp); 1009 1014 else if (bp->b_flags & 
XBF_ASYNC) 1010 1015 xfs_buf_relse(bp); 1016 + else { 1017 + ASSERT(read && bp->b_ops); 1018 + complete(&bp->b_iowait); 1019 + } 1011 1020 } 1012 1021 1013 1022 void 1014 1023 xfs_buf_ioend( 1015 - xfs_buf_t *bp, 1016 - int schedule) 1024 + struct xfs_buf *bp, 1025 + int schedule) 1017 1026 { 1027 + bool read = !!(bp->b_flags & XBF_READ); 1028 + 1018 1029 trace_xfs_buf_iodone(bp, _RET_IP_); 1019 1030 1020 - bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); 1021 1031 if (bp->b_error == 0) 1022 1032 bp->b_flags |= XBF_DONE; 1023 1033 1024 - if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { 1034 + if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) { 1025 1035 if (schedule) { 1026 1036 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); 1027 1037 queue_work(xfslogd_workqueue, &bp->b_iodone_work); ··· 1039 1029 xfs_buf_iodone_work(&bp->b_iodone_work); 1040 1030 } 1041 1031 } else { 1032 + bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); 1042 1033 complete(&bp->b_iowait); 1043 1034 } 1044 1035 } ··· 1327 1316 rw |= REQ_FUA; 1328 1317 if (bp->b_flags & XBF_FLUSH) 1329 1318 rw |= REQ_FLUSH; 1319 + 1320 + /* 1321 + * Run the write verifier callback function if it exists. If 1322 + * this function fails it will mark the buffer with an error and 1323 + * the IO should not be dispatched. 1324 + */ 1325 + if (bp->b_ops) { 1326 + bp->b_ops->verify_write(bp); 1327 + if (bp->b_error) { 1328 + xfs_force_shutdown(bp->b_target->bt_mount, 1329 + SHUTDOWN_CORRUPT_INCORE); 1330 + return; 1331 + } 1332 + } 1330 1333 } else if (bp->b_flags & XBF_READ_AHEAD) { 1331 1334 rw = READA; 1332 1335 } else { ··· 1349 1324 1350 1325 /* we only use the buffer cache for meta-data */ 1351 1326 rw |= REQ_META; 1352 - 1353 - /* 1354 - * run the pre-io callback function if it exists. If this function 1355 - * fails it will mark the buffer with an error and the IO should 1356 - * not be dispatched. 
1357 - */ 1358 - if (bp->b_pre_io) { 1359 - bp->b_pre_io(bp); 1360 - if (bp->b_error) { 1361 - xfs_force_shutdown(bp->b_target->bt_mount, 1362 - SHUTDOWN_CORRUPT_INCORE); 1363 - return; 1364 - } 1365 - } 1366 1327 1367 1328 /* 1368 1329 * Walk all the vectors issuing IO on them. Set up the initial offset
+14 -10
fs/xfs/xfs_buf.h
··· 111 111 #define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \ 112 112 struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) }; 113 113 114 + struct xfs_buf_ops { 115 + void (*verify_read)(struct xfs_buf *); 116 + void (*verify_write)(struct xfs_buf *); 117 + }; 118 + 114 119 typedef struct xfs_buf { 115 120 /* 116 121 * first cacheline holds all the fields needed for an uncontended cache ··· 159 154 unsigned int b_page_count; /* size of page array */ 160 155 unsigned int b_offset; /* page offset in first page */ 161 156 unsigned short b_error; /* error code on I/O */ 162 - 163 - void (*b_pre_io)(struct xfs_buf *); 164 - /* pre-io callback function */ 157 + const struct xfs_buf_ops *b_ops; 165 158 166 159 #ifdef XFS_BUF_LOCK_TRACKING 167 160 int b_last_holder; ··· 202 199 xfs_buf_flags_t flags); 203 200 struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target, 204 201 struct xfs_buf_map *map, int nmaps, 205 - xfs_buf_flags_t flags, xfs_buf_iodone_t verify); 202 + xfs_buf_flags_t flags, 203 + const struct xfs_buf_ops *ops); 206 204 void xfs_buf_readahead_map(struct xfs_buftarg *target, 207 205 struct xfs_buf_map *map, int nmaps, 208 - xfs_buf_iodone_t verify); 206 + const struct xfs_buf_ops *ops); 209 207 210 208 static inline struct xfs_buf * 211 209 xfs_buf_get( ··· 225 221 xfs_daddr_t blkno, 226 222 size_t numblks, 227 223 xfs_buf_flags_t flags, 228 - xfs_buf_iodone_t verify) 224 + const struct xfs_buf_ops *ops) 229 225 { 230 226 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); 231 - return xfs_buf_read_map(target, &map, 1, flags, verify); 227 + return xfs_buf_read_map(target, &map, 1, flags, ops); 232 228 } 233 229 234 230 static inline void ··· 236 232 struct xfs_buftarg *target, 237 233 xfs_daddr_t blkno, 238 234 size_t numblks, 239 - xfs_buf_iodone_t verify) 235 + const struct xfs_buf_ops *ops) 240 236 { 241 237 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); 242 - return xfs_buf_readahead_map(target, &map, 1, verify); 238 + return 
xfs_buf_readahead_map(target, &map, 1, ops); 243 239 } 244 240 245 241 struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks); ··· 250 246 int flags); 251 247 struct xfs_buf *xfs_buf_read_uncached(struct xfs_buftarg *target, 252 248 xfs_daddr_t daddr, size_t numblks, int flags, 253 - xfs_buf_iodone_t verify); 249 + const struct xfs_buf_ops *ops); 254 250 void xfs_buf_hold(struct xfs_buf *bp); 255 251 256 252 /* Releasing Buffers */
+25 -15
fs/xfs/xfs_da_btree.c
··· 117 117 xfs_da_node_verify(bp); 118 118 } 119 119 120 + /* 121 + * leaf/node format detection on trees is sketchy, so a node read can be done on 122 + * leaf level blocks when detection identifies the tree as a node format tree 123 + * incorrectly. In this case, we need to swap the verifier to match the correct 124 + * format of the block being read. 125 + */ 120 126 static void 121 127 xfs_da_node_read_verify( 122 128 struct xfs_buf *bp) ··· 135 129 xfs_da_node_verify(bp); 136 130 break; 137 131 case XFS_ATTR_LEAF_MAGIC: 138 - xfs_attr_leaf_read_verify(bp); 132 + bp->b_ops = &xfs_attr_leaf_buf_ops; 133 + bp->b_ops->verify_read(bp); 139 134 return; 140 135 case XFS_DIR2_LEAFN_MAGIC: 141 - xfs_dir2_leafn_read_verify(bp); 136 + bp->b_ops = &xfs_dir2_leafn_buf_ops; 137 + bp->b_ops->verify_read(bp); 142 138 return; 143 139 default: 144 140 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, ··· 148 140 xfs_buf_ioerror(bp, EFSCORRUPTED); 149 141 break; 150 142 } 151 - 152 - bp->b_pre_io = xfs_da_node_write_verify; 153 - bp->b_iodone = NULL; 154 - xfs_buf_ioend(bp, 0); 155 143 } 144 + 145 + const struct xfs_buf_ops xfs_da_node_buf_ops = { 146 + .verify_read = xfs_da_node_read_verify, 147 + .verify_write = xfs_da_node_write_verify, 148 + }; 149 + 156 150 157 151 int 158 152 xfs_da_node_read( ··· 166 156 int which_fork) 167 157 { 168 158 return xfs_da_read_buf(tp, dp, bno, mappedbno, bpp, 169 - which_fork, xfs_da_node_read_verify); 159 + which_fork, &xfs_da_node_buf_ops); 170 160 } 171 161 172 162 /*======================================================================== ··· 203 193 xfs_trans_log_buf(tp, bp, 204 194 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); 205 195 206 - bp->b_pre_io = xfs_da_node_write_verify; 196 + bp->b_ops = &xfs_da_node_buf_ops; 207 197 *bpp = bp; 208 198 return(0); 209 199 } ··· 404 394 memcpy(node, oldroot, size); 405 395 xfs_trans_log_buf(tp, bp, 0, size - 1); 406 396 407 - bp->b_pre_io = blk1->bp->b_pre_io; 397 + bp->b_ops = 
blk1->bp->b_ops; 408 398 blk1->bp = bp; 409 399 blk1->blkno = blkno; 410 400 ··· 838 828 /* 839 829 * This could be copying a leaf back into the root block in the case of 840 830 * there only being a single leaf block left in the tree. Hence we have 841 - * to update the pre_io pointer as well to match the buffer type change 831 + * to update the b_ops pointer as well to match the buffer type change 842 832 * that could occur. 843 833 */ 844 834 memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize); 845 - root_blk->bp->b_pre_io = bp->b_pre_io; 835 + root_blk->bp->b_ops = bp->b_ops; 846 836 xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1); 847 837 error = xfs_da_shrink_inode(args, child, bp); 848 838 return(error); ··· 2233 2223 xfs_daddr_t mappedbno, 2234 2224 struct xfs_buf **bpp, 2235 2225 int whichfork, 2236 - xfs_buf_iodone_t verifier) 2226 + const struct xfs_buf_ops *ops) 2237 2227 { 2238 2228 struct xfs_buf *bp; 2239 2229 struct xfs_buf_map map; ··· 2255 2245 2256 2246 error = xfs_trans_read_buf_map(dp->i_mount, trans, 2257 2247 dp->i_mount->m_ddev_targp, 2258 - mapp, nmap, 0, &bp, verifier); 2248 + mapp, nmap, 0, &bp, ops); 2259 2249 if (error) 2260 2250 goto out_free; 2261 2251 ··· 2313 2303 xfs_dablk_t bno, 2314 2304 xfs_daddr_t mappedbno, 2315 2305 int whichfork, 2316 - xfs_buf_iodone_t verifier) 2306 + const struct xfs_buf_ops *ops) 2317 2307 { 2318 2308 struct xfs_buf_map map; 2319 2309 struct xfs_buf_map *mapp; ··· 2332 2322 } 2333 2323 2334 2324 mappedbno = mapp[0].bm_bn; 2335 - xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, NULL); 2325 + xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops); 2336 2326 2337 2327 out_free: 2338 2328 if (mapp != &map)
+2 -2
fs/xfs/xfs_da_btree.h
··· 229 229 int xfs_da_read_buf(struct xfs_trans *trans, struct xfs_inode *dp, 230 230 xfs_dablk_t bno, xfs_daddr_t mappedbno, 231 231 struct xfs_buf **bpp, int whichfork, 232 - xfs_buf_iodone_t verifier); 232 + const struct xfs_buf_ops *ops); 233 233 xfs_daddr_t xfs_da_reada_buf(struct xfs_trans *trans, struct xfs_inode *dp, 234 234 xfs_dablk_t bno, xfs_daddr_t mapped_bno, 235 - int whichfork, xfs_buf_iodone_t verifier); 235 + int whichfork, const struct xfs_buf_ops *ops); 236 236 int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, 237 237 struct xfs_buf *dead_buf); 238 238
+14 -12
fs/xfs/xfs_dir2_block.c
··· 74 74 } 75 75 76 76 static void 77 + xfs_dir2_block_read_verify( 78 + struct xfs_buf *bp) 79 + { 80 + xfs_dir2_block_verify(bp); 81 + } 82 + 83 + static void 77 84 xfs_dir2_block_write_verify( 78 85 struct xfs_buf *bp) 79 86 { 80 87 xfs_dir2_block_verify(bp); 81 88 } 82 89 83 - void 84 - xfs_dir2_block_read_verify( 85 - struct xfs_buf *bp) 86 - { 87 - xfs_dir2_block_verify(bp); 88 - bp->b_pre_io = xfs_dir2_block_write_verify; 89 - bp->b_iodone = NULL; 90 - xfs_buf_ioend(bp, 0); 91 - } 90 + const struct xfs_buf_ops xfs_dir2_block_buf_ops = { 91 + .verify_read = xfs_dir2_block_read_verify, 92 + .verify_write = xfs_dir2_block_write_verify, 93 + }; 92 94 93 95 static int 94 96 xfs_dir2_block_read( ··· 101 99 struct xfs_mount *mp = dp->i_mount; 102 100 103 101 return xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, bpp, 104 - XFS_DATA_FORK, xfs_dir2_block_read_verify); 102 + XFS_DATA_FORK, &xfs_dir2_block_buf_ops); 105 103 } 106 104 107 105 static void ··· 1012 1010 /* 1013 1011 * Start converting it to block form. 1014 1012 */ 1015 - dbp->b_pre_io = xfs_dir2_block_write_verify; 1013 + dbp->b_ops = &xfs_dir2_block_buf_ops; 1016 1014 hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); 1017 1015 needlog = 1; 1018 1016 needscan = 0; ··· 1142 1140 kmem_free(sfp); 1143 1141 return error; 1144 1142 } 1145 - bp->b_pre_io = xfs_dir2_block_write_verify; 1143 + bp->b_ops = &xfs_dir2_block_buf_ops; 1146 1144 hdr = bp->b_addr; 1147 1145 hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); 1148 1146 /*
+44 -10
fs/xfs/xfs_dir2_data.c
··· 202 202 } 203 203 } 204 204 205 - void 206 - xfs_dir2_data_write_verify( 207 - struct xfs_buf *bp) 205 + /* 206 + * Readahead of the first block of the directory when it is opened is completely 207 + * oblivious to the format of the directory. Hence we can either get a block 208 + * format buffer or a data format buffer on readahead. 209 + */ 210 + static void 211 + xfs_dir2_data_reada_verify( 212 + struct xfs_buf *bp) 208 213 { 209 - xfs_dir2_data_verify(bp); 214 + struct xfs_mount *mp = bp->b_target->bt_mount; 215 + struct xfs_dir2_data_hdr *hdr = bp->b_addr; 216 + 217 + switch (hdr->magic) { 218 + case cpu_to_be32(XFS_DIR2_BLOCK_MAGIC): 219 + bp->b_ops = &xfs_dir2_block_buf_ops; 220 + bp->b_ops->verify_read(bp); 221 + return; 222 + case cpu_to_be32(XFS_DIR2_DATA_MAGIC): 223 + xfs_dir2_data_verify(bp); 224 + return; 225 + default: 226 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, hdr); 227 + xfs_buf_ioerror(bp, EFSCORRUPTED); 228 + break; 229 + } 210 230 } 211 231 212 232 static void ··· 234 214 struct xfs_buf *bp) 235 215 { 236 216 xfs_dir2_data_verify(bp); 237 - bp->b_pre_io = xfs_dir2_data_write_verify; 238 - bp->b_iodone = NULL; 239 - xfs_buf_ioend(bp, 0); 240 217 } 218 + 219 + static void 220 + xfs_dir2_data_write_verify( 221 + struct xfs_buf *bp) 222 + { 223 + xfs_dir2_data_verify(bp); 224 + } 225 + 226 + const struct xfs_buf_ops xfs_dir2_data_buf_ops = { 227 + .verify_read = xfs_dir2_data_read_verify, 228 + .verify_write = xfs_dir2_data_write_verify, 229 + }; 230 + 231 + static const struct xfs_buf_ops xfs_dir2_data_reada_buf_ops = { 232 + .verify_read = xfs_dir2_data_reada_verify, 233 + .verify_write = xfs_dir2_data_write_verify, 234 + }; 241 235 242 236 243 237 int ··· 263 229 struct xfs_buf **bpp) 264 230 { 265 231 return xfs_da_read_buf(tp, dp, bno, mapped_bno, bpp, 266 - XFS_DATA_FORK, xfs_dir2_data_read_verify); 232 + XFS_DATA_FORK, &xfs_dir2_data_buf_ops); 267 233 } 268 234 269 235 int ··· 274 240 xfs_daddr_t mapped_bno) 275 241 { 276 
242 return xfs_da_reada_buf(tp, dp, bno, mapped_bno, 277 - XFS_DATA_FORK, xfs_dir2_data_read_verify); 243 + XFS_DATA_FORK, &xfs_dir2_data_reada_buf_ops); 278 244 } 279 245 280 246 /* ··· 518 484 XFS_DATA_FORK); 519 485 if (error) 520 486 return error; 521 - bp->b_pre_io = xfs_dir2_data_write_verify; 487 + bp->b_ops = &xfs_dir2_data_buf_ops; 522 488 523 489 /* 524 490 * Initialize the header.
+24 -20
fs/xfs/xfs_dir2_leaf.c
··· 65 65 } 66 66 67 67 static void 68 - xfs_dir2_leaf1_write_verify( 68 + xfs_dir2_leaf1_read_verify( 69 69 struct xfs_buf *bp) 70 70 { 71 71 xfs_dir2_leaf_verify(bp, cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); 72 72 } 73 73 74 74 static void 75 - xfs_dir2_leaf1_read_verify( 75 + xfs_dir2_leaf1_write_verify( 76 76 struct xfs_buf *bp) 77 77 { 78 78 xfs_dir2_leaf_verify(bp, cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); 79 - bp->b_pre_io = xfs_dir2_leaf1_write_verify; 80 - bp->b_iodone = NULL; 81 - xfs_buf_ioend(bp, 0); 79 + } 80 + 81 + void 82 + xfs_dir2_leafn_read_verify( 83 + struct xfs_buf *bp) 84 + { 85 + xfs_dir2_leaf_verify(bp, cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 82 86 } 83 87 84 88 void ··· 92 88 xfs_dir2_leaf_verify(bp, cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 93 89 } 94 90 95 - void 96 - xfs_dir2_leafn_read_verify( 97 - struct xfs_buf *bp) 98 - { 99 - xfs_dir2_leaf_verify(bp, cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 100 - bp->b_pre_io = xfs_dir2_leafn_write_verify; 101 - bp->b_iodone = NULL; 102 - xfs_buf_ioend(bp, 0); 103 - } 91 + static const struct xfs_buf_ops xfs_dir2_leaf1_buf_ops = { 92 + .verify_read = xfs_dir2_leaf1_read_verify, 93 + .verify_write = xfs_dir2_leaf1_write_verify, 94 + }; 95 + 96 + const struct xfs_buf_ops xfs_dir2_leafn_buf_ops = { 97 + .verify_read = xfs_dir2_leafn_read_verify, 98 + .verify_write = xfs_dir2_leafn_write_verify, 99 + }; 104 100 105 101 static int 106 102 xfs_dir2_leaf_read( ··· 111 107 struct xfs_buf **bpp) 112 108 { 113 109 return xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp, 114 - XFS_DATA_FORK, xfs_dir2_leaf1_read_verify); 110 + XFS_DATA_FORK, &xfs_dir2_leaf1_buf_ops); 115 111 } 116 112 117 113 int ··· 123 119 struct xfs_buf **bpp) 124 120 { 125 121 return xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp, 126 - XFS_DATA_FORK, xfs_dir2_leafn_read_verify); 122 + XFS_DATA_FORK, &xfs_dir2_leafn_buf_ops); 127 123 } 128 124 129 125 /* ··· 202 198 /* 203 199 * Fix up the block header, make it a data block. 
204 200 */ 205 - dbp->b_pre_io = xfs_dir2_data_write_verify; 201 + dbp->b_ops = &xfs_dir2_data_buf_ops; 206 202 hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); 207 203 if (needscan) 208 204 xfs_dir2_data_freescan(mp, hdr, &needlog); ··· 1268 1264 * the block. 1269 1265 */ 1270 1266 if (magic == XFS_DIR2_LEAF1_MAGIC) { 1271 - bp->b_pre_io = xfs_dir2_leaf1_write_verify; 1267 + bp->b_ops = &xfs_dir2_leaf1_buf_ops; 1272 1268 ltp = xfs_dir2_leaf_tail_p(mp, leaf); 1273 1269 ltp->bestcount = 0; 1274 1270 xfs_dir2_leaf_log_tail(tp, bp); 1275 1271 } else 1276 - bp->b_pre_io = xfs_dir2_leafn_write_verify; 1272 + bp->b_ops = &xfs_dir2_leafn_buf_ops; 1277 1273 *bpp = bp; 1278 1274 return 0; 1279 1275 } ··· 1958 1954 else 1959 1955 xfs_dir2_leaf_log_header(tp, lbp); 1960 1956 1961 - lbp->b_pre_io = xfs_dir2_leaf1_write_verify; 1957 + lbp->b_ops = &xfs_dir2_leaf1_buf_ops; 1962 1958 leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAF1_MAGIC); 1963 1959 1964 1960 /*
+17 -15
fs/xfs/xfs_dir2_node.c
··· 72 72 } 73 73 74 74 static void 75 + xfs_dir2_free_read_verify( 76 + struct xfs_buf *bp) 77 + { 78 + xfs_dir2_free_verify(bp); 79 + } 80 + 81 + static void 75 82 xfs_dir2_free_write_verify( 76 83 struct xfs_buf *bp) 77 84 { 78 85 xfs_dir2_free_verify(bp); 79 86 } 80 87 81 - void 82 - xfs_dir2_free_read_verify( 83 - struct xfs_buf *bp) 84 - { 85 - xfs_dir2_free_verify(bp); 86 - bp->b_pre_io = xfs_dir2_free_write_verify; 87 - bp->b_iodone = NULL; 88 - xfs_buf_ioend(bp, 0); 89 - } 88 + static const struct xfs_buf_ops xfs_dir2_free_buf_ops = { 89 + .verify_read = xfs_dir2_free_read_verify, 90 + .verify_write = xfs_dir2_free_write_verify, 91 + }; 90 92 91 93 92 94 static int ··· 100 98 struct xfs_buf **bpp) 101 99 { 102 100 return xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp, 103 - XFS_DATA_FORK, xfs_dir2_free_read_verify); 101 + XFS_DATA_FORK, &xfs_dir2_free_buf_ops); 104 102 } 105 103 106 104 int ··· 203 201 XFS_DATA_FORK); 204 202 if (error) 205 203 return error; 206 - fbp->b_pre_io = xfs_dir2_free_write_verify; 204 + fbp->b_ops = &xfs_dir2_free_buf_ops; 207 205 208 206 free = fbp->b_addr; 209 207 leaf = lbp->b_addr; ··· 227 225 } 228 226 free->hdr.nused = cpu_to_be32(n); 229 227 230 - lbp->b_pre_io = xfs_dir2_leafn_write_verify; 228 + lbp->b_ops = &xfs_dir2_leafn_buf_ops; 231 229 leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAFN_MAGIC); 232 230 233 231 /* ··· 638 636 state->extrablk.index = (int)((char *)dep - 639 637 (char *)curbp->b_addr); 640 638 state->extrablk.magic = XFS_DIR2_DATA_MAGIC; 641 - curbp->b_pre_io = xfs_dir2_data_write_verify; 639 + curbp->b_ops = &xfs_dir2_data_buf_ops; 642 640 if (cmp == XFS_CMP_EXACT) 643 641 return XFS_ERROR(EEXIST); 644 642 } ··· 653 651 state->extrablk.index = -1; 654 652 state->extrablk.blkno = curdb; 655 653 state->extrablk.magic = XFS_DIR2_DATA_MAGIC; 656 - curbp->b_pre_io = xfs_dir2_data_write_verify; 654 + curbp->b_ops = &xfs_dir2_data_buf_ops; 657 655 } else { 658 656 /* If the curbp is not the CI match block, drop 
it */ 659 657 if (state->extrablk.bp != curbp) ··· 1651 1649 -1, &fbp, XFS_DATA_FORK); 1652 1650 if (error) 1653 1651 return error; 1654 - fbp->b_pre_io = xfs_dir2_free_write_verify; 1652 + fbp->b_ops = &xfs_dir2_free_buf_ops; 1655 1653 1656 1654 /* 1657 1655 * Initialize the new block to be empty, and remember
+7 -3
fs/xfs/xfs_dir2_priv.h
··· 30 30 const unsigned char *name, int len); 31 31 32 32 /* xfs_dir2_block.c */ 33 + extern const struct xfs_buf_ops xfs_dir2_block_buf_ops; 34 + 33 35 extern int xfs_dir2_block_addname(struct xfs_da_args *args); 34 36 extern int xfs_dir2_block_getdents(struct xfs_inode *dp, void *dirent, 35 37 xfs_off_t *offset, filldir_t filldir); ··· 47 45 #else 48 46 #define xfs_dir2_data_check(dp,bp) 49 47 #endif 50 - extern void xfs_dir2_data_write_verify(struct xfs_buf *bp); 48 + 49 + extern const struct xfs_buf_ops xfs_dir2_data_buf_ops; 50 + 51 51 extern int __xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_buf *bp); 52 52 extern int xfs_dir2_data_read(struct xfs_trans *tp, struct xfs_inode *dp, 53 53 xfs_dablk_t bno, xfs_daddr_t mapped_bno, struct xfs_buf **bpp); ··· 77 73 xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp); 78 74 79 75 /* xfs_dir2_leaf.c */ 80 - extern void xfs_dir2_leafn_read_verify(struct xfs_buf *bp); 81 - extern void xfs_dir2_leafn_write_verify(struct xfs_buf *bp); 76 + extern const struct xfs_buf_ops xfs_dir2_leafn_buf_ops; 77 + 82 78 extern int xfs_dir2_leafn_read(struct xfs_trans *tp, struct xfs_inode *dp, 83 79 xfs_dablk_t fbno, xfs_daddr_t mappedbno, struct xfs_buf **bpp); 84 80 extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
+10 -8
fs/xfs/xfs_dquot.c
··· 284 284 } 285 285 286 286 static void 287 - xfs_dquot_buf_write_verify( 287 + xfs_dquot_buf_read_verify( 288 288 struct xfs_buf *bp) 289 289 { 290 290 xfs_dquot_buf_verify(bp); 291 291 } 292 292 293 293 void 294 - xfs_dquot_buf_read_verify( 294 + xfs_dquot_buf_write_verify( 295 295 struct xfs_buf *bp) 296 296 { 297 297 xfs_dquot_buf_verify(bp); 298 - bp->b_pre_io = xfs_dquot_buf_write_verify; 299 - bp->b_iodone = NULL; 300 - xfs_buf_ioend(bp, 0); 301 298 } 299 + 300 + const struct xfs_buf_ops xfs_dquot_buf_ops = { 301 + .verify_read = xfs_dquot_buf_read_verify, 302 + .verify_write = xfs_dquot_buf_write_verify, 303 + }; 302 304 303 305 /* 304 306 * Allocate a block and fill it with dquots. ··· 367 365 error = xfs_buf_geterror(bp); 368 366 if (error) 369 367 goto error1; 370 - bp->b_pre_io = xfs_dquot_buf_write_verify; 368 + bp->b_ops = &xfs_dquot_buf_ops; 371 369 372 370 /* 373 371 * Make a chunk of dquots out of this buffer and log ··· 437 435 ASSERT(*bpp == NULL); 438 436 return XFS_ERROR(error); 439 437 } 440 - (*bpp)->b_pre_io = xfs_dquot_buf_write_verify; 438 + (*bpp)->b_ops = &xfs_dquot_buf_ops; 441 439 442 440 ASSERT(xfs_buf_islocked(*bpp)); 443 441 d = (struct xfs_dqblk *)(*bpp)->b_addr; ··· 536 534 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, 537 535 dqp->q_blkno, 538 536 mp->m_quotainfo->qi_dqchunklen, 539 - 0, &bp, xfs_dquot_buf_read_verify); 537 + 0, &bp, &xfs_dquot_buf_ops); 540 538 541 539 if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) { 542 540 xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
+2 -1
fs/xfs/xfs_dquot.h
··· 140 140 141 141 extern int xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint, 142 142 uint, struct xfs_dquot **); 143 - extern void xfs_dquot_buf_read_verify(struct xfs_buf *bp); 144 143 extern void xfs_qm_dqdestroy(xfs_dquot_t *); 145 144 extern int xfs_qm_dqflush(struct xfs_dquot *, struct xfs_buf **); 146 145 extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); ··· 160 161 xfs_dqunlock(dqp); 161 162 return dqp; 162 163 } 164 + 165 + extern const struct xfs_buf_ops xfs_dquot_buf_ops; 163 166 164 167 #endif /* __XFS_DQUOT_H__ */
+17 -12
fs/xfs/xfs_fsops.c
··· 119 119 struct xfs_mount *mp, 120 120 xfs_daddr_t blkno, 121 121 size_t numblks, 122 - int flags) 122 + int flags, 123 + const struct xfs_buf_ops *ops) 123 124 { 124 125 struct xfs_buf *bp; 125 126 ··· 131 130 xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); 132 131 bp->b_bn = blkno; 133 132 bp->b_maps[0].bm_bn = blkno; 133 + bp->b_ops = ops; 134 134 135 135 return bp; 136 136 } ··· 219 217 */ 220 218 bp = xfs_growfs_get_hdr_buf(mp, 221 219 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), 222 - XFS_FSS_TO_BB(mp, 1), 0); 220 + XFS_FSS_TO_BB(mp, 1), 0, 221 + &xfs_agf_buf_ops); 223 222 if (!bp) { 224 223 error = ENOMEM; 225 224 goto error0; 226 225 } 227 - bp->b_pre_io = xfs_agf_write_verify; 228 226 229 227 agf = XFS_BUF_TO_AGF(bp); 230 228 agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC); ··· 257 255 */ 258 256 bp = xfs_growfs_get_hdr_buf(mp, 259 257 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)), 260 - XFS_FSS_TO_BB(mp, 1), 0); 258 + XFS_FSS_TO_BB(mp, 1), 0, 259 + &xfs_agfl_buf_ops); 261 260 if (!bp) { 262 261 error = ENOMEM; 263 262 goto error0; 264 263 } 265 - bp->b_pre_io = xfs_agfl_write_verify; 266 264 267 265 agfl = XFS_BUF_TO_AGFL(bp); 268 266 for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++) ··· 278 276 */ 279 277 bp = xfs_growfs_get_hdr_buf(mp, 280 278 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), 281 - XFS_FSS_TO_BB(mp, 1), 0); 279 + XFS_FSS_TO_BB(mp, 1), 0, 280 + &xfs_agi_buf_ops); 282 281 if (!bp) { 283 282 error = ENOMEM; 284 283 goto error0; 285 284 } 286 - bp->b_pre_io = xfs_agi_write_verify; 287 285 288 286 agi = XFS_BUF_TO_AGI(bp); 289 287 agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC); ··· 308 306 */ 309 307 bp = xfs_growfs_get_hdr_buf(mp, 310 308 XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)), 311 - BTOBB(mp->m_sb.sb_blocksize), 0); 309 + BTOBB(mp->m_sb.sb_blocksize), 0, 310 + &xfs_allocbt_buf_ops); 312 311 313 312 if (!bp) { 314 313 error = ENOMEM; ··· 332 329 */ 333 330 bp = xfs_growfs_get_hdr_buf(mp, 334 331 XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)), 
335 - BTOBB(mp->m_sb.sb_blocksize), 0); 332 + BTOBB(mp->m_sb.sb_blocksize), 0, 333 + &xfs_allocbt_buf_ops); 336 334 if (!bp) { 337 335 error = ENOMEM; 338 336 goto error0; ··· 356 352 */ 357 353 bp = xfs_growfs_get_hdr_buf(mp, 358 354 XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)), 359 - BTOBB(mp->m_sb.sb_blocksize), 0); 355 + BTOBB(mp->m_sb.sb_blocksize), 0, 356 + &xfs_inobt_buf_ops); 360 357 if (!bp) { 361 358 error = ENOMEM; 362 359 goto error0; ··· 453 448 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, 454 449 XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), 455 450 XFS_FSS_TO_BB(mp, 1), 0, &bp, 456 - xfs_sb_read_verify); 451 + &xfs_sb_buf_ops); 457 452 } else { 458 453 bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp, 459 454 XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), 460 455 XFS_FSS_TO_BB(mp, 1), 0); 461 456 if (bp) { 457 + bp->b_ops = &xfs_sb_buf_ops; 462 458 xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); 463 - bp->b_pre_io = xfs_sb_write_verify; 464 459 } else 465 460 error = ENOMEM; 466 461 }
+10 -8
fs/xfs/xfs_ialloc.c
··· 210 210 * to log a whole cluster of inodes instead of all the 211 211 * individual transactions causing a lot of log traffic. 212 212 */ 213 - fbuf->b_pre_io = xfs_inode_buf_write_verify; 213 + fbuf->b_ops = &xfs_inode_buf_ops; 214 214 xfs_buf_zero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog); 215 215 for (i = 0; i < ninodes; i++) { 216 216 int ioffset = i << mp->m_sb.sb_inodelog; ··· 1505 1505 xfs_check_agi_unlinked(agi); 1506 1506 } 1507 1507 1508 - void 1509 - xfs_agi_write_verify( 1508 + static void 1509 + xfs_agi_read_verify( 1510 1510 struct xfs_buf *bp) 1511 1511 { 1512 1512 xfs_agi_verify(bp); 1513 1513 } 1514 1514 1515 1515 static void 1516 - xfs_agi_read_verify( 1516 + xfs_agi_write_verify( 1517 1517 struct xfs_buf *bp) 1518 1518 { 1519 1519 xfs_agi_verify(bp); 1520 - bp->b_pre_io = xfs_agi_write_verify; 1521 - bp->b_iodone = NULL; 1522 - xfs_buf_ioend(bp, 0); 1523 1520 } 1521 + 1522 + const struct xfs_buf_ops xfs_agi_buf_ops = { 1523 + .verify_read = xfs_agi_read_verify, 1524 + .verify_write = xfs_agi_write_verify, 1525 + }; 1524 1526 1525 1527 /* 1526 1528 * Read in the allocation group header (inode allocation section) ··· 1540 1538 1541 1539 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, 1542 1540 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), 1543 - XFS_FSS_TO_BB(mp, 1), 0, bpp, xfs_agi_read_verify); 1541 + XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops); 1544 1542 if (error) 1545 1543 return error; 1546 1544
+1 -1
fs/xfs/xfs_ialloc.h
··· 150 150 int xfs_inobt_get_rec(struct xfs_btree_cur *cur, 151 151 xfs_inobt_rec_incore_t *rec, int *stat); 152 152 153 - void xfs_agi_write_verify(struct xfs_buf *bp); 153 + extern const struct xfs_buf_ops xfs_agi_buf_ops; 154 154 155 155 #endif /* __XFS_IALLOC_H__ */
+12 -11
fs/xfs/xfs_ialloc_btree.c
··· 217 217 } 218 218 219 219 static void 220 + xfs_inobt_read_verify( 221 + struct xfs_buf *bp) 222 + { 223 + xfs_inobt_verify(bp); 224 + } 225 + 226 + static void 220 227 xfs_inobt_write_verify( 221 228 struct xfs_buf *bp) 222 229 { 223 230 xfs_inobt_verify(bp); 224 231 } 225 232 226 - void 227 - xfs_inobt_read_verify( 228 - struct xfs_buf *bp) 229 - { 230 - xfs_inobt_verify(bp); 231 - bp->b_pre_io = xfs_inobt_write_verify; 232 - bp->b_iodone = NULL; 233 - xfs_buf_ioend(bp, 0); 234 - } 233 + const struct xfs_buf_ops xfs_inobt_buf_ops = { 234 + .verify_read = xfs_inobt_read_verify, 235 + .verify_write = xfs_inobt_write_verify, 236 + }; 235 237 236 238 #ifdef DEBUG 237 239 STATIC int ··· 272 270 .init_rec_from_cur = xfs_inobt_init_rec_from_cur, 273 271 .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur, 274 272 .key_diff = xfs_inobt_key_diff, 275 - .read_verify = xfs_inobt_read_verify, 276 - .write_verify = xfs_inobt_write_verify, 273 + .buf_ops = &xfs_inobt_buf_ops, 277 274 #ifdef DEBUG 278 275 .keys_inorder = xfs_inobt_keys_inorder, 279 276 .recs_inorder = xfs_inobt_recs_inorder,
+2
fs/xfs/xfs_ialloc_btree.h
··· 109 109 struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t); 110 110 extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int); 111 111 112 + extern const struct xfs_buf_ops xfs_inobt_buf_ops; 113 + 112 114 #endif /* __XFS_IALLOC_BTREE_H__ */
+16 -12
fs/xfs/xfs_inode.c
··· 420 420 xfs_inobp_check(mp, bp); 421 421 } 422 422 423 - void 423 + 424 + static void 425 + xfs_inode_buf_read_verify( 426 + struct xfs_buf *bp) 427 + { 428 + xfs_inode_buf_verify(bp); 429 + } 430 + 431 + static void 424 432 xfs_inode_buf_write_verify( 425 433 struct xfs_buf *bp) 426 434 { 427 435 xfs_inode_buf_verify(bp); 428 436 } 429 437 430 - void 431 - xfs_inode_buf_read_verify( 432 - struct xfs_buf *bp) 433 - { 434 - xfs_inode_buf_verify(bp); 435 - bp->b_pre_io = xfs_inode_buf_write_verify; 436 - bp->b_iodone = NULL; 437 - xfs_buf_ioend(bp, 0); 438 - } 438 + const struct xfs_buf_ops xfs_inode_buf_ops = { 439 + .verify_read = xfs_inode_buf_read_verify, 440 + .verify_write = xfs_inode_buf_write_verify, 441 + }; 442 + 439 443 440 444 /* 441 445 * This routine is called to map an inode to the buffer containing the on-disk ··· 466 462 buf_flags |= XBF_UNMAPPED; 467 463 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno, 468 464 (int)imap->im_len, buf_flags, &bp, 469 - xfs_inode_buf_read_verify); 465 + &xfs_inode_buf_ops); 470 466 if (error) { 471 467 if (error == EAGAIN) { 472 468 ASSERT(buf_flags & XBF_TRYLOCK); ··· 1796 1792 * want it to fail. We can acheive this by adding a write 1797 1793 * verifier to the buffer. 1798 1794 */ 1799 - bp->b_pre_io = xfs_inode_buf_write_verify; 1795 + bp->b_ops = &xfs_inode_buf_ops; 1800 1796 1801 1797 /* 1802 1798 * Walk the inodes already attached to the buffer and mark them
+1 -2
fs/xfs/xfs_inode.h
··· 554 554 struct xfs_buf **, uint, uint); 555 555 int xfs_iread(struct xfs_mount *, struct xfs_trans *, 556 556 struct xfs_inode *, uint); 557 - void xfs_inode_buf_read_verify(struct xfs_buf *); 558 - void xfs_inode_buf_write_verify(struct xfs_buf *); 559 557 void xfs_dinode_to_disk(struct xfs_dinode *, 560 558 struct xfs_icdinode *); 561 559 void xfs_idestroy_fork(struct xfs_inode *, int); ··· 598 600 extern struct kmem_zone *xfs_ifork_zone; 599 601 extern struct kmem_zone *xfs_inode_zone; 600 602 extern struct kmem_zone *xfs_ili_zone; 603 + extern const struct xfs_buf_ops xfs_inode_buf_ops; 601 604 602 605 #endif /* __XFS_INODE_H__ */
+1 -1
fs/xfs/xfs_itable.c
··· 397 397 & ~r.ir_free) 398 398 xfs_btree_reada_bufs(mp, agno, 399 399 agbno, nbcluster, 400 - xfs_inode_buf_read_verify); 400 + &xfs_inode_buf_ops); 401 401 } 402 402 irbp->ir_startino = r.ir_startino; 403 403 irbp->ir_freecount = r.ir_freecount;
+1 -1
fs/xfs/xfs_log_recover.c
··· 3699 3699 ASSERT(!(XFS_BUF_ISWRITE(bp))); 3700 3700 XFS_BUF_READ(bp); 3701 3701 XFS_BUF_UNASYNC(bp); 3702 - bp->b_iodone = xfs_sb_read_verify; 3702 + bp->b_ops = &xfs_sb_buf_ops; 3703 3703 xfsbdstrat(log->l_mp, bp); 3704 3704 error = xfs_buf_iowait(bp); 3705 3705 if (error) {
+21 -14
fs/xfs/xfs_mount.c
··· 631 631 xfs_buf_ioerror(bp, error); 632 632 } 633 633 634 - void 635 - xfs_sb_write_verify( 636 - struct xfs_buf *bp) 637 - { 638 - xfs_sb_verify(bp); 639 - } 640 - 641 - void 634 + static void 642 635 xfs_sb_read_verify( 643 636 struct xfs_buf *bp) 644 637 { 645 638 xfs_sb_verify(bp); 646 - bp->b_pre_io = xfs_sb_write_verify; 647 - bp->b_iodone = NULL; 648 - xfs_buf_ioend(bp, 0); 649 639 } 650 640 651 641 /* ··· 644 654 * If we find an XFS superblock, the run a normal, noisy mount because we are 645 655 * really going to mount it and want to know about errors. 646 656 */ 647 - void 657 + static void 648 658 xfs_sb_quiet_read_verify( 649 659 struct xfs_buf *bp) 650 660 { ··· 660 670 /* quietly fail */ 661 671 xfs_buf_ioerror(bp, EFSCORRUPTED); 662 672 } 673 + 674 + static void 675 + xfs_sb_write_verify( 676 + struct xfs_buf *bp) 677 + { 678 + xfs_sb_verify(bp); 679 + } 680 + 681 + const struct xfs_buf_ops xfs_sb_buf_ops = { 682 + .verify_read = xfs_sb_read_verify, 683 + .verify_write = xfs_sb_write_verify, 684 + }; 685 + 686 + static const struct xfs_buf_ops xfs_sb_quiet_buf_ops = { 687 + .verify_read = xfs_sb_quiet_read_verify, 688 + .verify_write = xfs_sb_write_verify, 689 + }; 663 690 664 691 /* 665 692 * xfs_readsb ··· 704 697 reread: 705 698 bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR, 706 699 BTOBB(sector_size), 0, 707 - loud ? xfs_sb_read_verify 708 - : xfs_sb_quiet_read_verify); 700 + loud ? &xfs_sb_buf_ops 701 + : &xfs_sb_quiet_buf_ops); 709 702 if (!bp) { 710 703 if (loud) 711 704 xfs_warn(mp, "SB buffer read failed");
+2 -2
fs/xfs/xfs_mount.h
··· 385 385 386 386 #endif /* __KERNEL__ */ 387 387 388 - extern void xfs_sb_read_verify(struct xfs_buf *); 389 - extern void xfs_sb_write_verify(struct xfs_buf *bp); 390 388 extern void xfs_mod_sb(struct xfs_trans *, __int64_t); 391 389 extern int xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t, 392 390 xfs_agnumber_t *); 393 391 extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *); 394 392 extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t); 393 + 394 + extern const struct xfs_buf_ops xfs_sb_buf_ops; 395 395 396 396 #endif /* __XFS_MOUNT_H__ */
+1 -1
fs/xfs/xfs_qm.c
··· 893 893 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, 894 894 XFS_FSB_TO_DADDR(mp, bno), 895 895 mp->m_quotainfo->qi_dqchunklen, 0, &bp, 896 - xfs_dquot_buf_read_verify); 896 + &xfs_dquot_buf_ops); 897 897 if (error) 898 898 break; 899 899
+3 -3
fs/xfs/xfs_trans.h
··· 474 474 struct xfs_buf_map *map, int nmaps, 475 475 xfs_buf_flags_t flags, 476 476 struct xfs_buf **bpp, 477 - xfs_buf_iodone_t verify); 477 + const struct xfs_buf_ops *ops); 478 478 479 479 static inline int 480 480 xfs_trans_read_buf( ··· 485 485 int numblks, 486 486 xfs_buf_flags_t flags, 487 487 struct xfs_buf **bpp, 488 - xfs_buf_iodone_t verify) 488 + const struct xfs_buf_ops *ops) 489 489 { 490 490 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); 491 491 return xfs_trans_read_buf_map(mp, tp, target, &map, 1, 492 - flags, bpp, verify); 492 + flags, bpp, ops); 493 493 } 494 494 495 495 struct xfs_buf *xfs_trans_getsb(xfs_trans_t *, struct xfs_mount *, int);
+4 -4
fs/xfs/xfs_trans_buf.c
··· 258 258 int nmaps, 259 259 xfs_buf_flags_t flags, 260 260 struct xfs_buf **bpp, 261 - xfs_buf_iodone_t verify) 261 + const struct xfs_buf_ops *ops) 262 262 { 263 263 xfs_buf_t *bp; 264 264 xfs_buf_log_item_t *bip; ··· 266 266 267 267 *bpp = NULL; 268 268 if (!tp) { 269 - bp = xfs_buf_read_map(target, map, nmaps, flags, verify); 269 + bp = xfs_buf_read_map(target, map, nmaps, flags, ops); 270 270 if (!bp) 271 271 return (flags & XBF_TRYLOCK) ? 272 272 EAGAIN : XFS_ERROR(ENOMEM); ··· 315 315 ASSERT(!XFS_BUF_ISASYNC(bp)); 316 316 ASSERT(bp->b_iodone == NULL); 317 317 XFS_BUF_READ(bp); 318 - bp->b_iodone = verify; 318 + bp->b_ops = ops; 319 319 xfsbdstrat(tp->t_mountp, bp); 320 320 error = xfs_buf_iowait(bp); 321 321 if (error) { ··· 352 352 return 0; 353 353 } 354 354 355 - bp = xfs_buf_read_map(target, map, nmaps, flags, verify); 355 + bp = xfs_buf_read_map(target, map, nmaps, flags, ops); 356 356 if (bp == NULL) { 357 357 *bpp = NULL; 358 358 return (flags & XBF_TRYLOCK) ?