Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iomap: replace iomap_folio_ops with iomap_write_ops

The iomap_folio_ops are only used for buffered writes, including the zero
and unshare variants. Rename them to iomap_write_ops to better describe
the usage, and pass them through the call chain like the other operation
specific methods instead of through the iomap.

xfs_iomap_valid grows an IOMAP_HOLE check to keep the existing behavior
that never attached the folio_ops to an iomap representing a hole.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/20250710133343.399917-12-hch@lst.de
Acked-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>

authored by

Christoph Hellwig and committed by
Christian Brauner
2a5574fc 8b217cf7

+89 -76
-3
Documentation/filesystems/iomap/design.rst
··· 167 167 struct dax_device *dax_dev; 168 168 void *inline_data; 169 169 void *private; 170 - const struct iomap_folio_ops *folio_ops; 171 170 u64 validity_cookie; 172 171 }; 173 172 ··· 290 291 * ``private`` is a pointer to `filesystem-private information 291 292 <https://lore.kernel.org/all/20180619164137.13720-7-hch@lst.de/>`_. 292 293 This value will be passed unchanged to ``->iomap_end``. 293 - 294 - * ``folio_ops`` will be covered in the section on pagecache operations. 295 294 296 295 * ``validity_cookie`` is a magic freshness value set by the filesystem 297 296 that should be used to detect stale mappings.
+2 -6
Documentation/filesystems/iomap/operations.rst
··· 57 57 * ``bmap`` 58 58 * ``swap_activate`` 59 59 60 - ``struct iomap_folio_ops`` 60 + ``struct iomap_write_ops`` 61 61 -------------------------- 62 - 63 - The ``->iomap_begin`` function for pagecache operations may set the 64 - ``struct iomap::folio_ops`` field to an ops structure to override 65 - default behaviors of iomap: 66 62 67 63 .. code-block:: c 68 64 69 - struct iomap_folio_ops { 65 + struct iomap_write_ops { 70 66 struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos, 71 67 unsigned len); 72 68 void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
+2 -1
block/fops.c
··· 723 723 724 724 static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from) 725 725 { 726 - return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL); 726 + return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL, 727 + NULL); 727 728 } 728 729 729 730 /*
+12 -9
fs/gfs2/bmap.c
··· 963 963 gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len) 964 964 { 965 965 struct inode *inode = iter->inode; 966 + struct gfs2_inode *ip = GFS2_I(inode); 966 967 unsigned int blockmask = i_blocksize(inode) - 1; 967 968 struct gfs2_sbd *sdp = GFS2_SB(inode); 968 969 unsigned int blocks; 969 970 struct folio *folio; 970 971 int status; 972 + 973 + if (!gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip)) 974 + return iomap_get_folio(iter, pos, len); 971 975 972 976 blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits; 973 977 status = gfs2_trans_begin(sdp, RES_DINODE + blocks, 0); ··· 991 987 struct gfs2_inode *ip = GFS2_I(inode); 992 988 struct gfs2_sbd *sdp = GFS2_SB(inode); 993 989 994 - if (!gfs2_is_stuffed(ip)) 990 + if (gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip)) 995 991 gfs2_trans_add_databufs(ip->i_gl, folio, 996 992 offset_in_folio(folio, pos), 997 993 copied); ··· 999 995 folio_unlock(folio); 1000 996 folio_put(folio); 1001 997 1002 - if (tr->tr_num_buf_new) 1003 - __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1004 - 1005 - gfs2_trans_end(sdp); 998 + if (gfs2_is_jdata(ip) || gfs2_is_stuffed(ip)) { 999 + if (tr->tr_num_buf_new) 1000 + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1001 + gfs2_trans_end(sdp); 1002 + } 1006 1003 } 1007 1004 1008 - static const struct iomap_folio_ops gfs2_iomap_folio_ops = { 1005 + const struct iomap_write_ops gfs2_iomap_write_ops = { 1009 1006 .get_folio = gfs2_iomap_get_folio, 1010 1007 .put_folio = gfs2_iomap_put_folio, 1011 1008 }; ··· 1083 1078 gfs2_trans_end(sdp); 1084 1079 } 1085 1080 1086 - if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip)) 1087 - iomap->folio_ops = &gfs2_iomap_folio_ops; 1088 1081 return 0; 1089 1082 1090 1083 out_trans_end: ··· 1307 1304 return 0; 1308 1305 length = min(length, inode->i_size - from); 1309 1306 return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops, 1310 - NULL); 1307 + &gfs2_iomap_write_ops, NULL); 1311 1308 } 1312 1309 1313 1310 #define 
GFS2_JTRUNC_REVOKES 8192
+1
fs/gfs2/bmap.h
··· 44 44 } 45 45 46 46 extern const struct iomap_ops gfs2_iomap_ops; 47 + extern const struct iomap_write_ops gfs2_iomap_write_ops; 47 48 extern const struct iomap_writeback_ops gfs2_writeback_ops; 48 49 49 50 int gfs2_unstuff_dinode(struct gfs2_inode *ip);
+2 -1
fs/gfs2/file.c
··· 1058 1058 } 1059 1059 1060 1060 pagefault_disable(); 1061 - ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, NULL); 1061 + ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, 1062 + &gfs2_iomap_write_ops, NULL); 1062 1063 pagefault_enable(); 1063 1064 if (ret > 0) 1064 1065 written += ret;
+44 -35
fs/iomap/buffered-io.c
··· 733 733 return 0; 734 734 } 735 735 736 - static struct folio *__iomap_get_folio(struct iomap_iter *iter, size_t len) 736 + static struct folio *__iomap_get_folio(struct iomap_iter *iter, 737 + const struct iomap_write_ops *write_ops, size_t len) 737 738 { 738 - const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; 739 739 loff_t pos = iter->pos; 740 740 741 741 if (!mapping_large_folio_support(iter->inode->i_mapping)) 742 742 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos)); 743 743 744 - if (folio_ops && folio_ops->get_folio) 745 - return folio_ops->get_folio(iter, pos, len); 746 - else 747 - return iomap_get_folio(iter, pos, len); 744 + if (write_ops && write_ops->get_folio) 745 + return write_ops->get_folio(iter, pos, len); 746 + return iomap_get_folio(iter, pos, len); 748 747 } 749 748 750 - static void __iomap_put_folio(struct iomap_iter *iter, size_t ret, 749 + static void __iomap_put_folio(struct iomap_iter *iter, 750 + const struct iomap_write_ops *write_ops, size_t ret, 751 751 struct folio *folio) 752 752 { 753 - const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; 754 753 loff_t pos = iter->pos; 755 754 756 - if (folio_ops && folio_ops->put_folio) { 757 - folio_ops->put_folio(iter->inode, pos, ret, folio); 755 + if (write_ops && write_ops->put_folio) { 756 + write_ops->put_folio(iter->inode, pos, ret, folio); 758 757 } else { 759 758 folio_unlock(folio); 760 759 folio_put(folio); ··· 790 791 * offset, and length. Callers can optionally pass a max length *plen, 791 792 * otherwise init to zero. 
792 793 */ 793 - static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop, 794 + static int iomap_write_begin(struct iomap_iter *iter, 795 + const struct iomap_write_ops *write_ops, struct folio **foliop, 794 796 size_t *poffset, u64 *plen) 795 797 { 796 - const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; 797 798 const struct iomap *srcmap = iomap_iter_srcmap(iter); 798 799 loff_t pos = iter->pos; 799 800 u64 len = min_t(u64, SIZE_MAX, iomap_length(iter)); ··· 808 809 if (fatal_signal_pending(current)) 809 810 return -EINTR; 810 811 811 - folio = __iomap_get_folio(iter, len); 812 + folio = __iomap_get_folio(iter, write_ops, len); 812 813 if (IS_ERR(folio)) 813 814 return PTR_ERR(folio); 814 815 ··· 822 823 * could do the wrong thing here (zero a page range incorrectly or fail 823 824 * to zero) and corrupt data. 824 825 */ 825 - if (folio_ops && folio_ops->iomap_valid) { 826 - bool iomap_valid = folio_ops->iomap_valid(iter->inode, 826 + if (write_ops && write_ops->iomap_valid) { 827 + bool iomap_valid = write_ops->iomap_valid(iter->inode, 827 828 &iter->iomap); 828 829 if (!iomap_valid) { 829 830 iter->iomap.flags |= IOMAP_F_STALE; ··· 849 850 return 0; 850 851 851 852 out_unlock: 852 - __iomap_put_folio(iter, 0, folio); 853 - 853 + __iomap_put_folio(iter, write_ops, 0, folio); 854 854 return status; 855 855 } 856 856 ··· 921 923 return __iomap_write_end(iter->inode, pos, len, copied, folio); 922 924 } 923 925 924 - static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i) 926 + static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i, 927 + const struct iomap_write_ops *write_ops) 925 928 { 926 929 ssize_t total_written = 0; 927 930 int status = 0; ··· 966 967 break; 967 968 } 968 969 969 - status = iomap_write_begin(iter, &folio, &offset, &bytes); 970 + status = iomap_write_begin(iter, write_ops, &folio, &offset, 971 + &bytes); 970 972 if (unlikely(status)) { 971 973 iomap_write_failed(iter->inode, 
iter->pos, bytes); 972 974 break; ··· 996 996 i_size_write(iter->inode, pos + written); 997 997 iter->iomap.flags |= IOMAP_F_SIZE_CHANGED; 998 998 } 999 - __iomap_put_folio(iter, written, folio); 999 + __iomap_put_folio(iter, write_ops, written, folio); 1000 1000 1001 1001 if (old_size < pos) 1002 1002 pagecache_isize_extended(iter->inode, old_size, pos); ··· 1029 1029 1030 1030 ssize_t 1031 1031 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i, 1032 - const struct iomap_ops *ops, void *private) 1032 + const struct iomap_ops *ops, 1033 + const struct iomap_write_ops *write_ops, void *private) 1033 1034 { 1034 1035 struct iomap_iter iter = { 1035 1036 .inode = iocb->ki_filp->f_mapping->host, ··· 1047 1046 iter.flags |= IOMAP_DONTCACHE; 1048 1047 1049 1048 while ((ret = iomap_iter(&iter, ops)) > 0) 1050 - iter.status = iomap_write_iter(&iter, i); 1049 + iter.status = iomap_write_iter(&iter, i, write_ops); 1051 1050 1052 1051 if (unlikely(iter.pos == iocb->ki_pos)) 1053 1052 return ret; ··· 1281 1280 } 1282 1281 EXPORT_SYMBOL_GPL(iomap_write_delalloc_release); 1283 1282 1284 - static int iomap_unshare_iter(struct iomap_iter *iter) 1283 + static int iomap_unshare_iter(struct iomap_iter *iter, 1284 + const struct iomap_write_ops *write_ops) 1285 1285 { 1286 1286 struct iomap *iomap = &iter->iomap; 1287 1287 u64 bytes = iomap_length(iter); ··· 1297 1295 bool ret; 1298 1296 1299 1297 bytes = min_t(u64, SIZE_MAX, bytes); 1300 - status = iomap_write_begin(iter, &folio, &offset, &bytes); 1298 + status = iomap_write_begin(iter, write_ops, &folio, &offset, 1299 + &bytes); 1301 1300 if (unlikely(status)) 1302 1301 return status; 1303 1302 if (iomap->flags & IOMAP_F_STALE) 1304 1303 break; 1305 1304 1306 1305 ret = iomap_write_end(iter, bytes, bytes, folio); 1307 - __iomap_put_folio(iter, bytes, folio); 1306 + __iomap_put_folio(iter, write_ops, bytes, folio); 1308 1307 if (WARN_ON_ONCE(!ret)) 1309 1308 return -EIO; 1310 1309 ··· 1323 1320 1324 1321 int 1325 1322 
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, 1326 - const struct iomap_ops *ops) 1323 + const struct iomap_ops *ops, 1324 + const struct iomap_write_ops *write_ops) 1327 1325 { 1328 1326 struct iomap_iter iter = { 1329 1327 .inode = inode, ··· 1339 1335 1340 1336 iter.len = min(len, size - pos); 1341 1337 while ((ret = iomap_iter(&iter, ops)) > 0) 1342 - iter.status = iomap_unshare_iter(&iter); 1338 + iter.status = iomap_unshare_iter(&iter, write_ops); 1343 1339 return ret; 1344 1340 } 1345 1341 EXPORT_SYMBOL_GPL(iomap_file_unshare); ··· 1358 1354 return filemap_write_and_wait_range(mapping, i->pos, end); 1359 1355 } 1360 1356 1361 - static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) 1357 + static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero, 1358 + const struct iomap_write_ops *write_ops) 1362 1359 { 1363 1360 u64 bytes = iomap_length(iter); 1364 1361 int status; ··· 1370 1365 bool ret; 1371 1366 1372 1367 bytes = min_t(u64, SIZE_MAX, bytes); 1373 - status = iomap_write_begin(iter, &folio, &offset, &bytes); 1368 + status = iomap_write_begin(iter, write_ops, &folio, &offset, 1369 + &bytes); 1374 1370 if (status) 1375 1371 return status; 1376 1372 if (iter->iomap.flags & IOMAP_F_STALE) ··· 1384 1378 folio_mark_accessed(folio); 1385 1379 1386 1380 ret = iomap_write_end(iter, bytes, bytes, folio); 1387 - __iomap_put_folio(iter, bytes, folio); 1381 + __iomap_put_folio(iter, write_ops, bytes, folio); 1388 1382 if (WARN_ON_ONCE(!ret)) 1389 1383 return -EIO; 1390 1384 ··· 1400 1394 1401 1395 int 1402 1396 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, 1403 - const struct iomap_ops *ops, void *private) 1397 + const struct iomap_ops *ops, 1398 + const struct iomap_write_ops *write_ops, void *private) 1404 1399 { 1405 1400 struct iomap_iter iter = { 1406 1401 .inode = inode, ··· 1431 1424 filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) { 1432 1425 iter.len = plen; 1433 1426 while 
((ret = iomap_iter(&iter, ops)) > 0) 1434 - iter.status = iomap_zero_iter(&iter, did_zero); 1427 + iter.status = iomap_zero_iter(&iter, did_zero, 1428 + write_ops); 1435 1429 1436 1430 iter.len = len - (iter.pos - pos); 1437 1431 if (ret || !iter.len) ··· 1463 1455 continue; 1464 1456 } 1465 1457 1466 - iter.status = iomap_zero_iter(&iter, did_zero); 1458 + iter.status = iomap_zero_iter(&iter, did_zero, write_ops); 1467 1459 } 1468 1460 return ret; 1469 1461 } ··· 1471 1463 1472 1464 int 1473 1465 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 1474 - const struct iomap_ops *ops, void *private) 1466 + const struct iomap_ops *ops, 1467 + const struct iomap_write_ops *write_ops, void *private) 1475 1468 { 1476 1469 unsigned int blocksize = i_blocksize(inode); 1477 1470 unsigned int off = pos & (blocksize - 1); ··· 1481 1472 if (!off) 1482 1473 return 0; 1483 1474 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops, 1484 - private); 1475 + write_ops, private); 1485 1476 } 1486 1477 EXPORT_SYMBOL_GPL(iomap_truncate_page); 1487 1478
+4 -2
fs/xfs/xfs_file.c
··· 979 979 980 980 trace_xfs_file_buffered_write(iocb, from); 981 981 ret = iomap_file_buffered_write(iocb, from, 982 - &xfs_buffered_write_iomap_ops, NULL); 982 + &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops, 983 + NULL); 983 984 984 985 /* 985 986 * If we hit a space limit, try to free up some lingering preallocated ··· 1060 1059 retry: 1061 1060 trace_xfs_file_buffered_write(iocb, from); 1062 1061 ret = iomap_file_buffered_write(iocb, from, 1063 - &xfs_buffered_write_iomap_ops, &ac); 1062 + &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops, 1063 + &ac); 1064 1064 if (ret == -ENOSPC && !cleared_space) { 1065 1065 /* 1066 1066 * Kick off writeback to convert delalloc space and release the
+8 -4
fs/xfs/xfs_iomap.c
··· 79 79 { 80 80 struct xfs_inode *ip = XFS_I(inode); 81 81 82 + if (iomap->type == IOMAP_HOLE) 83 + return true; 84 + 82 85 if (iomap->validity_cookie != 83 86 xfs_iomap_inode_sequence(ip, iomap->flags)) { 84 87 trace_xfs_iomap_invalid(ip, iomap); ··· 92 89 return true; 93 90 } 94 91 95 - static const struct iomap_folio_ops xfs_iomap_folio_ops = { 92 + const struct iomap_write_ops xfs_iomap_write_ops = { 96 93 .iomap_valid = xfs_iomap_valid, 97 94 }; 98 95 ··· 154 151 iomap->flags |= IOMAP_F_DIRTY; 155 152 156 153 iomap->validity_cookie = sequence_cookie; 157 - iomap->folio_ops = &xfs_iomap_folio_ops; 158 154 return 0; 159 155 } 160 156 ··· 2200 2198 return dax_zero_range(inode, pos, len, did_zero, 2201 2199 &xfs_dax_write_iomap_ops); 2202 2200 return iomap_zero_range(inode, pos, len, did_zero, 2203 - &xfs_buffered_write_iomap_ops, ac); 2201 + &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops, 2202 + ac); 2204 2203 } 2205 2204 2206 2205 int ··· 2217 2214 return dax_truncate_page(inode, pos, did_zero, 2218 2215 &xfs_dax_write_iomap_ops); 2219 2216 return iomap_truncate_page(inode, pos, did_zero, 2220 - &xfs_buffered_write_iomap_ops, ac); 2217 + &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops, 2218 + ac); 2221 2219 }
+1
fs/xfs/xfs_iomap.h
··· 57 57 extern const struct iomap_ops xfs_xattr_iomap_ops; 58 58 extern const struct iomap_ops xfs_dax_write_iomap_ops; 59 59 extern const struct iomap_ops xfs_atomic_write_cow_iomap_ops; 60 + extern const struct iomap_write_ops xfs_iomap_write_ops; 60 61 61 62 #endif /* __XFS_IOMAP_H__*/
+2 -1
fs/xfs/xfs_reflink.c
··· 1881 1881 &xfs_dax_write_iomap_ops); 1882 1882 else 1883 1883 error = iomap_file_unshare(inode, offset, len, 1884 - &xfs_buffered_write_iomap_ops); 1884 + &xfs_buffered_write_iomap_ops, 1885 + &xfs_iomap_write_ops); 1885 1886 if (error) 1886 1887 goto out; 1887 1888
+2 -1
fs/zonefs/file.c
··· 572 572 if (ret <= 0) 573 573 goto inode_unlock; 574 574 575 - ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops, NULL); 575 + ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops, 576 + NULL, NULL); 576 577 if (ret == -EIO) 577 578 zonefs_io_error(inode, true); 578 579
+9 -13
include/linux/iomap.h
··· 101 101 */ 102 102 #define IOMAP_NULL_ADDR -1ULL /* addr is not valid */ 103 103 104 - struct iomap_folio_ops; 105 - 106 104 struct iomap { 107 105 u64 addr; /* disk offset of mapping, bytes */ 108 106 loff_t offset; /* file offset of mapping, bytes */ ··· 111 113 struct dax_device *dax_dev; /* dax_dev for dax operations */ 112 114 void *inline_data; 113 115 void *private; /* filesystem private */ 114 - const struct iomap_folio_ops *folio_ops; 115 116 u64 validity_cookie; /* used with .iomap_valid() */ 116 117 }; 117 118 ··· 140 143 } 141 144 142 145 /* 143 - * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio 144 - * and put_folio will be called for each folio written to. This only applies 145 - * to buffered writes as unbuffered writes will not typically have folios 146 - * associated with them. 147 - * 148 146 * When get_folio succeeds, put_folio will always be called to do any 149 147 * cleanup work necessary. put_folio is responsible for unlocking and putting 150 148 * @folio. 
151 149 */ 152 - struct iomap_folio_ops { 150 + struct iomap_write_ops { 153 151 struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos, 154 152 unsigned len); 155 153 void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied, ··· 327 335 } 328 336 329 337 ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, 330 - const struct iomap_ops *ops, void *private); 338 + const struct iomap_ops *ops, 339 + const struct iomap_write_ops *write_ops, void *private); 331 340 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops); 332 341 void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); 333 342 bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); ··· 337 344 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len); 338 345 bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio); 339 346 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, 340 - const struct iomap_ops *ops); 347 + const struct iomap_ops *ops, 348 + const struct iomap_write_ops *write_ops); 341 349 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, 342 - bool *did_zero, const struct iomap_ops *ops, void *private); 350 + bool *did_zero, const struct iomap_ops *ops, 351 + const struct iomap_write_ops *write_ops, void *private); 343 352 int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 344 - const struct iomap_ops *ops, void *private); 353 + const struct iomap_ops *ops, 354 + const struct iomap_write_ops *write_ops, void *private); 345 355 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops, 346 356 void *private); 347 357 typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,