Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfs: Merge i_size update functions

Netfslib has two functions for updating the i_size after a write: one for
buffered writes into the pagecache and one for direct/unbuffered writes.
However, what needs to be done is much the same in both cases, so merge
them together.

This does raise one question, though: should updating the i_size after a
direct write do the same estimated update of i_blocks as is done for
buffered writes?

Also get rid of the cleanup function pointer from netfs_io_request, as it's
only used by the direct-write path to update i_size; instead, set i_size
directly from write collection.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/20250701163852.2171681-12-dhowells@redhat.com
cc: Steve French <sfrench@samba.org>
cc: Paulo Alcantara <pc@manguebit.org>
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>

authored by

David Howells and committed by
Christian Brauner
5e1e6ec2 2e065894

+31 -37
+21 -15
fs/netfs/buffered_write.c
··· 53 53 * data written into the pagecache until we can find out from the server what 54 54 * the values actually are. 55 55 */ 56 - static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode, 57 - loff_t i_size, loff_t pos, size_t copied) 56 + void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode, 57 + loff_t pos, size_t copied) 58 58 { 59 + loff_t i_size, end = pos + copied; 59 60 blkcnt_t add; 60 61 size_t gap; 61 62 63 + if (end <= i_size_read(inode)) 64 + return; 65 + 62 66 if (ctx->ops->update_i_size) { 63 - ctx->ops->update_i_size(inode, pos); 67 + ctx->ops->update_i_size(inode, end); 64 68 return; 65 69 } 66 70 67 71 spin_lock(&inode->i_lock); 68 - i_size_write(inode, pos); 72 + 73 + i_size = i_size_read(inode); 74 + if (end > i_size) { 75 + i_size_write(inode, end); 69 76 #if IS_ENABLED(CONFIG_FSCACHE) 70 - fscache_update_cookie(ctx->cache, NULL, &pos); 77 + fscache_update_cookie(ctx->cache, NULL, &end); 71 78 #endif 72 79 73 - gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1)); 74 - if (copied > gap) { 75 - add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE); 80 + gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1)); 81 + if (copied > gap) { 82 + add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE); 76 83 77 - inode->i_blocks = min_t(blkcnt_t, 78 - DIV_ROUND_UP(pos, SECTOR_SIZE), 79 - inode->i_blocks + add); 84 + inode->i_blocks = min_t(blkcnt_t, 85 + DIV_ROUND_UP(end, SECTOR_SIZE), 86 + inode->i_blocks + add); 87 + } 80 88 } 81 89 spin_unlock(&inode->i_lock); 82 90 } ··· 121 113 struct folio *folio = NULL, *writethrough = NULL; 122 114 unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? 
BDP_ASYNC : 0; 123 115 ssize_t written = 0, ret, ret2; 124 - loff_t i_size, pos = iocb->ki_pos; 116 + loff_t pos = iocb->ki_pos; 125 117 size_t max_chunk = mapping_max_folio_size(mapping); 126 118 bool maybe_trouble = false; 127 119 ··· 354 346 flush_dcache_folio(folio); 355 347 356 348 /* Update the inode size if we moved the EOF marker */ 349 + netfs_update_i_size(ctx, inode, pos, copied); 357 350 pos += copied; 358 - i_size = i_size_read(inode); 359 - if (pos > i_size) 360 - netfs_update_i_size(ctx, inode, i_size, pos, copied); 361 351 written += copied; 362 352 363 353 if (likely(!wreq)) {
-19
fs/netfs/direct_write.c
··· 9 9 #include <linux/uio.h> 10 10 #include "internal.h" 11 11 12 - static void netfs_cleanup_dio_write(struct netfs_io_request *wreq) 13 - { 14 - struct inode *inode = wreq->inode; 15 - unsigned long long end = wreq->start + wreq->transferred; 16 - 17 - if (wreq->error || end <= i_size_read(inode)) 18 - return; 19 - 20 - spin_lock(&inode->i_lock); 21 - if (end > i_size_read(inode)) { 22 - if (wreq->netfs_ops->update_i_size) 23 - wreq->netfs_ops->update_i_size(inode, end); 24 - else 25 - i_size_write(inode, end); 26 - } 27 - spin_unlock(&inode->i_lock); 28 - } 29 - 30 12 /* 31 13 * Perform an unbuffered write where we may have to do an RMW operation on an 32 14 * encrypted file. This can also be used for direct I/O writes. ··· 84 102 if (async) 85 103 wreq->iocb = iocb; 86 104 wreq->len = iov_iter_count(&wreq->buffer.iter); 87 - wreq->cleanup = netfs_cleanup_dio_write; 88 105 ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len); 89 106 if (ret < 0) { 90 107 _debug("begin = %zd", ret);
+6
fs/netfs/internal.h
··· 28 28 size_t offset, size_t len); 29 29 30 30 /* 31 + * buffered_write.c 32 + */ 33 + void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode, 34 + loff_t pos, size_t copied); 35 + 36 + /* 31 37 * main.c 32 38 */ 33 39 extern unsigned int netfs_debug;
+4 -2
fs/netfs/write_collect.c
··· 393 393 ictx->ops->invalidate_cache(wreq); 394 394 } 395 395 396 - if (wreq->cleanup) 397 - wreq->cleanup(wreq); 396 + if ((wreq->origin == NETFS_UNBUFFERED_WRITE || 397 + wreq->origin == NETFS_DIO_WRITE) && 398 + !wreq->error) 399 + netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred); 398 400 399 401 if (wreq->origin == NETFS_DIO_WRITE && 400 402 wreq->mapping->nrpages) {
-1
include/linux/netfs.h
··· 279 279 #define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark 280 280 * write to cache on read */ 281 281 const struct netfs_request_ops *netfs_ops; 282 - void (*cleanup)(struct netfs_io_request *req); 283 282 }; 284 283 285 284 /*