Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfs: Move pinning-for-writeback from fscache to netfs

Move the resource pinning-for-writeback from fscache code to netfslib code.
This is used to keep a cache backing object pinned whilst we have dirty
pages on the netfs inode in the pagecache such that VM writeback will be
able to reach it.

Whilst we're at it, switch the parameters of netfs_unpin_writeback() to
match ->write_inode() so that it can be used for that directly.

Note that this mechanism could be more generically useful than just for
network filesystems. Quite often they have to keep around other resources
(e.g. authentication tokens or network connections) until the writeback is
complete.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org

+124 -187
+9 -24
fs/9p/vfs_addr.c
··· 317 317 return copied; 318 318 } 319 319 320 - #ifdef CONFIG_9P_FSCACHE 321 - /* 322 - * Mark a page as having been made dirty and thus needing writeback. We also 323 - * need to pin the cache object to write back to. 324 - */ 325 - static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio) 326 - { 327 - struct v9fs_inode *v9inode = V9FS_I(mapping->host); 328 - 329 - return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode)); 330 - } 331 - #else 332 - #define v9fs_dirty_folio filemap_dirty_folio 333 - #endif 334 - 335 320 const struct address_space_operations v9fs_addr_operations = { 336 - .read_folio = netfs_read_folio, 337 - .readahead = netfs_readahead, 338 - .dirty_folio = v9fs_dirty_folio, 339 - .writepage = v9fs_vfs_writepage, 340 - .write_begin = v9fs_write_begin, 341 - .write_end = v9fs_write_end, 342 - .release_folio = v9fs_release_folio, 321 + .read_folio = netfs_read_folio, 322 + .readahead = netfs_readahead, 323 + .dirty_folio = netfs_dirty_folio, 324 + .writepage = v9fs_vfs_writepage, 325 + .write_begin = v9fs_write_begin, 326 + .write_end = v9fs_write_end, 327 + .release_folio = v9fs_release_folio, 343 328 .invalidate_folio = v9fs_invalidate_folio, 344 - .launder_folio = v9fs_launder_folio, 345 - .direct_IO = v9fs_direct_IO, 329 + .launder_folio = v9fs_launder_folio, 330 + .direct_IO = v9fs_direct_IO, 346 331 };
+1 -2
fs/9p/vfs_inode.c
··· 376 376 377 377 #ifdef CONFIG_9P_FSCACHE 378 378 version = cpu_to_le32(v9inode->qid.version); 379 - fscache_clear_inode_writeback(v9fs_inode_cookie(v9inode), inode, 380 - &version); 379 + netfs_clear_inode_writeback(inode, &version); 381 380 #endif 382 381 383 382 clear_inode(inode);
+2 -12
fs/9p/vfs_super.c
··· 289 289 static int v9fs_write_inode(struct inode *inode, 290 290 struct writeback_control *wbc) 291 291 { 292 - struct v9fs_inode *v9inode; 293 - 294 292 /* 295 293 * send an fsync request to server irrespective of 296 294 * wbc->sync_mode. 297 295 */ 298 296 p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); 299 - 300 - v9inode = V9FS_I(inode); 301 - fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode)); 302 - 303 - return 0; 297 + return netfs_unpin_writeback(inode, wbc); 304 298 } 305 299 306 300 static int v9fs_write_inode_dotl(struct inode *inode, 307 301 struct writeback_control *wbc) 308 302 { 309 - struct v9fs_inode *v9inode; 310 303 311 - v9inode = V9FS_I(inode); 312 304 p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); 313 305 314 - fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode)); 315 - 316 - return 0; 306 + return netfs_unpin_writeback(inode, wbc); 317 307 } 318 308 319 309 static const struct super_operations v9fs_super_ops = {
+1 -7
fs/afs/file.c
··· 55 55 const struct address_space_operations afs_file_aops = { 56 56 .read_folio = netfs_read_folio, 57 57 .readahead = netfs_readahead, 58 - .dirty_folio = afs_dirty_folio, 58 + .dirty_folio = netfs_dirty_folio, 59 59 .launder_folio = afs_launder_folio, 60 60 .release_folio = afs_release_folio, 61 61 .invalidate_folio = afs_invalidate_folio, ··· 385 385 .check_write_begin = afs_check_write_begin, 386 386 .issue_read = afs_issue_read, 387 387 }; 388 - 389 - int afs_write_inode(struct inode *inode, struct writeback_control *wbc) 390 - { 391 - fscache_unpin_writeback(wbc, afs_vnode_cache(AFS_FS_I(inode))); 392 - return 0; 393 - } 394 388 395 389 /* 396 390 * Adjust the dirty region of the page on truncation or full invalidation,
+1 -1
fs/afs/inode.c
··· 823 823 truncate_inode_pages_final(&inode->i_data); 824 824 825 825 afs_set_cache_aux(vnode, &aux); 826 - fscache_clear_inode_writeback(afs_vnode_cache(vnode), inode, &aux); 826 + netfs_clear_inode_writeback(inode, &aux); 827 827 clear_inode(inode); 828 828 829 829 while (!list_empty(&vnode->wb_keys)) {
-6
fs/afs/internal.h
··· 1073 1073 extern int afs_fetch_data(struct afs_vnode *, struct afs_read *); 1074 1074 extern struct afs_read *afs_alloc_read(gfp_t); 1075 1075 extern void afs_put_read(struct afs_read *); 1076 - extern int afs_write_inode(struct inode *, struct writeback_control *); 1077 1076 1078 1077 static inline struct afs_read *afs_get_read(struct afs_read *req) 1079 1078 { ··· 1521 1522 /* 1522 1523 * write.c 1523 1524 */ 1524 - #ifdef CONFIG_AFS_FSCACHE 1525 - bool afs_dirty_folio(struct address_space *, struct folio *); 1526 - #else 1527 - #define afs_dirty_folio filemap_dirty_folio 1528 - #endif 1529 1525 extern int afs_write_begin(struct file *file, struct address_space *mapping, 1530 1526 loff_t pos, unsigned len, 1531 1527 struct page **pagep, void **fsdata);
+1 -1
fs/afs/super.c
··· 55 55 static const struct super_operations afs_super_ops = { 56 56 .statfs = afs_statfs, 57 57 .alloc_inode = afs_alloc_inode, 58 - .write_inode = afs_write_inode, 58 + .write_inode = netfs_unpin_writeback, 59 59 .drop_inode = afs_drop_inode, 60 60 .destroy_inode = afs_destroy_inode, 61 61 .free_inode = afs_free_inode,
-9
fs/afs/write.c
··· 23 23 loff_t i_size, bool caching); 24 24 25 25 #ifdef CONFIG_AFS_FSCACHE 26 - /* 27 - * Mark a page as having been made dirty and thus needing writeback. We also 28 - * need to pin the cache object to write back to. 29 - */ 30 - bool afs_dirty_folio(struct address_space *mapping, struct folio *folio) 31 - { 32 - return fscache_dirty_folio(mapping, folio, 33 - afs_vnode_cache(AFS_FS_I(mapping->host))); 34 - } 35 26 static void afs_folio_start_fscache(bool caching, struct folio *folio) 36 27 { 37 28 if (caching)
+7 -16
fs/ceph/cache.h
··· 43 43 } 44 44 } 45 45 46 - static inline void ceph_fscache_unpin_writeback(struct inode *inode, 46 + static inline int ceph_fscache_unpin_writeback(struct inode *inode, 47 47 struct writeback_control *wbc) 48 48 { 49 - fscache_unpin_writeback(wbc, ceph_fscache_cookie(ceph_inode(inode))); 49 + return netfs_unpin_writeback(inode, wbc); 50 50 } 51 51 52 - static inline int ceph_fscache_dirty_folio(struct address_space *mapping, 53 - struct folio *folio) 54 - { 55 - struct ceph_inode_info *ci = ceph_inode(mapping->host); 56 - 57 - return fscache_dirty_folio(mapping, folio, ceph_fscache_cookie(ci)); 58 - } 52 + #define ceph_fscache_dirty_folio netfs_dirty_folio 59 53 60 54 static inline bool ceph_is_cache_enabled(struct inode *inode) 61 55 { ··· 106 112 { 107 113 } 108 114 109 - static inline void ceph_fscache_unpin_writeback(struct inode *inode, 110 - struct writeback_control *wbc) 115 + static inline int ceph_fscache_unpin_writeback(struct inode *inode, 116 + struct writeback_control *wbc) 111 117 { 118 + return 0; 112 119 } 113 120 114 - static inline int ceph_fscache_dirty_folio(struct address_space *mapping, 115 - struct folio *folio) 116 - { 117 - return filemap_dirty_folio(mapping, folio); 118 - } 121 + #define ceph_fscache_dirty_folio filemap_dirty_folio 119 122 120 123 static inline bool ceph_is_cache_enabled(struct inode *inode) 121 124 {
+1 -1
fs/ceph/inode.c
··· 694 694 percpu_counter_dec(&mdsc->metric.total_inodes); 695 695 696 696 truncate_inode_pages_final(&inode->i_data); 697 - if (inode->i_state & I_PINNING_FSCACHE_WB) 697 + if (inode->i_state & I_PINNING_NETFS_WB) 698 698 ceph_fscache_unuse_cookie(inode, true); 699 699 clear_inode(inode); 700 700
+5 -5
fs/fs-writeback.c
··· 1675 1675 1676 1676 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 1677 1677 inode->i_state |= I_DIRTY_PAGES; 1678 - else if (unlikely(inode->i_state & I_PINNING_FSCACHE_WB)) { 1678 + else if (unlikely(inode->i_state & I_PINNING_NETFS_WB)) { 1679 1679 if (!(inode->i_state & I_DIRTY_PAGES)) { 1680 - inode->i_state &= ~I_PINNING_FSCACHE_WB; 1681 - wbc->unpinned_fscache_wb = true; 1682 - dirty |= I_PINNING_FSCACHE_WB; /* Cause write_inode */ 1680 + inode->i_state &= ~I_PINNING_NETFS_WB; 1681 + wbc->unpinned_netfs_wb = true; 1682 + dirty |= I_PINNING_NETFS_WB; /* Cause write_inode */ 1683 1683 } 1684 1684 } 1685 1685 ··· 1691 1691 if (ret == 0) 1692 1692 ret = err; 1693 1693 } 1694 - wbc->unpinned_fscache_wb = false; 1694 + wbc->unpinned_netfs_wb = false; 1695 1695 trace_writeback_single_inode(inode, wbc, nr_to_write); 1696 1696 return ret; 1697 1697 }
+1
fs/netfs/Makefile
··· 5 5 io.o \ 6 6 iterator.o \ 7 7 main.o \ 8 + misc.o \ 8 9 objects.o 9 10 10 11 netfs-$(CONFIG_NETFS_STATS) += stats.o
-40
fs/netfs/fscache_io.c
··· 158 158 } 159 159 EXPORT_SYMBOL(__fscache_begin_write_operation); 160 160 161 - /** 162 - * fscache_dirty_folio - Mark folio dirty and pin a cache object for writeback 163 - * @mapping: The mapping the folio belongs to. 164 - * @folio: The folio being dirtied. 165 - * @cookie: The cookie referring to the cache object 166 - * 167 - * Set the dirty flag on a folio and pin an in-use cache object in memory 168 - * so that writeback can later write to it. This is intended 169 - * to be called from the filesystem's ->dirty_folio() method. 170 - * 171 - * Return: true if the dirty flag was set on the folio, false otherwise. 172 - */ 173 - bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio, 174 - struct fscache_cookie *cookie) 175 - { 176 - struct inode *inode = mapping->host; 177 - bool need_use = false; 178 - 179 - _enter(""); 180 - 181 - if (!filemap_dirty_folio(mapping, folio)) 182 - return false; 183 - if (!fscache_cookie_valid(cookie)) 184 - return true; 185 - 186 - if (!(inode->i_state & I_PINNING_FSCACHE_WB)) { 187 - spin_lock(&inode->i_lock); 188 - if (!(inode->i_state & I_PINNING_FSCACHE_WB)) { 189 - inode->i_state |= I_PINNING_FSCACHE_WB; 190 - need_use = true; 191 - } 192 - spin_unlock(&inode->i_lock); 193 - 194 - if (need_use) 195 - fscache_use_cookie(cookie, true); 196 - } 197 - return true; 198 - } 199 - EXPORT_SYMBOL(fscache_dirty_folio); 200 - 201 161 struct fscache_write_request { 202 162 struct netfs_cache_resources cache_resources; 203 163 struct address_space *mapping;
+86
fs/netfs/misc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Miscellaneous routines. 3 + * 4 + * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved. 5 + * Written by David Howells (dhowells@redhat.com) 6 + */ 7 + 8 + #include <linux/swap.h> 9 + #include "internal.h" 10 + 11 + /** 12 + * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback 13 + * @mapping: The mapping the folio belongs to. 14 + * @folio: The folio being dirtied. 15 + * 16 + * Set the dirty flag on a folio and pin an in-use cache object in memory so 17 + * that writeback can later write to it. This is intended to be called from 18 + * the filesystem's ->dirty_folio() method. 19 + * 20 + * Return: true if the dirty flag was set on the folio, false otherwise. 21 + */ 22 + bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio) 23 + { 24 + struct inode *inode = mapping->host; 25 + struct netfs_inode *ictx = netfs_inode(inode); 26 + struct fscache_cookie *cookie = netfs_i_cookie(ictx); 27 + bool need_use = false; 28 + 29 + _enter(""); 30 + 31 + if (!filemap_dirty_folio(mapping, folio)) 32 + return false; 33 + if (!fscache_cookie_valid(cookie)) 34 + return true; 35 + 36 + if (!(inode->i_state & I_PINNING_NETFS_WB)) { 37 + spin_lock(&inode->i_lock); 38 + if (!(inode->i_state & I_PINNING_NETFS_WB)) { 39 + inode->i_state |= I_PINNING_NETFS_WB; 40 + need_use = true; 41 + } 42 + spin_unlock(&inode->i_lock); 43 + 44 + if (need_use) 45 + fscache_use_cookie(cookie, true); 46 + } 47 + return true; 48 + } 49 + EXPORT_SYMBOL(netfs_dirty_folio); 50 + 51 + /** 52 + * netfs_unpin_writeback - Unpin writeback resources 53 + * @inode: The inode on which the cookie resides 54 + * @wbc: The writeback control 55 + * 56 + * Unpin the writeback resources pinned by netfs_dirty_folio(). This is 57 + * intended to be called as/by the netfs's ->write_inode() method. 58 + */ 59 + int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc) 60 + { 61 + struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode)); 62 + 63 + if (wbc->unpinned_netfs_wb) 64 + fscache_unuse_cookie(cookie, NULL, NULL); 65 + return 0; 66 + } 67 + EXPORT_SYMBOL(netfs_unpin_writeback); 68 + 69 + /** 70 + * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode 71 + * @inode: The inode to clean up 72 + * @aux: Auxiliary data to apply to the inode 73 + * 74 + * Clear any writeback resources held by an inode when the inode is evicted. 75 + * This must be called before clear_inode() is called. 76 + */ 77 + void netfs_clear_inode_writeback(struct inode *inode, const void *aux) 78 + { 79 + struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode)); 80 + 81 + if (inode->i_state & I_PINNING_NETFS_WB) { 82 + loff_t i_size = i_size_read(inode); 83 + fscache_unuse_cookie(cookie, aux, &i_size); 84 + } 85 + } 86 + EXPORT_SYMBOL(netfs_clear_inode_writeback);
+2 -3
fs/smb/client/cifsfs.c
··· 429 429 cifs_evict_inode(struct inode *inode) 430 430 { 431 431 truncate_inode_pages_final(&inode->i_data); 432 - if (inode->i_state & I_PINNING_FSCACHE_WB) 432 + if (inode->i_state & I_PINNING_NETFS_WB) 433 433 cifs_fscache_unuse_inode_cookie(inode, true); 434 434 cifs_fscache_release_inode_cookie(inode); 435 435 clear_inode(inode); ··· 792 792 793 793 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc) 794 794 { 795 - fscache_unpin_writeback(wbc, cifs_inode_cookie(inode)); 796 - return 0; 795 + return netfs_unpin_writeback(inode, wbc); 797 796 } 798 797 799 798 static int cifs_drop_inode(struct inode *inode)
+2 -16
fs/smb/client/file.c
··· 5043 5043 /* do we need to unpin (or unlock) the file */ 5044 5044 } 5045 5045 5046 - /* 5047 - * Mark a page as having been made dirty and thus needing writeback. We also 5048 - * need to pin the cache object to write back to. 5049 - */ 5050 - #ifdef CONFIG_CIFS_FSCACHE 5051 - static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio) 5052 - { 5053 - return fscache_dirty_folio(mapping, folio, 5054 - cifs_inode_cookie(mapping->host)); 5055 - } 5056 - #else 5057 - #define cifs_dirty_folio filemap_dirty_folio 5058 - #endif 5059 - 5060 5046 const struct address_space_operations cifs_addr_ops = { 5061 5047 .read_folio = cifs_read_folio, 5062 5048 .readahead = cifs_readahead, 5063 5049 .writepages = cifs_writepages, 5064 5050 .write_begin = cifs_write_begin, 5065 5051 .write_end = cifs_write_end, 5066 - .dirty_folio = cifs_dirty_folio, 5052 + .dirty_folio = netfs_dirty_folio, 5067 5053 .release_folio = cifs_release_folio, 5068 5054 .direct_IO = cifs_direct_io, 5069 5055 .invalidate_folio = cifs_invalidate_folio, ··· 5073 5087 .writepages = cifs_writepages, 5074 5088 .write_begin = cifs_write_begin, 5075 5089 .write_end = cifs_write_end, 5076 - .dirty_folio = cifs_dirty_folio, 5090 + .dirty_folio = netfs_dirty_folio, 5077 5091 .release_folio = cifs_release_folio, 5078 5092 .invalidate_folio = cifs_invalidate_folio, 5079 5093 .launder_folio = cifs_launder_folio,
+1 -1
include/linux/fs.h
··· 2294 2294 #define I_CREATING (1 << 15) 2295 2295 #define I_DONTCACHE (1 << 16) 2296 2296 #define I_SYNC_QUEUED (1 << 17) 2297 - #define I_PINNING_FSCACHE_WB (1 << 18) 2297 + #define I_PINNING_NETFS_WB (1 << 18) 2298 2298 2299 2299 #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) 2300 2300 #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
-42
include/linux/fscache.h
··· 626 626 627 627 } 628 628 629 - #if __fscache_available 630 - bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio, 631 - struct fscache_cookie *cookie); 632 - #else 633 - #define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \ 634 - filemap_dirty_folio(MAPPING, FOLIO) 635 - #endif 636 - 637 - /** 638 - * fscache_unpin_writeback - Unpin writeback resources 639 - * @wbc: The writeback control 640 - * @cookie: The cookie referring to the cache object 641 - * 642 - * Unpin the writeback resources pinned by fscache_dirty_folio(). This is 643 - * intended to be called by the netfs's ->write_inode() method. 644 - */ 645 - static inline void fscache_unpin_writeback(struct writeback_control *wbc, 646 - struct fscache_cookie *cookie) 647 - { 648 - if (wbc->unpinned_fscache_wb) 649 - fscache_unuse_cookie(cookie, NULL, NULL); 650 - } 651 - 652 - /** 653 - * fscache_clear_inode_writeback - Clear writeback resources pinned by an inode 654 - * @cookie: The cookie referring to the cache object 655 - * @inode: The inode to clean up 656 - * @aux: Auxiliary data to apply to the inode 657 - * 658 - * Clear any writeback resources held by an inode when the inode is evicted. 659 - * This must be called before clear_inode() is called. 660 - */ 661 - static inline void fscache_clear_inode_writeback(struct fscache_cookie *cookie, 662 - struct inode *inode, 663 - const void *aux) 664 - { 665 - if (inode->i_state & I_PINNING_FSCACHE_WB) { 666 - loff_t i_size = i_size_read(inode); 667 - fscache_unuse_cookie(cookie, aux, &i_size); 668 - } 669 - } 670 - 671 629 /** 672 630 * fscache_note_page_release - Note that a netfs page got released 673 631 * @cookie: The cookie corresponding to the file
+3
include/linux/netfs.h
··· 288 288 int netfs_write_begin(struct netfs_inode *, struct file *, 289 289 struct address_space *, loff_t pos, unsigned int len, 290 290 struct folio **, void **fsdata); 291 + bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio); 292 + int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc); 293 + void netfs_clear_inode_writeback(struct inode *inode, const void *aux); 291 294 292 295 void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); 293 296 void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+1 -1
include/linux/writeback.h
··· 60 60 unsigned for_reclaim:1; /* Invoked from the page allocator */ 61 61 unsigned range_cyclic:1; /* range_start is cyclic */ 62 62 unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ 63 - unsigned unpinned_fscache_wb:1; /* Cleared I_PINNING_FSCACHE_WB */ 63 + unsigned unpinned_netfs_wb:1; /* Cleared I_PINNING_NETFS_WB */ 64 64 65 65 /* 66 66 * When writeback IOs are bounced through async layers, only the