Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfs: Provide invalidate_folio and release_folio calls

Provide default invalidate_folio and release_folio calls. These will need
to interact with invalidation correctly at some point. They will be needed
if netfslib is to make use of folio->private for its own purposes.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org

+54 -114
+2 -31
fs/9p/vfs_addr.c
··· 88 88 .issue_read = v9fs_issue_read, 89 89 }; 90 90 91 - /** 92 - * v9fs_release_folio - release the private state associated with a folio 93 - * @folio: The folio to be released 94 - * @gfp: The caller's allocation restrictions 95 - * 96 - * Returns true if the page can be released, false otherwise. 97 - */ 98 - 99 - static bool v9fs_release_folio(struct folio *folio, gfp_t gfp) 100 - { 101 - if (folio_test_private(folio)) 102 - return false; 103 - #ifdef CONFIG_9P_FSCACHE 104 - if (folio_test_fscache(folio)) { 105 - if (current_is_kswapd() || !(gfp & __GFP_FS)) 106 - return false; 107 - folio_wait_fscache(folio); 108 - } 109 - fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio)))); 110 - #endif 111 - return true; 112 - } 113 - 114 - static void v9fs_invalidate_folio(struct folio *folio, size_t offset, 115 - size_t length) 116 - { 117 - folio_wait_fscache(folio); 118 - } 119 - 120 91 #ifdef CONFIG_9P_FSCACHE 121 92 static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error, 122 93 bool was_async) ··· 295 324 .writepage = v9fs_vfs_writepage, 296 325 .write_begin = v9fs_write_begin, 297 326 .write_end = v9fs_write_end, 298 - .release_folio = v9fs_release_folio, 299 - .invalidate_folio = v9fs_invalidate_folio, 327 + .release_folio = netfs_release_folio, 328 + .invalidate_folio = netfs_invalidate_folio, 300 329 .launder_folio = v9fs_launder_folio, 301 330 .direct_IO = v9fs_direct_IO, 302 331 };
+4 -49
fs/afs/file.c
··· 20 20 21 21 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma); 22 22 static int afs_symlink_read_folio(struct file *file, struct folio *folio); 23 - static void afs_invalidate_folio(struct folio *folio, size_t offset, 24 - size_t length); 25 - static bool afs_release_folio(struct folio *folio, gfp_t gfp_flags); 26 23 27 24 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter); 28 25 static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos, ··· 54 57 .readahead = netfs_readahead, 55 58 .dirty_folio = netfs_dirty_folio, 56 59 .launder_folio = afs_launder_folio, 57 - .release_folio = afs_release_folio, 58 - .invalidate_folio = afs_invalidate_folio, 60 + .release_folio = netfs_release_folio, 61 + .invalidate_folio = netfs_invalidate_folio, 59 62 .write_begin = afs_write_begin, 60 63 .write_end = afs_write_end, 61 64 .writepages = afs_writepages, ··· 64 67 65 68 const struct address_space_operations afs_symlink_aops = { 66 69 .read_folio = afs_symlink_read_folio, 67 - .release_folio = afs_release_folio, 68 - .invalidate_folio = afs_invalidate_folio, 70 + .release_folio = netfs_release_folio, 71 + .invalidate_folio = netfs_invalidate_folio, 69 72 .migrate_folio = filemap_migrate_folio, 70 73 }; 71 74 ··· 382 385 .check_write_begin = afs_check_write_begin, 383 386 .issue_read = afs_issue_read, 384 387 }; 385 - 386 - /* 387 - * invalidate part or all of a page 388 - * - release a page and clean up its private data if offset is 0 (indicating 389 - * the entire page) 390 - */ 391 - static void afs_invalidate_folio(struct folio *folio, size_t offset, 392 - size_t length) 393 - { 394 - _enter("{%lu},%zu,%zu", folio->index, offset, length); 395 - 396 - folio_wait_fscache(folio); 397 - _leave(""); 398 - } 399 - 400 - /* 401 - * release a page and clean up its private state if it's not busy 402 - * - return true if the page can now be released, false if not 403 - */ 404 - static bool afs_release_folio(struct folio *folio, gfp_t gfp) 405 - { 406 - struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio)); 407 - 408 - _enter("{{%llx:%llu}[%lu],%lx},%x", 409 - vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags, 410 - gfp); 411 - 412 - /* deny if folio is being written to the cache and the caller hasn't 413 - * elected to wait */ 414 - #ifdef CONFIG_AFS_FSCACHE 415 - if (folio_test_fscache(folio)) { 416 - if (current_is_kswapd() || !(gfp & __GFP_FS)) 417 - return false; 418 - folio_wait_fscache(folio); 419 - } 420 - fscache_note_page_release(afs_vnode_cache(vnode)); 421 - #endif 422 - 423 - /* Indicate that the folio can be released */ 424 - _leave(" = T"); 425 - return true; 426 - } 427 388 428 389 static void afs_add_open_mmap(struct afs_vnode *vnode) 429 390 {
+2 -22
fs/ceph/addr.c
··· 159 159 ceph_put_snap_context(snapc); 160 160 } 161 161 162 - folio_wait_fscache(folio); 163 - } 164 - 165 - static bool ceph_release_folio(struct folio *folio, gfp_t gfp) 166 - { 167 - struct inode *inode = folio->mapping->host; 168 - struct ceph_client *cl = ceph_inode_to_client(inode); 169 - 170 - doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode), 171 - folio->index, folio_test_dirty(folio) ? "" : "not "); 172 - 173 - if (folio_test_private(folio)) 174 - return false; 175 - 176 - if (folio_test_fscache(folio)) { 177 - if (current_is_kswapd() || !(gfp & __GFP_FS)) 178 - return false; 179 - folio_wait_fscache(folio); 180 - } 181 - ceph_fscache_note_page_release(inode); 182 - return true; 162 + netfs_invalidate_folio(folio, offset, length); 183 163 } 184 164 185 165 static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq) ··· 1565 1585 .write_end = ceph_write_end, 1566 1586 .dirty_folio = ceph_dirty_folio, 1567 1587 .invalidate_folio = ceph_invalidate_folio, 1568 - .release_folio = ceph_release_folio, 1588 + .release_folio = netfs_release_folio, 1569 1589 .direct_IO = noop_direct_IO, 1570 1590 }; 1571 1591
-10
fs/ceph/cache.h
··· 56 56 return fscache_cookie_enabled(ceph_fscache_cookie(ceph_inode(inode))); 57 57 } 58 58 59 - static inline void ceph_fscache_note_page_release(struct inode *inode) 60 - { 61 - struct ceph_inode_info *ci = ceph_inode(inode); 62 - 63 - fscache_note_page_release(ceph_fscache_cookie(ci)); 64 - } 65 59 #else /* CONFIG_CEPH_FSCACHE */ 66 60 static inline int ceph_fscache_register_fs(struct ceph_fs_client* fsc, 67 61 struct fs_context *fc) ··· 111 117 static inline bool ceph_is_cache_enabled(struct inode *inode) 112 118 { 113 119 return false; 114 - } 115 - 116 - static inline void ceph_fscache_note_page_release(struct inode *inode) 117 - { 118 120 } 119 121 #endif /* CONFIG_CEPH_FSCACHE */ 120 122
+42
fs/netfs/misc.c
··· 84 84 } 85 85 } 86 86 EXPORT_SYMBOL(netfs_clear_inode_writeback); 87 + 88 + /** 89 + * netfs_invalidate_folio - Invalidate or partially invalidate a folio 90 + * @folio: Folio proposed for release 91 + * @offset: Offset of the invalidated region 92 + * @length: Length of the invalidated region 93 + * 94 + * Invalidate part or all of a folio for a network filesystem. The folio will 95 + * be removed afterwards if the invalidated region covers the entire folio. 96 + */ 97 + void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length) 98 + { 99 + _enter("{%lx},%zx,%zx", folio_index(folio), offset, length); 100 + 101 + folio_wait_fscache(folio); 102 + } 103 + EXPORT_SYMBOL(netfs_invalidate_folio); 104 + 105 + /** 106 + * netfs_release_folio - Try to release a folio 107 + * @folio: Folio proposed for release 108 + * @gfp: Flags qualifying the release 109 + * 110 + * Request release of a folio and clean up its private state if it's not busy. 111 + * Returns true if the folio can now be released, false if not 112 + */ 113 + bool netfs_release_folio(struct folio *folio, gfp_t gfp) 114 + { 115 + struct netfs_inode *ctx = netfs_inode(folio_inode(folio)); 116 + 117 + if (folio_test_private(folio)) 118 + return false; 119 + if (folio_test_fscache(folio)) { 120 + if (current_is_kswapd() || !(gfp & __GFP_FS)) 121 + return false; 122 + folio_wait_fscache(folio); 123 + } 124 + 125 + fscache_note_page_release(netfs_i_cookie(ctx)); 126 + return true; 127 + } 128 + EXPORT_SYMBOL(netfs_release_folio);
+4 -2
include/linux/netfs.h
··· 293 293 void netfs_readahead(struct readahead_control *); 294 294 int netfs_read_folio(struct file *, struct folio *); 295 295 int netfs_write_begin(struct netfs_inode *, struct file *, 296 - struct address_space *, loff_t pos, unsigned int len, 297 - struct folio **, void **fsdata); 296 + struct address_space *, loff_t pos, unsigned int len, 297 + struct folio **, void **fsdata); 298 298 bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio); 299 299 int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc); 300 300 void netfs_clear_inode_writeback(struct inode *inode, const void *aux); 301 + void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length); 302 + bool netfs_release_folio(struct folio *folio, gfp_t gfp); 301 303 302 304 void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); 303 305 void netfs_get_subrequest(struct netfs_io_subrequest *subreq,