Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfs: Replace PG_fscache by setting folio->private and marking dirty

When dirty data is being written to the cache, setting/waiting on/clearing
the fscache flag is always done in tandem with setting/waiting on/clearing
the writeback flag. The netfslib buffered write routines wait on and set
both flags and the write request cleanup clears both flags, so the fscache
flag is almost superfluous.

The reason it isn't superfluous is because the fscache flag is also used to
indicate that data just read from the server is being written to the cache.
The flag is used to prevent a race involving overlapping direct-I/O writes
to the cache.

Change this to indicate that a page is in need of being copied to the cache
by placing a magic value in folio->private and marking the folios dirty.
Then when the writeback code sees a folio marked in this way, it only
writes it to the cache and not to the server.

If a folio that has this magic value set is modified, the value is just
replaced and the folio will then be uploaded too.

With this, PG_fscache is no longer required by the netfslib core, 9p and
afs.

Ceph and nfs, however, still need to use the old PG_fscache-based tracking.
To deal with this, a flag, NETFS_ICTX_USE_PGPRIV2, now has to be set on the
flags in the netfs_inode struct for those filesystems. This reenables the
use of PG_fscache in that inode. 9p and afs use the netfslib write helpers
so get switched over; cifs, for the moment, does page-by-page manual access
to the cache, so doesn't use PG_fscache and is unaffected.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Matthew Wilcox (Oracle) <willy@infradead.org>
cc: Eric Van Hensbergen <ericvh@kernel.org>
cc: Latchesar Ionkov <lucho@ionkov.net>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: Christian Schoenebeck <linux_oss@crudebyte.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Ilya Dryomov <idryomov@gmail.com>
cc: Xiubo Li <xiubli@redhat.com>
cc: Steve French <sfrench@samba.org>
cc: Paulo Alcantara <pc@manguebit.com>
cc: Ronnie Sahlberg <ronniesahlberg@gmail.com>
cc: Shyam Prasad N <sprasad@microsoft.com>
cc: Tom Talpey <tom@talpey.com>
cc: Bharath SM <bharathsm@microsoft.com>
cc: Trond Myklebust <trond.myklebust@hammerspace.com>
cc: Anna Schumaker <anna@kernel.org>
cc: netfs@lists.linux.dev
cc: v9fs@lists.linux.dev
cc: linux-afs@lists.infradead.org
cc: ceph-devel@vger.kernel.org
cc: linux-cifs@vger.kernel.org
cc: linux-nfs@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org

+145 -99
+1 -1
fs/ceph/addr.c
··· 517 517 struct fscache_cookie *cookie = ceph_fscache_cookie(ci); 518 518 519 519 fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode), 520 - ceph_fscache_write_terminated, inode, caching); 520 + ceph_fscache_write_terminated, inode, true, caching); 521 521 } 522 522 #else 523 523 static inline void ceph_set_page_fscache(struct page *page)
+2
fs/ceph/inode.c
··· 577 577 578 578 /* Set parameters for the netfs library */ 579 579 netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false); 580 + /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */ 581 + __set_bit(NETFS_ICTX_USE_PGPRIV2, &ci->netfs.flags); 580 582 581 583 spin_lock_init(&ci->i_ceph_lock); 582 584
+26 -10
fs/netfs/buffered_read.c
··· 10 10 #include "internal.h" 11 11 12 12 /* 13 - * Unlock the folios in a read operation. We need to set PG_fscache on any 13 + * Unlock the folios in a read operation. We need to set PG_writeback on any 14 14 * folios we're going to write back before we unlock them. 15 + * 16 + * Note that if the deprecated NETFS_RREQ_USE_PGPRIV2 is set then we use 17 + * PG_private_2 and do a direct write to the cache from here instead. 15 18 */ 16 19 void netfs_rreq_unlock_folios(struct netfs_io_request *rreq) 17 20 { ··· 51 48 xas_for_each(&xas, folio, last_page) { 52 49 loff_t pg_end; 53 50 bool pg_failed = false; 54 - bool folio_started; 51 + bool wback_to_cache = false; 52 + bool folio_started = false; 55 53 56 54 if (xas_retry(&xas, folio)) 57 55 continue; 58 56 59 57 pg_end = folio_pos(folio) + folio_size(folio) - 1; 60 58 61 - folio_started = false; 62 59 for (;;) { 63 60 loff_t sreq_end; 64 61 ··· 66 63 pg_failed = true; 67 64 break; 68 65 } 69 - if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) { 70 - trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache); 71 - folio_start_fscache(folio); 72 - folio_started = true; 66 + if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) { 67 + if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, 68 + &subreq->flags)) { 69 + trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache); 70 + folio_start_fscache(folio); 71 + folio_started = true; 72 + } 73 + } else { 74 + wback_to_cache |= 75 + test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); 73 76 } 74 77 pg_failed |= subreq_failed; 75 78 sreq_end = subreq->start + subreq->len - 1; ··· 107 98 kfree(finfo); 108 99 } 109 100 folio_mark_uptodate(folio); 101 + if (wback_to_cache && !WARN_ON_ONCE(folio_get_private(folio) != NULL)) { 102 + trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache); 103 + folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE); 104 + filemap_dirty_folio(folio->mapping, folio); 105 + } 110 106 } 111 107 112 108 if 
(!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) { ··· 505 491 netfs_put_request(rreq, false, netfs_rreq_trace_put_return); 506 492 507 493 have_folio: 508 - ret = folio_wait_fscache_killable(folio); 509 - if (ret < 0) 510 - goto error; 494 + if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags)) { 495 + ret = folio_wait_fscache_killable(folio); 496 + if (ret < 0) 497 + goto error; 498 + } 511 499 have_folio_no_wait: 512 500 *_folio = folio; 513 501 _leave(" = 0");
+44 -49
fs/netfs/buffered_write.c
··· 30 30 31 31 static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group) 32 32 { 33 - if (netfs_group && !folio_get_private(folio)) 34 - folio_attach_private(folio, netfs_get_group(netfs_group)); 35 - } 33 + void *priv = folio_get_private(folio); 36 34 37 - #if IS_ENABLED(CONFIG_FSCACHE) 38 - static void netfs_folio_start_fscache(bool caching, struct folio *folio) 39 - { 40 - if (caching) 41 - folio_start_fscache(folio); 35 + if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE)) 36 + folio_attach_private(folio, netfs_get_group(netfs_group)); 37 + else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE) 38 + folio_detach_private(folio); 42 39 } 43 - #else 44 - static void netfs_folio_start_fscache(bool caching, struct folio *folio) 45 - { 46 - } 47 - #endif 48 40 49 41 /* 50 42 * Decide how we should modify a folio. We might be attempting to do ··· 55 63 bool maybe_trouble) 56 64 { 57 65 struct netfs_folio *finfo = netfs_folio_info(folio); 66 + struct netfs_group *group = netfs_folio_group(folio); 58 67 loff_t pos = folio_file_pos(folio); 59 68 60 69 _enter(""); 61 70 62 - if (netfs_folio_group(folio) != netfs_group) 71 + if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) 63 72 return NETFS_FLUSH_CONTENT; 64 73 65 74 if (folio_test_uptodate(folio)) ··· 389 396 folio_clear_dirty_for_io(folio); 390 397 /* We make multiple writes to the folio... 
*/ 391 398 if (!folio_test_writeback(folio)) { 392 - folio_wait_fscache(folio); 393 399 folio_start_writeback(folio); 394 - folio_start_fscache(folio); 395 400 if (wreq->iter.count == 0) 396 401 trace_netfs_folio(folio, netfs_folio_trace_wthru); 397 402 else ··· 519 528 */ 520 529 vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group) 521 530 { 531 + struct netfs_group *group; 522 532 struct folio *folio = page_folio(vmf->page); 523 533 struct file *file = vmf->vma->vm_file; 524 534 struct inode *inode = file_inode(file); ··· 542 550 goto out; 543 551 } 544 552 545 - if (netfs_folio_group(folio) != netfs_group) { 553 + group = netfs_folio_group(folio); 554 + if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) { 546 555 folio_unlock(folio); 547 556 err = filemap_fdatawait_range(inode->i_mapping, 548 557 folio_pos(folio), ··· 599 606 600 607 trace_netfs_folio(folio, netfs_folio_trace_kill); 601 608 folio_clear_uptodate(folio); 602 - if (folio_test_fscache(folio)) 603 - folio_end_fscache(folio); 604 609 folio_end_writeback(folio); 605 610 folio_lock(folio); 606 611 generic_error_remove_folio(mapping, folio); ··· 634 643 next = folio_next_index(folio); 635 644 trace_netfs_folio(folio, netfs_folio_trace_redirty); 636 645 filemap_dirty_folio(mapping, folio); 637 - if (folio_test_fscache(folio)) 638 - folio_end_fscache(folio); 639 646 folio_end_writeback(folio); 640 647 folio_put(folio); 641 648 } while (index = next, index <= last); ··· 689 700 if (!folio_test_dirty(folio)) { 690 701 folio_detach_private(folio); 691 702 gcount++; 692 - trace_netfs_folio(folio, netfs_folio_trace_clear_g); 703 + if (group == NETFS_FOLIO_COPY_TO_CACHE) 704 + trace_netfs_folio(folio, 705 + netfs_folio_trace_end_copy); 706 + else 707 + trace_netfs_folio(folio, netfs_folio_trace_clear_g); 693 708 } else { 694 709 trace_netfs_folio(folio, netfs_folio_trace_redirtied); 695 710 } ··· 717 724 trace_netfs_folio(folio, netfs_folio_trace_clear); 718 725 } 719 
726 end_wb: 720 - if (folio_test_fscache(folio)) 721 - folio_end_fscache(folio); 722 727 xas_advance(&xas, folio_next_index(folio) - 1); 723 728 folio_end_writeback(folio); 724 729 } ··· 786 795 long *_count, 787 796 loff_t start, 788 797 loff_t max_len, 789 - bool caching, 790 798 size_t *_len, 791 799 size_t *_top) 792 800 { ··· 836 846 break; 837 847 } 838 848 if (!folio_test_dirty(folio) || 839 - folio_test_writeback(folio) || 840 - folio_test_fscache(folio)) { 849 + folio_test_writeback(folio)) { 841 850 folio_unlock(folio); 842 851 folio_put(folio); 843 852 xas_reset(xas); ··· 849 860 if ((const struct netfs_group *)priv != group) { 850 861 stop = true; 851 862 finfo = netfs_folio_info(folio); 852 - if (finfo->netfs_group != group || 863 + if (!finfo || 864 + finfo->netfs_group != group || 853 865 finfo->dirty_offset > 0) { 854 866 folio_unlock(folio); 855 867 folio_put(folio); ··· 884 894 885 895 for (i = 0; i < folio_batch_count(&fbatch); i++) { 886 896 folio = fbatch.folios[i]; 887 - trace_netfs_folio(folio, netfs_folio_trace_store_plus); 897 + if (group == NETFS_FOLIO_COPY_TO_CACHE) 898 + trace_netfs_folio(folio, netfs_folio_trace_copy_plus); 899 + else 900 + trace_netfs_folio(folio, netfs_folio_trace_store_plus); 888 901 889 902 if (!folio_clear_dirty_for_io(folio)) 890 903 BUG(); 891 904 folio_start_writeback(folio); 892 - netfs_folio_start_fscache(caching, folio); 893 905 folio_unlock(folio); 894 906 } 895 907 ··· 917 925 struct netfs_inode *ctx = netfs_inode(mapping->host); 918 926 unsigned long long i_size = i_size_read(&ctx->inode); 919 927 size_t len, max_len; 920 - bool caching = netfs_is_cache_enabled(ctx); 921 928 long count = wbc->nr_to_write; 922 929 int ret; 923 930 924 - _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching); 931 + _enter(",%lx,%llx-%llx", folio->index, start, end); 925 932 926 933 wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio), 927 - NETFS_WRITEBACK); 934 + group == NETFS_FOLIO_COPY_TO_CACHE ? 
935 + NETFS_COPY_TO_CACHE : NETFS_WRITEBACK); 928 936 if (IS_ERR(wreq)) { 929 937 folio_unlock(folio); 930 938 return PTR_ERR(wreq); ··· 933 941 if (!folio_clear_dirty_for_io(folio)) 934 942 BUG(); 935 943 folio_start_writeback(folio); 936 - netfs_folio_start_fscache(caching, folio); 937 944 938 945 count -= folio_nr_pages(folio); 939 946 ··· 941 950 * immediately lockable, is not dirty or is missing, or we reach the 942 951 * end of the range. 943 952 */ 944 - trace_netfs_folio(folio, netfs_folio_trace_store); 953 + if (group == NETFS_FOLIO_COPY_TO_CACHE) 954 + trace_netfs_folio(folio, netfs_folio_trace_copy); 955 + else 956 + trace_netfs_folio(folio, netfs_folio_trace_store); 945 957 946 958 len = wreq->len; 947 959 finfo = netfs_folio_info(folio); ··· 967 973 968 974 if (len < max_len) 969 975 netfs_extend_writeback(mapping, group, xas, &count, start, 970 - max_len, caching, &len, &wreq->upper_len); 976 + max_len, &len, &wreq->upper_len); 971 977 } 972 978 973 979 cant_expand: ··· 991 997 992 998 iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start, 993 999 wreq->upper_len); 994 - __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); 995 - ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback); 1000 + if (group != NETFS_FOLIO_COPY_TO_CACHE) { 1001 + __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); 1002 + ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback); 1003 + } else { 1004 + ret = netfs_begin_write(wreq, true, netfs_write_trace_copy_to_cache); 1005 + } 996 1006 if (ret == 0 || ret == -EIOCBQUEUED) 997 1007 wbc->nr_to_write -= len / PAGE_SIZE; 998 1008 } else { 999 1009 _debug("write discard %zx @%llx [%llx]", len, start, i_size); 1000 1010 1001 1011 /* The dirty region was entirely beyond the EOF. */ 1002 - fscache_clear_page_bits(mapping, start, len, caching); 1003 1012 netfs_pages_written_back(wreq); 1004 1013 ret = 0; 1005 1014 } ··· 1055 1058 1056 1059 /* Skip any dirty folio that's not in the group of interest. 
*/ 1057 1060 priv = folio_get_private(folio); 1058 - if ((const struct netfs_group *)priv != group) { 1059 - finfo = netfs_folio_info(folio); 1060 - if (finfo->netfs_group != group) { 1061 + if ((const struct netfs_group *)priv == NETFS_FOLIO_COPY_TO_CACHE) { 1062 + group = NETFS_FOLIO_COPY_TO_CACHE; 1063 + } else if ((const struct netfs_group *)priv != group) { 1064 + finfo = __netfs_folio_info(priv); 1065 + if (!finfo || finfo->netfs_group != group) { 1061 1066 folio_put(folio); 1062 1067 continue; 1063 1068 } ··· 1098 1099 goto search_again; 1099 1100 } 1100 1101 1101 - if (folio_test_writeback(folio) || 1102 - folio_test_fscache(folio)) { 1102 + if (folio_test_writeback(folio)) { 1103 1103 folio_unlock(folio); 1104 1104 if (wbc->sync_mode != WB_SYNC_NONE) { 1105 1105 folio_wait_writeback(folio); 1106 - #ifdef CONFIG_FSCACHE 1107 - folio_wait_fscache(folio); 1108 - #endif 1109 1106 goto lock_again; 1110 1107 } 1111 1108 ··· 1260 1265 1261 1266 bvec_set_folio(&bvec, folio, len, offset); 1262 1267 iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len); 1263 - __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); 1268 + if (group != NETFS_FOLIO_COPY_TO_CACHE) 1269 + __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); 1264 1270 ret = netfs_begin_write(wreq, true, netfs_write_trace_launder); 1265 1271 1266 1272 out_put: ··· 1270 1274 kfree(finfo); 1271 1275 netfs_put_request(wreq, false, netfs_rreq_trace_put_return); 1272 1276 out: 1273 - folio_wait_fscache(folio); 1274 1277 _leave(" = %d", ret); 1275 1278 return ret; 1276 1279 }
+8 -4
fs/netfs/fscache_io.c
··· 166 166 loff_t start; 167 167 size_t len; 168 168 bool set_bits; 169 + bool using_pgpriv2; 169 170 netfs_io_terminated_t term_func; 170 171 void *term_func_priv; 171 172 }; ··· 198 197 { 199 198 struct fscache_write_request *wreq = priv; 200 199 201 - fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len, 202 - wreq->set_bits); 200 + if (wreq->using_pgpriv2) 201 + fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len, 202 + wreq->set_bits); 203 203 204 204 if (wreq->term_func) 205 205 wreq->term_func(wreq->term_func_priv, transferred_or_error, ··· 214 212 loff_t start, size_t len, loff_t i_size, 215 213 netfs_io_terminated_t term_func, 216 214 void *term_func_priv, 217 - bool cond) 215 + bool using_pgpriv2, bool cond) 218 216 { 219 217 struct fscache_write_request *wreq; 220 218 struct netfs_cache_resources *cres; ··· 232 230 wreq->mapping = mapping; 233 231 wreq->start = start; 234 232 wreq->len = len; 233 + wreq->using_pgpriv2 = using_pgpriv2; 235 234 wreq->set_bits = cond; 236 235 wreq->term_func = term_func; 237 236 wreq->term_func_priv = term_func_priv; ··· 260 257 abandon_free: 261 258 kfree(wreq); 262 259 abandon: 263 - fscache_clear_page_bits(mapping, start, len, cond); 260 + if (using_pgpriv2) 261 + fscache_clear_page_bits(mapping, start, len, cond); 264 262 if (term_func) 265 263 term_func(term_func_priv, ret, false); 266 264 }
+7 -3
fs/netfs/internal.h
··· 168 168 */ 169 169 static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group) 170 170 { 171 - if (netfs_group) 171 + if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE) 172 172 refcount_inc(&netfs_group->ref); 173 173 return netfs_group; 174 174 } ··· 178 178 */ 179 179 static inline void netfs_put_group(struct netfs_group *netfs_group) 180 180 { 181 - if (netfs_group && refcount_dec_and_test(&netfs_group->ref)) 181 + if (netfs_group && 182 + netfs_group != NETFS_FOLIO_COPY_TO_CACHE && 183 + refcount_dec_and_test(&netfs_group->ref)) 182 184 netfs_group->free(netfs_group); 183 185 } 184 186 ··· 189 187 */ 190 188 static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr) 191 189 { 192 - if (netfs_group && refcount_sub_and_test(nr, &netfs_group->ref)) 190 + if (netfs_group && 191 + netfs_group != NETFS_FOLIO_COPY_TO_CACHE && 192 + refcount_sub_and_test(nr, &netfs_group->ref)) 193 193 netfs_group->free(netfs_group); 194 194 } 195 195
+10 -8
fs/netfs/io.c
··· 99 99 } 100 100 101 101 /* 102 - * Deal with the completion of writing the data to the cache. We have to clear 103 - * the PG_fscache bits on the folios involved and release the caller's ref. 102 + * [DEPRECATED] Deal with the completion of writing the data to the cache. We 103 + * have to clear the PG_fscache bits on the folios involved and release the 104 + * caller's ref. 104 105 * 105 106 * May be called in softirq mode and we inherit a ref from the caller. 106 107 */ ··· 139 138 } 140 139 141 140 static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error, 142 - bool was_async) 141 + bool was_async) /* [DEPRECATED] */ 143 142 { 144 143 struct netfs_io_subrequest *subreq = priv; 145 144 struct netfs_io_request *rreq = subreq->rreq; ··· 162 161 } 163 162 164 163 /* 165 - * Perform any outstanding writes to the cache. We inherit a ref from the 166 - * caller. 164 + * [DEPRECATED] Perform any outstanding writes to the cache. We inherit a ref 165 + * from the caller. 
167 166 */ 168 167 static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq) 169 168 { ··· 223 222 netfs_rreq_unmark_after_write(rreq, false); 224 223 } 225 224 226 - static void netfs_rreq_write_to_cache_work(struct work_struct *work) 225 + static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */ 227 226 { 228 227 struct netfs_io_request *rreq = 229 228 container_of(work, struct netfs_io_request, work); ··· 231 230 netfs_rreq_do_write_to_cache(rreq); 232 231 } 233 232 234 - static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) 233 + static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */ 235 234 { 236 235 rreq->work.func = netfs_rreq_write_to_cache_work; 237 236 if (!queue_work(system_unbound_wq, &rreq->work)) ··· 410 409 clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags); 411 410 wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS); 412 411 413 - if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) 412 + if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) && 413 + test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) 414 414 return netfs_rreq_write_to_cache(rreq); 415 415 416 416 netfs_rreq_completed(rreq, was_async);
+1
fs/netfs/main.c
··· 31 31 [NETFS_READAHEAD] = "RA", 32 32 [NETFS_READPAGE] = "RP", 33 33 [NETFS_READ_FOR_WRITE] = "RW", 34 + [NETFS_COPY_TO_CACHE] = "CC", 34 35 [NETFS_WRITEBACK] = "WB", 35 36 [NETFS_WRITETHROUGH] = "WT", 36 37 [NETFS_LAUNDER_WRITE] = "LW",
+1 -9
fs/netfs/misc.c
··· 177 177 */ 178 178 void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length) 179 179 { 180 - struct netfs_folio *finfo = NULL; 180 + struct netfs_folio *finfo; 181 181 size_t flen = folio_size(folio); 182 182 183 183 _enter("{%lx},%zx,%zx", folio->index, offset, length); 184 - 185 - folio_wait_fscache(folio); 186 184 187 185 if (!folio_test_private(folio)) 188 186 return; ··· 246 248 247 249 if (folio_test_private(folio)) 248 250 return false; 249 - if (folio_test_fscache(folio)) { 250 - if (current_is_kswapd() || !(gfp & __GFP_FS)) 251 - return false; 252 - folio_wait_fscache(folio); 253 - } 254 - 255 251 fscache_note_page_release(netfs_i_cookie(ctx)); 256 252 return true; 257 253 }
+5 -1
fs/netfs/objects.c
··· 45 45 refcount_set(&rreq->ref, 1); 46 46 47 47 __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); 48 - if (cached) 48 + if (cached) { 49 49 __set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags); 50 + if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags)) 51 + /* Filesystem uses deprecated PG_private_2 marking. */ 52 + __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags); 53 + } 50 54 if (file && file->f_flags & O_NONBLOCK) 51 55 __set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags); 52 56 if (rreq->netfs_ops->init_request) {
+2
fs/nfs/fscache.h
··· 81 81 static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi) 82 82 { 83 83 netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false); 84 + /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */ 85 + __set_bit(NETFS_ICTX_USE_PGPRIV2, &nfsi->netfs.flags); 84 86 } 85 87 extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr); 86 88 extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
+14 -8
include/linux/fscache.h
··· 172 172 extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *); 173 173 extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *); 174 174 175 - extern void __fscache_write_to_cache(struct fscache_cookie *, struct address_space *, 176 - loff_t, size_t, loff_t, netfs_io_terminated_t, void *, 177 - bool); 175 + void __fscache_write_to_cache(struct fscache_cookie *cookie, 176 + struct address_space *mapping, 177 + loff_t start, size_t len, loff_t i_size, 178 + netfs_io_terminated_t term_func, 179 + void *term_func_priv, 180 + bool using_pgpriv2, bool cond); 178 181 extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t); 179 182 180 183 /** ··· 600 597 * @i_size: The new size of the inode 601 598 * @term_func: The function to call upon completion 602 599 * @term_func_priv: The private data for @term_func 603 - * @caching: If PG_fscache has been set 600 + * @using_pgpriv2: If we're using PG_private_2 to mark in-progress write 601 + * @caching: If we actually want to do the caching 604 602 * 605 603 * Helper function for a netfs to write dirty data from an inode into the cache 606 604 * object that's backing it. ··· 612 608 * marked with PG_fscache. 613 609 * 614 610 * If given, @term_func will be called upon completion and supplied with 615 - * @term_func_priv. Note that the PG_fscache flags will have been cleared by 616 - * this point, so the netfs must retain its own pin on the mapping. 611 + * @term_func_priv. Note that if @using_pgpriv2 is set, the PG_private_2 flags 612 + * will have been cleared by this point, so the netfs must retain its own pin 613 + * on the mapping. 
617 614 */ 618 615 static inline void fscache_write_to_cache(struct fscache_cookie *cookie, 619 616 struct address_space *mapping, 620 617 loff_t start, size_t len, loff_t i_size, 621 618 netfs_io_terminated_t term_func, 622 619 void *term_func_priv, 623 - bool caching) 620 + bool using_pgpriv2, bool caching) 624 621 { 625 622 if (caching) 626 623 __fscache_write_to_cache(cookie, mapping, start, len, i_size, 627 - term_func, term_func_priv, caching); 624 + term_func, term_func_priv, 625 + using_pgpriv2, caching); 628 626 else if (term_func) 629 627 term_func(term_func_priv, -ENOBUFS, false); 630 628
+19 -5
include/linux/netfs.h
··· 143 143 #define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */ 144 144 #define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */ 145 145 #define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */ 146 + #define NETFS_ICTX_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark 147 + * write to cache on read */ 146 148 }; 147 149 148 150 /* ··· 167 165 unsigned int dirty_len; /* Write-streaming dirty data length */ 168 166 }; 169 167 #define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */ 168 + #define NETFS_FOLIO_COPY_TO_CACHE ((struct netfs_group *)0x356UL) /* Write to the cache only */ 169 + 170 + static inline bool netfs_is_folio_info(const void *priv) 171 + { 172 + return (unsigned long)priv & NETFS_FOLIO_INFO; 173 + } 174 + 175 + static inline struct netfs_folio *__netfs_folio_info(const void *priv) 176 + { 177 + if (netfs_is_folio_info(priv)) 178 + return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO); 179 + return NULL; 180 + } 170 181 171 182 static inline struct netfs_folio *netfs_folio_info(struct folio *folio) 172 183 { 173 - void *priv = folio_get_private(folio); 174 - 175 - if ((unsigned long)priv & NETFS_FOLIO_INFO) 176 - return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO); 177 - return NULL; 184 + return __netfs_folio_info(folio_get_private(folio)); 178 185 } 179 186 180 187 static inline struct netfs_group *netfs_folio_group(struct folio *folio) ··· 241 230 NETFS_READAHEAD, /* This read was triggered by readahead */ 242 231 NETFS_READPAGE, /* This read is a synchronous read */ 243 232 NETFS_READ_FOR_WRITE, /* This read is to prepare a write */ 233 + NETFS_COPY_TO_CACHE, /* This write is to copy a read to the cache */ 244 234 NETFS_WRITEBACK, /* This write was triggered by writepages */ 245 235 NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */ 246 236 NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */ ··· 299 287 #define 
NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */ 300 288 #define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */ 301 289 #define NETFS_RREQ_BLOCKED 10 /* We blocked */ 290 + #define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark 291 + * write to cache on read */ 302 292 const struct netfs_request_ops *netfs_ops; 303 293 void (*cleanup)(struct netfs_io_request *req); 304 294 };
+5 -1
include/trace/events/netfs.h
··· 24 24 E_(netfs_read_trace_write_begin, "WRITEBEGN") 25 25 26 26 #define netfs_write_traces \ 27 + EM(netfs_write_trace_copy_to_cache, "COPY2CACH") \ 27 28 EM(netfs_write_trace_dio_write, "DIO-WRITE") \ 28 29 EM(netfs_write_trace_launder, "LAUNDER ") \ 29 30 EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \ ··· 35 34 EM(NETFS_READAHEAD, "RA") \ 36 35 EM(NETFS_READPAGE, "RP") \ 37 36 EM(NETFS_READ_FOR_WRITE, "RW") \ 37 + EM(NETFS_COPY_TO_CACHE, "CC") \ 38 38 EM(NETFS_WRITEBACK, "WB") \ 39 39 EM(NETFS_WRITETHROUGH, "WT") \ 40 40 EM(NETFS_LAUNDER_WRITE, "LW") \ ··· 129 127 EM(netfs_folio_trace_clear, "clear") \ 130 128 EM(netfs_folio_trace_clear_s, "clear-s") \ 131 129 EM(netfs_folio_trace_clear_g, "clear-g") \ 132 - EM(netfs_folio_trace_copy_to_cache, "copy") \ 130 + EM(netfs_folio_trace_copy, "copy") \ 131 + EM(netfs_folio_trace_copy_plus, "copy+") \ 132 + EM(netfs_folio_trace_copy_to_cache, "mark-copy") \ 133 133 EM(netfs_folio_trace_end_copy, "end-copy") \ 134 134 EM(netfs_folio_trace_filled_gaps, "filled-gaps") \ 135 135 EM(netfs_folio_trace_kill, "kill") \