
netfs: Add support for caching single monolithic objects such as AFS dirs

Add support for caching the content of a file that contains a single
monolithic object that must be read/written with a single I/O operation,
such as an AFS directory.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-20-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: netfs@lists.linux.dev
cc: linux-afs@lists.infradead.org
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
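
For orientation, a minimal sketch of how a filesystem might consume the new single-object support end to end. All my_fs_* names are hypothetical; only the netfs_* and NETFS_* identifiers come from this patch:

/* Hypothetical sketch: a cache-only monolithic object (e.g. a directory
 * blob).  The my_fs_* names are invented for illustration. */
static void my_fs_init_dir(struct inode *inode)
{
	struct netfs_inode *ictx = netfs_inode(inode);

	/* Cache the blob as a single object, but never upload it to the
	 * server, which maintains its own directory contents. */
	__set_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &ictx->flags);
}

static void my_fs_dir_modified(struct inode *inode)
{
	/* After editing the blob locally, mark the inode dirty so that the
	 * writepages op runs and the new contents reach the cache. */
	netfs_single_mark_inode_dirty(inode);
}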

Authored by David Howells and committed by Christian Brauner
49866ce7 e61bfaad

478 additions, 14 deletions across 12 files
fs/netfs/Makefile (+1)
···
 	read_collect.o \
 	read_pgpriv2.o \
 	read_retry.o \
+	read_single.o \
 	rolling_buffer.o \
 	write_collect.o \
 	write_issue.o \
fs/netfs/buffered_read.c (+8 -3)
···
 					     loff_t i_size)
 {
 	struct netfs_cache_resources *cres = &rreq->cache_resources;
+	enum netfs_io_source source;

 	if (!cres->ops)
 		return NETFS_DOWNLOAD_FROM_SERVER;
-	return cres->ops->prepare_read(subreq, i_size);
+	source = cres->ops->prepare_read(subreq, i_size);
+	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+	return source;
+
 }

-static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
-					bool was_async)
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async)
 {
 	struct netfs_io_subrequest *subreq = priv;
···
 	unsigned long long zp = umin(ictx->zero_point, rreq->i_size);
 	size_t len = subreq->len;

+	if (unlikely(rreq->origin == NETFS_READ_SINGLE))
+		zp = rreq->i_size;
 	if (subreq->start >= zp) {
 		subreq->source = source = NETFS_FILL_WITH_ZEROES;
 		goto fill_with_zeroes;
fs/netfs/internal.h (+2)
···
 /*
  * buffered_read.c
  */
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async);
 int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 			     size_t offset, size_t len);
···
 extern atomic_t netfs_n_rh_dio_read;
 extern atomic_t netfs_n_rh_readahead;
 extern atomic_t netfs_n_rh_read_folio;
+extern atomic_t netfs_n_rh_read_single;
 extern atomic_t netfs_n_rh_rreq;
 extern atomic_t netfs_n_rh_sreq;
 extern atomic_t netfs_n_rh_download;
fs/netfs/main.c (+2)
···
 	[NETFS_READAHEAD]		= "RA",
 	[NETFS_READPAGE]		= "RP",
 	[NETFS_READ_GAPS]		= "RG",
+	[NETFS_READ_SINGLE]		= "R1",
 	[NETFS_READ_FOR_WRITE]		= "RW",
 	[NETFS_DIO_READ]		= "DR",
 	[NETFS_WRITEBACK]		= "WB",
+	[NETFS_WRITEBACK_SINGLE]	= "W1",
 	[NETFS_WRITETHROUGH]		= "WT",
 	[NETFS_UNBUFFERED_WRITE]	= "UW",
 	[NETFS_DIO_WRITE]		= "DW",
fs/netfs/objects.c (+2)
···
 	if (origin == NETFS_READAHEAD ||
 	    origin == NETFS_READPAGE ||
 	    origin == NETFS_READ_GAPS ||
+	    origin == NETFS_READ_SINGLE ||
 	    origin == NETFS_READ_FOR_WRITE ||
 	    origin == NETFS_DIO_READ)
 		INIT_WORK(&rreq->work, NULL);
···
 	case NETFS_READAHEAD:
 	case NETFS_READPAGE:
 	case NETFS_READ_GAPS:
+	case NETFS_READ_SINGLE:
 	case NETFS_READ_FOR_WRITE:
 	case NETFS_DIO_READ:
 		INIT_WORK(&subreq->work, netfs_read_subreq_termination_worker);
fs/netfs/read_collect.c (+43 -2)
···
 }

 /*
+ * Do processing after reading a monolithic single object.
+ */
+static void netfs_rreq_assess_single(struct netfs_io_request *rreq)
+{
+	struct netfs_io_subrequest *subreq;
+	struct netfs_io_stream *stream = &rreq->io_streams[0];
+
+	subreq = list_first_entry_or_null(&stream->subrequests,
+					  struct netfs_io_subrequest, rreq_link);
+	if (subreq) {
+		if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
+			rreq->error = subreq->error;
+		else
+			rreq->transferred = subreq->transferred;
+
+		if (!rreq->error && subreq->source == NETFS_DOWNLOAD_FROM_SERVER &&
+		    fscache_resources_valid(&rreq->cache_resources)) {
+			trace_netfs_rreq(rreq, netfs_rreq_trace_dirty);
+			netfs_single_mark_inode_dirty(rreq->inode);
+		}
+	}
+
+	if (rreq->iocb) {
+		rreq->iocb->ki_pos += rreq->transferred;
+		if (rreq->iocb->ki_complete)
+			rreq->iocb->ki_complete(
+				rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
+	}
+	if (rreq->netfs_ops->done)
+		rreq->netfs_ops->done(rreq);
+}
+
+/*
  * Assess the state of a read request and decide what to do next.
  *
  * Note that we're in normal kernel thread context at this point, possibly
···
 		return;
 	}

-	if (rreq->origin == NETFS_DIO_READ ||
-	    rreq->origin == NETFS_READ_GAPS)
+	switch (rreq->origin) {
+	case NETFS_DIO_READ:
+	case NETFS_READ_GAPS:
 		netfs_rreq_assess_dio(rreq);
+		break;
+	case NETFS_READ_SINGLE:
+		netfs_rreq_assess_single(rreq);
+		break;
+	default:
+		break;
+	}
 	task_io_account_read(rreq->transferred);

 	trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
fs/netfs/read_single.c (new file, +202)
···
// SPDX-License-Identifier: GPL-2.0-or-later
/* Single, monolithic object support (e.g. AFS directory).
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/netfs.h>
#include "internal.h"

/**
 * netfs_single_mark_inode_dirty - Mark a single, monolithic object inode dirty
 * @inode: The inode to mark
 *
 * Mark an inode that contains a single, monolithic object as dirty so that its
 * writepages op will get called.  If set, the SINGLE_NO_UPLOAD flag indicates
 * that the object will only be written to the cache and not uploaded (e.g. AFS
 * directory contents).
 */
void netfs_single_mark_inode_dirty(struct inode *inode)
{
	struct netfs_inode *ictx = netfs_inode(inode);
	bool cache_only = test_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &ictx->flags);
	bool caching = fscache_cookie_enabled(netfs_i_cookie(netfs_inode(inode)));

	if (cache_only && !caching)
		return;

	mark_inode_dirty(inode);

	if (caching && !(inode->i_state & I_PINNING_NETFS_WB)) {
		bool need_use = false;

		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_NETFS_WB)) {
			inode->i_state |= I_PINNING_NETFS_WB;
			need_use = true;
		}
		spin_unlock(&inode->i_lock);

		if (need_use)
			fscache_use_cookie(netfs_i_cookie(ictx), true);
	}

}
EXPORT_SYMBOL(netfs_single_mark_inode_dirty);

static int netfs_single_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
{
	return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
}

static void netfs_single_cache_prepare_read(struct netfs_io_request *rreq,
					    struct netfs_io_subrequest *subreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (!cres->ops) {
		subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
		return;
	}
	subreq->source = cres->ops->prepare_read(subreq, rreq->i_size);
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);

}

static void netfs_single_read_cache(struct netfs_io_request *rreq,
				    struct netfs_io_subrequest *subreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	netfs_stat(&netfs_n_rh_read);
	cres->ops->read(cres, subreq->start, &subreq->io_iter, NETFS_READ_HOLE_FAIL,
			netfs_cache_read_terminated, subreq);
}

/*
 * Perform a read to a buffer from the cache or the server.  Only a single
 * subreq is permitted as the object must be fetched in a single transaction.
 */
static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	int ret = 0;

	atomic_set(&rreq->nr_outstanding, 1);

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq) {
		ret = -ENOMEM;
		goto out;
	}

	subreq->source	= NETFS_DOWNLOAD_FROM_SERVER;
	subreq->start	= 0;
	subreq->len	= rreq->len;
	subreq->io_iter	= rreq->buffer.iter;

	atomic_inc(&rreq->nr_outstanding);

	spin_lock_bh(&rreq->lock);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);
	trace_netfs_sreq(subreq, netfs_sreq_trace_added);
	spin_unlock_bh(&rreq->lock);

	netfs_single_cache_prepare_read(rreq, subreq);
	switch (subreq->source) {
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download);
		if (rreq->netfs_ops->prepare_read) {
			ret = rreq->netfs_ops->prepare_read(subreq);
			if (ret < 0)
				goto cancel;
		}

		rreq->netfs_ops->issue_read(subreq);
		rreq->submitted += subreq->len;
		break;
	case NETFS_READ_FROM_CACHE:
		trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
		netfs_single_read_cache(rreq, subreq);
		rreq->submitted += subreq->len;
		ret = 0;
		break;
	default:
		pr_warn("Unexpected single-read source %u\n", subreq->source);
		WARN_ON_ONCE(true);
		ret = -EIO;
		break;
	}

out:
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		netfs_rreq_terminated(rreq);
	return ret;
cancel:
	atomic_dec(&rreq->nr_outstanding);
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
	goto out;
}

/**
 * netfs_read_single - Synchronously read a single blob of pages.
 * @inode: The inode to read from.
 * @file: The file we're using to read or NULL.
 * @iter: The buffer we're reading into.
 *
 * Fulfil a read request for a single monolithic object by drawing data from
 * the cache if possible, or the netfs if not.  The buffer may be larger than
 * the file content; unused beyond the EOF will be zero-filled.  The content
 * will be read with a single I/O request (though this may be retried).
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.  If caching is enabled,
 * the data will be stored as a single object into the cache.
 */
ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter)
{
	struct netfs_io_request *rreq;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;

	rreq = netfs_alloc_request(inode->i_mapping, file, 0, iov_iter_count(iter),
				   NETFS_READ_SINGLE);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);

	ret = netfs_single_begin_cache_read(rreq, ictx);
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
		goto cleanup_free;

	netfs_stat(&netfs_n_rh_read_single);
	trace_netfs_read(rreq, 0, rreq->len, netfs_read_trace_read_single);

	rreq->buffer.iter = *iter;
	netfs_single_dispatch_read(rreq);

	trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
	wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
		    TASK_UNINTERRUPTIBLE);

	ret = rreq->error;
	if (ret == 0)
		ret = rreq->transferred;
	netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
	return ret;

cleanup_free:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
	return ret;
}
EXPORT_SYMBOL(netfs_read_single);
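For context, a minimal sketch of a caller for the read side. It assumes the filesystem has preallocated a folio_queue big enough to hold the object and presents it via the existing iov_iter_folio_queue() constructor; the my_fs_load_dir() wrapper is hypothetical, while netfs_read_single() is the API added above:

/* Hypothetical sketch: load a whole directory blob with one I/O. */
static ssize_t my_fs_load_dir(struct inode *inode, struct file *file,
			      struct folio_queue *fq)
{
	struct iov_iter iter;

	/* Present the preallocated folio_queue as the destination buffer.
	 * It may be bigger than i_size; the tail beyond the EOF is
	 * zero-filled by the read machinery. */
	iov_iter_folio_queue(&iter, ITER_DEST, fq, 0, 0,
			     round_up(i_size_read(inode), PAGE_SIZE));

	/* Draws from the cache if possible, else from the server; if the
	 * data came from the server and caching is enabled, the inode is
	 * marked dirty so the blob later gets written to the cache. */
	return netfs_read_single(inode, file, &iter);
}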
fs/netfs/stats.c (+3 -1)
···
 atomic_t netfs_n_rh_dio_read;
 atomic_t netfs_n_rh_readahead;
 atomic_t netfs_n_rh_read_folio;
+atomic_t netfs_n_rh_read_single;
 atomic_t netfs_n_rh_rreq;
 atomic_t netfs_n_rh_sreq;
 atomic_t netfs_n_rh_download;
···

 int netfs_stats_show(struct seq_file *m, void *v)
 {
-	seq_printf(m, "Reads  : DR=%u RA=%u RF=%u WB=%u WBZ=%u\n",
+	seq_printf(m, "Reads  : DR=%u RA=%u RF=%u RS=%u WB=%u WBZ=%u\n",
 		   atomic_read(&netfs_n_rh_dio_read),
 		   atomic_read(&netfs_n_rh_readahead),
 		   atomic_read(&netfs_n_rh_read_folio),
+		   atomic_read(&netfs_n_rh_read_single),
 		   atomic_read(&netfs_n_rh_write_begin),
 		   atomic_read(&netfs_n_rh_write_zskip));
 	seq_printf(m, "Writes : BW=%u WT=%u DW=%u WP=%u 2C=%u\n",
fs/netfs/write_collect.c (+3 -3)
···
 #define HIT_PENDING		0x01	/* A front op was still pending */
 #define NEED_REASSESS		0x02	/* Need to loop round and reassess */
 #define MADE_PROGRESS		0x04	/* Made progress cleaning up a stream or the folio set */
-#define BUFFERED		0x08	/* The pagecache needs cleaning up */
+#define NEED_UNLOCK		0x08	/* The pagecache needs unlocking */
 #define NEED_RETRY		0x10	/* A front op requests retrying */
 #define SAW_FAILURE		0x20	/* One stream or hit a permanent failure */
···
 	if (wreq->origin == NETFS_WRITEBACK ||
 	    wreq->origin == NETFS_WRITETHROUGH ||
 	    wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE)
-		notes = BUFFERED;
+		notes = NEED_UNLOCK;
 	else
 		notes = 0;
···
 	trace_netfs_collect_state(wreq, wreq->collected_to, notes);

 	/* Unlock any folios that we have now finished with. */
-	if (notes & BUFFERED) {
+	if (notes & NEED_UNLOCK) {
 		if (wreq->cleaned_to < wreq->collected_to)
 			netfs_writeback_unlock_folios(wreq, &notes);
 	} else {
fs/netfs/write_issue.c (+198 -5)
···
 {
 	struct netfs_io_request *wreq;
 	struct netfs_inode *ictx;
-	bool is_buffered = (origin == NETFS_WRITEBACK ||
-			    origin == NETFS_WRITETHROUGH ||
-			    origin == NETFS_PGPRIV2_COPY_TO_CACHE);
+	bool is_cacheable = (origin == NETFS_WRITEBACK ||
+			     origin == NETFS_WRITEBACK_SINGLE ||
+			     origin == NETFS_WRITETHROUGH ||
+			     origin == NETFS_PGPRIV2_COPY_TO_CACHE);

 	wreq = netfs_alloc_request(mapping, file, start, 0, origin);
 	if (IS_ERR(wreq))
···
 	_enter("R=%x", wreq->debug_id);

 	ictx = netfs_inode(wreq->inode);
-	if (is_buffered && netfs_is_cache_enabled(ictx))
+	if (is_cacheable && netfs_is_cache_enabled(ictx))
 		fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));
 	if (rolling_buffer_init(&wreq->buffer, wreq->debug_id, ITER_SOURCE) < 0)
 		goto nomem;
···
 		stream = &wreq->io_streams[s];
 		stream->submit_off = foff;
 		stream->submit_len = flen;
-		if ((stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
+		if (!stream->avail ||
+		    (stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
 		    (stream->source == NETFS_UPLOAD_TO_SERVER &&
 		     fgroup == NETFS_FOLIO_COPY_TO_CACHE)) {
 			stream->submit_off = UINT_MAX;
···
 	_leave(" = %d", error);
 	return error;
 }
+
+/*
+ * Write some of a pending folio data back to the server and/or the cache.
+ */
+static int netfs_write_folio_single(struct netfs_io_request *wreq,
+				    struct folio *folio)
+{
+	struct netfs_io_stream *upload = &wreq->io_streams[0];
+	struct netfs_io_stream *cache = &wreq->io_streams[1];
+	struct netfs_io_stream *stream;
+	size_t iter_off = 0;
+	size_t fsize = folio_size(folio), flen;
+	loff_t fpos = folio_pos(folio);
+	bool to_eof = false;
+	bool no_debug = false;
+
+	_enter("");
+
+	flen = folio_size(folio);
+	if (flen > wreq->i_size - fpos) {
+		flen = wreq->i_size - fpos;
+		folio_zero_segment(folio, flen, fsize);
+		to_eof = true;
+	} else if (flen == wreq->i_size - fpos) {
+		to_eof = true;
+	}
+
+	_debug("folio %zx/%zx", flen, fsize);
+
+	if (!upload->avail && !cache->avail) {
+		trace_netfs_folio(folio, netfs_folio_trace_cancel_store);
+		return 0;
+	}
+
+	if (!upload->construct)
+		trace_netfs_folio(folio, netfs_folio_trace_store);
+	else
+		trace_netfs_folio(folio, netfs_folio_trace_store_plus);
+
+	/* Attach the folio to the rolling buffer. */
+	folio_get(folio);
+	rolling_buffer_append(&wreq->buffer, folio, NETFS_ROLLBUF_PUT_MARK);
+
+	/* Move the submission point forward to allow for write-streaming data
+	 * not starting at the front of the page.  We don't do write-streaming
+	 * with the cache as the cache requires DIO alignment.
+	 *
+	 * Also skip uploading for data that's been read and just needs copying
+	 * to the cache.
+	 */
+	for (int s = 0; s < NR_IO_STREAMS; s++) {
+		stream = &wreq->io_streams[s];
+		stream->submit_off = 0;
+		stream->submit_len = flen;
+		if (!stream->avail) {
+			stream->submit_off = UINT_MAX;
+			stream->submit_len = 0;
+		}
+	}
+
+	/* Attach the folio to one or more subrequests.  For a big folio, we
+	 * could end up with thousands of subrequests if the wsize is small -
+	 * but we might need to wait during the creation of subrequests for
+	 * network resources (eg. SMB credits).
+	 */
+	for (;;) {
+		ssize_t part;
+		size_t lowest_off = ULONG_MAX;
+		int choose_s = -1;
+
+		/* Always add to the lowest-submitted stream first. */
+		for (int s = 0; s < NR_IO_STREAMS; s++) {
+			stream = &wreq->io_streams[s];
+			if (stream->submit_len > 0 &&
+			    stream->submit_off < lowest_off) {
+				lowest_off = stream->submit_off;
+				choose_s = s;
+			}
+		}
+
+		if (choose_s < 0)
+			break;
+		stream = &wreq->io_streams[choose_s];
+
+		/* Advance the iterator(s). */
+		if (stream->submit_off > iter_off) {
+			rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off);
+			iter_off = stream->submit_off;
+		}
+
+		atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
+		stream->submit_extendable_to = fsize - stream->submit_off;
+		part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
+					   stream->submit_len, to_eof);
+		stream->submit_off += part;
+		if (part > stream->submit_len)
+			stream->submit_len = 0;
+		else
+			stream->submit_len -= part;
+		if (part > 0)
+			no_debug = true;
+	}
+
+	wreq->buffer.iter.iov_offset = 0;
+	if (fsize > iter_off)
+		rolling_buffer_advance(&wreq->buffer, fsize - iter_off);
+	atomic64_set(&wreq->issued_to, fpos + fsize);
+
+	if (!no_debug)
+		kdebug("R=%x: No submit", wreq->debug_id);
+	_leave(" = 0");
+	return 0;
+}
+
+/**
+ * netfs_writeback_single - Write back a monolithic payload
+ * @mapping: The mapping to write from
+ * @wbc: Hints from the VM
+ * @iter: Data to write, must be ITER_FOLIOQ.
+ *
+ * Write a monolithic, non-pagecache object back to the server and/or
+ * the cache.
+ */
+int netfs_writeback_single(struct address_space *mapping,
+			   struct writeback_control *wbc,
+			   struct iov_iter *iter)
+{
+	struct netfs_io_request *wreq;
+	struct netfs_inode *ictx = netfs_inode(mapping->host);
+	struct folio_queue *fq;
+	size_t size = iov_iter_count(iter);
+	int ret;
+
+	if (WARN_ON_ONCE(!iov_iter_is_folioq(iter)))
+		return -EIO;
+
+	if (!mutex_trylock(&ictx->wb_lock)) {
+		if (wbc->sync_mode == WB_SYNC_NONE) {
+			netfs_stat(&netfs_n_wb_lock_skip);
+			return 0;
+		}
+		netfs_stat(&netfs_n_wb_lock_wait);
+		mutex_lock(&ictx->wb_lock);
+	}
+
+	wreq = netfs_create_write_req(mapping, NULL, 0, NETFS_WRITEBACK_SINGLE);
+	if (IS_ERR(wreq)) {
+		ret = PTR_ERR(wreq);
+		goto couldnt_start;
+	}
+
+	trace_netfs_write(wreq, netfs_write_trace_writeback);
+	netfs_stat(&netfs_n_wh_writepages);
+
+	if (__test_and_set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
+		wreq->netfs_ops->begin_writeback(wreq);
+
+	for (fq = (struct folio_queue *)iter->folioq; fq; fq = fq->next) {
+		for (int slot = 0; slot < folioq_count(fq); slot++) {
+			struct folio *folio = folioq_folio(fq, slot);
+			size_t part = umin(folioq_folio_size(fq, slot), size);
+
+			_debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to));
+
+			ret = netfs_write_folio_single(wreq, folio);
+			if (ret < 0)
+				goto stop;
+			size -= part;
+			if (size <= 0)
+				goto stop;
+		}
+	}
+
+stop:
+	for (int s = 0; s < NR_IO_STREAMS; s++)
+		netfs_issue_write(wreq, &wreq->io_streams[s]);
+	smp_wmb(); /* Write lists before ALL_QUEUED. */
+	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+	mutex_unlock(&ictx->wb_lock);
+
+	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+	_leave(" = %d", ret);
+	return ret;
+
+couldnt_start:
+	mutex_unlock(&ictx->wb_lock);
+	_leave(" = %d", ret);
+	return ret;
+}
+EXPORT_SYMBOL(netfs_writeback_single);
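For the write side, a hedged sketch of how a filesystem's ->writepages might be wired up to the new entry point. The my_fs_* names and the content_fq field are invented for illustration; iov_iter_folio_queue() is the existing folioq iterator constructor, and the iterator must be ITER_FOLIOQ as netfs_writeback_single() checks:

/* Hypothetical sketch: write a monolithic, non-pagecache object back
 * to the server and/or the cache via netfs_writeback_single(). */
static int my_fs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct my_fs_inode *mi = MY_FS_I(mapping->host);	/* hypothetical */
	struct iov_iter iter;

	/* Present the folio_queue holding the object as an ITER_SOURCE
	 * folioq iterator covering the current object size. */
	iov_iter_folio_queue(&iter, ITER_SOURCE, mi->content_fq, 0, 0,
			     i_size_read(mapping->host));
	return netfs_writeback_single(mapping, wbc, &iter);
}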
include/linux/netfs.h (+10)
···
 #define NETFS_ICTX_UNBUFFERED		1	/* I/O should not use the pagecache */
 #define NETFS_ICTX_WRITETHROUGH	2	/* Write-through caching */
 #define NETFS_ICTX_MODIFIED_ATTR	3	/* Indicate change in mtime/ctime */
+#define NETFS_ICTX_SINGLE_NO_UPLOAD	4	/* Monolithic payload, cache but no upload */
 };

 /*
···
 	NETFS_READAHEAD,		/* This read was triggered by readahead */
 	NETFS_READPAGE,			/* This read is a synchronous read */
 	NETFS_READ_GAPS,		/* This read is a synchronous read to fill gaps */
+	NETFS_READ_SINGLE,		/* This read should be treated as a single object */
 	NETFS_READ_FOR_WRITE,		/* This read is to prepare a write */
 	NETFS_DIO_READ,			/* This is a direct I/O read */
 	NETFS_WRITEBACK,		/* This write was triggered by writepages */
+	NETFS_WRITEBACK_SINGLE,		/* This monolithic write was triggered by writepages */
 	NETFS_WRITETHROUGH,		/* This write was made by netfs_perform_write() */
 	NETFS_UNBUFFERED_WRITE,		/* This is an unbuffered write */
 	NETFS_DIO_WRITE,		/* This is a direct I/O write */
···
 ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
					    struct netfs_group *netfs_group);
 ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+/* Single, monolithic object read/write API. */
+void netfs_single_mark_inode_dirty(struct inode *inode);
+ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter);
+int netfs_writeback_single(struct address_space *mapping,
+			   struct writeback_control *wbc,
+			   struct iov_iter *iter);

 /* Address operations API */
 struct readahead_control;
include/trace/events/netfs.h (+4)
···
 	EM(netfs_read_trace_readahead,		"READAHEAD")	\
 	EM(netfs_read_trace_readpage,		"READPAGE ")	\
 	EM(netfs_read_trace_read_gaps,		"READ-GAPS")	\
+	EM(netfs_read_trace_read_single,	"READ-SNGL")	\
 	EM(netfs_read_trace_prefetch_for_write,	"PREFETCHW")	\
 	E_(netfs_read_trace_write_begin,	"WRITEBEGN")
···
 	EM(NETFS_READAHEAD,			"RA")		\
 	EM(NETFS_READPAGE,			"RP")		\
 	EM(NETFS_READ_GAPS,			"RG")		\
+	EM(NETFS_READ_SINGLE,			"R1")		\
 	EM(NETFS_READ_FOR_WRITE,		"RW")		\
 	EM(NETFS_DIO_READ,			"DR")		\
 	EM(NETFS_WRITEBACK,			"WB")		\
+	EM(NETFS_WRITEBACK_SINGLE,		"W1")		\
 	EM(NETFS_WRITETHROUGH,			"WT")		\
 	EM(NETFS_UNBUFFERED_WRITE,		"UW")		\
 	EM(NETFS_DIO_WRITE,			"DW")		\
···
 	EM(netfs_rreq_trace_assess,		"ASSESS ")	\
 	EM(netfs_rreq_trace_copy,		"COPY   ")	\
 	EM(netfs_rreq_trace_collect,		"COLLECT")	\
+	EM(netfs_rreq_trace_dirty,		"DIRTY  ")	\
 	EM(netfs_rreq_trace_done,		"DONE   ")	\
 	EM(netfs_rreq_trace_free,		"FREE   ")	\
 	EM(netfs_rreq_trace_redirty,		"REDIRTY")	\