Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge patch series "netfs: Miscellaneous fixes"

David Howells <dhowells@redhat.com> says:

Here are some miscellaneous fixes and changes for netfslib, if you could
pull them:

(1) Fix an oops in write-retry due to mis-resetting the I/O iterator.

(2) Fix the recording of transferred bytes for short DIO reads.

(3) Fix a request's work item to not require a reference, thereby avoiding
the need to get rid of it in BH/IRQ context.

(4) Fix waiting and waking to be consistent about the waitqueue used.

* patches from https://lore.kernel.org/20250519090707.2848510-1-dhowells@redhat.com:
netfs: Fix wait/wake to be consistent about the waitqueue used
netfs: Fix the request's work item to not require a ref
netfs: Fix setting of transferred bytes with short DIO reads
netfs: Fix oops in write-retry from mis-resetting the subreq iterator

Link: https://lore.kernel.org/20250519090707.2848510-1-dhowells@redhat.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
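
The common thread in the per-filesystem churn below is the removal of the was_async argument from the termination paths. Fix (3) is what makes it unnecessary: a request's cleanup is now always deferred to a workqueue, so no completion path has to free things directly from BH/IRQ context. Abridged from the include/linux/netfs.h hunk further down:

	/* Before: every completion had to say whether it ran in BH/IRQ context. */
	typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
					      bool was_async);
	void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
					       bool was_async);

	/* After: cleanup is always punted to a workqueue, so the flag is gone. */
	typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error);
	void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error);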

+425 -379
+1 -1
fs/9p/vfs_addr.c
···
 	len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
 	if (len > 0)
 		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
-	netfs_write_subrequest_terminated(subreq, len ?: err, false);
+	netfs_write_subrequest_terminated(subreq, len ?: err);
 }

 /**
+4 -4
fs/afs/write.c
···

 #if 0 // Error injection
 	if (subreq->debug_index == 3)
-		return netfs_write_subrequest_terminated(subreq, -ENOANO, false);
+		return netfs_write_subrequest_terminated(subreq, -ENOANO);

 	if (!subreq->retry_count) {
 		set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
-		return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
+		return netfs_write_subrequest_terminated(subreq, -EAGAIN);
 	}
 #endif

 	op = afs_alloc_operation(wreq->netfs_priv, vnode->volume);
 	if (IS_ERR(op))
-		return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
+		return netfs_write_subrequest_terminated(subreq, -EAGAIN);

 	afs_op_set_vnode(op, 0, vnode);
 	op->file[0].dv_delta = 1;
···
 		break;
 	}

-	netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false);
+	netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len);
 }

 void afs_issue_write(struct netfs_io_subrequest *subreq)
+8 -8
fs/cachefiles/io.c
···
 			ret = -ESTALE;
 		}

-		ki->term_func(ki->term_func_priv, ret, ki->was_async);
+		ki->term_func(ki->term_func_priv, ret);
 	}

 	cachefiles_put_kiocb(ki);
···

 presubmission_error:
 	if (term_func)
-		term_func(term_func_priv, ret < 0 ? ret : skipped, false);
+		term_func(term_func_priv, ret < 0 ? ret : skipped);
 	return ret;
 }

···
 	atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing);
 	set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags);
 	if (ki->term_func)
-		ki->term_func(ki->term_func_priv, ret, ki->was_async);
+		ki->term_func(ki->term_func_priv, ret);
 	cachefiles_put_kiocb(ki);
 }

···
 	ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
 	if (!ki) {
 		if (term_func)
-			term_func(term_func_priv, -ENOMEM, false);
+			term_func(term_func_priv, -ENOMEM);
 		return -ENOMEM;
 	}

···
 {
 	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) {
 		if (term_func)
-			term_func(term_func_priv, -ENOBUFS, false);
+			term_func(term_func_priv, -ENOBUFS);
 		trace_netfs_sreq(term_func_priv, netfs_sreq_trace_cache_nowrite);
 		return -ENOBUFS;
 	}
···
 	pre = CACHEFILES_DIO_BLOCK_SIZE - off;
 	if (pre >= len) {
 		fscache_count_dio_misfit();
-		netfs_write_subrequest_terminated(subreq, len, false);
+		netfs_write_subrequest_terminated(subreq, len);
 		return;
 	}
 	subreq->transferred += pre;
···
 		len -= post;
 		if (len == 0) {
 			fscache_count_dio_misfit();
-			netfs_write_subrequest_terminated(subreq, post, false);
+			netfs_write_subrequest_terminated(subreq, post);
 			return;
 		}
 		iov_iter_truncate(&subreq->io_iter, len);
···
 				  &start, &len, len, true);
 	cachefiles_end_secure(cache, saved_cred);
 	if (ret < 0) {
-		netfs_write_subrequest_terminated(subreq, ret, false);
+		netfs_write_subrequest_terminated(subreq, ret);
 		return;
 	}

+1 -1
fs/ceph/addr.c
···
 	folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
 }

-static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
+static void ceph_fscache_write_terminated(void *priv, ssize_t error)
 {
 	struct inode *inode = priv;

+2 -4
fs/erofs/fscache.c
···
 	erofs_fscache_req_put(req);
 }

-static void erofs_fscache_req_end_io(void *priv,
-				     ssize_t transferred_or_error, bool was_async)
+static void erofs_fscache_req_end_io(void *priv, ssize_t transferred_or_error)
 {
 	struct erofs_fscache_io *io = priv;
 	struct erofs_fscache_rq *req = io->private;
···
 	struct bio_vec bvecs[BIO_MAX_VECS];
 };

-static void erofs_fscache_bio_endio(void *priv,
-				    ssize_t transferred_or_error, bool was_async)
+static void erofs_fscache_bio_endio(void *priv, ssize_t transferred_or_error)
 {
 	struct erofs_fscache_bio *io = priv;

+15 -17
fs/netfs/buffered_read.c
···
 		if (ret < 0) {
 			subreq->error = ret;
 			/* Not queued - release both refs. */
-			netfs_put_subrequest(subreq, false,
+			netfs_put_subrequest(subreq,
 					     netfs_sreq_trace_put_cancel);
-			netfs_put_subrequest(subreq, false,
+			netfs_put_subrequest(subreq,
 					     netfs_sreq_trace_put_cancel);
 			break;
 		}
···
 			subreq->error = ret;
 			trace_netfs_sreq(subreq, netfs_sreq_trace_cancel);
 			/* Not queued - release both refs. */
-			netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
-			netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+			netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
+			netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
 			break;
 		}
 		size -= slice;
···
 	if (unlikely(size > 0)) {
 		smp_wmb(); /* Write lists before ALL_QUEUED. */
 		set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
-		netfs_wake_read_collector(rreq);
+		netfs_wake_collector(rreq);
 	}

 	/* Defer error return as we may need to wait for outstanding I/O. */
···
 		goto cleanup_free;
 	netfs_read_to_pagecache(rreq, ractl);

-	netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
-	return;
+	return netfs_put_request(rreq, netfs_rreq_trace_put_return);

 cleanup_free:
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
-	return;
+	return netfs_put_request(rreq, netfs_rreq_trace_put_failed);
 }
 EXPORT_SYMBOL(netfs_readahead);

···
 		folio_mark_uptodate(folio);
 	}
 	folio_unlock(folio);
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+	netfs_put_request(rreq, netfs_rreq_trace_put_return);
 	return ret < 0 ? ret : 0;

 discard:
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+	netfs_put_request(rreq, netfs_rreq_trace_put_discard);
 alloc_error:
 	folio_unlock(folio);
 	return ret;
···

 	netfs_read_to_pagecache(rreq, NULL);
 	ret = netfs_wait_for_read(rreq);
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+	netfs_put_request(rreq, netfs_rreq_trace_put_return);
 	return ret < 0 ? ret : 0;

 discard:
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+	netfs_put_request(rreq, netfs_rreq_trace_put_discard);
 alloc_error:
 	folio_unlock(folio);
 	return ret;
···
 	ret = netfs_wait_for_read(rreq);
 	if (ret < 0)
 		goto error;
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+	netfs_put_request(rreq, netfs_rreq_trace_put_return);

 have_folio:
 	ret = folio_wait_private_2_killable(folio);
···
 	return 0;

 error_put:
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+	netfs_put_request(rreq, netfs_rreq_trace_put_failed);
 error:
 	if (folio) {
 		folio_unlock(folio);
···

 	netfs_read_to_pagecache(rreq, NULL);
 	ret = netfs_wait_for_read(rreq);
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+	netfs_put_request(rreq, netfs_rreq_trace_put_return);
 	return ret < 0 ? ret : 0;

 error_put:
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+	netfs_put_request(rreq, netfs_rreq_trace_put_discard);
 error:
 	_leave(" = %d", ret);
 	return ret;
+1 -1
fs/netfs/buffered_write.c
···
 		wbc_detach_inode(&wbc);
 		if (ret2 == -EIOCBQUEUED)
 			return ret2;
-		if (ret == 0)
+		if (ret == 0 && ret2 < 0)
 			ret = ret2;
 	}

+5 -5
fs/netfs/direct_read.c
···
 		if (rreq->netfs_ops->prepare_read) {
 			ret = rreq->netfs_ops->prepare_read(subreq);
 			if (ret < 0) {
-				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+				netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
 				break;
 			}
 		}
···
 		rreq->netfs_ops->issue_read(subreq);

 		if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
-			netfs_wait_for_pause(rreq);
+			netfs_wait_for_paused_read(rreq);
 		if (test_bit(NETFS_RREQ_FAILED, &rreq->flags))
 			break;
 		cond_resched();
···
 	if (unlikely(size > 0)) {
 		smp_wmb(); /* Write lists before ALL_QUEUED. */
 		set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
-		netfs_wake_read_collector(rreq);
+		netfs_wake_collector(rreq);
 	}

 	return ret;
···
 	ret = netfs_dispatch_unbuffered_reads(rreq);

 	if (!rreq->submitted) {
-		netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
+		netfs_put_request(rreq, netfs_rreq_trace_put_no_submit);
 		inode_dio_end(rreq->inode);
 		ret = 0;
 		goto out;
···
 	}

 out:
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+	netfs_put_request(rreq, netfs_rreq_trace_put_return);
 	if (ret > 0)
 		orig_count -= ret;
 	return ret;
+5 -7
fs/netfs/direct_write.c
···
 	}

 	__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
+	if (async)
+		__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);

 	/* Copy the data into the bounce buffer and encrypt it. */
 	// TODO
···

 	if (!async) {
 		trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
-		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
-			    TASK_UNINTERRUPTIBLE);
-		ret = wreq->error;
-		if (ret == 0) {
-			ret = wreq->transferred;
+		ret = netfs_wait_for_write(wreq);
+		if (ret > 0)
 			iocb->ki_pos += ret;
-		}
 	} else {
 		ret = -EIOCBQUEUED;
 	}

 out:
-	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+	netfs_put_request(wreq, netfs_rreq_trace_put_return);
 	return ret;
 }
 EXPORT_SYMBOL(netfs_unbuffered_write_iter_locked);
+4 -6
fs/netfs/fscache_io.c
···
 /*
  * Deal with the completion of writing the data to the cache.
  */
-static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
-			      bool was_async)
+static void fscache_wreq_done(void *priv, ssize_t transferred_or_error)
 {
 	struct fscache_write_request *wreq = priv;

···
 			  wreq->set_bits);

 	if (wreq->term_func)
-		wreq->term_func(wreq->term_func_priv, transferred_or_error,
-				was_async);
+		wreq->term_func(wreq->term_func_priv, transferred_or_error);
 	fscache_end_operation(&wreq->cache_resources);
 	kfree(wreq);
 }
···
 	return;

 abandon_end:
-	return fscache_wreq_done(wreq, ret, false);
+	return fscache_wreq_done(wreq, ret);
 abandon_free:
 	kfree(wreq);
 abandon:
 	if (using_pgpriv2)
 		fscache_clear_page_bits(mapping, start, len, cond);
 	if (term_func)
-		term_func(term_func_priv, ret, false);
+		term_func(term_func_priv, ret);
 }
 EXPORT_SYMBOL(__fscache_write_to_cache);

+31 -11
fs/netfs/internal.h
···
 /*
  * buffered_read.c
  */
-void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async);
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
 int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 			     size_t offset, size_t len);

···
 struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
 					    enum netfs_folioq_trace trace);
 void netfs_reset_iter(struct netfs_io_subrequest *subreq);
+void netfs_wake_collector(struct netfs_io_request *rreq);
+void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq);
+void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
+				       struct netfs_io_stream *stream);
+ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
+ssize_t netfs_wait_for_write(struct netfs_io_request *rreq);
+void netfs_wait_for_paused_read(struct netfs_io_request *rreq);
+void netfs_wait_for_paused_write(struct netfs_io_request *rreq);

 /*
  * objects.c
···
 			loff_t start, size_t len,
 			enum netfs_io_origin origin);
 void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
-void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
-void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
-		       enum netfs_rreq_ref_trace what);
+void netfs_clear_subrequests(struct netfs_io_request *rreq);
+void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
 struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);

 static inline void netfs_see_request(struct netfs_io_request *rreq,
···
 /*
  * read_collect.c
  */
+bool netfs_read_collection(struct netfs_io_request *rreq);
 void netfs_read_collection_worker(struct work_struct *work);
-void netfs_wake_read_collector(struct netfs_io_request *rreq);
-void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async);
-ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
-void netfs_wait_for_pause(struct netfs_io_request *rreq);
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);

 /*
  * read_pgpriv2.c
···
  * write_collect.c
  */
 int netfs_folio_written_back(struct folio *folio);
+bool netfs_write_collection(struct netfs_io_request *wreq);
 void netfs_write_collection_worker(struct work_struct *work);
-void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);

 /*
  * write_issue.c
···
 int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
 			       struct folio *folio, size_t copied, bool to_page_end,
 			       struct folio **writethrough_cache);
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-			   struct folio *writethrough_cache);
+ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			       struct folio *writethrough_cache);
 int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);

 /*
···
 	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
 	    refcount_sub_and_test(nr, &netfs_group->ref))
 		netfs_group->free(netfs_group);
+}
+
+/*
+ * Clear and wake up a NETFS_RREQ_* flag bit on a request.
+ */
+static inline void netfs_wake_rreq_flag(struct netfs_io_request *rreq,
+					unsigned int rreq_flag,
+					enum netfs_rreq_trace trace)
+{
+	if (test_bit(rreq_flag, &rreq->flags)) {
+		trace_netfs_rreq(rreq, trace);
+		clear_bit_unlock(rreq_flag, &rreq->flags);
+		smp_mb__after_atomic(); /* Set flag before task state */
+		wake_up(&rreq->waitq);
+	}
 }

 /*
+218
fs/netfs/misc.c
···
 	return true;
 }
 EXPORT_SYMBOL(netfs_release_folio);
+
+/*
+ * Wake the collection work item.
+ */
+void netfs_wake_collector(struct netfs_io_request *rreq)
+{
+	if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) &&
+	    !test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) {
+		queue_work(system_unbound_wq, &rreq->work);
+	} else {
+		trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
+		wake_up(&rreq->waitq);
+	}
+}
+
+/*
+ * Mark a subrequest as no longer being in progress and, if need be, wake the
+ * collector.
+ */
+void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq)
+{
+	struct netfs_io_request *rreq = subreq->rreq;
+	struct netfs_io_stream *stream = &rreq->io_streams[subreq->stream_nr];
+
+	clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+	smp_mb__after_atomic(); /* Clear IN_PROGRESS before task state */
+
+	/* If we are at the head of the queue, wake up the collector. */
+	if (list_is_first(&subreq->rreq_link, &stream->subrequests) ||
+	    test_bit(NETFS_RREQ_RETRYING, &rreq->flags))
+		netfs_wake_collector(rreq);
+}
+
+/*
+ * Wait for all outstanding I/O in a stream to quiesce.
+ */
+void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
+				       struct netfs_io_stream *stream)
+{
+	struct netfs_io_subrequest *subreq;
+	DEFINE_WAIT(myself);
+
+	list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+		if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
+			continue;
+
+		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
+		for (;;) {
+			prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+			if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
+				break;
+
+			trace_netfs_sreq(subreq, netfs_sreq_trace_wait_for);
+			schedule();
+			trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
+		}
+	}
+
+	finish_wait(&rreq->waitq, &myself);
+}
+
+/*
+ * Perform collection in app thread if not offloaded to workqueue.
+ */
+static int netfs_collect_in_app(struct netfs_io_request *rreq,
+				bool (*collector)(struct netfs_io_request *rreq))
+{
+	bool need_collect = false, inactive = true;
+
+	for (int i = 0; i < NR_IO_STREAMS; i++) {
+		struct netfs_io_subrequest *subreq;
+		struct netfs_io_stream *stream = &rreq->io_streams[i];
+
+		if (!stream->active)
+			continue;
+		inactive = false;
+		trace_netfs_collect_stream(rreq, stream);
+		subreq = list_first_entry_or_null(&stream->subrequests,
+						  struct netfs_io_subrequest,
+						  rreq_link);
+		if (subreq &&
+		    (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
+		     test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
+			need_collect = true;
+			break;
+		}
+	}
+
+	if (!need_collect && !inactive)
+		return 0; /* Sleep */
+
+	__set_current_state(TASK_RUNNING);
+	if (collector(rreq)) {
+		/* Drop the ref from the NETFS_RREQ_IN_PROGRESS flag. */
+		netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
+		return 1; /* Done */
+	}
+
+	if (inactive) {
+		WARN(true, "Failed to collect inactive req R=%08x\n",
+		     rreq->debug_id);
+		cond_resched();
+	}
+	return 2; /* Again */
+}
+
+/*
+ * Wait for a request to complete, successfully or otherwise.
+ */
+static ssize_t netfs_wait_for_request(struct netfs_io_request *rreq,
+				      bool (*collector)(struct netfs_io_request *rreq))
+{
+	DEFINE_WAIT(myself);
+	ssize_t ret;
+
+	for (;;) {
+		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
+		prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+		if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+			switch (netfs_collect_in_app(rreq, collector)) {
+			case 0:
+				break;
+			case 1:
+				goto all_collected;
+			case 2:
+				continue;
+			}
+		}
+
+		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+			break;
+
+		schedule();
+		trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
+	}
+
+all_collected:
+	finish_wait(&rreq->waitq, &myself);
+
+	ret = rreq->error;
+	if (ret == 0) {
+		ret = rreq->transferred;
+		switch (rreq->origin) {
+		case NETFS_DIO_READ:
+		case NETFS_DIO_WRITE:
+		case NETFS_READ_SINGLE:
+		case NETFS_UNBUFFERED_WRITE:
+			break;
+		default:
+			if (rreq->submitted < rreq->len) {
+				trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+				ret = -EIO;
+			}
+			break;
+		}
+	}
+
+	return ret;
+}
+
+ssize_t netfs_wait_for_read(struct netfs_io_request *rreq)
+{
+	return netfs_wait_for_request(rreq, netfs_read_collection);
+}
+
+ssize_t netfs_wait_for_write(struct netfs_io_request *rreq)
+{
+	return netfs_wait_for_request(rreq, netfs_write_collection);
+}
+
+/*
+ * Wait for a paused operation to unpause or complete in some manner.
+ */
+static void netfs_wait_for_pause(struct netfs_io_request *rreq,
+				 bool (*collector)(struct netfs_io_request *rreq))
+{
+	DEFINE_WAIT(myself);
+
+	trace_netfs_rreq(rreq, netfs_rreq_trace_wait_pause);
+
+	for (;;) {
+		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
+		prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+		if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+			switch (netfs_collect_in_app(rreq, collector)) {
+			case 0:
+				break;
+			case 1:
+				goto all_collected;
+			case 2:
+				continue;
+			}
+		}
+
+		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) ||
+		    !test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
+			break;
+
+		schedule();
+		trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
+	}
+
+all_collected:
+	finish_wait(&rreq->waitq, &myself);
+}
+
+void netfs_wait_for_paused_read(struct netfs_io_request *rreq)
+{
+	return netfs_wait_for_pause(rreq, netfs_read_collection);
+}
+
+void netfs_wait_for_paused_write(struct netfs_io_request *rreq)
+{
+	return netfs_wait_for_pause(rreq, netfs_write_collection);
+}
+22 -23
fs/netfs/objects.c
···
 #include <linux/delay.h>
 #include "internal.h"

+static void netfs_free_request(struct work_struct *work);
+
 /*
  * Allocate an I/O request and initialise it.
  */
···
 	}

 	memset(rreq, 0, kmem_cache_size(cache));
+	INIT_WORK(&rreq->cleanup_work, netfs_free_request);
 	rreq->start	= start;
 	rreq->len	= len;
 	rreq->origin	= origin;
···
 	INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
 	INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
 	init_waitqueue_head(&rreq->waitq);
-	refcount_set(&rreq->ref, 1);
+	refcount_set(&rreq->ref, 2);

 	if (origin == NETFS_READAHEAD ||
 	    origin == NETFS_READPAGE ||
···
 	}

 	atomic_inc(&ctx->io_count);
-	trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
+	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), netfs_rreq_trace_new);
 	netfs_proc_add_rreq(rreq);
 	netfs_stat(&netfs_n_rh_rreq);
 	return rreq;
···
 	trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
 }

-void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
+void netfs_clear_subrequests(struct netfs_io_request *rreq)
 {
 	struct netfs_io_subrequest *subreq;
 	struct netfs_io_stream *stream;
···
 			subreq = list_first_entry(&stream->subrequests,
 						  struct netfs_io_subrequest, rreq_link);
 			list_del(&subreq->rreq_link);
-			netfs_put_subrequest(subreq, was_async,
-					     netfs_sreq_trace_put_clear);
+			netfs_put_subrequest(subreq, netfs_sreq_trace_put_clear);
 		}
 	}
 }
···
 static void netfs_free_request(struct work_struct *work)
 {
 	struct netfs_io_request *rreq =
-		container_of(work, struct netfs_io_request, work);
+		container_of(work, struct netfs_io_request, cleanup_work);
 	struct netfs_inode *ictx = netfs_inode(rreq->inode);
 	unsigned int i;

 	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+
+	/* Cancel/flush the result collection worker.  That does not carry a
+	 * ref of its own, so we must wait for it somewhere.
+	 */
+	cancel_work_sync(&rreq->work);
+
 	netfs_proc_del_rreq(rreq);
-	netfs_clear_subrequests(rreq, false);
+	netfs_clear_subrequests(rreq);
 	if (rreq->netfs_ops->free_request)
 		rreq->netfs_ops->free_request(rreq);
 	if (rreq->cache_resources.ops)
···
 	call_rcu(&rreq->rcu, netfs_free_request_rcu);
 }

-void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
-		       enum netfs_rreq_ref_trace what)
+void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
 {
 	unsigned int debug_id;
 	bool dead;
···
 		debug_id = rreq->debug_id;
 		dead = __refcount_dec_and_test(&rreq->ref, &r);
 		trace_netfs_rreq_ref(debug_id, r - 1, what);
-		if (dead) {
-			if (was_async) {
-				rreq->work.func = netfs_free_request;
-				if (!queue_work(system_unbound_wq, &rreq->work))
-					WARN_ON(1);
-			} else {
-				netfs_free_request(&rreq->work);
-			}
-		}
+		if (dead)
+			WARN_ON(!queue_work(system_unbound_wq, &rreq->cleanup_work));
 	}
 }
···
 			   what);
 }

-static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
-				  bool was_async)
+static void netfs_free_subrequest(struct netfs_io_subrequest *subreq)
 {
 	struct netfs_io_request *rreq = subreq->rreq;

···
 		rreq->netfs_ops->free_subrequest(subreq);
 	mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
 	netfs_stat_d(&netfs_n_rh_sreq);
-	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
+	netfs_put_request(rreq, netfs_rreq_trace_put_subreq);
 }

-void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
 			  enum netfs_sreq_ref_trace what)
 {
 	unsigned int debug_index = subreq->debug_index;
···
 	dead = __refcount_dec_and_test(&subreq->ref, &r);
 	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
 	if (dead)
-		netfs_free_subrequest(subreq, was_async);
+		netfs_free_subrequest(subreq);
 }
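
Read together, the objects.c hunks above encode the lifetime rule that fix (3) relies on. As a sketch (the trace names are from the diff; the flow annotations are editorial):

	/*
	 * Request lifetime as implied by the hunks above:
	 *
	 *   netfs_alloc_request()
	 *       refcount_set(&rreq->ref, 2);
	 *       - one ref belongs to the caller
	 *       - one ref is owned by the NETFS_RREQ_IN_PROGRESS flag
	 *
	 *   collector clears NETFS_RREQ_IN_PROGRESS
	 *       - whoever clears the flag inherits its ref and drops it
	 *         (netfs_rreq_trace_put_work_ip)
	 *
	 *   final netfs_put_request()
	 *       - always queues rreq->cleanup_work; netfs_free_request()
	 *         then cancel_work_sync()s the collector work item, so the
	 *         collector itself never needs to hold a ref and no caller
	 *         ever frees a request from BH/IRQ context
	 */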
+24 -154
fs/netfs/read_collect.c
···
 			stream->need_retry = true;
 			notes |= NEED_RETRY | MADE_PROGRESS;
 			break;
+		} else if (test_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags)) {
+			notes |= MADE_PROGRESS;
 		} else {
 			if (!stream->failed)
-				stream->transferred = stream->collected_to - rreq->start;
+				stream->transferred += transferred;
+			if (front->transferred < front->len)
+				set_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags);
 			notes |= MADE_PROGRESS;
 		}

···
 					 struct netfs_io_subrequest, rreq_link);
 		stream->front = front;
 		spin_unlock(&rreq->lock);
-		netfs_put_subrequest(remove, false,
+		netfs_put_subrequest(remove,
 				     notes & ABANDON_SREQ ?
 				     netfs_sreq_trace_put_abandon :
 				     netfs_sreq_trace_put_done);
···

 	if (notes & NEED_RETRY)
 		goto need_retry;
-	if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &rreq->flags)) {
-		trace_netfs_rreq(rreq, netfs_rreq_trace_unpause);
-		clear_bit_unlock(NETFS_RREQ_PAUSE, &rreq->flags);
-		smp_mb__after_atomic(); /* Set PAUSE before task state */
-		wake_up(&rreq->waitq);
-	}
-
 	if (notes & MADE_PROGRESS) {
+		netfs_wake_rreq_flag(rreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);
 		//cond_resched();
 		goto reassess;
 	}
···
  */
 static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
 {
-	struct netfs_io_subrequest *subreq;
-	struct netfs_io_stream *stream = &rreq->io_streams[0];
 	unsigned int i;
-
-	/* Collect unbuffered reads and direct reads, adding up the transfer
-	 * sizes until we find the first short or failed subrequest.
-	 */
-	list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
-		rreq->transferred += subreq->transferred;
-
-		if (subreq->transferred < subreq->len ||
-		    test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
-			rreq->error = subreq->error;
-			break;
-		}
-	}

 	if (rreq->origin == NETFS_DIO_READ) {
 		for (i = 0; i < rreq->direct_bv_count; i++) {
···
  * Note that we're in normal kernel thread context at this point, possibly
  * running on a workqueue.
  */
-static void netfs_read_collection(struct netfs_io_request *rreq)
+bool netfs_read_collection(struct netfs_io_request *rreq)
 {
 	struct netfs_io_stream *stream = &rreq->io_streams[0];

···
 	 * queue is empty.
 	 */
 	if (!test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags))
-		return;
+		return false;
 	smp_rmb(); /* Read ALL_QUEUED before subreq lists. */

 	if (!list_empty(&stream->subrequests))
-		return;
+		return false;

 	/* Okay, declare that all I/O is complete. */
 	rreq->transferred = stream->transferred;
···
 	}
 	task_io_account_read(rreq->transferred);

-	trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
-	clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+	netfs_wake_rreq_flag(rreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+	/* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */

 	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
-	netfs_clear_subrequests(rreq, false);
+	netfs_clear_subrequests(rreq);
 	netfs_unlock_abandoned_read_pages(rreq);
 	if (unlikely(rreq->copy_to_cache))
 		netfs_pgpriv2_end_copy_to_cache(rreq);
+	return true;
 }

 void netfs_read_collection_worker(struct work_struct *work)
···
 	struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);

 	netfs_see_request(rreq, netfs_rreq_trace_see_work);
-	if (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-		netfs_read_collection(rreq);
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_work);
-}
-
-/*
- * Wake the collection work item.
- */
-void netfs_wake_read_collector(struct netfs_io_request *rreq)
-{
-	if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) &&
-	    !test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) {
-		if (!work_pending(&rreq->work)) {
-			netfs_get_request(rreq, netfs_rreq_trace_get_work);
-			if (!queue_work(system_unbound_wq, &rreq->work))
-				netfs_put_request(rreq, true, netfs_rreq_trace_put_work_nq);
-		}
-	} else {
-		trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
-		wake_up(&rreq->waitq);
+	if (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)) {
+		if (netfs_read_collection(rreq))
+			/* Drop the ref from the IN_PROGRESS flag. */
+			netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
+		else
+			netfs_see_request(rreq, netfs_rreq_trace_see_work_complete);
 	}
 }
···
 	    list_is_first(&subreq->rreq_link, &stream->subrequests)
 	    ) {
 		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
-		netfs_wake_read_collector(rreq);
+		netfs_wake_collector(rreq);
 	}
 }
 EXPORT_SYMBOL(netfs_read_subreq_progress);
···
 void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
 {
 	struct netfs_io_request *rreq = subreq->rreq;
-	struct netfs_io_stream *stream = &rreq->io_streams[0];

 	switch (subreq->source) {
 	case NETFS_READ_FROM_CACHE:
···
 	}

 	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
-	clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
-	smp_mb__after_atomic(); /* Clear IN_PROGRESS before task state */
-
-	/* If we are at the head of the queue, wake up the collector. */
-	if (list_is_first(&subreq->rreq_link, &stream->subrequests) ||
-	    test_bit(NETFS_RREQ_RETRYING, &rreq->flags))
-		netfs_wake_read_collector(rreq);
-
-	netfs_put_subrequest(subreq, true, netfs_sreq_trace_put_terminated);
+	netfs_subreq_clear_in_progress(subreq);
+	netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
 }
 EXPORT_SYMBOL(netfs_read_subreq_terminated);

 /*
  * Handle termination of a read from the cache.
  */
-void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async)
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error)
 {
 	struct netfs_io_subrequest *subreq = priv;

···
 		subreq->error = transferred_or_error;
 	}
 	netfs_read_subreq_terminated(subreq);
-}
-
-/*
- * Wait for the read operation to complete, successfully or otherwise.
- */
-ssize_t netfs_wait_for_read(struct netfs_io_request *rreq)
-{
-	struct netfs_io_subrequest *subreq;
-	struct netfs_io_stream *stream = &rreq->io_streams[0];
-	DEFINE_WAIT(myself);
-	ssize_t ret;
-
-	for (;;) {
-		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
-		prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
-
-		subreq = list_first_entry_or_null(&stream->subrequests,
-						  struct netfs_io_subrequest, rreq_link);
-		if (subreq &&
-		    (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
-		     test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
-			__set_current_state(TASK_RUNNING);
-			netfs_read_collection(rreq);
-			continue;
-		}
-
-		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-			break;
-
-		schedule();
-		trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
-	}
-
-	finish_wait(&rreq->waitq, &myself);
-
-	ret = rreq->error;
-	if (ret == 0) {
-		ret = rreq->transferred;
-		switch (rreq->origin) {
-		case NETFS_DIO_READ:
-		case NETFS_READ_SINGLE:
-			ret = rreq->transferred;
-			break;
-		default:
-			if (rreq->submitted < rreq->len) {
-				trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
-				ret = -EIO;
-			}
-			break;
-		}
-	}
-
-	return ret;
-}
-
-/*
- * Wait for a paused read operation to unpause or complete in some manner.
- */
-void netfs_wait_for_pause(struct netfs_io_request *rreq)
-{
-	struct netfs_io_subrequest *subreq;
-	struct netfs_io_stream *stream = &rreq->io_streams[0];
-	DEFINE_WAIT(myself);
-
-	trace_netfs_rreq(rreq, netfs_rreq_trace_wait_pause);
-
-	for (;;) {
-		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
-		prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
-
-		if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
-			subreq = list_first_entry_or_null(&stream->subrequests,
-							  struct netfs_io_subrequest, rreq_link);
-			if (subreq &&
-			    (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
-			     test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
-				__set_current_state(TASK_RUNNING);
-				netfs_read_collection(rreq);
-				continue;
-			}
-		}
-
-		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) ||
-		    !test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
-			break;
-
-		schedule();
-		trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
-	}
-
-	finish_wait(&rreq->waitq, &myself);
 }
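
The transfer-accounting hunk at the top of this file is fix (2). A hypothetical walk-through (the sizes are illustrative, not from the patch):

	/*
	 * 64 KiB DIO read issued as two 32 KiB subrequests:
	 *
	 *   subreq 0: len 32768, transferred 32768
	 *       -> stream->transferred += 32768 (now 32768; not short, so
	 *          NETFS_RREQ_SHORT_TRANSFER stays clear)
	 *   subreq 1: len 32768, transferred 8192
	 *       -> stream->transferred += 8192 (now 40960); the short
	 *          completion sets NETFS_RREQ_SHORT_TRANSFER, so any later
	 *          subrequests only count as progress, without adding
	 *          their bytes
	 *
	 * The old code did stream->transferred = collected_to - start,
	 * i.e. it recorded a span of file positions rather than the bytes
	 * actually transferred, which misreported short DIO reads.
	 */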
+2 -2
fs/netfs/read_pgpriv2.c
···
 	return creq;

 cancel_put:
-	netfs_put_request(creq, false, netfs_rreq_trace_put_return);
+	netfs_put_request(creq, netfs_rreq_trace_put_return);
 cancel:
 	rreq->copy_to_cache = ERR_PTR(-ENOBUFS);
 	clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
···
 	smp_wmb(); /* Write lists before ALL_QUEUED. */
 	set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);

-	netfs_put_request(creq, false, netfs_rreq_trace_put_return);
+	netfs_put_request(creq, netfs_rreq_trace_put_return);
 	creq->copy_to_cache = NULL;
 }

+3 -23
fs/netfs/read_retry.c
···
 					 &stream->subrequests, rreq_link) {
 		trace_netfs_sreq(subreq, netfs_sreq_trace_superfluous);
 		list_del(&subreq->rreq_link);
-		netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
+		netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
 		if (subreq == to)
 			break;
 	}
···
  */
 void netfs_retry_reads(struct netfs_io_request *rreq)
 {
-	struct netfs_io_subrequest *subreq;
 	struct netfs_io_stream *stream = &rreq->io_streams[0];
-	DEFINE_WAIT(myself);

 	netfs_stat(&netfs_n_rh_retry_read_req);
-
-	set_bit(NETFS_RREQ_RETRYING, &rreq->flags);

 	/* Wait for all outstanding I/O to quiesce before performing retries as
 	 * we may need to renegotiate the I/O sizes.
 	 */
-	list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
-		if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
-			continue;
-
-		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
-		for (;;) {
-			prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
-
-			if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
-				break;
-
-			trace_netfs_sreq(subreq, netfs_sreq_trace_wait_for);
-			schedule();
-			trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
-		}
-
-		finish_wait(&rreq->waitq, &myself);
-	}
+	set_bit(NETFS_RREQ_RETRYING, &rreq->flags);
+	netfs_wait_for_in_progress_stream(rreq, stream);
 	clear_bit(NETFS_RREQ_RETRYING, &rreq->flags);

 	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
+3 -3
fs/netfs/read_single.c
···
 	set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
 	return ret;
 cancel:
-	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+	netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
 	return ret;
 }

···
 	netfs_single_dispatch_read(rreq);

 	ret = netfs_wait_for_read(rreq);
-	netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
+	netfs_put_request(rreq, netfs_rreq_trace_put_return);
 	return ret;

 cleanup_free:
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+	netfs_put_request(rreq, netfs_rreq_trace_put_failed);
 	return ret;
 }
 EXPORT_SYMBOL(netfs_read_single);
+27 -54
fs/netfs/write_collect.c
···
 					 struct netfs_io_subrequest, rreq_link);
 		stream->front = front;
 		spin_unlock(&wreq->lock);
-		netfs_put_subrequest(remove, false,
+		netfs_put_subrequest(remove,
 				     notes & SAW_FAILURE ?
 				     netfs_sreq_trace_put_cancel :
 				     netfs_sreq_trace_put_done);
···

 	if (notes & NEED_RETRY)
 		goto need_retry;
-	if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
-		trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
-		clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
-		smp_mb__after_atomic(); /* Set PAUSE before task state */
-		wake_up(&wreq->waitq);
-	}

-	if (notes & NEED_REASSESS) {
+	if (notes & MADE_PROGRESS) {
+		netfs_wake_rreq_flag(wreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);
 		//cond_resched();
 		goto reassess_streams;
 	}
-	if (notes & MADE_PROGRESS) {
+
+	if (notes & NEED_REASSESS) {
 		//cond_resched();
 		goto reassess_streams;
 	}
···
 /*
  * Perform the collection of subrequests, folios and encryption buffers.
  */
-void netfs_write_collection_worker(struct work_struct *work)
+bool netfs_write_collection(struct netfs_io_request *wreq)
 {
-	struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
 	size_t transferred;
 	int s;

 	_enter("R=%x", wreq->debug_id);

-	netfs_see_request(wreq, netfs_rreq_trace_see_work);
-	if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
-		netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
-		return;
-	}
-
 	netfs_collect_write_results(wreq);

 	/* We're done when the app thread has finished posting subreqs and all
 	 * the queues in all the streams are empty.
 	 */
-	if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags)) {
-		netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
-		return;
-	}
+	if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags))
+		return false;
 	smp_rmb(); /* Read ALL_QUEUED before lists. */

 	transferred = LONG_MAX;
···
 		struct netfs_io_stream *stream = &wreq->io_streams[s];
 		if (!stream->active)
 			continue;
-		if (!list_empty(&stream->subrequests)) {
-			netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
-			return;
-		}
+		if (!list_empty(&stream->subrequests))
+			return false;
 		if (stream->transferred < transferred)
 			transferred = stream->transferred;
 	}
···
 	inode_dio_end(wreq->inode);

 	_debug("finished");
-	trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
-	clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
+	netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+	/* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */

 	if (wreq->iocb) {
 		size_t written = min(wreq->transferred, wreq->len);
···
 		wreq->iocb = VFS_PTR_POISON;
 	}

-	netfs_clear_subrequests(wreq, false);
-	netfs_put_request(wreq, false, netfs_rreq_trace_put_work_complete);
+	netfs_clear_subrequests(wreq);
+	return true;
 }

-/*
- * Wake the collection work item.
- */
-void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
+void netfs_write_collection_worker(struct work_struct *work)
 {
-	if (!work_pending(&wreq->work)) {
-		netfs_get_request(wreq, netfs_rreq_trace_get_work);
-		if (!queue_work(system_unbound_wq, &wreq->work))
-			netfs_put_request(wreq, was_async, netfs_rreq_trace_put_work_nq);
+	struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
+
+	netfs_see_request(rreq, netfs_rreq_trace_see_work);
+	if (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)) {
+		if (netfs_write_collection(rreq))
+			/* Drop the ref from the IN_PROGRESS flag. */
+			netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
+		else
+			netfs_see_request(rreq, netfs_rreq_trace_see_work_complete);
 	}
 }
···
  * netfs_write_subrequest_terminated - Note the termination of a write operation.
  * @_op: The I/O request that has terminated.
  * @transferred_or_error: The amount of data transferred or an error code.
- * @was_async: The termination was asynchronous
  *
  * This tells the library that a contributory write I/O operation has
  * terminated, one way or another, and that it should collect the results.
···
  * negative error code.  The library will look after reissuing I/O operations
  * as appropriate and writing downloaded data to the cache.
  *
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
- *
  * When this is called, ownership of the subrequest is transferred back to the
  * library, along with a ref.
  *
  * Note that %_op is a void* so that the function can be passed to
  * kiocb::term_func without the need for a casting wrapper.
  */
-void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
-				       bool was_async)
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error)
 {
 	struct netfs_io_subrequest *subreq = _op;
 	struct netfs_io_request *wreq = subreq->rreq;
-	struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];

 	_enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);

···
 	}

 	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
-	clear_and_wake_up_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
-
-	/* If we are at the head of the queue, wake up the collector,
-	 * transferring a ref to it if we were the ones to do so.
-	 */
-	if (list_is_first(&subreq->rreq_link, &stream->subrequests))
-		netfs_wake_write_collector(wreq, was_async);
-
-	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+	netfs_subreq_clear_in_progress(subreq);
+	netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
 }
 EXPORT_SYMBOL(netfs_write_subrequest_terminated);
+19 -19
fs/netfs/write_issue.c
···
 	return wreq;
 nomem:
 	wreq->error = -ENOMEM;
-	netfs_put_request(wreq, false, netfs_rreq_trace_put_failed);
+	netfs_put_request(wreq, netfs_rreq_trace_put_failed);
 	return ERR_PTR(-ENOMEM);
 }

···
 	_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);

 	if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
-		return netfs_write_subrequest_terminated(subreq, subreq->error, false);
+		return netfs_write_subrequest_terminated(subreq, subreq->error);

 	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
 	stream->issue_write(subreq);
···
 	}

 	if (needs_poke)
-		netfs_wake_write_collector(wreq, false);
+		netfs_wake_collector(wreq);
 }

 /*
···
 		goto couldnt_start;
 	}

+	__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
 	trace_netfs_write(wreq, netfs_write_trace_writeback);
 	netfs_stat(&netfs_n_wh_writepages);

···
 	netfs_end_issue_write(wreq);

 	mutex_unlock(&ictx->wb_lock);
+	netfs_wake_collector(wreq);

-	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+	netfs_put_request(wreq, netfs_rreq_trace_put_return);
 	_leave(" = %d", error);
 	return error;

···
 /*
  * End a write operation used when writing through the pagecache.
  */
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-			   struct folio *writethrough_cache)
+ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			       struct folio *writethrough_cache)
 {
 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
-	int ret;
+	ssize_t ret;

 	_enter("R=%x", wreq->debug_id);

···

 	mutex_unlock(&ictx->wb_lock);

-	if (wreq->iocb) {
+	if (wreq->iocb)
 		ret = -EIOCBQUEUED;
-	} else {
-		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
-		ret = wreq->error;
-	}
-	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+	else
+		ret = netfs_wait_for_write(wreq);
+	netfs_put_request(wreq, netfs_rreq_trace_put_return);
 	return ret;
 }

···
 		start += part;
 		len -= part;
 		rolling_buffer_advance(&wreq->buffer, part);
-		if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
-			trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
-			wait_event(wreq->waitq, !test_bit(NETFS_RREQ_PAUSE, &wreq->flags));
-		}
+		if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags))
+			netfs_wait_for_paused_write(wreq);
 		if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
 			break;
 	}
···
 		goto couldnt_start;
 	}

-	trace_netfs_write(wreq, netfs_write_trace_writeback);
+	__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
+	trace_netfs_write(wreq, netfs_write_trace_writeback_single);
 	netfs_stat(&netfs_n_wh_writepages);

 	if (__test_and_set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
···
 	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

 	mutex_unlock(&ictx->wb_lock);
+	netfs_wake_collector(wreq);

-	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+	netfs_put_request(wreq, netfs_rreq_trace_put_return);
 	_leave(" = %d", ret);
 	return ret;

+8 -11
fs/netfs/write_retry.c
···
 		if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
 			break;
 		if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
-			struct iov_iter source = subreq->io_iter;
+			struct iov_iter source;

-			iov_iter_revert(&source, subreq->len - source.count);
+			netfs_reset_iter(subreq);
+			source = subreq->io_iter;
 			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
 			netfs_reissue_write(stream, subreq, &source);
 		}
···
 					 &stream->subrequests, rreq_link) {
 		trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
 		list_del(&subreq->rreq_link);
-		netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
+		netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
 		if (subreq == to)
 			break;
 	}
···
  */
 void netfs_retry_writes(struct netfs_io_request *wreq)
 {
-	struct netfs_io_subrequest *subreq;
 	struct netfs_io_stream *stream;
 	int s;

···
 	/* Wait for all outstanding I/O to quiesce before performing retries as
 	 * we may need to renegotiate the I/O sizes.
 	 */
+	set_bit(NETFS_RREQ_RETRYING, &wreq->flags);
 	for (s = 0; s < NR_IO_STREAMS; s++) {
 		stream = &wreq->io_streams[s];
-		if (!stream->active)
-			continue;
-
-		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
-			wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
-				    TASK_UNINTERRUPTIBLE);
-		}
+		if (stream->active)
+			netfs_wait_for_in_progress_stream(wreq, stream);
 	}
+	clear_bit(NETFS_RREQ_RETRYING, &wreq->flags);

 	// TODO: Enc: Fetch changed partial pages
 	// TODO: Enc: Reencrypt content if needed.
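
The iterator hunk at the top of this file is fix (1). The old code reverted a copy of subreq->io_iter by subreq->len - source.count, trusting that count still described this subrequest; if the failed attempt had advanced or reloaded the iterator, the revert walked off the front of the buffer and oopsed. netfs_reset_iter() instead rewinds the subrequest's own iterator before the copy is taken; conceptually it does something like the following (a sketch of the idea, not the helper's exact body):

	/* Make subreq->io_iter cover the unprocessed part of the subrequest
	 * again, whatever state the last attempt left it in.
	 */
	size_t remain = subreq->len - subreq->transferred;

	if (subreq->io_iter.count > remain)
		iov_iter_advance(&subreq->io_iter, subreq->io_iter.count - remain);
	else if (subreq->io_iter.count < remain)
		iov_iter_revert(&subreq->io_iter, remain - subreq->io_iter.count);
	iov_iter_truncate(&subreq->io_iter, remain);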
+1 -2
fs/smb/client/cifsproto.h
···
 			  bool from_readdir);
 extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
 			    unsigned int bytes_written);
-void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
-				      bool was_async);
+void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result);
 extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
 extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
 				  int flags,
+2 -2
fs/smb/client/cifssmb.c
···
 			      server->credits, server->in_flight,
 			      0, cifs_trace_rw_credits_write_response_clear);
 	wdata->credits.value = 0;
-	cifs_write_subrequest_terminated(wdata, result, true);
+	cifs_write_subrequest_terminated(wdata, result);
 	release_mid(mid);
 	trace_smb3_rw_credits(credits.rreq_debug_id, credits.rreq_debug_index, 0,
 			      server->credits, server->in_flight,
···
 out:
 	if (rc) {
 		add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
-		cifs_write_subrequest_terminated(wdata, rc, false);
+		cifs_write_subrequest_terminated(wdata, rc);
 	}
 }

+3 -4
fs/smb/client/file.c
···
 	else
 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
-	cifs_write_subrequest_terminated(wdata, rc, false);
+	cifs_write_subrequest_terminated(wdata, rc);
 	goto out;
 }
···
 	return rc;
 }

-void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
-				      bool was_async)
+void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
 {
 	struct netfs_io_request *wreq = wdata->rreq;
 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
···
 		netfs_resize_file(ictx, wrend, true);
 	}

-	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
+	netfs_write_subrequest_terminated(&wdata->subreq, result);
 }

 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+2 -2
fs/smb/client/smb2pdu.c
···
 			      0, cifs_trace_rw_credits_write_response_clear);
 	wdata->credits.value = 0;
 	trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
-	cifs_write_subrequest_terminated(wdata, result ?: written, true);
+	cifs_write_subrequest_terminated(wdata, result ?: written);
 	release_mid(mid);
 	trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
 			      server->credits, server->in_flight,
···
 			      -(int)wdata->credits.value,
 			      cifs_trace_rw_credits_write_response_clear);
 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
-	cifs_write_subrequest_terminated(wdata, rc, true);
+	cifs_write_subrequest_terminated(wdata, rc);
 	}
 }

+1 -1
include/linux/fscache.h
···
 					   term_func, term_func_priv,
 					   using_pgpriv2, caching);
 	else if (term_func)
-		term_func(term_func_priv, -ENOBUFS, false);
+		term_func(term_func_priv, -ENOBUFS);

 }

+7 -7
include/linux/netfs.h
···
 	NETFS_WRITE_TO_CACHE,
 } __mode(byte);

-typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
-				      bool was_async);
+typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error);

 /*
  * Per-inode context.  This wraps the VFS inode.
···
  */
 struct netfs_io_request {
 	union {
-		struct work_struct work;
+		struct work_struct cleanup_work; /* Deferred cleanup work */
 		struct rcu_head rcu;
 	};
+	struct work_struct work;	/* Result collector work */
 	struct inode *inode;		/* The file being accessed */
 	struct address_space *mapping;	/* The mapping being accessed */
 	struct kiocb *iocb;		/* AIO completion vector */
···
 #define NETFS_RREQ_OFFLOAD_COLLECTION	0	/* Offload collection to workqueue */
 #define NETFS_RREQ_NO_UNLOCK_FOLIO	2	/* Don't unlock no_unlock_folio on completion */
 #define NETFS_RREQ_FAILED		4	/* The request failed */
-#define NETFS_RREQ_IN_PROGRESS		5	/* Unlocked when the request completes */
+#define NETFS_RREQ_IN_PROGRESS		5	/* Unlocked when the request completes (has ref) */
 #define NETFS_RREQ_FOLIO_COPY_TO_CACHE	6	/* Copy current folio to cache from read */
 #define NETFS_RREQ_UPLOAD_TO_SERVER	8	/* Need to write to the server */
 #define NETFS_RREQ_PAUSE		11	/* Pause subrequest generation */
 #define NETFS_RREQ_USE_IO_ITER		12	/* Use ->io_iter rather than ->i_pages */
 #define NETFS_RREQ_ALL_QUEUED		13	/* All subreqs are now queued */
 #define NETFS_RREQ_RETRYING		14	/* Set if we're in the retry path */
+#define NETFS_RREQ_SHORT_TRANSFER	15	/* Set if we have a short transfer */
 #define NETFS_RREQ_USE_PGPRIV2		31	/* [DEPRECATED] Use PG_private_2 to mark
 						 * write to cache on read */
 	const struct netfs_request_ops *netfs_ops;
···
 void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
 			  enum netfs_sreq_ref_trace what);
 void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
-			  bool was_async, enum netfs_sreq_ref_trace what);
+			  enum netfs_sreq_ref_trace what);
 ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
 				struct iov_iter *new,
 				iov_iter_extraction_t extraction_flags);
 size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
 			size_t max_size, size_t max_segs);
 void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq);
-void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
-				       bool was_async);
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error);
 void netfs_queue_write_request(struct netfs_io_subrequest *subreq);

 int netfs_start_io_read(struct inode *inode);
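
With the typedef change above, a cache or transport completion hook is now just (priv, result). A minimal hypothetical implementation, assuming the common convention of passing the subrequest as priv (my_backend_write_done is illustrative, not part of the patch):

	static void my_backend_write_done(void *priv, ssize_t transferred_or_error)
	{
		struct netfs_io_subrequest *subreq = priv;

		/* There is no was_async flag to thread through any more: the
		 * library always defers request cleanup to a workqueue, so
		 * this can be called from completion context without
		 * special-casing.
		 */
		netfs_write_subrequest_terminated(subreq, transferred_or_error);
	}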
+3 -4
include/trace/events/netfs.h
···
 	EM(netfs_write_trace_dio_write,		"DIO-WRITE")	\
 	EM(netfs_write_trace_unbuffered_write,	"UNB-WRITE")	\
 	EM(netfs_write_trace_writeback,		"WRITEBACK")	\
+	EM(netfs_write_trace_writeback_single,	"WB-SINGLE")	\
 	E_(netfs_write_trace_writethrough,	"WRITETHRU")

 #define netfs_rreq_origins					\
···
 #define netfs_rreq_ref_traces					\
 	EM(netfs_rreq_trace_get_for_outstanding, "GET OUTSTND")	\
 	EM(netfs_rreq_trace_get_subreq,		"GET SUBREQ ")	\
-	EM(netfs_rreq_trace_get_work,		"GET WORK   ")	\
 	EM(netfs_rreq_trace_put_complete,	"PUT COMPLT ")	\
 	EM(netfs_rreq_trace_put_discard,	"PUT DISCARD")	\
 	EM(netfs_rreq_trace_put_failed,		"PUT FAILED ")	\
 	EM(netfs_rreq_trace_put_no_submit,	"PUT NO-SUBM")	\
 	EM(netfs_rreq_trace_put_return,		"PUT RETURN ")	\
 	EM(netfs_rreq_trace_put_subreq,		"PUT SUBREQ ")	\
-	EM(netfs_rreq_trace_put_work,		"PUT WORK   ")	\
-	EM(netfs_rreq_trace_put_work_complete,	"PUT WORK CP")	\
-	EM(netfs_rreq_trace_put_work_nq,	"PUT WORK NQ")	\
+	EM(netfs_rreq_trace_put_work_ip,	"PUT WORK IP ")	\
 	EM(netfs_rreq_trace_see_work,		"SEE WORK   ")	\
+	EM(netfs_rreq_trace_see_work_complete,	"SEE WORK CP")	\
 	E_(netfs_rreq_trace_new,		"NEW        ")

 #define netfs_sreq_ref_traces					\
+3 -3
net/9p/client.c
···
 				    start, len, &subreq->io_iter);
 	}
 	if (IS_ERR(req)) {
-		netfs_write_subrequest_terminated(subreq, PTR_ERR(req), false);
+		netfs_write_subrequest_terminated(subreq, PTR_ERR(req));
 		return;
 	}
···
 	if (err) {
 		trace_9p_protocol_dump(clnt, &req->rc);
 		p9_req_put(clnt, req);
-		netfs_write_subrequest_terminated(subreq, err, false);
+		netfs_write_subrequest_terminated(subreq, err);
 		return;
 	}
···
 	p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", len);

 	p9_req_put(clnt, req);
-	netfs_write_subrequest_terminated(subreq, written, false);
+	netfs_write_subrequest_terminated(subreq, written);
 }
 EXPORT_SYMBOL(p9_client_write_subreq);
