Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfs: Drop the was_async arg from netfs_read_subreq_terminated()

Drop the was_async argument from netfs_read_subreq_terminated(). Almost
every caller is in process context and passes false; some
filesystems instead delegate the call to a workqueue to avoid doing the
work in their network message queue parsing thread.

The only exception is netfs_cache_read_terminated() which handles
completion in the cache - which is usually a callback from the backing
filesystem in softirq context, though it can be from process context if an
error occurred. In this case, delegate to a workqueue.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/CAHk-=wiVC5Cgyz6QKXFu6fTaA6h4CjexDR-OV9kL6Vo5x9v8=A@mail.gmail.com/
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-10-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>

authored by

David Howells and committed by
Christian Brauner
31fc366a 36015782

+33 -62
+1 -1
fs/9p/vfs_addr.c
··· 88 88 } 89 89 90 90 subreq->error = err; 91 - netfs_read_subreq_terminated(subreq, false); 91 + netfs_read_subreq_terminated(subreq); 92 92 } 93 93 94 94 /**
+3 -3
fs/afs/file.c
··· 247 247 if (req->pos + req->actual_len >= req->file_size) 248 248 __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags); 249 249 subreq->error = error; 250 - netfs_read_subreq_terminated(subreq, false); 250 + netfs_read_subreq_terminated(subreq); 251 251 req->subreq = NULL; 252 252 } else if (req->done) { 253 253 req->done(req); ··· 304 304 if (IS_ERR(op)) { 305 305 if (req->subreq) { 306 306 req->subreq->error = PTR_ERR(op); 307 - netfs_read_subreq_terminated(req->subreq, false); 307 + netfs_read_subreq_terminated(req->subreq); 308 308 } 309 309 return PTR_ERR(op); 310 310 } ··· 325 325 fsreq = afs_alloc_read(GFP_NOFS); 326 326 if (!fsreq) { 327 327 subreq->error = -ENOMEM; 328 - return netfs_read_subreq_terminated(subreq, false); 328 + return netfs_read_subreq_terminated(subreq); 329 329 } 330 330 331 331 fsreq->subreq = subreq;
+1 -1
fs/afs/fsclient.c
··· 352 352 ret = afs_extract_data(call, true); 353 353 if (req->subreq) { 354 354 req->subreq->transferred += count_before - call->iov_len; 355 - netfs_read_subreq_progress(req->subreq, false); 355 + netfs_read_subreq_progress(req->subreq); 356 356 } 357 357 if (ret < 0) 358 358 return ret;
+1 -1
fs/afs/yfsclient.c
··· 398 398 ret = afs_extract_data(call, true); 399 399 if (req->subreq) { 400 400 req->subreq->transferred += count_before - call->iov_len; 401 - netfs_read_subreq_progress(req->subreq, false); 401 + netfs_read_subreq_progress(req->subreq); 402 402 } 403 403 if (ret < 0) 404 404 return ret;
+3 -3
fs/ceph/addr.c
··· 255 255 } 256 256 subreq->error = err; 257 257 trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress); 258 - netfs_read_subreq_terminated(subreq, false); 258 + netfs_read_subreq_terminated(subreq); 259 259 iput(req->r_inode); 260 260 ceph_dec_osd_stopping_blocker(fsc->mdsc); 261 261 } ··· 317 317 out: 318 318 subreq->error = err; 319 319 trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress); 320 - netfs_read_subreq_terminated(subreq, false); 320 + netfs_read_subreq_terminated(subreq); 321 321 return true; 322 322 } 323 323 ··· 431 431 ceph_osdc_put_request(req); 432 432 if (err) { 433 433 subreq->error = err; 434 - netfs_read_subreq_terminated(subreq, false); 434 + netfs_read_subreq_terminated(subreq); 435 435 } 436 436 doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err); 437 437 }
+3 -3
fs/netfs/buffered_read.c
··· 154 154 } else { 155 155 subreq->error = transferred_or_error; 156 156 } 157 - netfs_read_subreq_terminated(subreq, was_async); 157 + schedule_work(&subreq->work); 158 158 } 159 159 160 160 /* ··· 255 255 goto prep_iter_failed; 256 256 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 257 257 subreq->error = 0; 258 - netfs_read_subreq_terminated(subreq, false); 258 + netfs_read_subreq_terminated(subreq); 259 259 goto done; 260 260 } 261 261 ··· 287 287 } while (size > 0); 288 288 289 289 if (atomic_dec_and_test(&rreq->nr_outstanding)) 290 - netfs_rreq_terminated(rreq, false); 290 + netfs_rreq_terminated(rreq); 291 291 292 292 /* Defer error return as we may need to wait for outstanding I/O. */ 293 293 cmpxchg(&rreq->error, 0, ret);
+1 -1
fs/netfs/direct_read.c
··· 100 100 } while (size > 0); 101 101 102 102 if (atomic_dec_and_test(&rreq->nr_outstanding)) 103 - netfs_rreq_terminated(rreq, false); 103 + netfs_rreq_terminated(rreq); 104 104 return ret; 105 105 } 106 106
+1 -1
fs/netfs/internal.h
··· 85 85 * read_collect.c 86 86 */ 87 87 void netfs_read_termination_worker(struct work_struct *work); 88 - void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async); 88 + void netfs_rreq_terminated(struct netfs_io_request *rreq); 89 89 90 90 /* 91 91 * read_pgpriv2.c
+1 -1
fs/netfs/objects.c
··· 56 56 origin == NETFS_READ_GAPS || 57 57 origin == NETFS_READ_FOR_WRITE || 58 58 origin == NETFS_DIO_READ) 59 - INIT_WORK(&rreq->work, netfs_read_termination_worker); 59 + INIT_WORK(&rreq->work, NULL); 60 60 else 61 61 INIT_WORK(&rreq->work, netfs_write_collection_worker); 62 62
+12 -41
fs/netfs/read_collect.c
··· 87 87 * Unlock any folios that are now completely read. Returns true if the 88 88 * subrequest is removed from the list. 89 89 */ 90 - static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was_async) 90 + static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq) 91 91 { 92 92 struct netfs_io_subrequest *prev, *next; 93 93 struct netfs_io_request *rreq = subreq->rreq; ··· 230 230 subreq->curr_folioq_slot = slot; 231 231 if (folioq && folioq_folio(folioq, slot)) 232 232 subreq->curr_folio_order = folioq->orders[slot]; 233 - if (!was_async) 234 - cond_resched(); 233 + cond_resched(); 235 234 goto next_folio; 236 235 } 237 236 ··· 367 368 * Note that we're in normal kernel thread context at this point, possibly 368 369 * running on a workqueue. 369 370 */ 370 - static void netfs_rreq_assess(struct netfs_io_request *rreq) 371 + void netfs_rreq_terminated(struct netfs_io_request *rreq) 371 372 { 372 373 trace_netfs_rreq(rreq, netfs_rreq_trace_assess); 373 374 ··· 393 394 netfs_pgpriv2_write_to_the_cache(rreq); 394 395 } 395 396 396 - void netfs_read_termination_worker(struct work_struct *work) 397 - { 398 - struct netfs_io_request *rreq = 399 - container_of(work, struct netfs_io_request, work); 400 - netfs_see_request(rreq, netfs_rreq_trace_see_work); 401 - netfs_rreq_assess(rreq); 402 - netfs_put_request(rreq, false, netfs_rreq_trace_put_work_complete); 403 - } 404 - 405 - /* 406 - * Handle the completion of all outstanding I/O operations on a read request. 407 - * We inherit a ref from the caller. 
408 - */ 409 - void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async) 410 - { 411 - if (!was_async) 412 - return netfs_rreq_assess(rreq); 413 - if (!work_pending(&rreq->work)) { 414 - netfs_get_request(rreq, netfs_rreq_trace_get_work); 415 - if (!queue_work(system_unbound_wq, &rreq->work)) 416 - netfs_put_request(rreq, was_async, netfs_rreq_trace_put_work_nq); 417 - } 418 - } 419 - 420 397 /** 421 398 * netfs_read_subreq_progress - Note progress of a read operation. 422 399 * @subreq: The read request that has terminated. 423 - * @was_async: True if we're in an asynchronous context. 424 400 * 425 401 * This tells the read side of netfs lib that a contributory I/O operation has 426 402 * made some progress and that it may be possible to unlock some folios. 427 403 * 428 404 * Before calling, the filesystem should update subreq->transferred to track 429 405 * the amount of data copied into the output buffer. 430 - * 431 - * If @was_async is true, the caller might be running in softirq or interrupt 432 - * context and we can't sleep. 433 406 */ 434 - void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq, 435 - bool was_async) 407 + void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq) 436 408 { 437 409 struct netfs_io_request *rreq = subreq->rreq; 410 + 411 + might_sleep(); 438 412 439 413 trace_netfs_sreq(subreq, netfs_sreq_trace_progress); 440 414 ··· 415 443 (rreq->origin == NETFS_READAHEAD || 416 444 rreq->origin == NETFS_READPAGE || 417 445 rreq->origin == NETFS_READ_FOR_WRITE)) { 418 - netfs_consume_read_data(subreq, was_async); 446 + netfs_consume_read_data(subreq); 419 447 __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags); 420 448 } 421 449 } ··· 424 452 /** 425 453 * netfs_read_subreq_terminated - Note the termination of an I/O operation. 426 454 * @subreq: The I/O request that has terminated. 427 - * @was_async: True if we're in an asynchronous context. 
428 455 * 429 456 * This tells the read helper that a contributory I/O operation has terminated, 430 457 * one way or another, and that it should integrate the results. ··· 437 466 * Before calling, the filesystem should update subreq->transferred to track 438 467 * the amount of data copied into the output buffer. 439 468 */ 440 - void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, bool was_async) 469 + void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq) 441 470 { 442 471 struct netfs_io_request *rreq = subreq->rreq; 443 472 ··· 471 500 (rreq->origin == NETFS_READAHEAD || 472 501 rreq->origin == NETFS_READPAGE || 473 502 rreq->origin == NETFS_READ_FOR_WRITE)) { 474 - netfs_consume_read_data(subreq, was_async); 503 + netfs_consume_read_data(subreq); 475 504 __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags); 476 505 } 477 506 rreq->transferred += subreq->transferred; ··· 516 545 } 517 546 518 547 if (atomic_dec_and_test(&rreq->nr_outstanding)) 519 - netfs_rreq_terminated(rreq, was_async); 548 + netfs_rreq_terminated(rreq); 520 549 521 - netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated); 550 + netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_terminated); 522 551 } 523 552 EXPORT_SYMBOL(netfs_read_subreq_terminated); 524 553 ··· 534 563 struct netfs_io_subrequest *subreq = 535 564 container_of(work, struct netfs_io_subrequest, work); 536 565 537 - netfs_read_subreq_terminated(subreq, false); 566 + netfs_read_subreq_terminated(subreq); 538 567 } 539 568 EXPORT_SYMBOL(netfs_read_subreq_termination_worker);
+1 -1
fs/netfs/read_retry.c
··· 234 234 netfs_retry_read_subrequests(rreq); 235 235 236 236 if (atomic_dec_and_test(&rreq->nr_outstanding)) 237 - netfs_rreq_terminated(rreq, false); 237 + netfs_rreq_terminated(rreq); 238 238 } 239 239 240 240 /*
+1 -1
fs/nfs/fscache.c
··· 316 316 netfs = nfs_netfs_alloc(sreq); 317 317 if (!netfs) { 318 318 sreq->error = -ENOMEM; 319 - return netfs_read_subreq_terminated(sreq, false); 319 + return netfs_read_subreq_terminated(sreq); 320 320 } 321 321 322 322 pgio.pg_netfs = netfs; /* used in completion */
+1 -1
fs/nfs/fscache.h
··· 75 75 netfs->sreq->transferred = min_t(s64, netfs->sreq->len, 76 76 atomic64_read(&netfs->transferred)); 77 77 netfs->sreq->error = netfs->error; 78 - netfs_read_subreq_terminated(netfs->sreq, false); 78 + netfs_read_subreq_terminated(netfs->sreq); 79 79 kfree(netfs); 80 80 } 81 81 static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
+1 -1
fs/smb/client/file.c
··· 228 228 229 229 failed: 230 230 subreq->error = rc; 231 - netfs_read_subreq_terminated(subreq, false); 231 + netfs_read_subreq_terminated(subreq); 232 232 } 233 233 234 234 /*
+2 -2
include/linux/netfs.h
··· 427 427 vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group); 428 428 429 429 /* (Sub)request management API. */ 430 - void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq, bool was_async); 431 - void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, bool was_async); 430 + void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq); 431 + void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq); 432 432 void netfs_read_subreq_termination_worker(struct work_struct *work); 433 433 void netfs_get_subrequest(struct netfs_io_subrequest *subreq, 434 434 enum netfs_sreq_ref_trace what);