Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

fs/netfs/io.c at v6.10-rc6 (647 lines, 19 kB)

// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Clear the unread part of an I/O request.
 */
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
}

static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
					bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;

	netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_from_cache(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq,
				  enum netfs_read_from_hole read_hole)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	netfs_stat(&netfs_n_rh_read);
	cres->ops->read(cres, subreq->start, &subreq->io_iter, read_hole,
			netfs_cache_read_terminated, subreq);
}

/*
 * Fill a subrequest region with zeroes.
 */
static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_zero);
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	netfs_subreq_terminated(subreq, 0, false);
}

/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->pos + subreq->transferred to
 * subreq->pos + subreq->len - 1.  It may not backtrack and write data into the
 * buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
 *   make progress.
 *
 * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void netfs_read_from_server(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_download);

	if (rreq->origin != NETFS_DIO_READ &&
	    iov_iter_count(&subreq->io_iter) != subreq->len - subreq->transferred)
		pr_warn("R=%08x[%u] ITER PRE-MISMATCH %zx != %zx-%zx %lx\n",
			rreq->debug_id, subreq->debug_index,
			iov_iter_count(&subreq->io_iter), subreq->len,
			subreq->transferred, subreq->flags);
	rreq->netfs_ops->issue_read(subreq);
}

/*
 * Release those waiting.
 */
static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_clear_subrequests(rreq, was_async);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
}
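
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * minimal ->issue_read() honouring the contract described above.
 * my_send_read_rpc() is an assumed transport call, not a real kernel API.
 */
ssize_t my_send_read_rpc(struct inode *inode, unsigned long long pos,
			 struct iov_iter *iter);

static void myfs_issue_read(struct netfs_io_subrequest *subreq)
{
	size_t want = subreq->len - subreq->transferred;
	ssize_t ret;

	/* Only fetch the part not yet transferred; never backtrack. */
	ret = my_send_read_rpc(subreq->rreq->inode,
			       subreq->start + subreq->transferred,
			       &subreq->io_iter);
	if (ret > 0 && ret < want)
		/* Short read: let the helpers zero the rest of the buffer. */
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, ret, false);
}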

/*
 * Handle a short read.
 */
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

	netfs_stat(&netfs_n_rh_short_read);
	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

	netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
	atomic_inc(&rreq->nr_outstanding);
	if (subreq->source == NETFS_READ_FROM_CACHE)
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
	else
		netfs_read_from_server(rreq, subreq);
}

/*
 * Reset the subrequest iterator prior to resubmission.
 */
static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
				    struct netfs_io_subrequest *subreq)
{
	size_t remaining = subreq->len - subreq->transferred;
	size_t count = iov_iter_count(&subreq->io_iter);

	if (count == remaining)
		return;

	_debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
	       rreq->debug_id, subreq->debug_index,
	       iov_iter_count(&subreq->io_iter), subreq->transferred,
	       subreq->len, rreq->i_size,
	       subreq->io_iter.iter_type);

	if (count < remaining)
		iov_iter_revert(&subreq->io_iter, remaining - count);
	else
		iov_iter_advance(&subreq->io_iter, count - remaining);
}

/*
 * Resubmit any short or failed operations.  Returns true if we got the rreq
 * ref back.
 */
static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	WARN_ON(in_interrupt());

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

	/* We don't want terminating submissions trying to wake us up whilst
	 * we're still going through the list.
	 */
	atomic_inc(&rreq->nr_outstanding);

	__clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error) {
			if (subreq->source != NETFS_READ_FROM_CACHE)
				break;
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->error = 0;
			netfs_stat(&netfs_n_rh_download_instead);
			trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			atomic_inc(&rreq->nr_outstanding);
			netfs_reset_subreq_iter(rreq, subreq);
			netfs_read_from_server(rreq, subreq);
		} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
			netfs_rreq_short_read(rreq, subreq);
		}
	}

	/* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		return true;

	wake_up_var(&rreq->nr_outstanding);
	return false;
}

/*
 * Check to see if the data read is still valid.
 */
static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	if (!rreq->netfs_ops->is_still_valid ||
	    rreq->netfs_ops->is_still_valid(rreq))
		return;

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			subreq->error = -ESTALE;
			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
		}
	}
}
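
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * ->is_still_valid() implementation for the hook used above.  The
 * "myfs_inode" wrapper and its change-attribute fields are invented; a real
 * filesystem would compare whatever coherency data it snapshotted when the
 * read began.
 */
struct myfs_inode {
	struct netfs_inode	netfs;		/* must be first */
	u64			change_id;	/* server change attribute */
	u64			change_id_at_read; /* snapshot at read start */
};

static bool myfs_is_still_valid(struct netfs_io_request *rreq)
{
	struct myfs_inode *mi = container_of(netfs_inode(rreq->inode),
					     struct myfs_inode, netfs);

	/* If this returns false, cache reads are failed with -ESTALE and
	 * resubmitted to the server instead.
	 */
	return mi->change_id == mi->change_id_at_read;
}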

/*
 * Determine how much we can admit to having read from a DIO read.
 */
static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	unsigned int i;
	size_t transferred = 0;

	for (i = 0; i < rreq->direct_bv_count; i++) {
		flush_dcache_page(rreq->direct_bv[i].bv_page);
		// TODO: cifs marks pages in the destination buffer
		// dirty under some circumstances after a read.  Do we
		// need to do that too?
		set_page_dirty(rreq->direct_bv[i].bv_page);
	}

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error || subreq->transferred == 0)
			break;
		transferred += subreq->transferred;
		if (subreq->transferred < subreq->len)
			break;
	}

	for (i = 0; i < rreq->direct_bv_count; i++)
		flush_dcache_page(rreq->direct_bv[i].bv_page);

	rreq->transferred = transferred;
	task_io_account_read(transferred);

	if (rreq->iocb) {
		rreq->iocb->ki_pos += transferred;
		if (rreq->iocb->ki_complete)
			rreq->iocb->ki_complete(
				rreq->iocb, rreq->error ? rreq->error : transferred);
	}
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
	inode_dio_end(rreq->inode);
}

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context at this point.  We inherit a ref from the caller.
 */
static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
	netfs_rreq_is_still_valid(rreq);

	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
		if (netfs_rreq_perform_resubmissions(rreq))
			goto again;
		return;
	}

	if (rreq->origin != NETFS_DIO_READ)
		netfs_rreq_unlock_folios(rreq);
	else
		netfs_rreq_assess_dio(rreq);

	trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	netfs_rreq_completed(rreq, was_async);
}

static void netfs_rreq_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	netfs_rreq_assess(rreq, false);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
static void netfs_rreq_terminated(struct netfs_io_request *rreq,
				  bool was_async)
{
	if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
	    was_async) {
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_assess(rreq, was_async);
	}
}

/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred, 0 to
 * indicate a failure to transfer anything that should be retried or a negative
 * error code.  The helper will look after reissuing I/O operations as
 * appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
			     ssize_t transferred_or_error,
			     bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;
	int u;

	_enter("R=%x[%x]{%llx,%lx},%zd",
	       rreq->debug_id, subreq->debug_index,
	       subreq->start, subreq->flags, transferred_or_error);

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_read);
		goto failed;
	}

	if (WARN(transferred_or_error > subreq->len - subreq->transferred,
		 "Subreq overread: R%x[%x] %zd > %zu - %zu",
		 rreq->debug_id, subreq->debug_index,
		 transferred_or_error, subreq->len, subreq->transferred))
		transferred_or_error = subreq->len - subreq->transferred;

	subreq->error = 0;
	subreq->transferred += transferred_or_error;
	if (subreq->transferred < subreq->len)
		goto incomplete;

complete:
	__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
		set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);

out:
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	/* If we decrement nr_outstanding to 0, the ref belongs to us. */
	u = atomic_dec_return(&rreq->nr_outstanding);
	if (u == 0)
		netfs_rreq_terminated(rreq, was_async);
	else if (u == 1)
		wake_up_var(&rreq->nr_outstanding);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
	return;

incomplete:
	if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
		netfs_clear_unread(subreq);
		subreq->transferred = subreq->len;
		goto complete;
	}

	if (transferred_or_error == 0) {
		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
			subreq->error = -ENODATA;
			goto failed;
		}
	} else {
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}

	__set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	goto out;

failed:
	if (subreq->source == NETFS_READ_FROM_CACHE) {
		netfs_stat(&netfs_n_rh_read_failed);
		set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	} else {
		netfs_stat(&netfs_n_rh_download_failed);
		set_bit(NETFS_RREQ_FAILED, &rreq->flags);
		rreq->error = subreq->error;
	}
	goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);

static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
						     loff_t i_size)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops)
		return cres->ops->prepare_read(subreq, i_size);
	if (subreq->start >= rreq->i_size)
		return NETFS_FILL_WITH_ZEROES;
	return NETFS_DOWNLOAD_FROM_SERVER;
}
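
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * transport completion callback showing the @transferred_or_error convention
 * documented above.  The "myfs_rpc" structure is invented for illustration.
 */
struct myfs_rpc {
	void		*netfs_priv;		/* the netfs_io_subrequest */
	int		error;			/* 0 or negative errno */
	size_t		bytes_received;
};

static void myfs_read_rpc_done(struct myfs_rpc *rpc)
{
	struct netfs_io_subrequest *subreq = rpc->netfs_priv;
	ssize_t result = rpc->error ? rpc->error : rpc->bytes_received;

	/* Positive: bytes transferred; 0: no progress (retried until
	 * NETFS_SREQ_NO_PROGRESS trips); negative: hard error.  This may run
	 * in softirq context, hence was_async = true.
	 */
	netfs_subreq_terminated(subreq, result, true);
}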

/*
 * Work out what sort of subrequest the next one will be.
 */
static enum netfs_io_source
netfs_rreq_prepare_read(struct netfs_io_request *rreq,
			struct netfs_io_subrequest *subreq,
			struct iov_iter *io_iter)
{
	enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER;
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	size_t lsize;

	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

	if (rreq->origin != NETFS_DIO_READ) {
		source = netfs_cache_prepare_read(subreq, rreq->i_size);
		if (source == NETFS_INVALID_READ)
			goto out;
	}

	if (source == NETFS_DOWNLOAD_FROM_SERVER) {
		/* Call out to the netfs to let it shrink the request to fit
		 * its own I/O sizes and boundaries.  If it shrinks it here, it
		 * will be called again to make simultaneous calls; if it wants
		 * to make serial calls, it can indicate a short read and then
		 * we will call it again.
		 */
		if (rreq->origin != NETFS_DIO_READ) {
			if (subreq->start >= ictx->zero_point) {
				source = NETFS_FILL_WITH_ZEROES;
				goto set;
			}
			if (subreq->len > ictx->zero_point - subreq->start)
				subreq->len = ictx->zero_point - subreq->start;
		}
		if (subreq->len > rreq->i_size - subreq->start)
			subreq->len = rreq->i_size - subreq->start;
		if (rreq->rsize && subreq->len > rreq->rsize)
			subreq->len = rreq->rsize;

		if (rreq->netfs_ops->clamp_length &&
		    !rreq->netfs_ops->clamp_length(subreq)) {
			source = NETFS_INVALID_READ;
			goto out;
		}

		if (subreq->max_nr_segs) {
			lsize = netfs_limit_iter(io_iter, 0, subreq->len,
						 subreq->max_nr_segs);
			if (subreq->len > lsize) {
				subreq->len = lsize;
				trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
			}
		}
	}

set:
	if (subreq->len > rreq->len)
		pr_warn("R=%08x[%u] SREQ>RREQ %zx > %llx\n",
			rreq->debug_id, subreq->debug_index,
			subreq->len, rreq->len);

	if (WARN_ON(subreq->len == 0)) {
		source = NETFS_INVALID_READ;
		goto out;
	}

	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);

	subreq->io_iter = *io_iter;
	iov_iter_truncate(&subreq->io_iter, subreq->len);
	iov_iter_advance(io_iter, subreq->len);
out:
	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
	return source;
}
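
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * ->clamp_length() implementation for the hook used above, trimming each
 * subrequest so that it never crosses a fixed 256KiB server I/O boundary.
 * Returning false would cause the slice to be treated as NETFS_INVALID_READ.
 */
static bool myfs_clamp_length(struct netfs_io_subrequest *subreq)
{
	unsigned long long boundary = round_up(subreq->start + 1, 256 * 1024);

	/* Trim the subrequest to end at the next 256KiB boundary. */
	if (subreq->len > boundary - subreq->start)
		subreq->len = boundary - subreq->start;
	return true;
}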

/*
 * Slice off a piece of a read request and submit an I/O request for it.
 */
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
				    struct iov_iter *io_iter)
{
	struct netfs_io_subrequest *subreq;
	enum netfs_io_source source;

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq)
		return false;

	subreq->start	= rreq->start + rreq->submitted;
	subreq->len	= io_iter->count;

	_debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

	/* Call out to the cache to find out what it can do with the remaining
	 * subset.  It tells us in subreq->flags what it decided should be done
	 * and adjusts subreq->len down if the subset crosses a cache boundary.
	 *
	 * Then when we hand the subset, it can choose to take a subset of that
	 * (the starts must coincide), in which case, we go around the loop
	 * again and ask it to download the next piece.
	 */
	source = netfs_rreq_prepare_read(rreq, subreq, io_iter);
	if (source == NETFS_INVALID_READ)
		goto subreq_failed;

	atomic_inc(&rreq->nr_outstanding);

	rreq->submitted += subreq->len;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	switch (source) {
	case NETFS_FILL_WITH_ZEROES:
		netfs_fill_with_zeroes(rreq, subreq);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_read_from_server(rreq, subreq);
		break;
	case NETFS_READ_FROM_CACHE:
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
		break;
	default:
		BUG();
	}

	return true;

subreq_failed:
	rreq->error = subreq->error;
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
	return false;
}

/*
 * Begin the process of reading in a chunk of data, where that data may be
 * stitched together from multiple sources, including multiple servers and the
 * local cache.
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
	struct iov_iter io_iter;
	int ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		return -EIO;
	}

	if (rreq->origin == NETFS_DIO_READ)
		inode_dio_begin(rreq->inode);

	// TODO: Use bounce buffer if requested
	rreq->io_iter = rreq->iter;

	INIT_WORK(&rreq->work, netfs_rreq_work);

	/* Chop the read into slices according to what the cache and the netfs
	 * want and submit each one.
	 */
	netfs_get_request(rreq, netfs_rreq_trace_get_for_outstanding);
	atomic_set(&rreq->nr_outstanding, 1);
	io_iter = rreq->io_iter;
	do {
		_debug("submit %llx + %llx >= %llx",
		       rreq->start, rreq->submitted, rreq->i_size);
		if (rreq->origin == NETFS_DIO_READ &&
		    rreq->start + rreq->submitted >= rreq->i_size)
			break;
		if (!netfs_rreq_submit_slice(rreq, &io_iter))
			break;
		if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
		    test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
			break;

	} while (rreq->submitted < rreq->len);

	if (!rreq->submitted) {
		netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
		if (rreq->origin == NETFS_DIO_READ)
			inode_dio_end(rreq->inode);
		ret = 0;
		goto out;
	}

	if (sync) {
		/* Keep nr_outstanding incremented so that the ref always
		 * belongs to us, and the service code isn't punted off to a
		 * random thread pool to process.  Note that this might start
		 * further work, such as writing to the cache.
		 */
		wait_var_event(&rreq->nr_outstanding,
			       atomic_read(&rreq->nr_outstanding) == 1);
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);

		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
		wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
			    TASK_UNINTERRUPTIBLE);

		ret = rreq->error;
		if (ret == 0 && rreq->submitted < rreq->len &&
		    rreq->origin != NETFS_DIO_READ) {
			trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
			ret = -EIO;
		}
	} else {
		/* If we decrement nr_outstanding to 0, the ref belongs to us. */
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);
		ret = -EIOCBQUEUED;
	}

out:
	return ret;
}
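
/*
 * Example (illustrative sketch, not part of this file): how a caller inside
 * the netfs library might drive netfs_begin_read() for a direct read.  The
 * wrapper below is invented and elides the user-buffer pinning and iterator
 * extraction that the real direct-read path performs; see the actual callers
 * (netfs_read_folio(), netfs_readahead(), netfs_unbuffered_read_iter()) for
 * the genuine setup.
 */
static ssize_t myfs_dio_read(struct kiocb *iocb, struct iov_iter *iter)
{
	struct netfs_io_request *rreq;
	ssize_t ret;

	rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
				   iocb->ki_pos, iov_iter_count(iter),
				   NETFS_DIO_READ);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);

	rreq->iter = *iter;	/* assumes a kernel-backed iterator */
	rreq->iocb = iocb;

	/* Synchronous callers get 0 or -errno back; asynchronous callers get
	 * -EIOCBQUEUED and completion is reported via iocb->ki_complete.
	 */
	ret = netfs_begin_read(rreq, is_sync_kiocb(iocb));
	if (ret == 0)
		ret = rreq->transferred;
	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
	return ret;
}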