// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Clear the unread part of an I/O request.
 */
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
}

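/*
 * Completion handler for a read from the cache: pass the result straight
 * through to the common subrequest termination code.
 */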
static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
					bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;

	netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_from_cache(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq,
				  enum netfs_read_from_hole read_hole)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	netfs_stat(&netfs_n_rh_read);
	cres->ops->read(cres, subreq->start, &subreq->io_iter, read_hole,
			netfs_cache_read_terminated, subreq);
}

/*
 * Fill a subrequest region with zeroes.
 */
static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_zero);
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	netfs_subreq_terminated(subreq, 0, false);
}

/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->start + subreq->transferred to
 * subreq->start + subreq->len - 1. It may not backtrack and write data into
 * the buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
 *   make progress.
 *
 * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void netfs_read_from_server(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_download);

	if (rreq->origin != NETFS_DIO_READ &&
	    iov_iter_count(&subreq->io_iter) != subreq->len - subreq->transferred)
		pr_warn("R=%08x[%u] ITER PRE-MISMATCH %zx != %zx-%zx %lx\n",
			rreq->debug_id, subreq->debug_index,
			iov_iter_count(&subreq->io_iter), subreq->len,
			subreq->transferred, subreq->flags);
	rreq->netfs_ops->issue_read(subreq);
}

/*
 * Release those waiting.
 */
static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_clear_subrequests(rreq, was_async);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
}

/*
 * [DEPRECATED] Deal with the completion of writing the data to the cache. We
 * have to clear the PG_fscache bits on the folios involved and release the
 * caller's ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */
static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
					  bool was_async)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	pgoff_t unlocked = 0;
	bool have_unlocked = false;

	rcu_read_lock();

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

		xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
			if (xas_retry(&xas, folio))
				continue;

			/* We might have multiple writes from the same huge
			 * folio, but we mustn't unlock a folio more than once.
			 */
			if (have_unlocked && folio->index <= unlocked)
				continue;
			unlocked = folio_next_index(folio) - 1;
			trace_netfs_folio(folio, netfs_folio_trace_end_copy);
			folio_end_private_2(folio);
			have_unlocked = true;
		}
	}

	rcu_read_unlock();
	netfs_rreq_completed(rreq, was_async);
}

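/*
 * Note the termination of a write to the cache and release the subrequest.
 * The last write to complete also clears the PG_private_2 marks on the
 * folios covered by the request.
 */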
static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
				       bool was_async) /* [DEPRECATED] */
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error)) {
		netfs_stat(&netfs_n_rh_write_failed);
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_copy_to_cache);
	} else {
		netfs_stat(&netfs_n_rh_write_done);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

	/* If we decrement nr_copy_ops to 0, the ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, was_async);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}

/*
 * [DEPRECATED] Perform any outstanding writes to the cache. We inherit a ref
 * from the caller.
 */
static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct netfs_io_subrequest *subreq, *next, *p;
	struct iov_iter iter;
	int ret;

	trace_netfs_rreq(rreq, netfs_rreq_trace_copy);

	/* We don't want terminating writes trying to wake us up whilst we're
	 * still going through the list.
	 */
	atomic_inc(&rreq->nr_copy_ops);

	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
			list_del_init(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false,
					     netfs_sreq_trace_put_no_copy);
		}
	}

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		/* Amalgamate adjacent writes */
		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
			next = list_next_entry(subreq, rreq_link);
			if (next->start != subreq->start + subreq->len)
				break;
			subreq->len += next->len;
			list_del_init(&next->rreq_link);
			netfs_put_subrequest(next, false,
					     netfs_sreq_trace_put_merged);
		}

		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
					       subreq->len, rreq->i_size, true);
		if (ret < 0) {
			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
			continue;
		}

		iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
				subreq->start, subreq->len);

		atomic_inc(&rreq->nr_copy_ops);
		netfs_stat(&netfs_n_rh_write);
		netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
		cres->ops->write(cres, subreq->start, &iter,
				 netfs_rreq_copy_terminated, subreq);
	}

	/* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, false);
}

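/*
 * Workqueue entry point for the cache copy: recover the request from its
 * work_struct and do the writes.
 */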
static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);

	netfs_rreq_do_write_to_cache(rreq);
}

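/*
 * Punt the cache copy over to a workqueue so that it isn't done in softirq
 * context.
 */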
static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */
{
	rreq->work.func = netfs_rreq_write_to_cache_work;
	if (!queue_work(system_unbound_wq, &rreq->work))
		BUG();
}

/*
 * Handle a short read.
 */
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

	netfs_stat(&netfs_n_rh_short_read);
	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

	netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
	atomic_inc(&rreq->nr_outstanding);
	if (subreq->source == NETFS_READ_FROM_CACHE)
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
	else
		netfs_read_from_server(rreq, subreq);
}

/*
 * Reset the subrequest iterator prior to resubmission.
 */
static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
				    struct netfs_io_subrequest *subreq)
{
	size_t remaining = subreq->len - subreq->transferred;
	size_t count = iov_iter_count(&subreq->io_iter);

	if (count == remaining)
		return;

	_debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x",
	       rreq->debug_id, subreq->debug_index,
	       iov_iter_count(&subreq->io_iter), subreq->transferred,
	       subreq->len, rreq->i_size,
	       subreq->io_iter.iter_type);

	if (count < remaining)
		iov_iter_revert(&subreq->io_iter, remaining - count);
	else
		iov_iter_advance(&subreq->io_iter, count - remaining);
}

/*
 * Resubmit any short or failed operations. Returns true if we got the rreq
 * ref back.
 */
static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	WARN_ON(in_interrupt());

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

	/* We don't want terminating submissions trying to wake us up whilst
	 * we're still going through the list.
	 */
	atomic_inc(&rreq->nr_outstanding);

	__clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error) {
			if (subreq->source != NETFS_READ_FROM_CACHE)
				break;
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->error = 0;
			__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
			netfs_stat(&netfs_n_rh_download_instead);
			trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			atomic_inc(&rreq->nr_outstanding);
			netfs_reset_subreq_iter(rreq, subreq);
			netfs_read_from_server(rreq, subreq);
		} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
			__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
			netfs_reset_subreq_iter(rreq, subreq);
			netfs_rreq_short_read(rreq, subreq);
		}
	}

	/* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		return true;

	wake_up_var(&rreq->nr_outstanding);
	return false;
}

/*
 * Check to see if the data read is still valid.
 */
static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	if (!rreq->netfs_ops->is_still_valid ||
	    rreq->netfs_ops->is_still_valid(rreq))
		return;

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			subreq->error = -ESTALE;
			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
		}
	}
}

/*
 * Determine how much we can admit to having read from a DIO read.
 */
static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	unsigned int i;
	size_t transferred = 0;

	for (i = 0; i < rreq->direct_bv_count; i++) {
		flush_dcache_page(rreq->direct_bv[i].bv_page);
		// TODO: cifs marks pages in the destination buffer
		// dirty under some circumstances after a read. Do we
		// need to do that too?
		set_page_dirty(rreq->direct_bv[i].bv_page);
	}

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error || subreq->transferred == 0)
			break;
		transferred += subreq->transferred;
		if (subreq->transferred < subreq->len ||
		    test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags))
			break;
	}

	for (i = 0; i < rreq->direct_bv_count; i++)
		flush_dcache_page(rreq->direct_bv[i].bv_page);

	rreq->transferred = transferred;
	task_io_account_read(transferred);

	if (rreq->iocb) {
		rreq->iocb->ki_pos += transferred;
		if (rreq->iocb->ki_complete)
			rreq->iocb->ki_complete(
				rreq->iocb, rreq->error ? rreq->error : transferred);
	}
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
	inode_dio_end(rreq->inode);
}

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context at this point. We inherit a ref from the caller.
 */
static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
	netfs_rreq_is_still_valid(rreq);

	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
		if (netfs_rreq_perform_resubmissions(rreq))
			goto again;
		return;
	}

	if (rreq->origin != NETFS_DIO_READ)
		netfs_rreq_unlock_folios(rreq);
	else
		netfs_rreq_assess_dio(rreq);

	trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) &&
	    test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))
		return netfs_rreq_write_to_cache(rreq);

	netfs_rreq_completed(rreq, was_async);
}

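/*
 * Workqueue handler: assess the request outside of softirq context.
 */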
static void netfs_rreq_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	netfs_rreq_assess(rreq, false);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
static void netfs_rreq_terminated(struct netfs_io_request *rreq,
				  bool was_async)
{
	if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
	    was_async) {
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_assess(rreq, was_async);
	}
}

/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred, 0 to
 * indicate a failure to transfer anything that should be retried or a negative
 * error code. The helper will look after reissuing I/O operations as
 * appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
			     ssize_t transferred_or_error,
			     bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;
	int u;

	_enter("R=%x[%x]{%llx,%lx},%zd",
	       rreq->debug_id, subreq->debug_index,
	       subreq->start, subreq->flags, transferred_or_error);

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_read);
		goto failed;
	}

	if (WARN(transferred_or_error > subreq->len - subreq->transferred,
		 "Subreq overread: R%x[%x] %zd > %zu - %zu",
		 rreq->debug_id, subreq->debug_index,
		 transferred_or_error, subreq->len, subreq->transferred))
		transferred_or_error = subreq->len - subreq->transferred;

	subreq->error = 0;
	subreq->transferred += transferred_or_error;
	if (subreq->transferred < subreq->len &&
	    !test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags))
		goto incomplete;

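	/* The subrequest is complete. If it was marked for copying to the
	 * cache, propagate that to the request so the copy gets scheduled
	 * later.
	 */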
complete:
	__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
		set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);

out:
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	/* If we decrement nr_outstanding to 0, the ref belongs to us. */
	u = atomic_dec_return(&rreq->nr_outstanding);
	if (u == 0)
		netfs_rreq_terminated(rreq, was_async);
	else if (u == 1)
		wake_up_var(&rreq->nr_outstanding);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
	return;

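	/* The read fell short. Either zero-fill the tail and treat it as
	 * complete, give up if no progress is being made, or mark it for
	 * resubmission.
	 */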
incomplete:
	if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
		netfs_clear_unread(subreq);
		subreq->transferred = subreq->len;
		goto complete;
	}

	if (transferred_or_error == 0) {
		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
			if (rreq->origin != NETFS_DIO_READ)
				subreq->error = -ENODATA;
			goto failed;
		}
	} else {
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}

	__set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	goto out;

failed:
	if (subreq->source == NETFS_READ_FROM_CACHE) {
		netfs_stat(&netfs_n_rh_read_failed);
		set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	} else {
		netfs_stat(&netfs_n_rh_download_failed);
		set_bit(NETFS_RREQ_FAILED, &rreq->flags);
		rreq->error = subreq->error;
	}
	goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);

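/*
 * Ask the cache how the next subrequest should be sourced. Without a cache,
 * a read beyond EOF is filled with zeroes and everything else comes from the
 * server.
 */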
static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
						     loff_t i_size)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops)
		return cres->ops->prepare_read(subreq, i_size);
	if (subreq->start >= rreq->i_size)
		return NETFS_FILL_WITH_ZEROES;
	return NETFS_DOWNLOAD_FROM_SERVER;
}

/*
 * Work out what sort of subrequest the next one will be.
 */
static enum netfs_io_source
netfs_rreq_prepare_read(struct netfs_io_request *rreq,
			struct netfs_io_subrequest *subreq,
			struct iov_iter *io_iter)
{
	enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER;
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	size_t lsize;

	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

	if (rreq->origin != NETFS_DIO_READ) {
		source = netfs_cache_prepare_read(subreq, rreq->i_size);
		if (source == NETFS_INVALID_READ)
			goto out;
	}

	if (source == NETFS_DOWNLOAD_FROM_SERVER) {
		/* Call out to the netfs to let it shrink the request to fit
		 * its own I/O sizes and boundaries. If it shrinks it here, it
		 * will be called again to make simultaneous calls; if it wants
		 * to make serial calls, it can indicate a short read and then
		 * we will call it again.
		 */
		if (rreq->origin != NETFS_DIO_READ) {
			if (subreq->start >= ictx->zero_point) {
				source = NETFS_FILL_WITH_ZEROES;
				goto set;
			}
			if (subreq->len > ictx->zero_point - subreq->start)
				subreq->len = ictx->zero_point - subreq->start;

			/* We limit buffered reads to the EOF, but let the
			 * server deal with larger-than-EOF DIO/unbuffered
			 * reads.
			 */
			if (subreq->len > rreq->i_size - subreq->start)
				subreq->len = rreq->i_size - subreq->start;
		}
		if (rreq->rsize && subreq->len > rreq->rsize)
			subreq->len = rreq->rsize;

		if (rreq->netfs_ops->clamp_length &&
		    !rreq->netfs_ops->clamp_length(subreq)) {
			source = NETFS_INVALID_READ;
			goto out;
		}

		if (subreq->max_nr_segs) {
			lsize = netfs_limit_iter(io_iter, 0, subreq->len,
						 subreq->max_nr_segs);
			if (subreq->len > lsize) {
				subreq->len = lsize;
				trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
			}
		}
	}

set:
	if (subreq->len > rreq->len)
		pr_warn("R=%08x[%u] SREQ>RREQ %zx > %llx\n",
			rreq->debug_id, subreq->debug_index,
			subreq->len, rreq->len);

	if (WARN_ON(subreq->len == 0)) {
		source = NETFS_INVALID_READ;
		goto out;
	}

	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);

	subreq->io_iter = *io_iter;
	iov_iter_truncate(&subreq->io_iter, subreq->len);
	iov_iter_advance(io_iter, subreq->len);
out:
	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
	return source;
}

/*
 * Slice off a piece of a read request and submit an I/O request for it.
 */
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
				    struct iov_iter *io_iter)
{
	struct netfs_io_subrequest *subreq;
	enum netfs_io_source source;

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq)
		return false;

	subreq->start = rreq->start + rreq->submitted;
	subreq->len = io_iter->count;

	_debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

	/* Call out to the cache to find out what it can do with the remaining
	 * subset. It tells us in subreq->flags what it decided should be done
	 * and adjusts subreq->len down if the subset crosses a cache boundary.
	 *
	 * Then, when we hand off the subset, it can choose to take a subset of
	 * that (the starts must coincide), in which case we go around the loop
	 * again and ask it to download the next piece.
	 */
	source = netfs_rreq_prepare_read(rreq, subreq, io_iter);
	if (source == NETFS_INVALID_READ)
		goto subreq_failed;

	atomic_inc(&rreq->nr_outstanding);

	rreq->submitted += subreq->len;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	switch (source) {
	case NETFS_FILL_WITH_ZEROES:
		netfs_fill_with_zeroes(rreq, subreq);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_read_from_server(rreq, subreq);
		break;
	case NETFS_READ_FROM_CACHE:
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
		break;
	default:
		BUG();
	}

	return true;

subreq_failed:
	rreq->error = subreq->error;
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
	return false;
}

/*
 * Begin the process of reading in a chunk of data, where that data may be
 * stitched together from multiple sources, including multiple servers and the
 * local cache.
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
	struct iov_iter io_iter;
	int ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		return -EIO;
	}

	if (rreq->origin == NETFS_DIO_READ)
		inode_dio_begin(rreq->inode);

	// TODO: Use bounce buffer if requested
	rreq->io_iter = rreq->iter;

	INIT_WORK(&rreq->work, netfs_rreq_work);

	/* Chop the read into slices according to what the cache and the netfs
	 * want and submit each one.
	 */
	netfs_get_request(rreq, netfs_rreq_trace_get_for_outstanding);
	atomic_set(&rreq->nr_outstanding, 1);
	io_iter = rreq->io_iter;
	do {
		_debug("submit %llx + %llx >= %llx",
		       rreq->start, rreq->submitted, rreq->i_size);
		if (!netfs_rreq_submit_slice(rreq, &io_iter))
			break;
		if (test_bit(NETFS_SREQ_NO_PROGRESS, &rreq->flags))
			break;
		if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
		    test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
			break;

	} while (rreq->submitted < rreq->len);

	if (!rreq->submitted) {
		netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
		if (rreq->origin == NETFS_DIO_READ)
			inode_dio_end(rreq->inode);
		ret = 0;
		goto out;
	}

	if (sync) {
		/* Keep nr_outstanding incremented so that the ref always
		 * belongs to us, and the service code isn't punted off to a
		 * random thread pool to process. Note that this might start
		 * further work, such as writing to the cache.
		 */
		wait_var_event(&rreq->nr_outstanding,
			       atomic_read(&rreq->nr_outstanding) == 1);
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);

		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
		wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
			    TASK_UNINTERRUPTIBLE);

		ret = rreq->error;
		if (ret == 0) {
			if (rreq->origin == NETFS_DIO_READ) {
				ret = rreq->transferred;
			} else if (rreq->submitted < rreq->len) {
				trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
				ret = -EIO;
			}
		}
	} else {
		/* If we decrement nr_outstanding to 0, the ref belongs to us. */
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);
		ret = -EIOCBQUEUED;
	}

out:
	return ret;
}