// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level (buffered) writeback.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 *
 * To support network filesystems with local caching, we manage a situation
 * that can be envisioned like the following:
 *
 *               +---+---+-----+-----+---+----------+
 *     Folios:   |   |   |     |     |   |          |
 *               +---+---+-----+-----+---+----------+
 *
 *               +------+------+     +----+----+
 *     Upload:   |      |      |.....|    |    |
 *  (Stream 0)   +------+------+     +----+----+
 *
 *               +------+------+------+------+------+
 *      Cache:   |      |      |      |      |      |
 *  (Stream 1)   +------+------+------+------+------+
 *
 * Where we have a sequence of folios of varying sizes that we need to overlay
 * with multiple parallel streams of I/O requests, where the I/O requests in a
 * stream may also be of various sizes (in cifs, for example, the sizes are
 * negotiated with the server; in something like ceph, they may represent the
 * sizes of storage objects).
 *
 * The sequence in each stream may contain gaps, and noncontiguous subrequests
 * may be glued together into single vectored write RPCs.
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include "internal.h"

/*
 * Kill all dirty folios in the event of an unrecoverable error, starting with
 * a locked folio we've already obtained from writeback_iter().
 */
static void netfs_kill_dirty_pages(struct address_space *mapping,
				   struct writeback_control *wbc,
				   struct folio *folio)
{
	int error = 0;

	do {
		enum netfs_folio_trace why = netfs_folio_trace_kill;
		struct netfs_group *group = NULL;
		struct netfs_folio *finfo = NULL;
		void *priv;

		priv = folio_detach_private(folio);
		if (priv) {
			finfo = __netfs_folio_info(priv);
			if (finfo) {
				/* Kill folio from streaming write. */
				group = finfo->netfs_group;
				why = netfs_folio_trace_kill_s;
			} else {
				group = priv;
				if (group == NETFS_FOLIO_COPY_TO_CACHE) {
					/* Kill copy-to-cache folio */
					why = netfs_folio_trace_kill_cc;
					group = NULL;
				} else {
					/* Kill folio with group */
					why = netfs_folio_trace_kill_g;
				}
			}
		}

		trace_netfs_folio(folio, why);

		folio_start_writeback(folio);
		folio_unlock(folio);
		folio_end_writeback(folio);

		netfs_put_group(group);
		kfree(finfo);

	} while ((folio = writeback_iter(mapping, wbc, folio, &error)));
}

/*
 * Create a write request and set it up appropriately for the origin type.
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin)
{
	struct netfs_io_request *wreq;
	struct netfs_inode *ictx;
	bool is_buffered = (origin == NETFS_WRITEBACK ||
			    origin == NETFS_WRITETHROUGH);

	wreq = netfs_alloc_request(mapping, file, start, 0, origin);
	if (IS_ERR(wreq))
		return wreq;

	_enter("R=%x", wreq->debug_id);

	ictx = netfs_inode(wreq->inode);
	if (is_buffered && netfs_is_cache_enabled(ictx))
		fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));

	wreq->contiguity = wreq->start;
	wreq->cleaned_to = wreq->start;
	INIT_WORK(&wreq->work, netfs_write_collection_worker);

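	/* Stream 0 carries the writes to be uploaded to the server. */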
	wreq->io_streams[0].stream_nr = 0;
	wreq->io_streams[0].source = NETFS_UPLOAD_TO_SERVER;
	wreq->io_streams[0].prepare_write = ictx->ops->prepare_write;
	wreq->io_streams[0].issue_write = ictx->ops->issue_write;
	wreq->io_streams[0].collected_to = start;
	wreq->io_streams[0].transferred = LONG_MAX;

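	/* Stream 1 copies data to the local cache, but only becomes active if
	 * cache resources were obtained above.
	 */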
	wreq->io_streams[1].stream_nr = 1;
	wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE;
	wreq->io_streams[1].collected_to = start;
	wreq->io_streams[1].transferred = LONG_MAX;
	if (fscache_resources_valid(&wreq->cache_resources)) {
		wreq->io_streams[1].avail = true;
		wreq->io_streams[1].active = true;
		wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq;
		wreq->io_streams[1].issue_write = wreq->cache_resources.ops->issue_write;
	}

	return wreq;
}

/**
 * netfs_prepare_write_failed - Note write preparation failed
 * @subreq: The subrequest to mark
 *
 * Mark a subrequest to note that preparation for write failed.
 */
void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq)
{
	__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
	trace_netfs_sreq(subreq, netfs_sreq_trace_prep_failed);
}
EXPORT_SYMBOL(netfs_prepare_write_failed);
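
/*
 * Illustrative sketch (not part of this file): a filesystem's ->prepare_write()
 * hook, called from netfs_prepare_write() below, might clamp the subrequest to
 * what a single RPC can carry and flag failure roughly like this, where
 * my_fs_reserve_credits() and MY_FS_MAX_RPC_SIZE are hypothetical:
 *
 *	static void my_fs_prepare_write(struct netfs_io_subrequest *subreq)
 *	{
 *		// Limit the subrequest to one RPC's worth of data.
 *		subreq->max_len = min_t(size_t, subreq->max_len,
 *					MY_FS_MAX_RPC_SIZE);
 *
 *		// If no credits can be reserved, mark preparation as failed;
 *		// netfs_do_issue_write() will then terminate the subrequest
 *		// instead of issuing it.
 *		if (my_fs_reserve_credits(subreq) < 0)
 *			netfs_prepare_write_failed(subreq);
 *	}
 */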

/*
 * Prepare a write subrequest. We need to allocate a new subrequest
 * if we don't have one.
 */
static void netfs_prepare_write(struct netfs_io_request *wreq,
				struct netfs_io_stream *stream,
				loff_t start)
{
	struct netfs_io_subrequest *subreq;

	subreq = netfs_alloc_subrequest(wreq);
	subreq->source = stream->source;
	subreq->start = start;
	subreq->max_len = ULONG_MAX;
	subreq->max_nr_segs = INT_MAX;
	subreq->stream_nr = stream->stream_nr;

	_enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);

	trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
			     refcount_read(&subreq->ref),
			     netfs_sreq_trace_new);

	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);

	switch (stream->source) {
	case NETFS_UPLOAD_TO_SERVER:
		netfs_stat(&netfs_n_wh_upload);
		subreq->max_len = wreq->wsize;
		break;
	case NETFS_WRITE_TO_CACHE:
		netfs_stat(&netfs_n_wh_write);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (stream->prepare_write)
		stream->prepare_write(subreq);

	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);

	/* We add to the end of the list whilst the collector may be walking
	 * the list. The collector only walks forwards and uses the lock to
	 * remove entries from the front.
	 */
	spin_lock(&wreq->lock);
	list_add_tail(&subreq->rreq_link, &stream->subrequests);
	if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
		stream->front = subreq;
		if (!stream->active) {
			stream->collected_to = stream->front->start;
			/* Write list pointers before active flag */
			smp_store_release(&stream->active, true);
		}
	}

	spin_unlock(&wreq->lock);

	stream->construct = subreq;
}

/*
 * Set the I/O iterator for the filesystem/cache to use and dispatch the I/O
 * operation. The operation may be asynchronous and should call
 * netfs_write_subrequest_terminated() when complete.
 */
static void netfs_do_issue_write(struct netfs_io_stream *stream,
				 struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *wreq = subreq->rreq;

	_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);

	if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
		return netfs_write_subrequest_terminated(subreq, subreq->error, false);

	// TODO: Use encrypted buffer
	if (test_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags)) {
		subreq->io_iter = wreq->io_iter;
		iov_iter_advance(&subreq->io_iter,
				 subreq->start + subreq->transferred - wreq->start);
		iov_iter_truncate(&subreq->io_iter,
				  subreq->len - subreq->transferred);
	} else {
		iov_iter_xarray(&subreq->io_iter, ITER_SOURCE, &wreq->mapping->i_pages,
				subreq->start + subreq->transferred,
				subreq->len - subreq->transferred);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	stream->issue_write(subreq);
}

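/*
 * Re-mark a subrequest as being in progress and pass it back to the filesystem
 * or cache to be issued again (e.g. when a failed write is being retried).
 */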
void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq)
{
	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	netfs_do_issue_write(stream, subreq);
}

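/*
 * Issue the subrequest, if any, that is currently under construction in a
 * stream, having noted how far submission has now progressed on the request.
 */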
static void netfs_issue_write(struct netfs_io_request *wreq,
			      struct netfs_io_stream *stream)
{
	struct netfs_io_subrequest *subreq = stream->construct;

	if (!subreq)
		return;
	stream->construct = NULL;

	if (subreq->start + subreq->len > wreq->start + wreq->submitted)
		WRITE_ONCE(wreq->submitted, subreq->start + subreq->len - wreq->start);
	netfs_do_issue_write(stream, subreq);
}
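
/*
 * Illustrative sketch (not part of this file): the ->issue_write() hook that
 * netfs_do_issue_write() invokes above is expected to push the contents of
 * subreq->io_iter to the backing store and then report the outcome via
 * netfs_write_subrequest_terminated(). Assuming a hypothetical transport
 * helper my_fs_send(), it might look roughly like:
 *
 *	static void my_fs_issue_write(struct netfs_io_subrequest *subreq)
 *	{
 *		// Send the data in subreq->io_iter to the server, landing at
 *		// file position subreq->start + subreq->transferred.
 *		ssize_t ret = my_fs_send(subreq->rreq->netfs_priv,
 *					 &subreq->io_iter,
 *					 subreq->start + subreq->transferred);
 *
 *		// Report the number of bytes written or a negative error.
 *		netfs_write_subrequest_terminated(subreq, ret, false);
 *	}
 */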

/*
 * Add data to the write subrequest, dispatching each as we fill it up or if it
 * is discontiguous with the previous. We only fill one part at a time so that
 * we can avoid overrunning the credits obtained (cifs) and try to parallelise
 * content-crypto preparation with network writes.
 */
int netfs_advance_write(struct netfs_io_request *wreq,
			struct netfs_io_stream *stream,
			loff_t start, size_t len, bool to_eof)
{
	struct netfs_io_subrequest *subreq = stream->construct;
	size_t part;

	if (!stream->avail) {
		_leave("no write");
		return len;
	}

	_enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);

	if (subreq && start != subreq->start + subreq->len) {
		netfs_issue_write(wreq, stream);
		subreq = NULL;
	}

	if (!stream->construct)
		netfs_prepare_write(wreq, stream, start);
	subreq = stream->construct;

	part = min(subreq->max_len - subreq->len, len);
	_debug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
	subreq->len += part;
	subreq->nr_segs++;

	if (subreq->len >= subreq->max_len ||
	    subreq->nr_segs >= subreq->max_nr_segs ||
	    to_eof) {
		netfs_issue_write(wreq, stream);
		subreq = NULL;
	}

	return part;
}

/*
 * Write some of a pending folio's data back to the server.
 */
static int netfs_write_folio(struct netfs_io_request *wreq,
			     struct writeback_control *wbc,
			     struct folio *folio)
{
	struct netfs_io_stream *upload = &wreq->io_streams[0];
	struct netfs_io_stream *cache = &wreq->io_streams[1];
	struct netfs_io_stream *stream;
	struct netfs_group *fgroup; /* TODO: Use this with ceph */
	struct netfs_folio *finfo;
	size_t fsize = folio_size(folio), flen = fsize, foff = 0;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false, streamw = false;
	bool debug = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_start_writeback(folio);
		folio_unlock(folio);
		wreq->nr_group_rel += netfs_folio_written_back(folio);
		netfs_put_group_many(wreq->group, wreq->nr_group_rel);
		wreq->nr_group_rel = 0;
		return 0;
	}

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

	fgroup = netfs_folio_group(folio);
	finfo = netfs_folio_info(folio);
	if (finfo) {
		foff = finfo->dirty_offset;
		flen = foff + finfo->dirty_len;
		streamw = true;
	}

	if (wreq->origin == NETFS_WRITETHROUGH) {
		to_eof = false;
		if (flen > i_size - fpos)
			flen = i_size - fpos;
	} else if (flen > i_size - fpos) {
		flen = i_size - fpos;
		if (!streamw)
			folio_zero_segment(folio, flen, fsize);
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}
	flen -= foff;

	_debug("folio %zx %zx %zx", foff, flen, fsize);

	/* Deal with discontinuities in the stream of dirty pages. These can
	 * arise from a number of sources:
	 *
	 * (1) Intervening non-dirty pages from random-access writes, multiple
	 *     flushers writing back different parts simultaneously and manual
	 *     syncing.
	 *
	 * (2) Partially-written pages from write-streaming.
	 *
	 * (3) Pages that belong to a different write-back group (eg. Ceph
	 *     snapshots).
	 *
	 * (4) Actually-clean pages that were marked for write to the cache
	 *     when they were read. Note that these appear as a special
	 *     write-back group.
	 */
	if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
		netfs_issue_write(wreq, upload);
	} else if (fgroup != wreq->group) {
		/* We can't write this page to the server yet. */
		kdebug("wrong group");
		folio_redirty_for_writepage(wbc, folio);
		folio_unlock(folio);
		netfs_issue_write(wreq, upload);
		netfs_issue_write(wreq, cache);
		return 0;
	}

	if (foff > 0)
		netfs_issue_write(wreq, upload);
	if (streamw)
		netfs_issue_write(wreq, cache);

	/* Flip the page to the writeback state and unlock. If we're called
	 * from write-through, then the page has already been put into the wb
	 * state.
	 */
	if (wreq->origin == NETFS_WRITEBACK)
		folio_start_writeback(folio);
	folio_unlock(folio);

	if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
		if (!fscache_resources_valid(&wreq->cache_resources)) {
			trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
			netfs_issue_write(wreq, upload);
			netfs_folio_written_back(folio);
			return 0;
		}
		trace_netfs_folio(folio, netfs_folio_trace_store_copy);
	} else if (!upload->construct) {
		trace_netfs_folio(folio, netfs_folio_trace_store);
	} else {
		trace_netfs_folio(folio, netfs_folio_trace_store_plus);
	}

	/* Move the submission point forward to allow for write-streaming data
	 * not starting at the front of the page. We don't do write-streaming
	 * with the cache as the cache requires DIO alignment.
	 *
	 * Also skip uploading for data that's been read and just needs copying
	 * to the cache.
	 */
	for (int s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		stream->submit_max_len = fsize;
		stream->submit_off = foff;
		stream->submit_len = flen;
		if ((stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
		    (stream->source == NETFS_UPLOAD_TO_SERVER &&
		     fgroup == NETFS_FOLIO_COPY_TO_CACHE)) {
			stream->submit_off = UINT_MAX;
			stream->submit_len = 0;
			stream->submit_max_len = 0;
		}
	}

	/* Attach the folio to one or more subrequests. For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	for (;;) {
		ssize_t part;
		size_t lowest_off = ULONG_MAX;
		int choose_s = -1;

		/* Always add to the lowest-submitted stream first. */
		for (int s = 0; s < NR_IO_STREAMS; s++) {
			stream = &wreq->io_streams[s];
			if (stream->submit_len > 0 &&
			    stream->submit_off < lowest_off) {
				lowest_off = stream->submit_off;
				choose_s = s;
			}
		}

		if (choose_s < 0)
			break;
		stream = &wreq->io_streams[choose_s];

		part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
					   stream->submit_len, to_eof);
		atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
		stream->submit_off += part;
		stream->submit_max_len -= part;
		if (part > stream->submit_len)
			stream->submit_len = 0;
		else
			stream->submit_len -= part;
		if (part > 0)
			debug = true;
	}

	atomic64_set(&wreq->issued_to, fpos + fsize);

	if (!debug)
		kdebug("R=%x: No submit", wreq->debug_id);

	if (foff + flen < fsize)
		for (int s = 0; s < NR_IO_STREAMS; s++)
			netfs_issue_write(wreq, &wreq->io_streams[s]);

	_leave(" = 0");
	return 0;
}

/*
 * Write some of the pending data back to the server
 */
int netfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct netfs_inode *ictx = netfs_inode(mapping->host);
	struct netfs_io_request *wreq = NULL;
	struct folio *folio;
	int error = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		mutex_lock(&ictx->wb_lock);
	else if (!mutex_trylock(&ictx->wb_lock))
		return 0;

	/* Need the first folio to be able to set up the op. */
	folio = writeback_iter(mapping, wbc, NULL, &error);
	if (!folio)
		goto out;

	wreq = netfs_create_write_req(mapping, NULL, folio_pos(folio), NETFS_WRITEBACK);
	if (IS_ERR(wreq)) {
		error = PTR_ERR(wreq);
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_writeback);
	netfs_stat(&netfs_n_wh_writepages);

	do {
		_debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);

		/* It appears we don't have to handle cyclic writeback wrapping. */
		WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);

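		/* Switch on uploading upon seeing the first folio that isn't
		 * merely a copy-to-cache candidate, and give the filesystem a
		 * chance to set the request up.
		 */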
		if (netfs_folio_group(folio) != NETFS_FOLIO_COPY_TO_CACHE &&
		    unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) {
			set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
			wreq->netfs_ops->begin_writeback(wreq);
		}

		error = netfs_write_folio(wreq, wbc, folio);
		if (error < 0)
			break;
	} while ((folio = writeback_iter(mapping, wbc, folio, &error)));

	for (int s = 0; s < NR_IO_STREAMS; s++)
		netfs_issue_write(wreq, &wreq->io_streams[s]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	mutex_unlock(&ictx->wb_lock);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
	return error;

couldnt_start:
	netfs_kill_dirty_pages(mapping, wbc, folio);
out:
	mutex_unlock(&ictx->wb_lock);
	_leave(" = %d", error);
	return error;
}
EXPORT_SYMBOL(netfs_writepages);
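
/*
 * Usage sketch (illustrative, not part of this file): a network filesystem
 * that hands buffered writeback over to netfslib typically points its
 * address_space_operations directly at this function, e.g.:
 *
 *	const struct address_space_operations my_fs_aops = {
 *		.writepages	= netfs_writepages,
 *		// plus ->dirty_folio(), ->release_folio() and friends
 *	};
 *
 * where my_fs_aops is a hypothetical example.
 */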

/*
 * Begin a write operation for writing through the pagecache.
 */
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len)
{
	struct netfs_io_request *wreq = NULL;
	struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp));

	mutex_lock(&ictx->wb_lock);

	wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp,
				      iocb->ki_pos, NETFS_WRITETHROUGH);
	if (IS_ERR(wreq)) {
		mutex_unlock(&ictx->wb_lock);
		return wreq;
	}

	wreq->io_streams[0].avail = true;
	trace_netfs_write(wreq, netfs_write_trace_writethrough);
	return wreq;
}

/*
 * Advance the state of the write operation used when writing through the
 * pagecache. Data has been copied into the pagecache that we need to append
 * to the request. If we've added more than wsize then we need to create a new
 * subrequest.
 */
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache)
{
	_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
	       wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);

	if (!*writethrough_cache) {
		if (folio_test_dirty(folio))
			/* Sigh. mmap. */
			folio_clear_dirty_for_io(folio);

		/* We can make multiple writes to the folio... */
		folio_start_writeback(folio);
		if (wreq->len == 0)
			trace_netfs_folio(folio, netfs_folio_trace_wthru);
		else
			trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
		*writethrough_cache = folio;
	}

	wreq->len += copied;
	if (!to_page_end)
		return 0;

	*writethrough_cache = NULL;
	return netfs_write_folio(wreq, wbc, folio);
}

/*
 * End a write operation used when writing through the pagecache.
 */
int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			   struct folio *writethrough_cache)
{
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	int ret;

	_enter("R=%x", wreq->debug_id);

	if (writethrough_cache)
		netfs_write_folio(wreq, wbc, writethrough_cache);

	netfs_issue_write(wreq, &wreq->io_streams[0]);
	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	mutex_unlock(&ictx->wb_lock);

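	/* For an asynchronous kiocb, completion will be signalled through the
	 * iocb; otherwise, wait for the request to finish and pick up its
	 * error.
	 */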
	if (wreq->iocb) {
		ret = -EIOCBQUEUED;
	} else {
		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
		ret = wreq->error;
	}
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	return ret;
}

/*
 * Write data to the server without going through the pagecache and without
 * writing it to the local cache.
 */
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len)
{
	struct netfs_io_stream *upload = &wreq->io_streams[0];
	ssize_t part;
	loff_t start = wreq->start;
	int error = 0;

	_enter("%zx", len);

	if (wreq->origin == NETFS_DIO_WRITE)
		inode_dio_begin(wreq->inode);

	while (len) {
		// TODO: Prepare content encryption

		_debug("unbuffered %zx", len);
		part = netfs_advance_write(wreq, upload, start, len, false);
		start += part;
		len -= part;
		if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
			trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
			wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE);
		}
		if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
			break;
	}

	netfs_issue_write(wreq, upload);

	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
	if (list_empty(&upload->subrequests))
		netfs_wake_write_collector(wreq, false);

	_leave(" = %d", error);
	return error;
}