// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include "internal.h"

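/*
 * Dirty-region bookkeeping: each dirty page carries a private word, managed
 * with attach_page_private()/detach_page_private(), into which the helpers in
 * internal.h (afs_page_dirty(), afs_page_dirty_from(), afs_page_dirty_to())
 * pack the byte range of the page that has actually been modified.  This
 * allows writeback to send only the modified bytes of a partially-dirty page
 * to the server.
 */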
/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	ret = netfs_write_begin(file, mapping, pos, len, flags, &page, fsdata,
				&afs_req_ops, NULL);
	if (ret < 0)
		return ret;

	index = page->index;
	from = pos - index * PAGE_SIZE;
	to = from + len;

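/* Worked example of the merge test below: if the page already carries a
 * dirty region of f=0 to t=1000 and this write covers from=4000 to to=4096,
 * then from > t and, unless the file is being filled locally
 * (AFS_VNODE_NEW_CONTENT), the existing region must be flushed first -
 * merging would otherwise make writeback send the never-written gap between
 * bytes 1000 and 4000 to the server.
 */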
try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		ASSERTCMP(f, <=, t);

		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = pos & (thp_size(page) - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	if (copied == 0)
		goto out;

	maybe_i_size = pos + copied;

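	/* Sample i_size locklessly first, then retest under the cb_lock
	 * seqlock so that a racing extension cannot be lost and i_size can
	 * never move backwards: only the holder of the lock writes the
	 * larger value.
	 */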
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	ASSERT(PageUptodate(page));

	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(page, f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
	} else {
		priv = afs_page_dirty(page, from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
	}

	if (set_page_dirty(page))
		_debug("dirtied %lx", page->index);

out:
	unlock_page(page);
	put_page(page);
	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned int loop, psize;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	pagevec_init(&pv);

	do {
		_debug("kill %llx @%llx", len, start);

		pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
					      PAGEVEC_SIZE, pv.pages);
		if (pv.nr == 0)
			break;

		for (loop = 0; loop < pv.nr; loop++) {
			struct page *page = pv.pages[loop];

			if (page->index * PAGE_SIZE >= start + len)
				break;

			psize = thp_size(page);
			start += psize;
			len -= psize;
			ClearPageUptodate(page);
			end_page_writeback(page);
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (len > 0);

	_leave("");
}

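/*
 * Note the split between the two failure handlers: afs_kill_pages() above is
 * used for errors where retrying cannot help, so the pagecache copies are
 * discarded and marked !uptodate, whereas afs_redirty_pages() below is used
 * for errors that may be transient (no space, key expired), putting the pages
 * back on the dirty list to be attempted again.
 */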
/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned int loop, psize;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	pagevec_init(&pv);

	do {
		_debug("redirty %llx @%llx", len, start);

		pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
					      PAGEVEC_SIZE, pv.pages);
		if (pv.nr == 0)
			break;

		for (loop = 0; loop < pv.nr; loop++) {
			struct page *page = pv.pages[loop];

			if (page->index * PAGE_SIZE >= start + len)
				break;

			psize = thp_size(page);
			start += psize;
			len -= psize;
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
		}

		__pagevec_release(&pv);
	} while (len > 0);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct page *page;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

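	/* This walk is done under the RCU read lock only: the pages in the
	 * range should still be present since reclaim and truncation both
	 * have to wait for the PG_writeback state that is cleared below.
	 */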
	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, page, end) {
		if (!PageWriteback(page)) {
			kdebug("bad %x @%llx page %lx %lx", len, start, page->index, end);
			ASSERT(PageWriteback(page));
		}

		trace_afs_page_dirty(vnode, tracepoint_string("clear"), page);
		detach_page_private(page);
		page_endio(page, true, 0);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cache the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	/* If no valid key was found, report the last validation error (or
	 * -ENOKEY) rather than success, as the caller will dereference *_wbk.
	 */
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

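/*
 * The operation descriptor above provides both wire implementations of the
 * store: the operation core issues either the AFS or the YFS variant of the
 * StoreData RPC according to what the fileserver speaks, then invokes
 * ->success() to commit the returned file status and account the bytes
 * stored.
 */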
/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter), i_size;
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	i_size = i_size_read(&vnode->vfs_inode);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.size = size;
	op->store.i_size = max(pos + size, i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

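	/* Issue the RPC and, if the server rejects the key (permission or
	 * key-lifetime errors), rotate to the next cached writeback key and
	 * reissue until one is accepted or the list is exhausted.
	 */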
try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct page *page;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, page, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, page))
				continue;
			if (xa_is_value(page))
				break;
			if (page->index != index)
				break;

			if (!page_cache_get_speculative(page)) {
				xas_reset(&xas);
				continue;
			}

			/* Has the page moved or been split? */
			if (unlikely(page != xas_reload(&xas)))
				break;

			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			psize = thp_size(page);
			priv = page_private(page);
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
			if (f != 0 && !new_content) {
				unlock_page(page);
				break;
			}

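			/* Accumulate the extent to write: t bytes of this
			 * page are dirty, and filler holds the clean tail of
			 * the previous page, which only gets counted once a
			 * further page is merged in after it.
			 */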
			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += thp_nr_pages(page);
			if (!pagevec_add(&pvec, page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!pagevec_count(&pvec))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			page = pvec.pages[i];
			trace_afs_page_dirty(vnode, tracepoint_string("store+"), page);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();

			*_count -= thp_nr_pages(page);
			unlock_page(page);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
					       struct writeback_control *wbc,
					       struct page *page,
					       loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->vfs_inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", page->index, start, end);

	if (test_set_page_writeback(page))
		BUG();

	count -= thp_nr_pages(page);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = page_private(page);
	offset = afs_page_dirty_from(page, priv);
	to = afs_page_dirty_to(page, priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"), page);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == thp_size(page) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(page);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	ssize_t ret;
	loff_t start;

	_enter("{%lx},", page->index);

	start = page->index * PAGE_SIZE;
	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      start, LLONG_MAX - start);
	if (ret < 0) {
		_leave(" = %zd", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next)
{
	struct page *page;
	ssize_t ret;
	int n;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		start = (loff_t)page->index * PAGE_SIZE; /* May regress with THPs */

		_debug("wback %lx", page->index);

		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			ret = lock_page_killable(page);
			if (ret < 0) {
				put_page(page);
				return ret;
			}
		} else {
			if (!trylock_page(page)) {
				put_page(page);
				return 0;
			}
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			start += thp_size(page);
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, start, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %zd", ret);
			return ret;
		}

		start += ret; /* ret is a count of bytes, not pages */

		cond_resched();
	} while (wbc->nr_to_write > 0);

	*_next = start;
	_leave(" = 0 [%llx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

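	/* For cyclic writeback, resume from where the previous pass stopped
	 * and, if the end of the file is reached with write quota remaining,
	 * wrap around and sweep the region before the resume point.
	 */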
	if (wbc->range_cyclic) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next / PAGE_SIZE;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next / PAGE_SIZE;
	} else {
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = thp_head(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    wait_on_page_fscache_killable(page) < 0)
		goto out;
#endif

	if (wait_on_page_writeback_killable(page))
		goto out;

	if (lock_page_killable(page) < 0)
		goto out;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	if (wait_on_page_writeback_killable(page) < 0) {
		unlock_page(page);
		goto out;
	}

	priv = afs_page_dirty(page, 0, thp_size(page));
	priv = afs_page_dirty_mmapped(priv);
	if (PagePrivate(page)) {
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite+"), page);
	} else {
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), page);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	/* Drop the freeze protection taken by sb_start_pagefault() on all
	 * exit paths, including the killable-wait failures above.
	 */
	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * Prune the keys cached for writeback.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	/* Unused keys were moved to a local graveyard list above so that
	 * afs_put_wb_key() is never called with wb_lock held.
	 */
	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = thp_size(page);
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
		}

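		/* Build a single-segment bvec iterator covering just the
		 * dirty part of the page so that only the modified bytes are
		 * sent in the StoreData RPC.
		 */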
		bv[0].bv_page = page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

		trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
		ret = afs_store_data(vnode, &iter, (loff_t)page->index * PAGE_SIZE,
				     true);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
	detach_page_private(page);
	wait_on_page_fscache(page);
	return ret;
}