Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

afs: Stop implementing ->writepage()

We're trying to get rid of the ->writepage() hook[1]. Stop afs from using
it by unlocking the page and calling afs_writepages_region() rather than
folio_write_one().

A flag is passed to afs_writepages_region() to indicate that it should only
write a single region so that we don't flush the entire file in
->write_begin(), but do add other dirty data to the region being written to
try to reduce the number of RPC ops.

This requires ->migrate_folio() to be implemented, so point that at
filemap_migrate_folio() for files and also for symlinks and directories.

This can be tested by turning on the afs_folio_dirty tracepoint and then
doing something like:

xfs_io -c "w 2223 7000" -c "w 15000 22222" -c "w 23 7" /afs/my/test/foo

and then looking in the trace to see if the write at position 15000 gets
stored before page 0 gets dirtied for the write at position 23.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Christoph Hellwig <hch@lst.de>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-afs@lists.infradead.org
Link: https://lore.kernel.org/r/20221113162902.883850-1-hch@lst.de/ [1]
Link: https://lore.kernel.org/r/166876785552.222254.4403222906022558715.stgit@warthog.procyon.org.uk/ # v1

+50 -37
+1
fs/afs/dir.c
··· 77 77 .dirty_folio = afs_dir_dirty_folio, 78 78 .release_folio = afs_dir_release_folio, 79 79 .invalidate_folio = afs_dir_invalidate_folio, 80 + .migrate_folio = filemap_migrate_folio, 80 81 }; 81 82 82 83 const struct dentry_operations afs_fs_dentry_operations = {
+2 -1
fs/afs/file.c
··· 58 58 .invalidate_folio = afs_invalidate_folio, 59 59 .write_begin = afs_write_begin, 60 60 .write_end = afs_write_end, 61 - .writepage = afs_writepage, 62 61 .writepages = afs_writepages, 62 + .migrate_folio = filemap_migrate_folio, 63 63 }; 64 64 65 65 const struct address_space_operations afs_symlink_aops = { 66 66 .read_folio = afs_symlink_read_folio, 67 67 .release_folio = afs_release_folio, 68 68 .invalidate_folio = afs_invalidate_folio, 69 + .migrate_folio = filemap_migrate_folio, 69 70 }; 70 71 71 72 static const struct vm_operations_struct afs_vm_ops = {
+47 -36
fs/afs/write.c
··· 14 14 #include <linux/netfs.h> 15 15 #include "internal.h" 16 16 17 + static int afs_writepages_region(struct address_space *mapping, 18 + struct writeback_control *wbc, 19 + loff_t start, loff_t end, loff_t *_next, 20 + bool max_one_loop); 21 + 17 22 static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len, 18 23 loff_t i_size, bool caching); 19 24 ··· 42 37 { 43 38 } 44 39 #endif 40 + 41 + /* 42 + * Flush out a conflicting write. This may extend the write to the surrounding 43 + * pages if also dirty and contiguous to the conflicting region.. 44 + */ 45 + static int afs_flush_conflicting_write(struct address_space *mapping, 46 + struct folio *folio) 47 + { 48 + struct writeback_control wbc = { 49 + .sync_mode = WB_SYNC_ALL, 50 + .nr_to_write = LONG_MAX, 51 + .range_start = folio_pos(folio), 52 + .range_end = LLONG_MAX, 53 + }; 54 + loff_t next; 55 + 56 + return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX, 57 + &next, true); 58 + } 45 59 46 60 /* 47 61 * prepare to perform part of a write to a page ··· 104 80 105 81 if (folio_test_writeback(folio)) { 106 82 trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio); 107 - goto flush_conflicting_write; 83 + folio_unlock(folio); 84 + goto wait_for_writeback; 108 85 } 109 86 /* If the file is being filled locally, allow inter-write 110 87 * spaces to be merged into writes. If it's not, only write ··· 124 99 * flush the page out. 
125 100 */ 126 101 flush_conflicting_write: 127 - _debug("flush conflict"); 128 - ret = folio_write_one(folio); 102 + trace_afs_folio_dirty(vnode, tracepoint_string("confl"), folio); 103 + folio_unlock(folio); 104 + 105 + ret = afs_flush_conflicting_write(mapping, folio); 106 + if (ret < 0) 107 + goto error; 108 + 109 + wait_for_writeback: 110 + ret = folio_wait_writeback_killable(folio); 129 111 if (ret < 0) 130 112 goto error; 131 113 ··· 696 664 } 697 665 698 666 /* 699 - * write a page back to the server 700 - * - the caller locked the page for us 701 - */ 702 - int afs_writepage(struct page *subpage, struct writeback_control *wbc) 703 - { 704 - struct folio *folio = page_folio(subpage); 705 - ssize_t ret; 706 - loff_t start; 707 - 708 - _enter("{%lx},", folio_index(folio)); 709 - 710 - #ifdef CONFIG_AFS_FSCACHE 711 - folio_wait_fscache(folio); 712 - #endif 713 - 714 - start = folio_index(folio) * PAGE_SIZE; 715 - ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc, 716 - folio, start, LLONG_MAX - start); 717 - if (ret < 0) { 718 - _leave(" = %zd", ret); 719 - return ret; 720 - } 721 - 722 - _leave(" = 0"); 723 - return 0; 724 - } 725 - 726 - /* 727 667 * write a region of pages back to the server 728 668 */ 729 669 static int afs_writepages_region(struct address_space *mapping, 730 670 struct writeback_control *wbc, 731 - loff_t start, loff_t end, loff_t *_next) 671 + loff_t start, loff_t end, loff_t *_next, 672 + bool max_one_loop) 732 673 { 733 674 struct folio *folio; 734 675 struct page *head_page; ··· 780 775 781 776 start += ret; 782 777 778 + if (max_one_loop) 779 + break; 780 + 783 781 cond_resched(); 784 782 } while (wbc->nr_to_write > 0); 785 783 ··· 814 806 815 807 if (wbc->range_cyclic) { 816 808 start = mapping->writeback_index * PAGE_SIZE; 817 - ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next); 809 + ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, 810 + &next, false); 818 811 if (ret == 0) { 819 812 
mapping->writeback_index = next / PAGE_SIZE; 820 813 if (start > 0 && wbc->nr_to_write > 0) { 821 814 ret = afs_writepages_region(mapping, wbc, 0, 822 - start, &next); 815 + start, &next, false); 823 816 if (ret == 0) 824 817 mapping->writeback_index = 825 818 next / PAGE_SIZE; 826 819 } 827 820 } 828 821 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { 829 - ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next); 822 + ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, 823 + &next, false); 830 824 if (wbc->nr_to_write > 0 && ret == 0) 831 825 mapping->writeback_index = next / PAGE_SIZE; 832 826 } else { 833 827 ret = afs_writepages_region(mapping, wbc, 834 - wbc->range_start, wbc->range_end, &next); 828 + wbc->range_start, wbc->range_end, 829 + &next, false); 835 830 } 836 831 837 832 up_read(&vnode->validate_lock);