Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: merge folio_has_private()/filemap_release_folio() call pairs

Patch series "mm, netfs, fscache: Stop read optimisation when folio
removed from pagecache", v7.

This fixes an optimisation in fscache whereby we don't read from the cache
for a particular file until we know that there's data there that we don't
have in the pagecache. The problem is that I'm no longer using PG_fscache
(aka PG_private_2) to indicate that the page is cached and so I don't get
a notification when a cached page is dropped from the pagecache.

The first patch merges some folio_has_private() and
filemap_release_folio() pairs and introduces a helper,
folio_needs_release(), to indicate if a release is required.

The second patch is the actual fix. Following Willy's suggestions[1], it
adds an AS_RELEASE_ALWAYS flag to an address_space that will make
filemap_release_folio() always call ->release_folio(), even if
PG_private/PG_private_2 aren't set. folio_needs_release() is altered to
add a check for this.


This patch (of 2):

Make filemap_release_folio() check folio_has_private(). Then, in most
cases, where a call to folio_has_private() is immediately followed by a
call to filemap_release_folio(), we can get rid of the test in the pair.

There are a couple of sites in mm/vmscan.c where this can't so easily be
done. In shrink_folio_list(), there are actually three cases (something
different is done for incompletely invalidated buffers), but
filemap_release_folio() elides two of them.

In shrink_active_list(), we don't have the folio lock yet, so the
check allows us to avoid locking the page unnecessarily.

A wrapper function to check if a folio needs release is provided for those
places that still need to do it in the mm/ directory. This will acquire
additional parts to the condition in a future patch.

After this, the only remaining caller of folio_has_private() outside of
mm/ is a check in fuse.

Link: https://lkml.kernel.org/r/20230628104852.3391651-1-dhowells@redhat.com
Link: https://lkml.kernel.org/r/20230628104852.3391651-2-dhowells@redhat.com
Reported-by: Rohith Surabattula <rohiths.msft@gmail.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: David Howells <dhowells@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Steve French <sfrench@samba.org>
Cc: Shyam Prasad N <nspmangalore@gmail.com>
Cc: Rohith Surabattula <rohiths.msft@gmail.com>
Cc: Dave Wysochanski <dwysocha@redhat.com>
Cc: Dominique Martinet <asmadeus@codewreck.org>
Cc: Ilya Dryomov <idryomov@gmail.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Xiubo Li <xiubli@redhat.com>
Cc: Jingbo Xu <jefflexu@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by David Howells and committed by Andrew Morton.
0201ebf2 dba438bd

+27 -29
+4 -8
fs/ext4/move_extent.c
··· 340 340 ext4_double_up_write_data_sem(orig_inode, donor_inode); 341 341 goto data_copy; 342 342 } 343 - if ((folio_has_private(folio[0]) && 344 - !filemap_release_folio(folio[0], 0)) || 345 - (folio_has_private(folio[1]) && 346 - !filemap_release_folio(folio[1], 0))) { 343 + if (!filemap_release_folio(folio[0], 0) || 344 + !filemap_release_folio(folio[1], 0)) { 347 345 *err = -EBUSY; 348 346 goto drop_data_sem; 349 347 } ··· 360 362 361 363 /* At this point all buffers in range are uptodate, old mapping layout 362 364 * is no longer required, try to drop it now. */ 363 - if ((folio_has_private(folio[0]) && 364 - !filemap_release_folio(folio[0], 0)) || 365 - (folio_has_private(folio[1]) && 366 - !filemap_release_folio(folio[1], 0))) { 365 + if (!filemap_release_folio(folio[0], 0) || 366 + !filemap_release_folio(folio[1], 0)) { 367 367 *err = -EBUSY; 368 368 goto unlock_folios; 369 369 }
+1 -2
fs/splice.c
··· 83 83 */ 84 84 folio_wait_writeback(folio); 85 85 86 - if (folio_has_private(folio) && 87 - !filemap_release_folio(folio, GFP_KERNEL)) 86 + if (!filemap_release_folio(folio, GFP_KERNEL)) 88 87 goto out_unlock; 89 88 90 89 /*
+2
mm/filemap.c
··· 4073 4073 struct address_space * const mapping = folio->mapping; 4074 4074 4075 4075 BUG_ON(!folio_test_locked(folio)); 4076 + if (!folio_needs_release(folio)) 4077 + return true; 4076 4078 if (folio_test_writeback(folio)) 4077 4079 return false; 4078 4080
+1 -2
mm/huge_memory.c
··· 2697 2697 gfp = current_gfp_context(mapping_gfp_mask(mapping) & 2698 2698 GFP_RECLAIM_MASK); 2699 2699 2700 - if (folio_test_private(folio) && 2701 - !filemap_release_folio(folio, gfp)) { 2700 + if (!filemap_release_folio(folio, gfp)) { 2702 2701 ret = -EBUSY; 2703 2702 goto out; 2704 2703 }
+8
mm/internal.h
··· 176 176 set_page_count(page, 1); 177 177 } 178 178 179 + /* 180 + * Return true if a folio needs ->release_folio() calling upon it. 181 + */ 182 + static inline bool folio_needs_release(struct folio *folio) 183 + { 184 + return folio_has_private(folio); 185 + } 186 + 179 187 extern unsigned long highest_memmap_pfn; 180 188 181 189 /*
+1 -2
mm/khugepaged.c
··· 2078 2078 goto out_unlock; 2079 2079 } 2080 2080 2081 - if (folio_has_private(folio) && 2082 - !filemap_release_folio(folio, GFP_KERNEL)) { 2081 + if (!filemap_release_folio(folio, GFP_KERNEL)) { 2083 2082 result = SCAN_PAGE_HAS_PRIVATE; 2084 2083 folio_putback_lru(folio); 2085 2084 goto out_unlock;
+3 -5
mm/memory-failure.c
··· 936 936 struct folio *folio = page_folio(p); 937 937 int err = mapping->a_ops->error_remove_page(mapping, p); 938 938 939 - if (err != 0) { 939 + if (err != 0) 940 940 pr_info("%#lx: Failed to punch page: %d\n", pfn, err); 941 - } else if (folio_has_private(folio) && 942 - !filemap_release_folio(folio, GFP_NOIO)) { 941 + else if (!filemap_release_folio(folio, GFP_NOIO)) 943 942 pr_info("%#lx: failed to release buffers\n", pfn); 944 - } else { 943 + else 945 944 ret = MF_RECOVERED; 946 - } 947 945 } else { 948 946 /* 949 947 * If the file system doesn't support it just invalidate
+1 -2
mm/migrate.c
··· 922 922 * Buffers may be managed in a filesystem specific way. 923 923 * We must have no buffers or drop them. 924 924 */ 925 - if (folio_test_private(src) && 926 - !filemap_release_folio(src, GFP_KERNEL)) 925 + if (!filemap_release_folio(src, GFP_KERNEL)) 927 926 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY; 928 927 929 928 return migrate_folio(mapping, dst, src, mode);
+2 -4
mm/truncate.c
··· 19 19 #include <linux/highmem.h> 20 20 #include <linux/pagevec.h> 21 21 #include <linux/task_io_accounting_ops.h> 22 - #include <linux/buffer_head.h> /* grr. try_to_release_page */ 23 22 #include <linux/shmem_fs.h> 24 23 #include <linux/rmap.h> 25 24 #include "internal.h" ··· 275 276 if (folio_ref_count(folio) > 276 277 folio_nr_pages(folio) + folio_has_private(folio) + 1) 277 278 return 0; 278 - if (folio_has_private(folio) && !filemap_release_folio(folio, 0)) 279 + if (!filemap_release_folio(folio, 0)) 279 280 return 0; 280 281 281 282 return remove_mapping(mapping, folio); ··· 572 573 if (folio->mapping != mapping) 573 574 return 0; 574 575 575 - if (folio_has_private(folio) && 576 - !filemap_release_folio(folio, GFP_KERNEL)) 576 + if (!filemap_release_folio(folio, GFP_KERNEL)) 577 577 return 0; 578 578 579 579 spin_lock(&mapping->host->i_lock);
+4 -4
mm/vmscan.c
··· 2064 2064 * (refcount == 1) it can be freed. Otherwise, leave 2065 2065 * the folio on the LRU so it is swappable. 2066 2066 */ 2067 - if (folio_has_private(folio)) { 2067 + if (folio_needs_release(folio)) { 2068 2068 if (!filemap_release_folio(folio, sc->gfp_mask)) 2069 2069 goto activate_locked; 2070 2070 if (!mapping && folio_ref_count(folio) == 1) { ··· 2729 2729 } 2730 2730 2731 2731 if (unlikely(buffer_heads_over_limit)) { 2732 - if (folio_test_private(folio) && folio_trylock(folio)) { 2733 - if (folio_test_private(folio)) 2734 - filemap_release_folio(folio, 0); 2732 + if (folio_needs_release(folio) && 2733 + folio_trylock(folio)) { 2734 + filemap_release_folio(folio, 0); 2735 2735 folio_unlock(folio); 2736 2736 } 2737 2737 }