Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'folio-5.16b' of git://git.infradead.org/users/willy/pagecache

Pull folio fixes from Matthew Wilcox:
"In the course of preparing the folio changes for iomap for the next merge
window, we discovered some problems that would be nice to address now:

- Renaming multi-page folios to large folios.

mapping_multi_page_folio_support() is just a little too long, so we
settled on mapping_large_folio_support(). That meant renaming, eg
folio_test_multi() to folio_test_large().

Rename AS_THP_SUPPORT to match

- I hadn't included folio wrappers for zero_user_segments(), etc.
Also, large folio support (formerly called multi-page folio support) is
now independent of CONFIG_TRANSPARENT_HUGEPAGE, so machines with HIGHMEM
always need to fall back to the out-of-line zero_user_segments().

Remove FS_THP_SUPPORT to match

- The build bots finally got round to telling me that I missed a
couple of architectures when adding flush_dcache_folio(). Christoph
suggested that we just add linux/cacheflush.h and not rely on
asm-generic/cacheflush.h"

* tag 'folio-5.16b' of git://git.infradead.org/users/willy/pagecache:
mm: Add functions to zero portions of a folio
fs: Rename AS_THP_SUPPORT and mapping_thp_support
fs: Remove FS_THP_SUPPORT
mm: Remove folio_test_single
mm: Rename folio_test_multi to folio_test_large
Add linux/cacheflush.h

+92 -43
-1
arch/arc/include/asm/cacheflush.h
··· 36 36 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 37 37 38 38 void flush_dcache_page(struct page *page); 39 - void flush_dcache_folio(struct folio *folio); 40 39 41 40 void dma_cache_wback_inv(phys_addr_t start, unsigned long sz); 42 41 void dma_cache_inv(phys_addr_t start, unsigned long sz);
-1
arch/arm/include/asm/cacheflush.h
··· 290 290 */ 291 291 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 292 292 extern void flush_dcache_page(struct page *); 293 - void flush_dcache_folio(struct folio *folio); 294 293 295 294 #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1 296 295 static inline void flush_kernel_vmap_range(void *addr, int size)
-1
arch/m68k/include/asm/cacheflush_mm.h
··· 250 250 251 251 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 252 252 #define flush_dcache_page(page) __flush_page_to_ram(page_address(page)) 253 - void flush_dcache_folio(struct folio *folio); 254 253 #define flush_dcache_mmap_lock(mapping) do { } while (0) 255 254 #define flush_dcache_mmap_unlock(mapping) do { } while (0) 256 255 #define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
-2
arch/mips/include/asm/cacheflush.h
··· 61 61 SetPageDcacheDirty(page); 62 62 } 63 63 64 - void flush_dcache_folio(struct folio *folio); 65 - 66 64 #define flush_dcache_mmap_lock(mapping) do { } while (0) 67 65 #define flush_dcache_mmap_unlock(mapping) do { } while (0) 68 66
-1
arch/nds32/include/asm/cacheflush.h
··· 27 27 28 28 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 29 29 void flush_dcache_page(struct page *page); 30 - void flush_dcache_folio(struct folio *folio); 31 30 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 32 31 unsigned long vaddr, void *dst, void *src, int len); 33 32 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
-1
arch/nios2/include/asm/cacheflush.h
··· 29 29 unsigned long pfn); 30 30 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 31 31 void flush_dcache_page(struct page *page); 32 - void flush_dcache_folio(struct folio *folio); 33 32 34 33 extern void flush_icache_range(unsigned long start, unsigned long end); 35 34 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-1
arch/parisc/include/asm/cacheflush.h
··· 50 50 51 51 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 52 52 void flush_dcache_page(struct page *page); 53 - void flush_dcache_folio(struct folio *folio); 54 53 55 54 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) 56 55 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
-1
arch/sh/include/asm/cacheflush.h
··· 43 43 unsigned long start, unsigned long end); 44 44 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 45 45 void flush_dcache_page(struct page *page); 46 - void flush_dcache_folio(struct folio *folio); 47 46 extern void flush_icache_range(unsigned long start, unsigned long end); 48 47 #define flush_icache_user_range flush_icache_range 49 48 extern void flush_icache_page(struct vm_area_struct *vma,
-3
arch/xtensa/include/asm/cacheflush.h
··· 121 121 122 122 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 123 123 void flush_dcache_page(struct page *); 124 - void flush_dcache_folio(struct folio *); 125 124 126 125 void local_flush_cache_range(struct vm_area_struct *vma, 127 126 unsigned long start, unsigned long end); ··· 137 138 #define flush_cache_vunmap(start,end) do { } while (0) 138 139 139 140 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 140 - #define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 141 141 #define flush_dcache_page(page) do { } while (0) 142 - static inline void flush_dcache_folio(struct folio *folio) { } 143 142 144 143 #define flush_icache_range local_flush_icache_range 145 144 #define flush_cache_page(vma, addr, pfn) do { } while (0)
-2
fs/inode.c
··· 180 180 mapping->a_ops = &empty_aops; 181 181 mapping->host = inode; 182 182 mapping->flags = 0; 183 - if (sb->s_type->fs_flags & FS_THP_SUPPORT) 184 - __set_bit(AS_THP_SUPPORT, &mapping->flags); 185 183 mapping->wb_err = 0; 186 184 atomic_set(&mapping->i_mmap_writable, 0); 187 185 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
-6
include/asm-generic/cacheflush.h
··· 50 50 { 51 51 } 52 52 53 - static inline void flush_dcache_folio(struct folio *folio) { } 54 53 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 55 - #define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 56 - #endif 57 - 58 - #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 59 - void flush_dcache_folio(struct folio *folio); 60 54 #endif 61 55 62 56 #ifndef flush_dcache_mmap_lock
+18
include/linux/cacheflush.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_CACHEFLUSH_H 3 + #define _LINUX_CACHEFLUSH_H 4 + 5 + #include <asm/cacheflush.h> 6 + 7 + #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 8 + #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 9 + void flush_dcache_folio(struct folio *folio); 10 + #endif 11 + #else 12 + static inline void flush_dcache_folio(struct folio *folio) 13 + { 14 + } 15 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0 16 + #endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */ 17 + 18 + #endif /* _LINUX_CACHEFLUSH_H */
-1
include/linux/fs.h
··· 2518 2518 #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ 2519 2519 #define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ 2520 2520 #define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */ 2521 - #define FS_THP_SUPPORT 8192 /* Remove once all fs converted */ 2522 2521 #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ 2523 2522 int (*init_fs_context)(struct fs_context *); 2524 2523 const struct fs_parameter_spec *parameters;
+42 -5
include/linux/highmem.h
··· 5 5 #include <linux/fs.h> 6 6 #include <linux/kernel.h> 7 7 #include <linux/bug.h> 8 + #include <linux/cacheflush.h> 8 9 #include <linux/mm.h> 9 10 #include <linux/uaccess.h> 10 11 #include <linux/hardirq.h> 11 - 12 - #include <asm/cacheflush.h> 13 12 14 13 #include "highmem-internal.h" 15 14 ··· 230 231 * If we pass in a base or tail page, we can zero up to PAGE_SIZE. 231 232 * If we pass in a head page, we can zero up to the size of the compound page. 232 233 */ 233 - #if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 234 + #ifdef CONFIG_HIGHMEM 234 235 void zero_user_segments(struct page *page, unsigned start1, unsigned end1, 235 236 unsigned start2, unsigned end2); 236 - #else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ 237 + #else 237 238 static inline void zero_user_segments(struct page *page, 238 239 unsigned start1, unsigned end1, 239 240 unsigned start2, unsigned end2) ··· 253 254 for (i = 0; i < compound_nr(page); i++) 254 255 flush_dcache_page(page + i); 255 256 } 256 - #endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ 257 + #endif 257 258 258 259 static inline void zero_user_segment(struct page *page, 259 260 unsigned start, unsigned end) ··· 361 362 memset(addr + offset, 0, len); 362 363 flush_dcache_page(page); 363 364 kunmap_local(addr); 365 + } 366 + 367 + /** 368 + * folio_zero_segments() - Zero two byte ranges in a folio. 369 + * @folio: The folio to write to. 370 + * @start1: The first byte to zero. 371 + * @xend1: One more than the last byte in the first range. 372 + * @start2: The first byte to zero in the second range. 373 + * @xend2: One more than the last byte in the second range. 374 + */ 375 + static inline void folio_zero_segments(struct folio *folio, 376 + size_t start1, size_t xend1, size_t start2, size_t xend2) 377 + { 378 + zero_user_segments(&folio->page, start1, xend1, start2, xend2); 379 + } 380 + 381 + /** 382 + * folio_zero_segment() - Zero a byte range in a folio. 383 + * @folio: The folio to write to. 
384 + * @start: The first byte to zero. 385 + * @xend: One more than the last byte to zero. 386 + */ 387 + static inline void folio_zero_segment(struct folio *folio, 388 + size_t start, size_t xend) 389 + { 390 + zero_user_segments(&folio->page, start, xend, 0, 0); 391 + } 392 + 393 + /** 394 + * folio_zero_range() - Zero a byte range in a folio. 395 + * @folio: The folio to write to. 396 + * @start: The first byte to zero. 397 + * @length: The number of bytes to zero. 398 + */ 399 + static inline void folio_zero_range(struct folio *folio, 400 + size_t start, size_t length) 401 + { 402 + zero_user_segments(&folio->page, start, start + length, 0, 0); 364 403 } 365 404 366 405 #endif /* _LINUX_HIGHMEM_H */
+7 -7
include/linux/page-flags.h
··· 686 686 687 687 __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) 688 688 689 - /* Whether there are one or multiple pages in a folio */ 690 - static inline bool folio_test_single(struct folio *folio) 691 - { 692 - return !folio_test_head(folio); 693 - } 694 - 695 - static inline bool folio_test_multi(struct folio *folio) 689 + /** 690 + * folio_test_large() - Does this folio contain more than one page? 691 + * @folio: The folio to test. 692 + * 693 + * Return: True if the folio is larger than one page. 694 + */ 695 + static inline bool folio_test_large(struct folio *folio) 696 696 { 697 697 return folio_test_head(folio); 698 698 }
+21 -5
include/linux/pagemap.h
··· 84 84 AS_EXITING = 4, /* final truncate in progress */ 85 85 /* writeback related tags are not used */ 86 86 AS_NO_WRITEBACK_TAGS = 5, 87 - AS_THP_SUPPORT = 6, /* THPs supported */ 87 + AS_LARGE_FOLIO_SUPPORT = 6, 88 88 }; 89 89 90 90 /** ··· 176 176 m->gfp_mask = mask; 177 177 } 178 178 179 - static inline bool mapping_thp_support(struct address_space *mapping) 179 + /** 180 + * mapping_set_large_folios() - Indicate the file supports large folios. 181 + * @mapping: The file. 182 + * 183 + * The filesystem should call this function in its inode constructor to 184 + * indicate that the VFS can use large folios to cache the contents of 185 + * the file. 186 + * 187 + * Context: This should not be called while the inode is active as it 188 + * is non-atomic. 189 + */ 190 + static inline void mapping_set_large_folios(struct address_space *mapping) 180 191 { 181 - return test_bit(AS_THP_SUPPORT, &mapping->flags); 192 + __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); 193 + } 194 + 195 + static inline bool mapping_large_folio_support(struct address_space *mapping) 196 + { 197 + return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); 182 198 } 183 199 184 200 static inline int filemap_nr_thps(struct address_space *mapping) ··· 209 193 static inline void filemap_nr_thps_inc(struct address_space *mapping) 210 194 { 211 195 #ifdef CONFIG_READ_ONLY_THP_FOR_FS 212 - if (!mapping_thp_support(mapping)) 196 + if (!mapping_large_folio_support(mapping)) 213 197 atomic_inc(&mapping->nr_thps); 214 198 #else 215 199 WARN_ON_ONCE(1); ··· 219 203 static inline void filemap_nr_thps_dec(struct address_space *mapping) 220 204 { 221 205 #ifdef CONFIG_READ_ONLY_THP_FOR_FS 222 - if (!mapping_thp_support(mapping)) 206 + if (!mapping_large_folio_support(mapping)) 223 207 atomic_dec(&mapping->nr_thps); 224 208 #else 225 209 WARN_ON_ONCE(1);
-2
mm/highmem.c
··· 359 359 } 360 360 EXPORT_SYMBOL(kunmap_high); 361 361 362 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 363 362 void zero_user_segments(struct page *page, unsigned start1, unsigned end1, 364 363 unsigned start2, unsigned end2) 365 364 { ··· 415 416 BUG_ON((start1 | start2 | end1 | end2) != 0); 416 417 } 417 418 EXPORT_SYMBOL(zero_user_segments); 418 - #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 419 419 #endif /* CONFIG_HIGHMEM */ 420 420 421 421 #ifdef CONFIG_KMAP_LOCAL
+1 -1
mm/memcontrol.c
··· 5558 5558 5559 5559 VM_BUG_ON(from == to); 5560 5560 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 5561 - VM_BUG_ON(compound && !folio_test_multi(folio)); 5561 + VM_BUG_ON(compound && !folio_test_large(folio)); 5562 5562 5563 5563 /* 5564 5564 * Prevent mem_cgroup_migrate() from looking at
+2 -1
mm/shmem.c
··· 2303 2303 INIT_LIST_HEAD(&info->swaplist); 2304 2304 simple_xattrs_init(&info->xattrs); 2305 2305 cache_no_acl(inode); 2306 + mapping_set_large_folios(inode->i_mapping); 2306 2307 2307 2308 switch (mode & S_IFMT) { 2308 2309 default: ··· 3871 3870 .parameters = shmem_fs_parameters, 3872 3871 #endif 3873 3872 .kill_sb = kill_litter_super, 3874 - .fs_flags = FS_USERNS_MOUNT | FS_THP_SUPPORT, 3873 + .fs_flags = FS_USERNS_MOUNT, 3875 3874 }; 3876 3875 3877 3876 int __init shmem_init(void)
+1 -1
mm/util.c
··· 670 670 { 671 671 long i, nr; 672 672 673 - if (folio_test_single(folio)) 673 + if (!folio_test_large(folio)) 674 674 return atomic_read(&folio->_mapcount) >= 0; 675 675 if (atomic_read(folio_mapcount_ptr(folio)) >= 0) 676 676 return true;