Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'vfs-6.12.folio' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs

Pull vfs folio updates from Christian Brauner:
"This contains work to port write_begin and write_end to rely on folios
for various filesystems.

This converts ocfs2, vboxsf, orangefs, jffs2, hostfs, fuse, f2fs,
ecryptfs, ntfs3, nilfs2, reiserfs, minixfs, qnx6, sysv, ufs, and
squashfs.

After this series lands, a bunch of the filesystems in this list do not
mention struct page anymore"

* tag 'vfs-6.12.folio' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs: (61 commits)
Squashfs: Ensure all readahead pages have been used
Squashfs: Rewrite and update squashfs_readahead_fragment() to not use page->index
Squashfs: Update squashfs_readpage_block() to not use page->index
Squashfs: Update squashfs_readahead() to not use page->index
Squashfs: Update page_actor to not use page->index
jffs2: Use a folio in jffs2_garbage_collect_dnode()
jffs2: Convert jffs2_do_readpage_nolock to take a folio
buffer: Convert __block_write_begin() to take a folio
ocfs2: Convert ocfs2_write_zero_page to use a folio
fs: Convert aops->write_begin to take a folio
fs: Convert aops->write_end to take a folio
vboxsf: Use a folio in vboxsf_write_end()
orangefs: Convert orangefs_write_begin() to use a folio
orangefs: Convert orangefs_write_end() to use a folio
jffs2: Convert jffs2_write_begin() to use a folio
jffs2: Convert jffs2_write_end() to use a folio
hostfs: Convert hostfs_write_end() to use a folio
fuse: Convert fuse_write_begin() to use a folio
fuse: Convert fuse_write_end() to use a folio
f2fs: Convert f2fs_write_begin() to use a folio
...

+994 -1064
+3 -3
Documentation/filesystems/locking.rst
··· 251 251 void (*readahead)(struct readahead_control *); 252 252 int (*write_begin)(struct file *, struct address_space *mapping, 253 253 loff_t pos, unsigned len, 254 - struct page **pagep, void **fsdata); 254 + struct folio **foliop, void **fsdata); 255 255 int (*write_end)(struct file *, struct address_space *mapping, 256 256 loff_t pos, unsigned len, unsigned copied, 257 - struct page *page, void *fsdata); 257 + struct folio *folio, void *fsdata); 258 258 sector_t (*bmap)(struct address_space *, sector_t); 259 259 void (*invalidate_folio) (struct folio *, size_t start, size_t len); 260 260 bool (*release_folio)(struct folio *, gfp_t); ··· 280 280 writepages: 281 281 dirty_folio: maybe 282 282 readahead: yes, unlocks shared 283 - write_begin: locks the page exclusive 283 + write_begin: locks the folio exclusive 284 284 write_end: yes, unlocks exclusive 285 285 bmap: 286 286 invalidate_folio: yes exclusive
+6 -6
Documentation/filesystems/vfs.rst
··· 810 810 struct page **pagep, void **fsdata); 811 811 int (*write_end)(struct file *, struct address_space *mapping, 812 812 loff_t pos, unsigned len, unsigned copied, 813 - struct page *page, void *fsdata); 813 + struct folio *folio, void *fsdata); 814 814 sector_t (*bmap)(struct address_space *, sector_t); 815 815 void (*invalidate_folio) (struct folio *, size_t start, size_t len); 816 816 bool (*release_folio)(struct folio *, gfp_t); ··· 926 926 (if they haven't been read already) so that the updated blocks 927 927 can be written out properly. 928 928 929 - The filesystem must return the locked pagecache page for the 930 - specified offset, in ``*pagep``, for the caller to write into. 929 + The filesystem must return the locked pagecache folio for the 930 + specified offset, in ``*foliop``, for the caller to write into. 931 931 932 932 It must be able to cope with short writes (where the length 933 933 passed to write_begin is greater than the number of bytes copied 934 - into the page). 934 + into the folio). 935 935 936 936 A void * may be returned in fsdata, which then gets passed into 937 937 write_end. ··· 944 944 called. len is the original len passed to write_begin, and 945 945 copied is the amount that was able to be copied. 946 946 947 - The filesystem must take care of unlocking the page and 948 - releasing it refcount, and updating i_size. 947 + The filesystem must take care of unlocking the folio, 948 + decrementing its refcount, and updating i_size. 949 949 950 950 Returns < 0 on failure, otherwise the number of bytes (<= 951 951 'copied') that were able to be copied into pagecache.
+6 -6
block/fops.c
··· 451 451 } 452 452 453 453 static int blkdev_write_begin(struct file *file, struct address_space *mapping, 454 - loff_t pos, unsigned len, struct page **pagep, void **fsdata) 454 + loff_t pos, unsigned len, struct folio **foliop, void **fsdata) 455 455 { 456 - return block_write_begin(mapping, pos, len, pagep, blkdev_get_block); 456 + return block_write_begin(mapping, pos, len, foliop, blkdev_get_block); 457 457 } 458 458 459 459 static int blkdev_write_end(struct file *file, struct address_space *mapping, 460 - loff_t pos, unsigned len, unsigned copied, struct page *page, 460 + loff_t pos, unsigned len, unsigned copied, struct folio *folio, 461 461 void *fsdata) 462 462 { 463 463 int ret; 464 - ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); 464 + ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata); 465 465 466 - unlock_page(page); 467 - put_page(page); 466 + folio_unlock(folio); 467 + folio_put(folio); 468 468 469 469 return ret; 470 470 }
+22 -25
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
··· 424 424 struct address_space *mapping = obj->base.filp->f_mapping; 425 425 const struct address_space_operations *aops = mapping->a_ops; 426 426 char __user *user_data = u64_to_user_ptr(arg->data_ptr); 427 - u64 remain, offset; 427 + u64 remain; 428 + loff_t pos; 428 429 unsigned int pg; 429 430 430 431 /* Caller already validated user args */ ··· 458 457 */ 459 458 460 459 remain = arg->size; 461 - offset = arg->offset; 462 - pg = offset_in_page(offset); 460 + pos = arg->offset; 461 + pg = offset_in_page(pos); 463 462 464 463 do { 465 464 unsigned int len, unwritten; 466 - struct page *page; 465 + struct folio *folio; 467 466 void *data, *vaddr; 468 467 int err; 469 468 char __maybe_unused c; ··· 481 480 if (err) 482 481 return err; 483 482 484 - err = aops->write_begin(obj->base.filp, mapping, offset, len, 485 - &page, &data); 483 + err = aops->write_begin(obj->base.filp, mapping, pos, len, 484 + &folio, &data); 486 485 if (err < 0) 487 486 return err; 488 487 489 - vaddr = kmap_local_page(page); 488 + vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos)); 490 489 pagefault_disable(); 491 - unwritten = __copy_from_user_inatomic(vaddr + pg, 492 - user_data, 493 - len); 490 + unwritten = __copy_from_user_inatomic(vaddr, user_data, len); 494 491 pagefault_enable(); 495 492 kunmap_local(vaddr); 496 493 497 - err = aops->write_end(obj->base.filp, mapping, offset, len, 498 - len - unwritten, page, data); 494 + err = aops->write_end(obj->base.filp, mapping, pos, len, 495 + len - unwritten, folio, data); 499 496 if (err < 0) 500 497 return err; 501 498 ··· 503 504 504 505 remain -= len; 505 506 user_data += len; 506 - offset += len; 507 + pos += len; 507 508 pg = 0; 508 509 } while (remain); 509 510 ··· 659 660 struct drm_i915_gem_object *obj; 660 661 struct file *file; 661 662 const struct address_space_operations *aops; 662 - resource_size_t offset; 663 + loff_t pos; 663 664 int err; 664 665 665 666 GEM_WARN_ON(IS_DGFX(i915)); ··· 671 672 672 673 file = obj->base.filp; 673 674 aops = file->f_mapping->a_ops; 674 - offset = 0; 675 + pos = 0; 675 676 do { 676 677 unsigned int len = min_t(typeof(size), size, PAGE_SIZE); 677 - struct page *page; 678 - void *pgdata, *vaddr; 678 + struct folio *folio; 679 + void *fsdata; 679 680 680 - err = aops->write_begin(file, file->f_mapping, offset, len, 681 - &page, &pgdata); 681 + err = aops->write_begin(file, file->f_mapping, pos, len, 682 + &folio, &fsdata); 682 683 if (err < 0) 683 684 goto fail; 684 685 685 - vaddr = kmap(page); 686 - memcpy(vaddr, data, len); 687 - kunmap(page); 686 + memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len); 688 687 689 - err = aops->write_end(file, file->f_mapping, offset, len, len, 690 - page, pgdata); 688 + err = aops->write_end(file, file->f_mapping, pos, len, len, 689 + folio, fsdata); 691 690 if (err < 0) 692 691 goto fail; 693 692 694 693 size -= len; 695 694 data += len; 696 - offset += len; 695 + pos += len; 697 696 } while (size); 698 697 699 698 return obj;
+2 -3
fs/adfs/inode.c
··· 55 55 56 56 static int adfs_write_begin(struct file *file, struct address_space *mapping, 57 57 loff_t pos, unsigned len, 58 - struct page **pagep, void **fsdata) 58 + struct folio **foliop, void **fsdata) 59 59 { 60 60 int ret; 61 61 62 - *pagep = NULL; 63 - ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, 62 + ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata, 64 63 adfs_get_block, 65 64 &ADFS_I(mapping->host)->mmu_private); 66 65 if (unlikely(ret))
+10 -12
fs/affs/file.c
··· 417 417 418 418 static int affs_write_begin(struct file *file, struct address_space *mapping, 419 419 loff_t pos, unsigned len, 420 - struct page **pagep, void **fsdata) 420 + struct folio **foliop, void **fsdata) 421 421 { 422 422 int ret; 423 423 424 - *pagep = NULL; 425 - ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, 424 + ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata, 426 425 affs_get_block, 427 426 &AFFS_I(mapping->host)->mmu_private); 428 427 if (unlikely(ret)) ··· 432 433 433 434 static int affs_write_end(struct file *file, struct address_space *mapping, 434 435 loff_t pos, unsigned int len, unsigned int copied, 435 - struct page *page, void *fsdata) 436 + struct folio *folio, void *fsdata) 436 437 { 437 438 struct inode *inode = mapping->host; 438 439 int ret; 439 440 440 - ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 441 + ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); 441 442 442 443 /* Clear Archived bit on file writes, as AmigaOS would do */ 443 444 if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { ··· 647 648 648 649 static int affs_write_begin_ofs(struct file *file, struct address_space *mapping, 649 650 loff_t pos, unsigned len, 650 - struct page **pagep, void **fsdata) 651 + struct folio **foliop, void **fsdata) 651 652 { 652 653 struct inode *inode = mapping->host; 653 654 struct folio *folio; ··· 670 671 mapping_gfp_mask(mapping)); 671 672 if (IS_ERR(folio)) 672 673 return PTR_ERR(folio); 673 - *pagep = &folio->page; 674 + *foliop = folio; 674 675 675 676 if (folio_test_uptodate(folio)) 676 677 return 0; ··· 686 687 687 688 static int affs_write_end_ofs(struct file *file, struct address_space *mapping, 688 689 loff_t pos, unsigned len, unsigned copied, 689 - struct page *page, void *fsdata) 690 + struct folio *folio, void *fsdata) 690 691 { 691 - struct folio *folio = page_folio(page); 692 692 struct inode *inode = mapping->host; 693 693 struct super_block *sb = inode->i_sb; 694 694 struct buffer_head *bh, *prev_bh; ··· 880 882 881 883 if (inode->i_size > AFFS_I(inode)->mmu_private) { 882 884 struct address_space *mapping = inode->i_mapping; 883 - struct page *page; 885 + struct folio *folio; 884 886 void *fsdata = NULL; 885 887 loff_t isize = inode->i_size; 886 888 int res; 887 889 888 - res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &page, &fsdata); 890 + res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &folio, &fsdata); 889 891 if (!res) 890 - res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata); 892 + res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, folio, fsdata); 891 893 else 892 894 inode->i_size = AFFS_I(inode)->mmu_private; 893 895 mark_inode_dirty(inode);
+3 -5
fs/bcachefs/fs-io-buffered.c
··· 659 659 660 660 int bch2_write_begin(struct file *file, struct address_space *mapping, 661 661 loff_t pos, unsigned len, 662 - struct page **pagep, void **fsdata) 662 + struct folio **foliop, void **fsdata) 663 663 { 664 664 struct bch_inode_info *inode = to_bch_ei(mapping->host); 665 665 struct bch_fs *c = inode->v.i_sb->s_fs_info; ··· 728 728 goto err; 729 729 } 730 730 731 - *pagep = &folio->page; 731 + *foliop = folio; 732 732 return 0; 733 733 err: 734 734 folio_unlock(folio); 735 735 folio_put(folio); 736 - *pagep = NULL; 737 736 err_unlock: 738 737 bch2_pagecache_add_put(inode); 739 738 kfree(res); ··· 742 743 743 744 int bch2_write_end(struct file *file, struct address_space *mapping, 744 745 loff_t pos, unsigned len, unsigned copied, 745 - struct page *page, void *fsdata) 746 + struct folio *folio, void *fsdata) 746 747 { 747 748 struct bch_inode_info *inode = to_bch_ei(mapping->host); 748 749 struct bch_fs *c = inode->v.i_sb->s_fs_info; 749 750 struct bch2_folio_reservation *res = fsdata; 750 - struct folio *folio = page_folio(page); 751 751 unsigned offset = pos - folio_pos(folio); 752 752 753 753 lockdep_assert_held(&inode->v.i_rwsem);
+3 -3
fs/bcachefs/fs-io-buffered.h
··· 10 10 int bch2_writepages(struct address_space *, struct writeback_control *); 11 11 void bch2_readahead(struct readahead_control *); 12 12 13 - int bch2_write_begin(struct file *, struct address_space *, loff_t, 14 - unsigned, struct page **, void **); 13 + int bch2_write_begin(struct file *, struct address_space *, loff_t pos, 14 + unsigned len, struct folio **, void **); 15 15 int bch2_write_end(struct file *, struct address_space *, loff_t, 16 - unsigned, unsigned, struct page *, void *); 16 + unsigned len, unsigned copied, struct folio *, void *); 17 17 18 18 ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *); 19 19
+2 -2
fs/bfs/file.c
··· 172 172 173 173 static int bfs_write_begin(struct file *file, struct address_space *mapping, 174 174 loff_t pos, unsigned len, 175 - struct page **pagep, void **fsdata) 175 + struct folio **foliop, void **fsdata) 176 176 { 177 177 int ret; 178 178 179 - ret = block_write_begin(mapping, pos, len, pagep, bfs_get_block); 179 + ret = block_write_begin(mapping, pos, len, foliop, bfs_get_block); 180 180 if (unlikely(ret)) 181 181 bfs_write_failed(mapping, pos + len); 182 182
+31 -32
fs/buffer.c
··· 2164 2164 return err; 2165 2165 } 2166 2166 2167 - int __block_write_begin(struct page *page, loff_t pos, unsigned len, 2167 + int __block_write_begin(struct folio *folio, loff_t pos, unsigned len, 2168 2168 get_block_t *get_block) 2169 2169 { 2170 - return __block_write_begin_int(page_folio(page), pos, len, get_block, 2171 - NULL); 2170 + return __block_write_begin_int(folio, pos, len, get_block, NULL); 2172 2171 } 2173 2172 EXPORT_SYMBOL(__block_write_begin); 2174 2173 ··· 2217 2218 * The filesystem needs to handle block truncation upon failure. 2218 2219 */ 2219 2220 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2220 - struct page **pagep, get_block_t *get_block) 2221 + struct folio **foliop, get_block_t *get_block) 2221 2222 { 2222 2223 pgoff_t index = pos >> PAGE_SHIFT; 2223 - struct page *page; 2224 + struct folio *folio; 2224 2225 int status; 2225 2226 2226 - page = grab_cache_page_write_begin(mapping, index); 2227 - if (!page) 2228 - return -ENOMEM; 2227 + folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, 2228 + mapping_gfp_mask(mapping)); 2229 + if (IS_ERR(folio)) 2230 + return PTR_ERR(folio); 2229 2231 2230 - status = __block_write_begin(page, pos, len, get_block); 2232 + status = __block_write_begin_int(folio, pos, len, get_block, NULL); 2231 2233 if (unlikely(status)) { 2232 - unlock_page(page); 2233 - put_page(page); 2234 - page = NULL; 2234 + folio_unlock(folio); 2235 + folio_put(folio); 2236 + folio = NULL; 2235 2237 } 2236 2238 2237 - *pagep = page; 2239 + *foliop = folio; 2238 2240 return status; 2239 2241 } 2240 2242 EXPORT_SYMBOL(block_write_begin); 2241 2243 2242 2244 int block_write_end(struct file *file, struct address_space *mapping, 2243 2245 loff_t pos, unsigned len, unsigned copied, 2244 - struct page *page, void *fsdata) 2246 + struct folio *folio, void *fsdata) 2245 2247 { 2246 - struct folio *folio = page_folio(page); 2247 2248 size_t start = pos - folio_pos(folio); 2248 2249 2249 2250 if (unlikely(copied < len)) { ··· 2275 2276 2276 2277 int generic_write_end(struct file *file, struct address_space *mapping, 2277 2278 loff_t pos, unsigned len, unsigned copied, 2278 - struct page *page, void *fsdata) 2279 + struct folio *folio, void *fsdata) 2279 2280 { 2280 2281 struct inode *inode = mapping->host; 2281 2282 loff_t old_size = inode->i_size; 2282 2283 bool i_size_changed = false; 2283 2284 2284 - copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 2285 + copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata); 2285 2286 2286 2287 /* 2287 2288 * No need to use i_size_read() here, the i_size cannot change under us 2288 2289 * because we hold i_rwsem. 2289 2290 * 2290 - * But it's important to update i_size while still holding page lock: 2291 + * But it's important to update i_size while still holding folio lock: 2291 2292 * page writeout could otherwise come in and zero beyond i_size. 2292 2293 */ 2293 2294 if (pos + copied > inode->i_size) { ··· 2295 2296 i_size_changed = true; 2296 2297 } 2297 2298 2298 - unlock_page(page); 2299 - put_page(page); 2299 + folio_unlock(folio); 2300 + folio_put(folio); 2300 2301 2301 2302 if (old_size < pos) 2302 2303 pagecache_isize_extended(inode, old_size, pos); ··· 2462 2463 { 2463 2464 struct address_space *mapping = inode->i_mapping; 2464 2465 const struct address_space_operations *aops = mapping->a_ops; 2465 - struct page *page; 2466 + struct folio *folio; 2466 2467 void *fsdata = NULL; 2467 2468 int err; 2468 2469 ··· 2470 2471 if (err) 2471 2472 goto out; 2472 2473 2473 - err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata); 2474 + err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata); 2474 2475 if (err) 2475 2476 goto out; 2476 2477 2477 - err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata); 2478 + err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata); 2478 2479 BUG_ON(err > 0); 2479 2480 2480 2481 out: ··· 2488 2489 struct inode *inode = mapping->host; 2489 2490 const struct address_space_operations *aops = mapping->a_ops; 2490 2491 unsigned int blocksize = i_blocksize(inode); 2491 - struct page *page; 2492 + struct folio *folio; 2492 2493 void *fsdata = NULL; 2493 2494 pgoff_t index, curidx; 2494 2495 loff_t curpos; ··· 2507 2508 len = PAGE_SIZE - zerofrom; 2508 2509 2509 2510 err = aops->write_begin(file, mapping, curpos, len, 2510 - &page, &fsdata); 2511 + &folio, &fsdata); 2511 2512 if (err) 2512 2513 goto out; 2513 - zero_user(page, zerofrom, len); 2514 + folio_zero_range(folio, offset_in_folio(folio, curpos), len); 2514 2515 err = aops->write_end(file, mapping, curpos, len, len, 2515 - page, fsdata); 2516 + folio, fsdata); 2516 2517 if (err < 0) 2517 2518 goto out; 2518 2519 BUG_ON(err != len); ··· 2540 2541 len = offset - zerofrom; 2541 2542 2542 2543 err = aops->write_begin(file, mapping, curpos, len, 2543 - &page, &fsdata); 2544 + &folio, &fsdata); 2544 2545 if (err) 2545 2546 goto out; 2546 - zero_user(page, zerofrom, len); 2547 + folio_zero_range(folio, offset_in_folio(folio, curpos), len); 2547 2548 err = aops->write_end(file, mapping, curpos, len, len, 2548 - page, fsdata); 2549 + folio, fsdata); 2549 2550 if (err < 0) 2550 2551 goto out; 2551 2552 BUG_ON(err != len); ··· 2561 2562 */ 2562 2563 int cont_write_begin(struct file *file, struct address_space *mapping, 2563 2564 loff_t pos, unsigned len, 2564 - struct page **pagep, void **fsdata, 2565 + struct folio **foliop, void **fsdata, 2565 2566 get_block_t *get_block, loff_t *bytes) 2566 2567 { 2567 2568 struct inode *inode = mapping->host; ··· 2579 2580 (*bytes)++; 2580 2581 } 2581 2582 2582 - return block_write_begin(mapping, pos, len, pagep, get_block); 2583 + return block_write_begin(mapping, pos, len, foliop, get_block); 2583 2584 } 2584 2585 EXPORT_SYMBOL(cont_write_begin); 2585 2586
+5 -8
fs/ceph/addr.c
··· 1508 1508 */ 1509 1509 static int ceph_write_begin(struct file *file, struct address_space *mapping, 1510 1510 loff_t pos, unsigned len, 1511 - struct page **pagep, void **fsdata) 1511 + struct folio **foliop, void **fsdata) 1512 1512 { 1513 1513 struct inode *inode = file_inode(file); 1514 1514 struct ceph_inode_info *ci = ceph_inode(inode); 1515 - struct folio *folio = NULL; 1516 1515 int r; 1517 1516 1518 - r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL); 1517 + r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, foliop, NULL); 1519 1518 if (r < 0) 1520 1519 return r; 1521 1520 1522 - folio_wait_private_2(folio); /* [DEPRECATED] */ 1523 - WARN_ON_ONCE(!folio_test_locked(folio)); 1524 - *pagep = &folio->page; 1521 + folio_wait_private_2(*foliop); /* [DEPRECATED] */ 1522 + WARN_ON_ONCE(!folio_test_locked(*foliop)); 1525 1523 return 0; 1526 1524 } 1527 1525 ··· 1529 1531 */ 1530 1532 static int ceph_write_end(struct file *file, struct address_space *mapping, 1531 1533 loff_t pos, unsigned len, unsigned copied, 1532 - struct page *subpage, void *fsdata) 1534 + struct folio *folio, void *fsdata) 1533 1535 { 1534 - struct folio *folio = page_folio(subpage); 1535 1536 struct inode *inode = file_inode(file); 1536 1537 struct ceph_client *cl = ceph_inode_to_client(inode); 1537 1538 bool check_cap = false;
+43 -43
fs/ecryptfs/mmap.c
··· 234 234 /* 235 235 * Called with lower inode mutex held. 236 236 */ 237 - static int fill_zeros_to_end_of_page(struct page *page, unsigned int to) 237 + static int fill_zeros_to_end_of_page(struct folio *folio, unsigned int to) 238 238 { 239 - struct inode *inode = page->mapping->host; 239 + struct inode *inode = folio->mapping->host; 240 240 int end_byte_in_page; 241 241 242 - if ((i_size_read(inode) / PAGE_SIZE) != page->index) 242 + if ((i_size_read(inode) / PAGE_SIZE) != folio->index) 243 243 goto out; 244 244 end_byte_in_page = i_size_read(inode) % PAGE_SIZE; 245 245 if (to > end_byte_in_page) 246 246 end_byte_in_page = to; 247 - zero_user_segment(page, end_byte_in_page, PAGE_SIZE); 247 + folio_zero_segment(folio, end_byte_in_page, PAGE_SIZE); 248 248 out: 249 249 return 0; 250 250 } ··· 255 255 * @mapping: The eCryptfs object 256 256 * @pos: The file offset at which to start writing 257 257 * @len: Length of the write 258 - * @pagep: Pointer to return the page 258 + * @foliop: Pointer to return the folio 259 259 * @fsdata: Pointer to return fs data (unused) 260 260 * 261 261 * This function must zero any hole we create ··· 265 265 static int ecryptfs_write_begin(struct file *file, 266 266 struct address_space *mapping, 267 267 loff_t pos, unsigned len, 268 - struct page **pagep, void **fsdata) 268 + struct folio **foliop, void **fsdata) 269 269 { 270 270 pgoff_t index = pos >> PAGE_SHIFT; 271 - struct page *page; 271 + struct folio *folio; 272 272 loff_t prev_page_end_size; 273 273 int rc = 0; 274 274 275 - page = grab_cache_page_write_begin(mapping, index); 276 - if (!page) 277 - return -ENOMEM; 278 - *pagep = page; 275 + folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, 276 + mapping_gfp_mask(mapping)); 277 + if (IS_ERR(folio)) 278 + return PTR_ERR(folio); 279 + *foliop = folio; 279 280 280 281 prev_page_end_size = ((loff_t)index << PAGE_SHIFT); 281 - if (!PageUptodate(page)) { 282 + if (!folio_test_uptodate(folio)) { 282 283 struct ecryptfs_crypt_stat *crypt_stat = 283 284 &ecryptfs_inode_to_private(mapping->host)->crypt_stat; 284 285 285 286 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { 286 287 rc = ecryptfs_read_lower_page_segment( 287 - page, index, 0, PAGE_SIZE, mapping->host); 288 + &folio->page, index, 0, PAGE_SIZE, mapping->host); 288 289 if (rc) { 289 290 printk(KERN_ERR "%s: Error attempting to read " 290 291 "lower page segment; rc = [%d]\n", 291 292 __func__, rc); 292 - ClearPageUptodate(page); 293 + folio_clear_uptodate(folio); 293 294 goto out; 294 295 } else 295 - SetPageUptodate(page); 296 + folio_mark_uptodate(folio); 296 297 } else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) { 297 298 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) { 298 299 rc = ecryptfs_copy_up_encrypted_with_header( 299 - page, crypt_stat); 300 + &folio->page, crypt_stat); 300 301 if (rc) { 301 302 printk(KERN_ERR "%s: Error attempting " 302 303 "to copy the encrypted content " ··· 305 304 "inserting the metadata from " 306 305 "the xattr into the header; rc " 307 306 "= [%d]\n", __func__, rc); 308 - ClearPageUptodate(page); 307 + folio_clear_uptodate(folio); 309 308 goto out; 310 309 } 311 - SetPageUptodate(page); 310 + folio_mark_uptodate(folio); 312 311 } else { 313 312 rc = ecryptfs_read_lower_page_segment( 314 - page, index, 0, PAGE_SIZE, 313 + &folio->page, index, 0, PAGE_SIZE, 315 314 mapping->host); 316 315 if (rc) { 317 316 printk(KERN_ERR "%s: Error reading " 318 317 "page; rc = [%d]\n", 319 318 __func__, rc); 320 - ClearPageUptodate(page); 319 + folio_clear_uptodate(folio); 321 320 goto out; 322 321 } 323 - SetPageUptodate(page); 322 + folio_mark_uptodate(folio); 324 323 } 325 324 } else { 326 325 if (prev_page_end_size 327 - >= i_size_read(page->mapping->host)) { 328 - zero_user(page, 0, PAGE_SIZE); 329 - SetPageUptodate(page); 326 + >= i_size_read(mapping->host)) { 327 + folio_zero_range(folio, 0, PAGE_SIZE); 328 + folio_mark_uptodate(folio); 330 329 } else if (len < PAGE_SIZE) { 331 - rc = ecryptfs_decrypt_page(page); 330 + rc = ecryptfs_decrypt_page(&folio->page); 332 331 if (rc) { 333 332 printk(KERN_ERR "%s: Error decrypting " 334 333 "page at index [%ld]; " 335 334 "rc = [%d]\n", 336 - __func__, page->index, rc); 337 - ClearPageUptodate(page); 335 + __func__, folio->index, rc); 336 + folio_clear_uptodate(folio); 338 337 goto out; 339 338 } 340 - SetPageUptodate(page); 339 + folio_mark_uptodate(folio); 341 340 } 342 341 } 343 342 } 344 343 /* If creating a page or more of holes, zero them out via truncate. 345 344 * Note, this will increase i_size. */ 346 345 if (index != 0) { 347 - if (prev_page_end_size > i_size_read(page->mapping->host)) { 346 + if (prev_page_end_size > i_size_read(mapping->host)) { 348 347 rc = ecryptfs_truncate(file->f_path.dentry, 349 348 prev_page_end_size); 350 349 if (rc) { ··· 360 359 * of page? Zero it out. */ 361 360 if ((i_size_read(mapping->host) == prev_page_end_size) 362 361 && (pos != 0)) 363 - zero_user(page, 0, PAGE_SIZE); 362 + folio_zero_range(folio, 0, PAGE_SIZE); 364 363 out: 365 364 if (unlikely(rc)) { 366 - unlock_page(page); 367 - put_page(page); 368 - *pagep = NULL; 365 + folio_unlock(folio); 366 + folio_put(folio); 369 367 } 370 368 return rc; 371 369 } ··· 457 457 * @pos: The file position 458 458 * @len: The length of the data (unused) 459 459 * @copied: The amount of data copied 460 - * @page: The eCryptfs page 460 + * @folio: The eCryptfs folio 461 461 * @fsdata: The fsdata (unused) 462 462 */ 463 463 static int ecryptfs_write_end(struct file *file, 464 464 struct address_space *mapping, 465 465 loff_t pos, unsigned len, unsigned copied, 466 - struct page *page, void *fsdata) 466 + struct folio *folio, void *fsdata) 467 467 { 468 468 pgoff_t index = pos >> PAGE_SHIFT; 469 469 unsigned from = pos & (PAGE_SIZE - 1); ··· 476 476 ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page" 477 477 "(page w/ index = [0x%.16lx], to = [%d])\n", index, to); 478 478 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { 479 - rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page, 0, 480 - to); 479 + rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, 480 + &folio->page, 0, to); 481 481 if (!rc) { 482 482 rc = copied; 483 483 fsstack_copy_inode_size(ecryptfs_inode, ··· 485 485 } 486 486 goto out; 487 487 } 488 - if (!PageUptodate(page)) { 488 + if (!folio_test_uptodate(folio)) { 489 489 if (copied < PAGE_SIZE) { 490 490 rc = 0; 491 491 goto out; 492 492 } 493 - SetPageUptodate(page); 493 + folio_mark_uptodate(folio); 494 494 } 495 495 /* Fills in zeros if 'to' goes beyond inode size */ 496 - rc = fill_zeros_to_end_of_page(page, to); 496 + rc = fill_zeros_to_end_of_page(folio, to); 497 497 if (rc) { 498 498 ecryptfs_printk(KERN_WARNING, "Error attempting to fill " 499 499 "zeros in page with index = [0x%.16lx]\n", index); 500 500 goto out; 501 501 } 502 - rc = ecryptfs_encrypt_page(page); 502 + rc = ecryptfs_encrypt_page(&folio->page); 503 503 if (rc) { 504 504 ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper " 505 505 "index [0x%.16lx])\n", index); ··· 518 518 else 519 519 rc = copied; 520 520 out: 521 - unlock_page(page); 522 - put_page(page); 521 + folio_unlock(folio); 522 + folio_put(folio); 523 523 return rc; 524 524 } 525 525
+4 -4
fs/exfat/file.c
··· 535 535 536 536 while (start < end) { 537 537 u32 zerofrom, len; 538 - struct page *page = NULL; 538 + struct folio *folio; 539 539 540 540 zerofrom = start & (PAGE_SIZE - 1); 541 541 len = PAGE_SIZE - zerofrom; 542 542 if (start + len > end) 543 543 len = end - start; 544 544 545 - err = ops->write_begin(file, mapping, start, len, &page, NULL); 545 + err = ops->write_begin(file, mapping, start, len, &folio, NULL); 546 546 if (err) 547 547 goto out; 548 548 549 - zero_user_segment(page, zerofrom, zerofrom + len); 549 + folio_zero_range(folio, offset_in_folio(folio, start), len); 550 550 551 - err = ops->write_end(file, mapping, start, len, len, page, NULL); 551 + err = ops->write_end(file, mapping, start, len, len, folio, NULL); 552 552 if (err < 0) 553 553 goto out; 554 554 start += len;
+4 -5
fs/exfat/inode.c
··· 448 448 449 449 static int exfat_write_begin(struct file *file, struct address_space *mapping, 450 450 loff_t pos, unsigned int len, 451 - struct page **pagep, void **fsdata) 451 + struct folio **foliop, void **fsdata) 452 452 { 453 453 int ret; 454 454 455 - *pagep = NULL; 456 - ret = block_write_begin(mapping, pos, len, pagep, exfat_get_block); 455 + ret = block_write_begin(mapping, pos, len, foliop, exfat_get_block); 457 456 458 457 if (ret < 0) 459 458 exfat_write_failed(mapping, pos+len); ··· 462 463 463 464 static int exfat_write_end(struct file *file, struct address_space *mapping, 464 465 loff_t pos, unsigned int len, unsigned int copied, 465 - struct page *pagep, void *fsdata) 466 + struct folio *folio, void *fsdata) 466 467 { 467 468 struct inode *inode = mapping->host; 468 469 struct exfat_inode_info *ei = EXFAT_I(inode); 469 470 int err; 470 471 471 - err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); 472 + err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); 472 473 473 474 if (ei->i_size_aligned < i_size_read(inode)) { 474 475 exfat_fs_error(inode->i_sb,
+2 -2
fs/ext2/dir.c
··· 87 87 struct inode *dir = mapping->host; 88 88 89 89 inode_inc_iversion(dir); 90 - block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL); 90 + block_write_end(NULL, mapping, pos, len, len, folio, NULL); 91 91 92 92 if (pos+len > dir->i_size) { 93 93 i_size_write(dir, pos+len); ··· 434 434 435 435 static int ext2_prepare_chunk(struct folio *folio, loff_t pos, unsigned len) 436 436 { 437 - return __block_write_begin(&folio->page, pos, len, ext2_get_block); 437 + return __block_write_begin(folio, pos, len, ext2_get_block); 438 438 } 439 439 440 440 static int ext2_handle_dirsync(struct inode *dir)
+4 -4
fs/ext2/inode.c
··· 916 916 917 917 static int 918 918 ext2_write_begin(struct file *file, struct address_space *mapping, 919 - loff_t pos, unsigned len, struct page **pagep, void **fsdata) 919 + loff_t pos, unsigned len, struct folio **foliop, void **fsdata) 920 920 { 921 921 int ret; 922 922 923 - ret = block_write_begin(mapping, pos, len, pagep, ext2_get_block); 923 + ret = block_write_begin(mapping, pos, len, foliop, ext2_get_block); 924 924 if (ret < 0) 925 925 ext2_write_failed(mapping, pos + len); 926 926 return ret; ··· 928 928 929 929 static int ext2_write_end(struct file *file, struct address_space *mapping, 930 930 loff_t pos, unsigned len, unsigned copied, 931 - struct page *page, void *fsdata) 931 + struct folio *folio, void *fsdata) 932 932 { 933 933 int ret; 934 934 935 - ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 935 + ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); 936 936 if (ret < len) 937 937 ext2_write_failed(mapping, pos + len); 938 938 return ret;
+2 -2
fs/ext4/ext4.h
··· 3563 3563 extern int ext4_try_to_write_inline_data(struct address_space *mapping, 3564 3564 struct inode *inode, 3565 3565 loff_t pos, unsigned len, 3566 - struct page **pagep); 3566 + struct folio **foliop); 3567 3567 int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, 3568 3568 unsigned copied, struct folio *folio); 3569 3569 extern int ext4_da_write_inline_data_begin(struct address_space *mapping, 3570 3570 struct inode *inode, 3571 3571 loff_t pos, unsigned len, 3572 - struct page **pagep, 3572 + struct folio **foliop, 3573 3573 void **fsdata); 3574 3574 extern int ext4_try_add_inline_entry(handle_t *handle, 3575 3575 struct ext4_filename *fname,
+7 -7
fs/ext4/inline.c
··· 601 601 goto out; 602 602 603 603 if (ext4_should_dioread_nolock(inode)) { 604 - ret = __block_write_begin(&folio->page, from, to, 604 + ret = __block_write_begin(folio, from, to, 605 605 ext4_get_block_unwritten); 606 606 } else 607 - ret = __block_write_begin(&folio->page, from, to, ext4_get_block); 607 + ret = __block_write_begin(folio, from, to, ext4_get_block); 608 608 609 609 if (!ret && ext4_should_journal_data(inode)) { 610 610 ret = ext4_walk_page_buffers(handle, inode, ··· 660 660 int ext4_try_to_write_inline_data(struct address_space *mapping, 661 661 struct inode *inode, 662 662 loff_t pos, unsigned len, 663 - struct page **pagep) 663 + struct folio **foliop) 664 664 { 665 665 int ret; 666 666 handle_t *handle; ··· 708 708 goto out; 709 709 } 710 710 711 - *pagep = &folio->page; 711 + *foliop = folio; 712 712 down_read(&EXT4_I(inode)->xattr_sem); 713 713 if (!ext4_has_inline_data(inode)) { 714 714 ret = 0; ··· 856 856 goto out; 857 857 } 858 858 859 - ret = __block_write_begin(&folio->page, 0, inline_size, 859 + ret = __block_write_begin(folio, 0, inline_size, 860 860 ext4_da_get_block_prep); 861 861 if (ret) { 862 862 up_read(&EXT4_I(inode)->xattr_sem); ··· 891 891 int ext4_da_write_inline_data_begin(struct address_space *mapping, 892 892 struct inode *inode, 893 893 loff_t pos, unsigned len, 894 - struct page **pagep, 894 + struct folio **foliop, 895 895 void **fsdata) 896 896 { 897 897 int ret; ··· 954 954 goto out_release_page; 955 955 956 956 up_read(&EXT4_I(inode)->xattr_sem); 957 - *pagep = &folio->page; 957 + *foliop = folio; 958 958 brelse(iloc.bh); 959 959 return 1; 960 960 out_release_page:
+17 -20
fs/ext4/inode.c
··· 1145 1145 */ 1146 1146 static int ext4_write_begin(struct file *file, struct address_space *mapping, 1147 1147 loff_t pos, unsigned len, 1148 - struct page **pagep, void **fsdata) 1148 + struct folio **foliop, void **fsdata) 1149 1149 { 1150 1150 struct inode *inode = mapping->host; 1151 1151 int ret, needed_blocks; ··· 1170 1170 1171 1171 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 1172 1172 ret = ext4_try_to_write_inline_data(mapping, inode, pos, len, 1173 - pagep); 1173 + foliop); 1174 1174 if (ret < 0) 1175 1175 return ret; 1176 1176 if (ret == 1) ··· 1224 1224 ret = ext4_block_write_begin(folio, pos, len, ext4_get_block); 1225 1225 #else 1226 1226 if (ext4_should_dioread_nolock(inode)) 1227 - ret = __block_write_begin(&folio->page, pos, len, 1227 + ret = __block_write_begin(folio, pos, len, 1228 1228 ext4_get_block_unwritten); 1229 1229 else 1230 - ret = __block_write_begin(&folio->page, pos, len, ext4_get_block); 1230 + ret = __block_write_begin(folio, pos, len, ext4_get_block); 1231 1231 #endif 1232 1232 if (!ret && ext4_should_journal_data(inode)) { 1233 1233 ret = ext4_walk_page_buffers(handle, inode, ··· 1270 1270 folio_put(folio); 1271 1271 return ret; 1272 1272 } 1273 - *pagep = &folio->page; 1273 + *foliop = folio; 1274 1274 return ret; 1275 1275 } 1276 1276 ··· 1298 1298 static int ext4_write_end(struct file *file, 1299 1299 struct address_space *mapping, 1300 1300 loff_t pos, unsigned len, unsigned copied, 1301 - struct page *page, void *fsdata) 1301 + struct folio *folio, void *fsdata) 1302 1302 { 1303 - struct folio *folio = page_folio(page); 1304 1303 handle_t *handle = ext4_journal_current_handle(); 1305 1304 struct inode *inode = mapping->host; 1306 1305 loff_t old_size = inode->i_size; ··· 1314 1315 return ext4_write_inline_data_end(inode, pos, len, copied, 1315 1316 folio); 1316 1317 1317 - copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 1318 + copied = block_write_end(file, mapping, pos, len, 
copied, folio, fsdata); 1318 1319 /* 1319 1320 * it's important to update i_size while still holding folio lock: 1320 1321 * page writeout could otherwise come in and zero beyond i_size. ··· 1401 1402 static int ext4_journalled_write_end(struct file *file, 1402 1403 struct address_space *mapping, 1403 1404 loff_t pos, unsigned len, unsigned copied, 1404 - struct page *page, void *fsdata) 1405 + struct folio *folio, void *fsdata) 1405 1406 { 1406 - struct folio *folio = page_folio(page); 1407 1407 handle_t *handle = ext4_journal_current_handle(); 1408 1408 struct inode *inode = mapping->host; 1409 1409 loff_t old_size = inode->i_size; ··· 2924 2926 2925 2927 static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 2926 2928 loff_t pos, unsigned len, 2927 - struct page **pagep, void **fsdata) 2929 + struct folio **foliop, void **fsdata) 2928 2930 { 2929 2931 int ret, retries = 0; 2930 2932 struct folio *folio; ··· 2939 2941 if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) { 2940 2942 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 2941 2943 return ext4_write_begin(file, mapping, pos, 2942 - len, pagep, fsdata); 2944 + len, foliop, fsdata); 2943 2945 } 2944 2946 *fsdata = (void *)0; 2945 2947 trace_ext4_da_write_begin(inode, pos, len); 2946 2948 2947 2949 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 2948 2950 ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len, 2949 - pagep, fsdata); 2951 + foliop, fsdata); 2950 2952 if (ret < 0) 2951 2953 return ret; 2952 2954 if (ret == 1) ··· 2962 2964 #ifdef CONFIG_FS_ENCRYPTION 2963 2965 ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep); 2964 2966 #else 2965 - ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep); 2967 + ret = __block_write_begin(folio, pos, len, ext4_da_get_block_prep); 2966 2968 #endif 2967 2969 if (ret < 0) { 2968 2970 folio_unlock(folio); ··· 2981 2983 return ret; 2982 2984 } 2983 2985 2984 - *pagep 
= &folio->page; 2986 + *foliop = folio; 2985 2987 return ret; 2986 2988 } 2987 2989 ··· 3027 3029 * flag, which all that's needed to trigger page writeback. 3028 3030 */ 3029 3031 copied = block_write_end(NULL, mapping, pos, len, copied, 3030 - &folio->page, NULL); 3032 + folio, NULL); 3031 3033 new_i_size = pos + copied; 3032 3034 3033 3035 /* ··· 3078 3080 static int ext4_da_write_end(struct file *file, 3079 3081 struct address_space *mapping, 3080 3082 loff_t pos, unsigned len, unsigned copied, 3081 - struct page *page, void *fsdata) 3083 + struct folio *folio, void *fsdata) 3082 3084 { 3083 3085 struct inode *inode = mapping->host; 3084 3086 int write_mode = (int)(unsigned long)fsdata; 3085 - struct folio *folio = page_folio(page); 3086 3087 3087 3088 if (write_mode == FALL_BACK_TO_NONDELALLOC) 3088 3089 return ext4_write_end(file, mapping, pos, 3089 - len, copied, &folio->page, fsdata); 3090 + len, copied, folio, fsdata); 3090 3091 3091 3092 trace_ext4_da_write_end(inode, pos, len, copied); 3092 3093 ··· 6216 6219 if (folio_pos(folio) + len > size) 6217 6220 len = size - folio_pos(folio); 6218 6221 6219 - err = __block_write_begin(&folio->page, 0, len, ext4_get_block); 6222 + err = __block_write_begin(folio, 0, len, ext4_get_block); 6220 6223 if (!err) { 6221 6224 ret = VM_FAULT_SIGBUS; 6222 6225 if (ext4_journal_folio_buffers(handle, folio, len))
+4 -4
fs/ext4/verity.c
··· 76 76 while (count) { 77 77 size_t n = min_t(size_t, count, 78 78 PAGE_SIZE - offset_in_page(pos)); 79 - struct page *page; 79 + struct folio *folio; 80 80 void *fsdata = NULL; 81 81 int res; 82 82 83 - res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata); 83 + res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata); 84 84 if (res) 85 85 return res; 86 86 87 - memcpy_to_page(page, offset_in_page(pos), buf, n); 87 + memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n); 88 88 89 - res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata); 89 + res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata); 90 90 if (res < 0) 91 91 return res; 92 92 if (res != n)
+47 -40
fs/f2fs/data.c
··· 3552 3552 } 3553 3553 3554 3554 static int f2fs_write_begin(struct file *file, struct address_space *mapping, 3555 - loff_t pos, unsigned len, struct page **pagep, void **fsdata) 3555 + loff_t pos, unsigned len, struct folio **foliop, void **fsdata) 3556 3556 { 3557 3557 struct inode *inode = mapping->host; 3558 3558 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3559 - struct page *page = NULL; 3560 - pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT; 3559 + struct folio *folio; 3560 + pgoff_t index = pos >> PAGE_SHIFT; 3561 3561 bool need_balance = false; 3562 3562 bool use_cow = false; 3563 3563 block_t blkaddr = NULL_ADDR; ··· 3573 3573 /* 3574 3574 * We should check this at this moment to avoid deadlock on inode page 3575 3575 * and #0 page. The locking rule for inline_data conversion should be: 3576 - * lock_page(page #0) -> lock_page(inode_page) 3576 + * folio_lock(folio #0) -> folio_lock(inode_page) 3577 3577 */ 3578 3578 if (index != 0) { 3579 3579 err = f2fs_convert_inline_inode(inode); ··· 3584 3584 #ifdef CONFIG_F2FS_FS_COMPRESSION 3585 3585 if (f2fs_compressed_file(inode)) { 3586 3586 int ret; 3587 + struct page *page; 3587 3588 3588 3589 *fsdata = NULL; 3589 3590 3590 3591 if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode))) 3591 3592 goto repeat; 3592 3593 3593 - ret = f2fs_prepare_compress_overwrite(inode, pagep, 3594 + ret = f2fs_prepare_compress_overwrite(inode, &page, 3594 3595 index, fsdata); 3595 3596 if (ret < 0) { 3596 3597 err = ret; 3597 3598 goto fail; 3598 3599 } else if (ret) { 3600 + *foliop = page_folio(page); 3599 3601 return 0; 3600 3602 } 3601 3603 } ··· 3605 3603 3606 3604 repeat: 3607 3605 /* 3608 - * Do not use grab_cache_page_write_begin() to avoid deadlock due to 3609 - * wait_for_stable_page. Will wait that below with our IO control. 3606 + * Do not use FGP_STABLE to avoid deadlock. 3607 + * Will wait that below with our IO control. 
3610 3608 */ 3611 - page = f2fs_pagecache_get_page(mapping, index, 3609 + folio = __filemap_get_folio(mapping, index, 3612 3610 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS); 3613 - if (!page) { 3614 - err = -ENOMEM; 3611 + if (IS_ERR(folio)) { 3612 + err = PTR_ERR(folio); 3615 3613 goto fail; 3616 3614 } 3617 3615 3618 3616 /* TODO: cluster can be compressed due to race with .writepage */ 3619 3617 3620 - *pagep = page; 3618 + *foliop = folio; 3621 3619 3622 3620 if (f2fs_is_atomic_file(inode)) 3623 - err = prepare_atomic_write_begin(sbi, page, pos, len, 3621 + err = prepare_atomic_write_begin(sbi, &folio->page, pos, len, 3624 3622 &blkaddr, &need_balance, &use_cow); 3625 3623 else 3626 - err = prepare_write_begin(sbi, page, pos, len, 3624 + err = prepare_write_begin(sbi, &folio->page, pos, len, 3627 3625 &blkaddr, &need_balance); 3628 3626 if (err) 3629 - goto fail; 3627 + goto put_folio; 3630 3628 3631 3629 if (need_balance && !IS_NOQUOTA(inode) && 3632 3630 has_not_enough_free_secs(sbi, 0, 0)) { 3633 - unlock_page(page); 3631 + folio_unlock(folio); 3634 3632 f2fs_balance_fs(sbi, true); 3635 - lock_page(page); 3636 - if (page->mapping != mapping) { 3637 - /* The page got truncated from under us */ 3638 - f2fs_put_page(page, 1); 3633 + folio_lock(folio); 3634 + if (folio->mapping != mapping) { 3635 + /* The folio got truncated from under us */ 3636 + folio_unlock(folio); 3637 + folio_put(folio); 3639 3638 goto repeat; 3640 3639 } 3641 3640 } 3642 3641 3643 - f2fs_wait_on_page_writeback(page, DATA, false, true); 3642 + f2fs_wait_on_page_writeback(&folio->page, DATA, false, true); 3644 3643 3645 - if (len == PAGE_SIZE || PageUptodate(page)) 3644 + if (len == folio_size(folio) || folio_test_uptodate(folio)) 3646 3645 return 0; 3647 3646 3648 3647 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) && 3649 3648 !f2fs_verity_in_progress(inode)) { 3650 - zero_user_segment(page, len, PAGE_SIZE); 3649 + folio_zero_segment(folio, len, PAGE_SIZE); 3651 3650 
return 0; 3652 3651 } 3653 3652 3654 3653 if (blkaddr == NEW_ADDR) { 3655 - zero_user_segment(page, 0, PAGE_SIZE); 3656 - SetPageUptodate(page); 3654 + folio_zero_segment(folio, 0, folio_size(folio)); 3655 + folio_mark_uptodate(folio); 3657 3656 } else { 3658 3657 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, 3659 3658 DATA_GENERIC_ENHANCE_READ)) { 3660 3659 err = -EFSCORRUPTED; 3661 - goto fail; 3660 + goto put_folio; 3662 3661 } 3663 3662 err = f2fs_submit_page_read(use_cow ? 3664 - F2FS_I(inode)->cow_inode : inode, page, 3663 + F2FS_I(inode)->cow_inode : inode, &folio->page, 3665 3664 blkaddr, 0, true); 3666 3665 if (err) 3667 - goto fail; 3666 + goto put_folio; 3668 3667 3669 - lock_page(page); 3670 - if (unlikely(page->mapping != mapping)) { 3671 - f2fs_put_page(page, 1); 3668 + folio_lock(folio); 3669 + if (unlikely(folio->mapping != mapping)) { 3670 + folio_unlock(folio); 3671 + folio_put(folio); 3672 3672 goto repeat; 3673 3673 } 3674 - if (unlikely(!PageUptodate(page))) { 3674 + if (unlikely(!folio_test_uptodate(folio))) { 3675 3675 err = -EIO; 3676 - goto fail; 3676 + goto put_folio; 3677 3677 } 3678 3678 } 3679 3679 return 0; 3680 3680 3681 + put_folio: 3682 + folio_unlock(folio); 3683 + folio_put(folio); 3681 3684 fail: 3682 - f2fs_put_page(page, 1); 3683 3685 f2fs_write_failed(inode, pos + len); 3684 3686 return err; 3685 3687 } ··· 3691 3685 static int f2fs_write_end(struct file *file, 3692 3686 struct address_space *mapping, 3693 3687 loff_t pos, unsigned len, unsigned copied, 3694 - struct page *page, void *fsdata) 3688 + struct folio *folio, void *fsdata) 3695 3689 { 3696 - struct inode *inode = page->mapping->host; 3690 + struct inode *inode = folio->mapping->host; 3697 3691 3698 3692 trace_f2fs_write_end(inode, pos, len, copied); 3699 3693 ··· 3702 3696 * should be PAGE_SIZE. Otherwise, we treat it with zero copied and 3703 3697 * let generic_perform_write() try to copy data again through copied=0. 
3704 3698 */ 3705 - if (!PageUptodate(page)) { 3699 + if (!folio_test_uptodate(folio)) { 3706 3700 if (unlikely(copied != len)) 3707 3701 copied = 0; 3708 3702 else 3709 - SetPageUptodate(page); 3703 + folio_mark_uptodate(folio); 3710 3704 } 3711 3705 3712 3706 #ifdef CONFIG_F2FS_FS_COMPRESSION 3713 3707 /* overwrite compressed file */ 3714 3708 if (f2fs_compressed_file(inode) && fsdata) { 3715 - f2fs_compress_write_end(inode, fsdata, page->index, copied); 3709 + f2fs_compress_write_end(inode, fsdata, folio->index, copied); 3716 3710 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 3717 3711 3718 3712 if (pos + copied > i_size_read(inode) && ··· 3725 3719 if (!copied) 3726 3720 goto unlock_out; 3727 3721 3728 - set_page_dirty(page); 3722 + folio_mark_dirty(folio); 3729 3723 3730 3724 if (pos + copied > i_size_read(inode) && 3731 3725 !f2fs_verity_in_progress(inode)) { ··· 3735 3729 pos + copied); 3736 3730 } 3737 3731 unlock_out: 3738 - f2fs_put_page(page, 1); 3732 + folio_unlock(folio); 3733 + folio_put(folio); 3739 3734 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 3740 3735 return copied; 3741 3736 }
+4 -4
fs/f2fs/super.c
··· 2677 2677 const struct address_space_operations *a_ops = mapping->a_ops; 2678 2678 int offset = off & (sb->s_blocksize - 1); 2679 2679 size_t towrite = len; 2680 - struct page *page; 2680 + struct folio *folio; 2681 2681 void *fsdata = NULL; 2682 2682 int err = 0; 2683 2683 int tocopy; ··· 2687 2687 towrite); 2688 2688 retry: 2689 2689 err = a_ops->write_begin(NULL, mapping, off, tocopy, 2690 - &page, &fsdata); 2690 + &folio, &fsdata); 2691 2691 if (unlikely(err)) { 2692 2692 if (err == -ENOMEM) { 2693 2693 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); ··· 2697 2697 break; 2698 2698 } 2699 2699 2700 - memcpy_to_page(page, offset, data, tocopy); 2700 + memcpy_to_folio(folio, offset_in_folio(folio, off), data, tocopy); 2701 2701 2702 2702 a_ops->write_end(NULL, mapping, off, tocopy, tocopy, 2703 - page, fsdata); 2703 + folio, fsdata); 2704 2704 offset = 0; 2705 2705 towrite -= tocopy; 2706 2706 off += tocopy;
+4 -4
fs/f2fs/verity.c
··· 80 80 while (count) { 81 81 size_t n = min_t(size_t, count, 82 82 PAGE_SIZE - offset_in_page(pos)); 83 - struct page *page; 83 + struct folio *folio; 84 84 void *fsdata = NULL; 85 85 int res; 86 86 87 - res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata); 87 + res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata); 88 88 if (res) 89 89 return res; 90 90 91 - memcpy_to_page(page, offset_in_page(pos), buf, n); 91 + memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n); 92 92 93 - res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata); 93 + res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata); 94 94 if (res < 0) 95 95 return res; 96 96 if (res != n)
+4 -5
fs/fat/inode.c
··· 221 221 222 222 static int fat_write_begin(struct file *file, struct address_space *mapping, 223 223 loff_t pos, unsigned len, 224 - struct page **pagep, void **fsdata) 224 + struct folio **foliop, void **fsdata) 225 225 { 226 226 int err; 227 227 228 - *pagep = NULL; 229 228 err = cont_write_begin(file, mapping, pos, len, 230 - pagep, fsdata, fat_get_block, 229 + foliop, fsdata, fat_get_block, 231 230 &MSDOS_I(mapping->host)->mmu_private); 232 231 if (err < 0) 233 232 fat_write_failed(mapping, pos + len); ··· 235 236 236 237 static int fat_write_end(struct file *file, struct address_space *mapping, 237 238 loff_t pos, unsigned len, unsigned copied, 238 - struct page *pagep, void *fsdata) 239 + struct folio *folio, void *fsdata) 239 240 { 240 241 struct inode *inode = mapping->host; 241 242 int err; 242 - err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); 243 + err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); 243 244 if (err < len) 244 245 fat_write_failed(mapping, pos + len); 245 246 if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {
+24 -23
fs/fuse/file.c
··· 2393 2393 * but how to implement it without killing performance need more thinking. 2394 2394 */ 2395 2395 static int fuse_write_begin(struct file *file, struct address_space *mapping, 2396 - loff_t pos, unsigned len, struct page **pagep, void **fsdata) 2396 + loff_t pos, unsigned len, struct folio **foliop, void **fsdata) 2397 2397 { 2398 2398 pgoff_t index = pos >> PAGE_SHIFT; 2399 2399 struct fuse_conn *fc = get_fuse_conn(file_inode(file)); 2400 - struct page *page; 2400 + struct folio *folio; 2401 2401 loff_t fsize; 2402 2402 int err = -ENOMEM; 2403 2403 2404 2404 WARN_ON(!fc->writeback_cache); 2405 2405 2406 - page = grab_cache_page_write_begin(mapping, index); 2407 - if (!page) 2406 + folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, 2407 + mapping_gfp_mask(mapping)); 2408 + if (IS_ERR(folio)) 2408 2409 goto error; 2409 2410 2410 - fuse_wait_on_page_writeback(mapping->host, page->index); 2411 + fuse_wait_on_page_writeback(mapping->host, folio->index); 2411 2412 2412 - if (PageUptodate(page) || len == PAGE_SIZE) 2413 + if (folio_test_uptodate(folio) || len >= folio_size(folio)) 2413 2414 goto success; 2414 2415 /* 2415 - * Check if the start this page comes after the end of file, in which 2416 - * case the readpage can be optimized away. 2416 + * Check if the start of this folio comes after the end of file, 2417 + * in which case the readpage can be optimized away. 
2417 2418 */ 2418 2419 fsize = i_size_read(mapping->host); 2419 - if (fsize <= (pos & PAGE_MASK)) { 2420 - size_t off = pos & ~PAGE_MASK; 2420 + if (fsize <= folio_pos(folio)) { 2421 + size_t off = offset_in_folio(folio, pos); 2421 2422 if (off) 2422 - zero_user_segment(page, 0, off); 2423 + folio_zero_segment(folio, 0, off); 2423 2424 goto success; 2424 2425 } 2425 - err = fuse_do_readpage(file, page); 2426 + err = fuse_do_readpage(file, &folio->page); 2426 2427 if (err) 2427 2428 goto cleanup; 2428 2429 success: 2429 - *pagep = page; 2430 + *foliop = folio; 2430 2431 return 0; 2431 2432 2432 2433 cleanup: 2433 - unlock_page(page); 2434 - put_page(page); 2434 + folio_unlock(folio); 2435 + folio_put(folio); 2435 2436 error: 2436 2437 return err; 2437 2438 } 2438 2439 2439 2440 static int fuse_write_end(struct file *file, struct address_space *mapping, 2440 2441 loff_t pos, unsigned len, unsigned copied, 2441 - struct page *page, void *fsdata) 2442 + struct folio *folio, void *fsdata) 2442 2443 { 2443 - struct inode *inode = page->mapping->host; 2444 + struct inode *inode = folio->mapping->host; 2444 2445 2445 2446 /* Haven't copied anything? Skip zeroing, size extending, dirtying. */ 2446 2447 if (!copied) 2447 2448 goto unlock; 2448 2449 2449 2450 pos += copied; 2450 - if (!PageUptodate(page)) { 2451 + if (!folio_test_uptodate(folio)) { 2451 2452 /* Zero any unwritten bytes at the end of the page */ 2452 2453 size_t endoff = pos & ~PAGE_MASK; 2453 2454 if (endoff) 2454 - zero_user_segment(page, endoff, PAGE_SIZE); 2455 - SetPageUptodate(page); 2455 + folio_zero_segment(folio, endoff, PAGE_SIZE); 2456 + folio_mark_uptodate(folio); 2456 2457 } 2457 2458 2458 2459 if (pos > inode->i_size) 2459 2460 i_size_write(inode, pos); 2460 2461 2461 - set_page_dirty(page); 2462 + folio_mark_dirty(folio); 2462 2463 2463 2464 unlock: 2464 - unlock_page(page); 2465 - put_page(page); 2465 + folio_unlock(folio); 2466 + folio_put(folio); 2466 2467 2467 2468 return copied; 2468 2469 }
+3 -3
fs/hfs/extent.c
··· 487 487 if (inode->i_size > HFS_I(inode)->phys_size) { 488 488 struct address_space *mapping = inode->i_mapping; 489 489 void *fsdata = NULL; 490 - struct page *page; 490 + struct folio *folio; 491 491 492 492 /* XXX: Can use generic_cont_expand? */ 493 493 size = inode->i_size - 1; 494 - res = hfs_write_begin(NULL, mapping, size + 1, 0, &page, 494 + res = hfs_write_begin(NULL, mapping, size + 1, 0, &folio, 495 495 &fsdata); 496 496 if (!res) { 497 497 res = generic_write_end(NULL, mapping, size + 1, 0, 0, 498 - page, fsdata); 498 + folio, fsdata); 499 499 } 500 500 if (res) 501 501 inode->i_size = HFS_I(inode)->phys_size;
+1 -1
fs/hfs/hfs_fs.h
··· 202 202 extern const struct address_space_operations hfs_btree_aops; 203 203 204 204 int hfs_write_begin(struct file *file, struct address_space *mapping, 205 - loff_t pos, unsigned len, struct page **pagep, void **fsdata); 205 + loff_t pos, unsigned len, struct folio **foliop, void **fsdata); 206 206 extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t); 207 207 extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *); 208 208 extern int hfs_write_inode(struct inode *, struct writeback_control *);
+2 -3
fs/hfs/inode.c
··· 45 45 } 46 46 47 47 int hfs_write_begin(struct file *file, struct address_space *mapping, 48 - loff_t pos, unsigned len, struct page **pagep, void **fsdata) 48 + loff_t pos, unsigned len, struct folio **foliop, void **fsdata) 49 49 { 50 50 int ret; 51 51 52 - *pagep = NULL; 53 - ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, 52 + ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata, 54 53 hfs_get_block, 55 54 &HFS_I(mapping->host)->phys_size); 56 55 if (unlikely(ret))
+3 -3
fs/hfsplus/extents.c
··· 554 554 555 555 if (inode->i_size > hip->phys_size) { 556 556 struct address_space *mapping = inode->i_mapping; 557 - struct page *page; 557 + struct folio *folio; 558 558 void *fsdata = NULL; 559 559 loff_t size = inode->i_size; 560 560 561 561 res = hfsplus_write_begin(NULL, mapping, size, 0, 562 - &page, &fsdata); 562 + &folio, &fsdata); 563 563 if (res) 564 564 return; 565 565 res = generic_write_end(NULL, mapping, size, 0, 0, 566 - page, fsdata); 566 + folio, fsdata); 567 567 if (res < 0) 568 568 return; 569 569 mark_inode_dirty(inode);
+1 -1
fs/hfsplus/hfsplus_fs.h
··· 472 472 extern const struct dentry_operations hfsplus_dentry_operations; 473 473 474 474 int hfsplus_write_begin(struct file *file, struct address_space *mapping, 475 - loff_t pos, unsigned len, struct page **pagep, void **fsdata); 475 + loff_t pos, unsigned len, struct folio **foliop, void **fsdata); 476 476 struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir, 477 477 umode_t mode); 478 478 void hfsplus_delete_inode(struct inode *inode);
+2 -3
fs/hfsplus/inode.c
··· 39 39 } 40 40 41 41 int hfsplus_write_begin(struct file *file, struct address_space *mapping, 42 - loff_t pos, unsigned len, struct page **pagep, void **fsdata) 42 + loff_t pos, unsigned len, struct folio **foliop, void **fsdata) 43 43 { 44 44 int ret; 45 45 46 - *pagep = NULL; 47 - ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, 46 + ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata, 48 47 hfsplus_get_block, 49 48 &HFSPLUS_I(mapping->host)->phys_size); 50 49 if (unlikely(ret))
+12 -11
fs/hostfs/hostfs_kern.c
··· 465 465 466 466 static int hostfs_write_begin(struct file *file, struct address_space *mapping, 467 467 loff_t pos, unsigned len, 468 - struct page **pagep, void **fsdata) 468 + struct folio **foliop, void **fsdata) 469 469 { 470 470 pgoff_t index = pos >> PAGE_SHIFT; 471 471 472 - *pagep = grab_cache_page_write_begin(mapping, index); 473 - if (!*pagep) 472 + *foliop = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, 473 + mapping_gfp_mask(mapping)); 474 + if (!*foliop) 474 475 return -ENOMEM; 475 476 return 0; 476 477 } 477 478 478 479 static int hostfs_write_end(struct file *file, struct address_space *mapping, 479 480 loff_t pos, unsigned len, unsigned copied, 480 - struct page *page, void *fsdata) 481 + struct folio *folio, void *fsdata) 481 482 { 482 483 struct inode *inode = mapping->host; 483 484 void *buffer; 484 - unsigned from = pos & (PAGE_SIZE - 1); 485 + size_t from = offset_in_folio(folio, pos); 485 486 int err; 486 487 487 - buffer = kmap_local_page(page); 488 - err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied); 488 + buffer = kmap_local_folio(folio, from); 489 + err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer, copied); 489 490 kunmap_local(buffer); 490 491 491 - if (!PageUptodate(page) && err == PAGE_SIZE) 492 - SetPageUptodate(page); 492 + if (!folio_test_uptodate(folio) && err == folio_size(folio)) 493 + folio_mark_uptodate(folio); 493 494 494 495 /* 495 496 * If err > 0, write_file has added err to pos, so we are comparing ··· 498 497 */ 499 498 if (err > 0 && (pos > inode->i_size)) 500 499 inode->i_size = pos; 501 - unlock_page(page); 502 - put_page(page); 500 + folio_unlock(folio); 501 + folio_put(folio); 503 502 504 503 return err; 505 504 }
+4 -5
fs/hpfs/file.c
··· 190 190 191 191 static int hpfs_write_begin(struct file *file, struct address_space *mapping, 192 192 loff_t pos, unsigned len, 193 - struct page **pagep, void **fsdata) 193 + struct folio **foliop, void **fsdata) 194 194 { 195 195 int ret; 196 196 197 - *pagep = NULL; 198 - ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, 197 + ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata, 199 198 hpfs_get_block, 200 199 &hpfs_i(mapping->host)->mmu_private); 201 200 if (unlikely(ret)) ··· 205 206 206 207 static int hpfs_write_end(struct file *file, struct address_space *mapping, 207 208 loff_t pos, unsigned len, unsigned copied, 208 - struct page *pagep, void *fsdata) 209 + struct folio *folio, void *fsdata) 209 210 { 210 211 struct inode *inode = mapping->host; 211 212 int err; 212 - err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); 213 + err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); 213 214 if (err < len) 214 215 hpfs_write_failed(mapping, pos + len); 215 216 if (!(err < 0)) {
+2 -2
fs/hugetlbfs/inode.c
··· 388 388 static int hugetlbfs_write_begin(struct file *file, 389 389 struct address_space *mapping, 390 390 loff_t pos, unsigned len, 391 - struct page **pagep, void **fsdata) 391 + struct folio **foliop, void **fsdata) 392 392 { 393 393 return -EINVAL; 394 394 } 395 395 396 396 static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, 397 397 loff_t pos, unsigned len, unsigned copied, 398 - struct page *page, void *fsdata) 398 + struct folio *folio, void *fsdata) 399 399 { 400 400 BUG(); 401 401 return -EINVAL;
+1 -1
fs/iomap/buffered-io.c
··· 900 900 size_t bh_written; 901 901 902 902 bh_written = block_write_end(NULL, iter->inode->i_mapping, pos, 903 - len, copied, &folio->page, NULL); 903 + len, copied, folio, NULL); 904 904 WARN_ON_ONCE(bh_written != copied && bh_written != 0); 905 905 return bh_written == copied; 906 906 }
+42 -46
fs/jffs2/file.c
··· 23 23 24 24 static int jffs2_write_end(struct file *filp, struct address_space *mapping, 25 25 loff_t pos, unsigned len, unsigned copied, 26 - struct page *pg, void *fsdata); 26 + struct folio *folio, void *fsdata); 27 27 static int jffs2_write_begin(struct file *filp, struct address_space *mapping, 28 28 loff_t pos, unsigned len, 29 - struct page **pagep, void **fsdata); 29 + struct folio **foliop, void **fsdata); 30 30 static int jffs2_read_folio(struct file *filp, struct folio *folio); 31 31 32 32 int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync) ··· 77 77 .write_end = jffs2_write_end, 78 78 }; 79 79 80 - static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) 80 + static int jffs2_do_readpage_nolock(struct inode *inode, struct folio *folio) 81 81 { 82 82 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 83 83 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 84 - unsigned char *pg_buf; 84 + unsigned char *kaddr; 85 85 int ret; 86 86 87 87 jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n", 88 - __func__, inode->i_ino, pg->index << PAGE_SHIFT); 88 + __func__, inode->i_ino, folio->index << PAGE_SHIFT); 89 89 90 - BUG_ON(!PageLocked(pg)); 90 + BUG_ON(!folio_test_locked(folio)); 91 91 92 - pg_buf = kmap(pg); 93 - /* FIXME: Can kmap fail? 
*/ 94 - 95 - ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT, 92 + kaddr = kmap_local_folio(folio, 0); 93 + ret = jffs2_read_inode_range(c, f, kaddr, folio->index << PAGE_SHIFT, 96 94 PAGE_SIZE); 95 + kunmap_local(kaddr); 97 96 98 97 if (!ret) 99 - SetPageUptodate(pg); 98 + folio_mark_uptodate(folio); 100 99 101 - flush_dcache_page(pg); 102 - kunmap(pg); 100 + flush_dcache_folio(folio); 103 101 104 102 jffs2_dbg(2, "readpage finished\n"); 105 103 return ret; ··· 105 107 106 108 int __jffs2_read_folio(struct file *file, struct folio *folio) 107 109 { 108 - int ret = jffs2_do_readpage_nolock(folio->mapping->host, &folio->page); 110 + int ret = jffs2_do_readpage_nolock(folio->mapping->host, folio); 109 111 folio_unlock(folio); 110 112 return ret; 111 113 } ··· 123 125 124 126 static int jffs2_write_begin(struct file *filp, struct address_space *mapping, 125 127 loff_t pos, unsigned len, 126 - struct page **pagep, void **fsdata) 128 + struct folio **foliop, void **fsdata) 127 129 { 128 - struct page *pg; 130 + struct folio *folio; 129 131 struct inode *inode = mapping->host; 130 132 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 131 133 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); ··· 204 206 * page in read_cache_page(), which causes a deadlock. 205 207 */ 206 208 mutex_lock(&c->alloc_sem); 207 - pg = grab_cache_page_write_begin(mapping, index); 208 - if (!pg) { 209 - ret = -ENOMEM; 209 + folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, 210 + mapping_gfp_mask(mapping)); 211 + if (IS_ERR(folio)) { 212 + ret = PTR_ERR(folio); 210 213 goto release_sem; 211 214 } 212 - *pagep = pg; 215 + *foliop = folio; 213 216 214 217 /* 215 - * Read in the page if it wasn't already present. Cannot optimize away 216 - * the whole page write case until jffs2_write_end can handle the 218 + * Read in the folio if it wasn't already present. 
Cannot optimize away 219 + * the whole folio write case until jffs2_write_end can handle the 217 220 * case of a short-copy. 218 221 */ 219 - if (!PageUptodate(pg)) { 222 + if (!folio_test_uptodate(folio)) { 220 223 mutex_lock(&f->sem); 221 - ret = jffs2_do_readpage_nolock(inode, pg); 224 + ret = jffs2_do_readpage_nolock(inode, folio); 222 225 mutex_unlock(&f->sem); 223 226 if (ret) { 224 - unlock_page(pg); 225 - put_page(pg); 227 + folio_unlock(folio); 228 + folio_put(folio); 226 229 goto release_sem; 227 230 } 228 231 } 229 - jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); 232 + jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags); 230 233 231 234 release_sem: 232 235 mutex_unlock(&c->alloc_sem); ··· 237 238 238 239 static int jffs2_write_end(struct file *filp, struct address_space *mapping, 239 240 loff_t pos, unsigned len, unsigned copied, 240 - struct page *pg, void *fsdata) 241 + struct folio *folio, void *fsdata) 241 242 { 242 243 /* Actually commit the write from the page cache page we're looking at. 243 244 * For now, we write the full page out each time. It sucks, but it's simple ··· 251 252 unsigned aligned_start = start & ~3; 252 253 int ret = 0; 253 254 uint32_t writtenlen = 0; 255 + void *buf; 254 256 255 - jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", 256 - __func__, inode->i_ino, pg->index << PAGE_SHIFT, 257 - start, end, pg->flags); 257 + jffs2_dbg(1, "%s(): ino #%lu, page at 0x%llx, range %d-%d, flags %lx\n", 258 + __func__, inode->i_ino, folio_pos(folio), 259 + start, end, folio->flags); 258 260 259 261 /* We need to avoid deadlock with page_cache_read() in 260 - jffs2_garbage_collect_pass(). So the page must be 262 + jffs2_garbage_collect_pass(). So the folio must be 261 263 up to date to prevent page_cache_read() from trying 262 264 to re-lock it. 
*/ 263 - BUG_ON(!PageUptodate(pg)); 265 + BUG_ON(!folio_test_uptodate(folio)); 264 266 265 267 if (end == PAGE_SIZE) { 266 268 /* When writing out the end of a page, write out the ··· 276 276 if (!ri) { 277 277 jffs2_dbg(1, "%s(): Allocation of raw inode failed\n", 278 278 __func__); 279 - unlock_page(pg); 280 - put_page(pg); 279 + folio_unlock(folio); 280 + folio_put(folio); 281 281 return -ENOMEM; 282 282 } 283 283 ··· 289 289 ri->isize = cpu_to_je32((uint32_t)inode->i_size); 290 290 ri->atime = ri->ctime = ri->mtime = cpu_to_je32(JFFS2_NOW()); 291 291 292 - /* In 2.4, it was already kmapped by generic_file_write(). Doesn't 293 - hurt to do it again. The alternative is ifdefs, which are ugly. */ 294 - kmap(pg); 295 - 296 - ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start, 297 - (pg->index << PAGE_SHIFT) + aligned_start, 292 + buf = kmap_local_folio(folio, aligned_start); 293 + ret = jffs2_write_inode_range(c, f, ri, buf, 294 + folio_pos(folio) + aligned_start, 298 295 end - aligned_start, &writtenlen); 299 - 300 - kunmap(pg); 296 + kunmap_local(buf); 301 297 302 298 if (ret) 303 299 mapping_set_error(mapping, ret); ··· 319 323 it gets reread */ 320 324 jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n", 321 325 __func__); 322 - ClearPageUptodate(pg); 326 + folio_clear_uptodate(folio); 323 327 } 324 328 325 329 jffs2_dbg(1, "%s() returning %d\n", 326 330 __func__, writtenlen > 0 ? writtenlen : ret); 327 - unlock_page(pg); 328 - put_page(pg); 331 + folio_unlock(folio); 332 + folio_put(folio); 329 333 return writtenlen > 0 ? writtenlen : ret; 330 334 }
+12 -13
fs/jffs2/gc.c
··· 1171 1171 uint32_t alloclen, offset, orig_end, orig_start; 1172 1172 int ret = 0; 1173 1173 unsigned char *comprbuf = NULL, *writebuf; 1174 - struct page *page; 1174 + struct folio *folio; 1175 1175 unsigned char *pg_ptr; 1176 1176 1177 1177 memset(&ri, 0, sizeof(ri)); ··· 1317 1317 BUG_ON(start > orig_start); 1318 1318 } 1319 1319 1320 - /* The rules state that we must obtain the page lock *before* f->sem, so 1320 + /* The rules state that we must obtain the folio lock *before* f->sem, so 1321 1321 * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's 1322 1322 * actually going to *change* so we're safe; we only allow reading. 1323 1323 * 1324 1324 * It is important to note that jffs2_write_begin() will ensure that its 1325 - * page is marked Uptodate before allocating space. That means that if we 1326 - * end up here trying to GC the *same* page that jffs2_write_begin() is 1327 - * trying to write out, read_cache_page() will not deadlock. */ 1325 + * folio is marked uptodate before allocating space. That means that if we 1326 + * end up here trying to GC the *same* folio that jffs2_write_begin() is 1327 + * trying to write out, read_cache_folio() will not deadlock. 
*/ 1328 1328 mutex_unlock(&f->sem); 1329 - page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT, 1329 + folio = read_cache_folio(inode->i_mapping, start >> PAGE_SHIFT, 1330 1330 __jffs2_read_folio, NULL); 1331 - if (IS_ERR(page)) { 1332 - pr_warn("read_cache_page() returned error: %ld\n", 1333 - PTR_ERR(page)); 1331 + if (IS_ERR(folio)) { 1332 + pr_warn("read_cache_folio() returned error: %ld\n", 1333 + PTR_ERR(folio)); 1334 1334 mutex_lock(&f->sem); 1335 - return PTR_ERR(page); 1335 + return PTR_ERR(folio); 1336 1336 } 1337 1337 1338 - pg_ptr = kmap(page); 1338 + pg_ptr = kmap_local_folio(folio, 0); 1339 1339 mutex_lock(&f->sem); 1340 1340 1341 1341 offset = start; ··· 1400 1400 } 1401 1401 } 1402 1402 1403 - kunmap(page); 1404 - put_page(page); 1403 + folio_release_kmap(folio, pg_ptr); 1405 1404 return ret; 1406 1405 }
+4 -4
fs/jfs/inode.c
··· 292 292 293 293 static int jfs_write_begin(struct file *file, struct address_space *mapping, 294 294 loff_t pos, unsigned len, 295 - struct page **pagep, void **fsdata) 295 + struct folio **foliop, void **fsdata) 296 296 { 297 297 int ret; 298 298 299 - ret = block_write_begin(mapping, pos, len, pagep, jfs_get_block); 299 + ret = block_write_begin(mapping, pos, len, foliop, jfs_get_block); 300 300 if (unlikely(ret)) 301 301 jfs_write_failed(mapping, pos + len); 302 302 ··· 304 304 } 305 305 306 306 static int jfs_write_end(struct file *file, struct address_space *mapping, 307 - loff_t pos, unsigned len, unsigned copied, struct page *page, 307 + loff_t pos, unsigned len, unsigned copied, struct folio *folio, 308 308 void *fsdata) 309 309 { 310 310 int ret; 311 311 312 - ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 312 + ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); 313 313 if (ret < len) 314 314 jfs_write_failed(mapping, pos + len); 315 315 return ret;
+6 -7
fs/libfs.c
··· 914 914 915 915 int simple_write_begin(struct file *file, struct address_space *mapping, 916 916 loff_t pos, unsigned len, 917 - struct page **pagep, void **fsdata) 917 + struct folio **foliop, void **fsdata) 918 918 { 919 919 struct folio *folio; 920 920 ··· 923 923 if (IS_ERR(folio)) 924 924 return PTR_ERR(folio); 925 925 926 - *pagep = &folio->page; 926 + *foliop = folio; 927 927 928 928 if (!folio_test_uptodate(folio) && (len != folio_size(folio))) { 929 929 size_t from = offset_in_folio(folio, pos); ··· 942 942 * @pos: " 943 943 * @len: " 944 944 * @copied: " 945 - * @page: " 945 + * @folio: " 946 946 * @fsdata: " 947 947 * 948 - * simple_write_end does the minimum needed for updating a page after writing is 949 - * done. It has the same API signature as the .write_end of 948 + * simple_write_end does the minimum needed for updating a folio after 949 + * writing is done. It has the same API signature as the .write_end of 950 950 * address_space_operations vector. So it can just be set onto .write_end for 951 951 * FSes that don't need any other processing. i_mutex is assumed to be held. 952 952 * Block based filesystems should use generic_write_end(). ··· 959 959 */ 960 960 static int simple_write_end(struct file *file, struct address_space *mapping, 961 961 loff_t pos, unsigned len, unsigned copied, 962 - struct page *page, void *fsdata) 962 + struct folio *folio, void *fsdata) 963 963 { 964 - struct folio *folio = page_folio(page); 965 964 struct inode *inode = folio->mapping->host; 966 965 loff_t last_pos = pos + copied; 967 966
+66 -68
fs/minix/dir.c
··· 40 40 return last_byte; 41 41 } 42 42 43 - static void dir_commit_chunk(struct page *page, loff_t pos, unsigned len) 43 + static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len) 44 44 { 45 - struct address_space *mapping = page->mapping; 45 + struct address_space *mapping = folio->mapping; 46 46 struct inode *dir = mapping->host; 47 47 48 - block_write_end(NULL, mapping, pos, len, len, page, NULL); 48 + block_write_end(NULL, mapping, pos, len, len, folio, NULL); 49 49 50 50 if (pos+len > dir->i_size) { 51 51 i_size_write(dir, pos+len); 52 52 mark_inode_dirty(dir); 53 53 } 54 - unlock_page(page); 54 + folio_unlock(folio); 55 55 } 56 56 57 57 static int minix_handle_dirsync(struct inode *dir) ··· 64 64 return err; 65 65 } 66 66 67 - static void *dir_get_page(struct inode *dir, unsigned long n, struct page **p) 67 + static void *dir_get_folio(struct inode *dir, unsigned long n, 68 + struct folio **foliop) 68 69 { 69 - struct address_space *mapping = dir->i_mapping; 70 - struct page *page = read_mapping_page(mapping, n, NULL); 71 - if (IS_ERR(page)) 72 - return ERR_CAST(page); 73 - *p = page; 74 - return kmap_local_page(page); 70 + struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL); 71 + 72 + if (IS_ERR(folio)) 73 + return ERR_CAST(folio); 74 + *foliop = folio; 75 + return kmap_local_folio(folio, 0); 75 76 } 76 77 77 78 static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi) ··· 100 99 101 100 for ( ; n < npages; n++, offset = 0) { 102 101 char *p, *kaddr, *limit; 103 - struct page *page; 102 + struct folio *folio; 104 103 105 - kaddr = dir_get_page(inode, n, &page); 104 + kaddr = dir_get_folio(inode, n, &folio); 106 105 if (IS_ERR(kaddr)) 107 106 continue; 108 107 p = kaddr+offset; ··· 123 122 unsigned l = strnlen(name, sbi->s_namelen); 124 123 if (!dir_emit(ctx, name, l, 125 124 inumber, DT_UNKNOWN)) { 126 - unmap_and_put_page(page, p); 125 + folio_release_kmap(folio, p); 127 126 return 0; 128 127 } 129 128 } 
130 129 ctx->pos += chunk_size; 131 130 } 132 - unmap_and_put_page(page, kaddr); 131 + folio_release_kmap(folio, kaddr); 133 132 } 134 133 return 0; 135 134 } ··· 145 144 /* 146 145 * minix_find_entry() 147 146 * 148 - * finds an entry in the specified directory with the wanted name. It 149 - * returns the cache buffer in which the entry was found, and the entry 150 - * itself (as a parameter - res_dir). It does NOT read the inode of the 147 + * finds an entry in the specified directory with the wanted name. 148 + * It does NOT read the inode of the 151 149 * entry - you'll have to do that yourself if you want to. 150 + * 151 + * On Success folio_release_kmap() should be called on *foliop. 152 152 */ 153 - minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page) 153 + minix_dirent *minix_find_entry(struct dentry *dentry, struct folio **foliop) 154 154 { 155 155 const char * name = dentry->d_name.name; 156 156 int namelen = dentry->d_name.len; ··· 160 158 struct minix_sb_info * sbi = minix_sb(sb); 161 159 unsigned long n; 162 160 unsigned long npages = dir_pages(dir); 163 - struct page *page = NULL; 164 161 char *p; 165 162 166 163 char *namx; 167 164 __u32 inumber; 168 - *res_page = NULL; 169 165 170 166 for (n = 0; n < npages; n++) { 171 167 char *kaddr, *limit; 172 168 173 - kaddr = dir_get_page(dir, n, &page); 169 + kaddr = dir_get_folio(dir, n, foliop); 174 170 if (IS_ERR(kaddr)) 175 171 continue; 176 172 ··· 188 188 if (namecompare(namelen, sbi->s_namelen, name, namx)) 189 189 goto found; 190 190 } 191 - unmap_and_put_page(page, kaddr); 191 + folio_release_kmap(*foliop, kaddr); 192 192 } 193 193 return NULL; 194 194 195 195 found: 196 - *res_page = page; 197 196 return (minix_dirent *)p; 198 197 } 199 198 ··· 203 204 int namelen = dentry->d_name.len; 204 205 struct super_block * sb = dir->i_sb; 205 206 struct minix_sb_info * sbi = minix_sb(sb); 206 - struct page *page = NULL; 207 + struct folio *folio = NULL; 207 208 unsigned long npages = 
dir_pages(dir); 208 209 unsigned long n; 209 210 char *kaddr, *p; ··· 222 223 for (n = 0; n <= npages; n++) { 223 224 char *limit, *dir_end; 224 225 225 - kaddr = dir_get_page(dir, n, &page); 226 + kaddr = dir_get_folio(dir, n, &folio); 226 227 if (IS_ERR(kaddr)) 227 228 return PTR_ERR(kaddr); 228 - lock_page(page); 229 + folio_lock(folio); 229 230 dir_end = kaddr + minix_last_byte(dir, n); 230 231 limit = kaddr + PAGE_SIZE - sbi->s_dirsize; 231 232 for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { ··· 252 253 if (namecompare(namelen, sbi->s_namelen, name, namx)) 253 254 goto out_unlock; 254 255 } 255 - unlock_page(page); 256 - unmap_and_put_page(page, kaddr); 256 + folio_unlock(folio); 257 + folio_release_kmap(folio, kaddr); 257 258 } 258 259 BUG(); 259 260 return -EINVAL; 260 261 261 262 got_it: 262 - pos = page_offset(page) + offset_in_page(p); 263 - err = minix_prepare_chunk(page, pos, sbi->s_dirsize); 263 + pos = folio_pos(folio) + offset_in_folio(folio, p); 264 + err = minix_prepare_chunk(folio, pos, sbi->s_dirsize); 264 265 if (err) 265 266 goto out_unlock; 266 267 memcpy (namx, name, namelen); ··· 271 272 memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2); 272 273 de->inode = inode->i_ino; 273 274 } 274 - dir_commit_chunk(page, pos, sbi->s_dirsize); 275 + dir_commit_chunk(folio, pos, sbi->s_dirsize); 275 276 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); 276 277 mark_inode_dirty(dir); 277 278 err = minix_handle_dirsync(dir); 278 279 out_put: 279 - unmap_and_put_page(page, kaddr); 280 + folio_release_kmap(folio, kaddr); 280 281 return err; 281 282 out_unlock: 282 - unlock_page(page); 283 + folio_unlock(folio); 283 284 goto out_put; 284 285 } 285 286 286 - int minix_delete_entry(struct minix_dir_entry *de, struct page *page) 287 + int minix_delete_entry(struct minix_dir_entry *de, struct folio *folio) 287 288 { 288 - struct inode *inode = page->mapping->host; 289 - loff_t pos = page_offset(page) + offset_in_page(de); 289 + struct 
inode *inode = folio->mapping->host; 290 + loff_t pos = folio_pos(folio) + offset_in_folio(folio, de); 290 291 struct minix_sb_info *sbi = minix_sb(inode->i_sb); 291 292 unsigned len = sbi->s_dirsize; 292 293 int err; 293 294 294 - lock_page(page); 295 - err = minix_prepare_chunk(page, pos, len); 295 + folio_lock(folio); 296 + err = minix_prepare_chunk(folio, pos, len); 296 297 if (err) { 297 - unlock_page(page); 298 + folio_unlock(folio); 298 299 return err; 299 300 } 300 301 if (sbi->s_version == MINIX_V3) 301 302 ((minix3_dirent *)de)->inode = 0; 302 303 else 303 304 de->inode = 0; 304 - dir_commit_chunk(page, pos, len); 305 + dir_commit_chunk(folio, pos, len); 305 306 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 306 307 mark_inode_dirty(inode); 307 308 return minix_handle_dirsync(inode); ··· 309 310 310 311 int minix_make_empty(struct inode *inode, struct inode *dir) 311 312 { 312 - struct page *page = grab_cache_page(inode->i_mapping, 0); 313 + struct folio *folio = filemap_grab_folio(inode->i_mapping, 0); 313 314 struct minix_sb_info *sbi = minix_sb(inode->i_sb); 314 315 char *kaddr; 315 316 int err; 316 317 317 - if (!page) 318 - return -ENOMEM; 319 - err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize); 318 + if (IS_ERR(folio)) 319 + return PTR_ERR(folio); 320 + err = minix_prepare_chunk(folio, 0, 2 * sbi->s_dirsize); 320 321 if (err) { 321 - unlock_page(page); 322 + folio_unlock(folio); 322 323 goto fail; 323 324 } 324 325 325 - kaddr = kmap_local_page(page); 326 - memset(kaddr, 0, PAGE_SIZE); 326 + kaddr = kmap_local_folio(folio, 0); 327 + memset(kaddr, 0, folio_size(folio)); 327 328 328 329 if (sbi->s_version == MINIX_V3) { 329 330 minix3_dirent *de3 = (minix3_dirent *)kaddr; ··· 344 345 } 345 346 kunmap_local(kaddr); 346 347 347 - dir_commit_chunk(page, 0, 2 * sbi->s_dirsize); 348 + dir_commit_chunk(folio, 0, 2 * sbi->s_dirsize); 348 349 err = minix_handle_dirsync(inode); 349 350 fail: 350 - put_page(page); 351 + folio_put(folio); 351 
352 return err; 352 353 } 353 354 ··· 356 357 */ 357 358 int minix_empty_dir(struct inode * inode) 358 359 { 359 - struct page *page = NULL; 360 + struct folio *folio = NULL; 360 361 unsigned long i, npages = dir_pages(inode); 361 362 struct minix_sb_info *sbi = minix_sb(inode->i_sb); 362 363 char *name, *kaddr; ··· 365 366 for (i = 0; i < npages; i++) { 366 367 char *p, *limit; 367 368 368 - kaddr = dir_get_page(inode, i, &page); 369 + kaddr = dir_get_folio(inode, i, &folio); 369 370 if (IS_ERR(kaddr)) 370 371 continue; 371 372 ··· 394 395 goto not_empty; 395 396 } 396 397 } 397 - unmap_and_put_page(page, kaddr); 398 + folio_release_kmap(folio, kaddr); 398 399 } 399 400 return 1; 400 401 401 402 not_empty: 402 - unmap_and_put_page(page, kaddr); 403 + folio_release_kmap(folio, kaddr); 403 404 return 0; 404 405 } 405 406 406 407 /* Releases the page */ 407 - int minix_set_link(struct minix_dir_entry *de, struct page *page, 408 + int minix_set_link(struct minix_dir_entry *de, struct folio *folio, 408 409 struct inode *inode) 409 410 { 410 - struct inode *dir = page->mapping->host; 411 + struct inode *dir = folio->mapping->host; 411 412 struct minix_sb_info *sbi = minix_sb(dir->i_sb); 412 - loff_t pos = page_offset(page) + offset_in_page(de); 413 + loff_t pos = folio_pos(folio) + offset_in_folio(folio, de); 413 414 int err; 414 415 415 - lock_page(page); 416 - err = minix_prepare_chunk(page, pos, sbi->s_dirsize); 416 + folio_lock(folio); 417 + err = minix_prepare_chunk(folio, pos, sbi->s_dirsize); 417 418 if (err) { 418 - unlock_page(page); 419 + folio_unlock(folio); 419 420 return err; 420 421 } 421 422 if (sbi->s_version == MINIX_V3) 422 423 ((minix3_dirent *)de)->inode = inode->i_ino; 423 424 else 424 425 de->inode = inode->i_ino; 425 - dir_commit_chunk(page, pos, sbi->s_dirsize); 426 + dir_commit_chunk(folio, pos, sbi->s_dirsize); 426 427 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); 427 428 mark_inode_dirty(dir); 428 429 return 
minix_handle_dirsync(dir); 429 430 } 430 431 431 - struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p) 432 + struct minix_dir_entry *minix_dotdot(struct inode *dir, struct folio **foliop) 432 433 { 433 434 struct minix_sb_info *sbi = minix_sb(dir->i_sb); 434 - struct minix_dir_entry *de = dir_get_page(dir, 0, p); 435 + struct minix_dir_entry *de = dir_get_folio(dir, 0, foliop); 435 436 436 437 if (!IS_ERR(de)) 437 438 return minix_next_entry(de, sbi); ··· 440 441 441 442 ino_t minix_inode_by_name(struct dentry *dentry) 442 443 { 443 - struct page *page; 444 - struct minix_dir_entry *de = minix_find_entry(dentry, &page); 444 + struct folio *folio; 445 + struct minix_dir_entry *de = minix_find_entry(dentry, &folio); 445 446 ino_t res = 0; 446 447 447 448 if (de) { 448 - struct address_space *mapping = page->mapping; 449 - struct inode *inode = mapping->host; 449 + struct inode *inode = folio->mapping->host; 450 450 struct minix_sb_info *sbi = minix_sb(inode->i_sb); 451 451 452 452 if (sbi->s_version == MINIX_V3) 453 453 res = ((minix3_dirent *) de)->inode; 454 454 else 455 455 res = de->inode; 456 - unmap_and_put_page(page, de); 456 + folio_release_kmap(folio, de); 457 457 } 458 458 return res; 459 459 }
+4 -4
fs/minix/inode.c
··· 427 427 return block_read_full_folio(folio, minix_get_block); 428 428 } 429 429 430 - int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len) 430 + int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len) 431 431 { 432 - return __block_write_begin(page, pos, len, minix_get_block); 432 + return __block_write_begin(folio, pos, len, minix_get_block); 433 433 } 434 434 435 435 static void minix_write_failed(struct address_space *mapping, loff_t to) ··· 444 444 445 445 static int minix_write_begin(struct file *file, struct address_space *mapping, 446 446 loff_t pos, unsigned len, 447 - struct page **pagep, void **fsdata) 447 + struct folio **foliop, void **fsdata) 448 448 { 449 449 int ret; 450 450 451 - ret = block_write_begin(mapping, pos, len, pagep, minix_get_block); 451 + ret = block_write_begin(mapping, pos, len, foliop, minix_get_block); 452 452 if (unlikely(ret)) 453 453 minix_write_failed(mapping, pos + len); 454 454
+20 -20
fs/minix/minix.h
··· 42 42 unsigned short s_version; 43 43 }; 44 44 45 - extern struct inode *minix_iget(struct super_block *, unsigned long); 46 - extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **); 47 - extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); 48 - extern struct inode * minix_new_inode(const struct inode *, umode_t); 49 - extern void minix_free_inode(struct inode * inode); 50 - extern unsigned long minix_count_free_inodes(struct super_block *sb); 51 - extern int minix_new_block(struct inode * inode); 52 - extern void minix_free_block(struct inode *inode, unsigned long block); 53 - extern unsigned long minix_count_free_blocks(struct super_block *sb); 54 - extern int minix_getattr(struct mnt_idmap *, const struct path *, 55 - struct kstat *, u32, unsigned int); 56 - extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); 45 + struct inode *minix_iget(struct super_block *, unsigned long); 46 + struct minix_inode *minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **); 47 + struct minix2_inode *minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); 48 + struct inode *minix_new_inode(const struct inode *, umode_t); 49 + void minix_free_inode(struct inode *inode); 50 + unsigned long minix_count_free_inodes(struct super_block *sb); 51 + int minix_new_block(struct inode *inode); 52 + void minix_free_block(struct inode *inode, unsigned long block); 53 + unsigned long minix_count_free_blocks(struct super_block *sb); 54 + int minix_getattr(struct mnt_idmap *, const struct path *, 55 + struct kstat *, u32, unsigned int); 56 + int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len); 57 57 58 58 extern void V1_minix_truncate(struct inode *); 59 59 extern void V2_minix_truncate(struct inode *); ··· 64 64 extern unsigned V1_minix_blocks(loff_t, struct super_block *); 65 65 extern unsigned V2_minix_blocks(loff_t, struct 
super_block *); 66 66 67 - extern struct minix_dir_entry *minix_find_entry(struct dentry*, struct page**); 68 - extern int minix_add_link(struct dentry*, struct inode*); 69 - extern int minix_delete_entry(struct minix_dir_entry*, struct page*); 70 - extern int minix_make_empty(struct inode*, struct inode*); 71 - extern int minix_empty_dir(struct inode*); 72 - int minix_set_link(struct minix_dir_entry *de, struct page *page, 67 + struct minix_dir_entry *minix_find_entry(struct dentry *, struct folio **); 68 + int minix_add_link(struct dentry*, struct inode*); 69 + int minix_delete_entry(struct minix_dir_entry *, struct folio *); 70 + int minix_make_empty(struct inode*, struct inode*); 71 + int minix_empty_dir(struct inode*); 72 + int minix_set_link(struct minix_dir_entry *de, struct folio *folio, 73 73 struct inode *inode); 74 - extern struct minix_dir_entry *minix_dotdot(struct inode*, struct page**); 75 - extern ino_t minix_inode_by_name(struct dentry*); 74 + struct minix_dir_entry *minix_dotdot(struct inode*, struct folio **); 75 + ino_t minix_inode_by_name(struct dentry*); 76 76 77 77 extern const struct inode_operations minix_file_inode_operations; 78 78 extern const struct inode_operations minix_dir_inode_operations;
+16 -16
fs/minix/namei.c
··· 141 141 static int minix_unlink(struct inode * dir, struct dentry *dentry) 142 142 { 143 143 struct inode * inode = d_inode(dentry); 144 - struct page * page; 144 + struct folio *folio; 145 145 struct minix_dir_entry * de; 146 146 int err; 147 147 148 - de = minix_find_entry(dentry, &page); 148 + de = minix_find_entry(dentry, &folio); 149 149 if (!de) 150 150 return -ENOENT; 151 - err = minix_delete_entry(de, page); 152 - unmap_and_put_page(page, de); 151 + err = minix_delete_entry(de, folio); 152 + folio_release_kmap(folio, de); 153 153 154 154 if (err) 155 155 return err; ··· 180 180 { 181 181 struct inode * old_inode = d_inode(old_dentry); 182 182 struct inode * new_inode = d_inode(new_dentry); 183 - struct page * dir_page = NULL; 183 + struct folio * dir_folio = NULL; 184 184 struct minix_dir_entry * dir_de = NULL; 185 - struct page * old_page; 185 + struct folio *old_folio; 186 186 struct minix_dir_entry * old_de; 187 187 int err = -ENOENT; 188 188 189 189 if (flags & ~RENAME_NOREPLACE) 190 190 return -EINVAL; 191 191 192 - old_de = minix_find_entry(old_dentry, &old_page); 192 + old_de = minix_find_entry(old_dentry, &old_folio); 193 193 if (!old_de) 194 194 goto out; 195 195 196 196 if (S_ISDIR(old_inode->i_mode)) { 197 197 err = -EIO; 198 - dir_de = minix_dotdot(old_inode, &dir_page); 198 + dir_de = minix_dotdot(old_inode, &dir_folio); 199 199 if (!dir_de) 200 200 goto out_old; 201 201 } 202 202 203 203 if (new_inode) { 204 - struct page * new_page; 204 + struct folio *new_folio; 205 205 struct minix_dir_entry * new_de; 206 206 207 207 err = -ENOTEMPTY; ··· 209 209 goto out_dir; 210 210 211 211 err = -ENOENT; 212 - new_de = minix_find_entry(new_dentry, &new_page); 212 + new_de = minix_find_entry(new_dentry, &new_folio); 213 213 if (!new_de) 214 214 goto out_dir; 215 - err = minix_set_link(new_de, new_page, old_inode); 216 - unmap_and_put_page(new_page, new_de); 215 + err = minix_set_link(new_de, new_folio, old_inode); 216 + folio_release_kmap(new_folio, 
new_de); 217 217 if (err) 218 218 goto out_dir; 219 219 inode_set_ctime_current(new_inode); ··· 228 228 inode_inc_link_count(new_dir); 229 229 } 230 230 231 - err = minix_delete_entry(old_de, old_page); 231 + err = minix_delete_entry(old_de, old_folio); 232 232 if (err) 233 233 goto out_dir; 234 234 235 235 mark_inode_dirty(old_inode); 236 236 237 237 if (dir_de) { 238 - err = minix_set_link(dir_de, dir_page, new_dir); 238 + err = minix_set_link(dir_de, dir_folio, new_dir); 239 239 if (!err) 240 240 inode_dec_link_count(old_dir); 241 241 } 242 242 out_dir: 243 243 if (dir_de) 244 - unmap_and_put_page(dir_page, dir_de); 244 + folio_release_kmap(dir_folio, dir_de); 245 245 out_old: 246 - unmap_and_put_page(old_page, old_de); 246 + folio_release_kmap(old_folio, old_de); 247 247 out: 248 248 return err; 249 249 }
+5 -5
fs/namei.c
··· 5351 5351 struct address_space *mapping = inode->i_mapping; 5352 5352 const struct address_space_operations *aops = mapping->a_ops; 5353 5353 bool nofs = !mapping_gfp_constraint(mapping, __GFP_FS); 5354 - struct page *page; 5354 + struct folio *folio; 5355 5355 void *fsdata = NULL; 5356 5356 int err; 5357 5357 unsigned int flags; ··· 5359 5359 retry: 5360 5360 if (nofs) 5361 5361 flags = memalloc_nofs_save(); 5362 - err = aops->write_begin(NULL, mapping, 0, len-1, &page, &fsdata); 5362 + err = aops->write_begin(NULL, mapping, 0, len-1, &folio, &fsdata); 5363 5363 if (nofs) 5364 5364 memalloc_nofs_restore(flags); 5365 5365 if (err) 5366 5366 goto fail; 5367 5367 5368 - memcpy(page_address(page), symname, len-1); 5368 + memcpy(folio_address(folio), symname, len - 1); 5369 5369 5370 - err = aops->write_end(NULL, mapping, 0, len-1, len-1, 5371 - page, fsdata); 5370 + err = aops->write_end(NULL, mapping, 0, len - 1, len - 1, 5371 + folio, fsdata); 5372 5372 if (err < 0) 5373 5373 goto fail; 5374 5374 if (err < len-1)
+3 -4
fs/nfs/file.c
··· 336 336 * increment the page use counts until he is done with the page. 337 337 */ 338 338 static int nfs_write_begin(struct file *file, struct address_space *mapping, 339 - loff_t pos, unsigned len, struct page **pagep, 339 + loff_t pos, unsigned len, struct folio **foliop, 340 340 void **fsdata) 341 341 { 342 342 fgf_t fgp = FGP_WRITEBEGIN; ··· 353 353 mapping_gfp_mask(mapping)); 354 354 if (IS_ERR(folio)) 355 355 return PTR_ERR(folio); 356 - *pagep = &folio->page; 356 + *foliop = folio; 357 357 358 358 ret = nfs_flush_incompatible(file, folio); 359 359 if (ret) { ··· 372 372 373 373 static int nfs_write_end(struct file *file, struct address_space *mapping, 374 374 loff_t pos, unsigned len, unsigned copied, 375 - struct page *page, void *fsdata) 375 + struct folio *folio, void *fsdata) 376 376 { 377 377 struct nfs_open_context *ctx = nfs_file_open_context(file); 378 - struct folio *folio = page_folio(page); 379 378 unsigned offset = offset_in_folio(folio, pos); 380 379 int status; 381 380
+2 -2
fs/nilfs2/dir.c
··· 83 83 { 84 84 loff_t pos = folio_pos(folio) + from; 85 85 86 - return __block_write_begin(&folio->page, pos, to - from, nilfs_get_block); 86 + return __block_write_begin(folio, pos, to - from, nilfs_get_block); 87 87 } 88 88 89 89 static void nilfs_commit_chunk(struct folio *folio, ··· 96 96 int err; 97 97 98 98 nr_dirty = nilfs_page_count_clean_buffers(&folio->page, from, to); 99 - copied = block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL); 99 + copied = block_write_end(NULL, mapping, pos, len, len, folio, NULL); 100 100 if (pos + copied > dir->i_size) 101 101 i_size_write(dir, pos + copied); 102 102 if (IS_DIRSYNC(dir))
+5 -5
fs/nilfs2/inode.c
··· 250 250 251 251 static int nilfs_write_begin(struct file *file, struct address_space *mapping, 252 252 loff_t pos, unsigned len, 253 - struct page **pagep, void **fsdata) 253 + struct folio **foliop, void **fsdata) 254 254 255 255 { 256 256 struct inode *inode = mapping->host; ··· 259 259 if (unlikely(err)) 260 260 return err; 261 261 262 - err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block); 262 + err = block_write_begin(mapping, pos, len, foliop, nilfs_get_block); 263 263 if (unlikely(err)) { 264 264 nilfs_write_failed(mapping, pos + len); 265 265 nilfs_transaction_abort(inode->i_sb); ··· 269 269 270 270 static int nilfs_write_end(struct file *file, struct address_space *mapping, 271 271 loff_t pos, unsigned len, unsigned copied, 272 - struct page *page, void *fsdata) 272 + struct folio *folio, void *fsdata) 273 273 { 274 274 struct inode *inode = mapping->host; 275 275 unsigned int start = pos & (PAGE_SIZE - 1); 276 276 unsigned int nr_dirty; 277 277 int err; 278 278 279 - nr_dirty = nilfs_page_count_clean_buffers(page, start, 279 + nr_dirty = nilfs_page_count_clean_buffers(&folio->page, start, 280 280 start + copied); 281 - copied = generic_write_end(file, mapping, pos, len, copied, page, 281 + copied = generic_write_end(file, mapping, pos, len, copied, folio, 282 282 fsdata); 283 283 nilfs_set_file_dirty(inode, nr_dirty); 284 284 err = nilfs_transaction_commit(inode->i_sb);
+8 -8
fs/nilfs2/recovery.c
··· 498 498 struct inode *inode; 499 499 struct nilfs_recovery_block *rb, *n; 500 500 unsigned int blocksize = nilfs->ns_blocksize; 501 - struct page *page; 501 + struct folio *folio; 502 502 loff_t pos; 503 503 int err = 0, err2 = 0; 504 504 ··· 512 512 513 513 pos = rb->blkoff << inode->i_blkbits; 514 514 err = block_write_begin(inode->i_mapping, pos, blocksize, 515 - &page, nilfs_get_block); 515 + &folio, nilfs_get_block); 516 516 if (unlikely(err)) { 517 517 loff_t isize = inode->i_size; 518 518 ··· 522 522 goto failed_inode; 523 523 } 524 524 525 - err = nilfs_recovery_copy_block(nilfs, rb, pos, page); 525 + err = nilfs_recovery_copy_block(nilfs, rb, pos, &folio->page); 526 526 if (unlikely(err)) 527 527 goto failed_page; 528 528 ··· 531 531 goto failed_page; 532 532 533 533 block_write_end(NULL, inode->i_mapping, pos, blocksize, 534 - blocksize, page, NULL); 534 + blocksize, folio, NULL); 535 535 536 - unlock_page(page); 537 - put_page(page); 536 + folio_unlock(folio); 537 + folio_put(folio); 538 538 539 539 (*nr_salvaged_blocks)++; 540 540 goto next; 541 541 542 542 failed_page: 543 - unlock_page(page); 544 - put_page(page); 543 + folio_unlock(folio); 544 + folio_put(folio); 545 545 546 546 failed_inode: 547 547 nilfs_warn(sb,
+4 -5
fs/ntfs3/file.c
··· 182 182 183 183 for (;;) { 184 184 u32 zerofrom, len; 185 - struct page *page; 185 + struct folio *folio; 186 186 u8 bits; 187 187 CLST vcn, lcn, clen; 188 188 ··· 208 208 if (pos + len > new_valid) 209 209 len = new_valid - pos; 210 210 211 - err = ntfs_write_begin(file, mapping, pos, len, &page, NULL); 211 + err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL); 212 212 if (err) 213 213 goto out; 214 214 215 - zero_user_segment(page, zerofrom, PAGE_SIZE); 215 + folio_zero_range(folio, zerofrom, folio_size(folio)); 216 216 217 - /* This function in any case puts page. */ 218 - err = ntfs_write_end(file, mapping, pos, len, len, page, NULL); 217 + err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL); 219 218 if (err < 0) 220 219 goto out; 221 220 pos += len;
+5 -46
fs/ntfs3/inode.c
··· 901 901 } 902 902 903 903 int ntfs_write_begin(struct file *file, struct address_space *mapping, 904 - loff_t pos, u32 len, struct page **pagep, void **fsdata) 904 + loff_t pos, u32 len, struct folio **foliop, void **fsdata) 905 905 { 906 906 int err; 907 907 struct inode *inode = mapping->host; ··· 910 910 if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 911 911 return -EIO; 912 912 913 - *pagep = NULL; 914 913 if (is_resident(ni)) { 915 914 struct folio *folio = __filemap_get_folio( 916 915 mapping, pos >> PAGE_SHIFT, FGP_WRITEBEGIN, ··· 925 926 ni_unlock(ni); 926 927 927 928 if (!err) { 928 - *pagep = &folio->page; 929 + *foliop = folio; 929 930 goto out; 930 931 } 931 932 folio_unlock(folio); ··· 935 936 goto out; 936 937 } 937 938 938 - err = block_write_begin(mapping, pos, len, pagep, 939 + err = block_write_begin(mapping, pos, len, foliop, 939 940 ntfs_get_block_write_begin); 940 941 941 942 out: ··· 946 947 * ntfs_write_end - Address_space_operations::write_end. 947 948 */ 948 949 int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, 949 - u32 len, u32 copied, struct page *page, void *fsdata) 950 + u32 len, u32 copied, struct folio *folio, void *fsdata) 950 951 { 951 - struct folio *folio = page_folio(page); 952 952 struct inode *inode = mapping->host; 953 953 struct ntfs_inode *ni = ntfs_i(inode); 954 954 u64 valid = ni->i_valid; ··· 977 979 folio_unlock(folio); 978 980 folio_put(folio); 979 981 } else { 980 - err = generic_write_end(file, mapping, pos, len, copied, page, 982 + err = generic_write_end(file, mapping, pos, len, copied, folio, 981 983 fsdata); 982 984 } 983 985 ··· 1002 1004 if (dirty) 1003 1005 mark_inode_dirty(inode); 1004 1006 } 1005 - 1006 - return err; 1007 - } 1008 - 1009 - int reset_log_file(struct inode *inode) 1010 - { 1011 - int err; 1012 - loff_t pos = 0; 1013 - u32 log_size = inode->i_size; 1014 - struct address_space *mapping = inode->i_mapping; 1015 - 1016 - for (;;) { 1017 - u32 len; 1018 - void 
*kaddr; 1019 - struct page *page; 1020 - 1021 - len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE; 1022 - 1023 - err = block_write_begin(mapping, pos, len, &page, 1024 - ntfs_get_block_write_begin); 1025 - if (err) 1026 - goto out; 1027 - 1028 - kaddr = kmap_atomic(page); 1029 - memset(kaddr, -1, len); 1030 - kunmap_atomic(kaddr); 1031 - flush_dcache_page(page); 1032 - 1033 - err = block_write_end(NULL, mapping, pos, len, len, page, NULL); 1034 - if (err < 0) 1035 - goto out; 1036 - pos += len; 1037 - 1038 - if (pos >= log_size) 1039 - break; 1040 - balance_dirty_pages_ratelimited(mapping); 1041 - } 1042 - out: 1043 - mark_inode_dirty_sync(inode); 1044 1007 1045 1008 return err; 1046 1009 }
+2 -3
fs/ntfs3/ntfs_fs.h
··· 708 708 struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref, 709 709 const struct cpu_str *name); 710 710 int ntfs_set_size(struct inode *inode, u64 new_size); 711 - int reset_log_file(struct inode *inode); 712 711 int ntfs_get_block(struct inode *inode, sector_t vbn, 713 712 struct buffer_head *bh_result, int create); 714 713 int ntfs_write_begin(struct file *file, struct address_space *mapping, 715 - loff_t pos, u32 len, struct page **pagep, void **fsdata); 714 + loff_t pos, u32 len, struct folio **foliop, void **fsdata); 716 715 int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, 717 - u32 len, u32 copied, struct page *page, void *fsdata); 716 + u32 len, u32 copied, struct folio *folio, void *fsdata); 718 717 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc); 719 718 int ntfs_sync_inode(struct inode *inode); 720 719 int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
+6 -6
fs/ocfs2/aops.c
··· 1643 1643 1644 1644 int ocfs2_write_begin_nolock(struct address_space *mapping, 1645 1645 loff_t pos, unsigned len, ocfs2_write_type_t type, 1646 - struct page **pagep, void **fsdata, 1646 + struct folio **foliop, void **fsdata, 1647 1647 struct buffer_head *di_bh, struct page *mmap_page) 1648 1648 { 1649 1649 int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; ··· 1826 1826 ocfs2_free_alloc_context(meta_ac); 1827 1827 1828 1828 success: 1829 - if (pagep) 1830 - *pagep = wc->w_target_page; 1829 + if (foliop) 1830 + *foliop = page_folio(wc->w_target_page); 1831 1831 *fsdata = wc; 1832 1832 return 0; 1833 1833 out_quota: ··· 1879 1879 1880 1880 static int ocfs2_write_begin(struct file *file, struct address_space *mapping, 1881 1881 loff_t pos, unsigned len, 1882 - struct page **pagep, void **fsdata) 1882 + struct folio **foliop, void **fsdata) 1883 1883 { 1884 1884 int ret; 1885 1885 struct buffer_head *di_bh = NULL; ··· 1901 1901 down_write(&OCFS2_I(inode)->ip_alloc_sem); 1902 1902 1903 1903 ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER, 1904 - pagep, fsdata, di_bh, NULL); 1904 + foliop, fsdata, di_bh, NULL); 1905 1905 if (ret) { 1906 1906 mlog_errno(ret); 1907 1907 goto out_fail; ··· 2076 2076 2077 2077 static int ocfs2_write_end(struct file *file, struct address_space *mapping, 2078 2078 loff_t pos, unsigned len, unsigned copied, 2079 - struct page *page, void *fsdata) 2079 + struct folio *folio, void *fsdata) 2080 2080 { 2081 2081 int ret; 2082 2082 struct inode *inode = mapping->host;
+1 -1
fs/ocfs2/aops.h
··· 38 38 39 39 int ocfs2_write_begin_nolock(struct address_space *mapping, 40 40 loff_t pos, unsigned len, ocfs2_write_type_t type, 41 - struct page **pagep, void **fsdata, 41 + struct folio **foliop, void **fsdata, 42 42 struct buffer_head *di_bh, struct page *mmap_page); 43 43 44 44 int ocfs2_read_inline_data(struct inode *inode, struct page *page,
+9 -8
fs/ocfs2/file.c
··· 755 755 u64 abs_to, struct buffer_head *di_bh) 756 756 { 757 757 struct address_space *mapping = inode->i_mapping; 758 - struct page *page; 758 + struct folio *folio; 759 759 unsigned long index = abs_from >> PAGE_SHIFT; 760 760 handle_t *handle; 761 761 int ret = 0; ··· 774 774 goto out; 775 775 } 776 776 777 - page = find_or_create_page(mapping, index, GFP_NOFS); 778 - if (!page) { 779 - ret = -ENOMEM; 777 + folio = __filemap_get_folio(mapping, index, 778 + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); 779 + if (IS_ERR(folio)) { 780 + ret = PTR_ERR(folio); 780 781 mlog_errno(ret); 781 782 goto out_commit_trans; 782 783 } ··· 804 803 * __block_write_begin and block_commit_write to zero the 805 804 * whole block. 806 805 */ 807 - ret = __block_write_begin(page, block_start + 1, 0, 806 + ret = __block_write_begin(folio, block_start + 1, 0, 808 807 ocfs2_get_block); 809 808 if (ret < 0) { 810 809 mlog_errno(ret); ··· 813 812 814 813 815 814 /* must not update i_size! */ 816 - block_commit_write(page, block_start + 1, block_start + 1); 815 + block_commit_write(&folio->page, block_start + 1, block_start + 1); 817 816 } 818 817 819 818 /* ··· 834 833 } 835 834 836 835 out_unlock: 837 - unlock_page(page); 838 - put_page(page); 836 + folio_unlock(folio); 837 + folio_put(folio); 839 838 out_commit_trans: 840 839 if (handle) 841 840 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+3 -3
fs/ocfs2/mmap.c
··· 53 53 loff_t pos = page_offset(page); 54 54 unsigned int len = PAGE_SIZE; 55 55 pgoff_t last_index; 56 - struct page *locked_page = NULL; 56 + struct folio *locked_folio = NULL; 57 57 void *fsdata; 58 58 loff_t size = i_size_read(inode); 59 59 ··· 91 91 len = ((size - 1) & ~PAGE_MASK) + 1; 92 92 93 93 err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, 94 - &locked_page, &fsdata, di_bh, page); 94 + &locked_folio, &fsdata, di_bh, page); 95 95 if (err) { 96 96 if (err != -ENOSPC) 97 97 mlog_errno(err); ··· 99 99 goto out; 100 100 } 101 101 102 - if (!locked_page) { 102 + if (!locked_folio) { 103 103 ret = VM_FAULT_NOPAGE; 104 104 goto out; 105 105 }
+2 -2
fs/omfs/file.c
··· 312 312 313 313 static int omfs_write_begin(struct file *file, struct address_space *mapping, 314 314 loff_t pos, unsigned len, 315 - struct page **pagep, void **fsdata) 315 + struct folio **foliop, void **fsdata) 316 316 { 317 317 int ret; 318 318 319 - ret = block_write_begin(mapping, pos, len, pagep, omfs_get_block); 319 + ret = block_write_begin(mapping, pos, len, foliop, omfs_get_block); 320 320 if (unlikely(ret)) 321 321 omfs_write_failed(mapping, pos + len); 322 322
+18 -21
fs/orangefs/inode.c
··· 309 309 310 310 static int orangefs_write_begin(struct file *file, 311 311 struct address_space *mapping, loff_t pos, unsigned len, 312 - struct page **pagep, void **fsdata) 312 + struct folio **foliop, void **fsdata) 313 313 { 314 314 struct orangefs_write_range *wr; 315 315 struct folio *folio; 316 - struct page *page; 317 - pgoff_t index; 318 316 int ret; 319 317 320 - index = pos >> PAGE_SHIFT; 318 + folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN, 319 + mapping_gfp_mask(mapping)); 320 + if (IS_ERR(folio)) 321 + return PTR_ERR(folio); 321 322 322 - page = grab_cache_page_write_begin(mapping, index); 323 - if (!page) 324 - return -ENOMEM; 325 - 326 - *pagep = page; 327 - folio = page_folio(page); 323 + *foliop = folio; 328 324 329 325 if (folio_test_dirty(folio) && !folio_test_private(folio)) { 330 326 /* ··· 361 365 } 362 366 363 367 static int orangefs_write_end(struct file *file, struct address_space *mapping, 364 - loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) 368 + loff_t pos, unsigned len, unsigned copied, struct folio *folio, 369 + void *fsdata) 365 370 { 366 - struct inode *inode = page->mapping->host; 371 + struct inode *inode = folio->mapping->host; 367 372 loff_t last_pos = pos + copied; 368 373 369 374 /* ··· 374 377 if (last_pos > inode->i_size) 375 378 i_size_write(inode, last_pos); 376 379 377 - /* zero the stale part of the page if we did a short copy */ 378 - if (!PageUptodate(page)) { 380 + /* zero the stale part of the folio if we did a short copy */ 381 + if (!folio_test_uptodate(folio)) { 379 382 unsigned from = pos & (PAGE_SIZE - 1); 380 383 if (copied < len) { 381 - zero_user(page, from + copied, len - copied); 384 + folio_zero_range(folio, from + copied, len - copied); 382 385 } 383 386 /* Set fully written pages uptodate. 
*/ 384 - if (pos == page_offset(page) && 387 + if (pos == folio_pos(folio) && 385 388 (len == PAGE_SIZE || pos + len == inode->i_size)) { 386 - zero_user_segment(page, from + copied, PAGE_SIZE); 387 - SetPageUptodate(page); 389 + folio_zero_segment(folio, from + copied, PAGE_SIZE); 390 + folio_mark_uptodate(folio); 388 391 } 389 392 } 390 393 391 - set_page_dirty(page); 392 - unlock_page(page); 393 - put_page(page); 394 + folio_mark_dirty(folio); 395 + folio_unlock(folio); 396 + folio_put(folio); 394 397 395 398 mark_inode_dirty_sync(file_inode(file)); 396 399 return copied;
+44 -44
fs/qnx6/dir.c
··· 24 24 return crc; 25 25 } 26 26 27 - static struct page *qnx6_get_page(struct inode *dir, unsigned long n) 27 + static void *qnx6_get_folio(struct inode *dir, unsigned long n, 28 + struct folio **foliop) 28 29 { 29 - struct address_space *mapping = dir->i_mapping; 30 - struct page *page = read_mapping_page(mapping, n, NULL); 31 - if (!IS_ERR(page)) 32 - kmap(page); 33 - return page; 30 + struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL); 31 + 32 + if (IS_ERR(folio)) 33 + return folio; 34 + *foliop = folio; 35 + return kmap_local_folio(folio, 0); 34 36 } 35 37 36 38 static unsigned last_entry(struct inode *inode, unsigned long page_nr) ··· 46 44 47 45 static struct qnx6_long_filename *qnx6_longname(struct super_block *sb, 48 46 struct qnx6_long_dir_entry *de, 49 - struct page **p) 47 + struct folio **foliop) 50 48 { 51 49 struct qnx6_sb_info *sbi = QNX6_SB(sb); 52 50 u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */ 53 51 u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */ 54 - /* within page */ 55 - u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK; 52 + u32 offs; 56 53 struct address_space *mapping = sbi->longfile->i_mapping; 57 - struct page *page = read_mapping_page(mapping, n, NULL); 58 - if (IS_ERR(page)) 59 - return ERR_CAST(page); 60 - kmap(*p = page); 61 - return (struct qnx6_long_filename *)(page_address(page) + offs); 54 + struct folio *folio = read_mapping_folio(mapping, n, NULL); 55 + 56 + if (IS_ERR(folio)) 57 + return ERR_CAST(folio); 58 + offs = offset_in_folio(folio, s << sb->s_blocksize_bits); 59 + *foliop = folio; 60 + return kmap_local_folio(folio, offs); 62 61 } 63 62 64 63 static int qnx6_dir_longfilename(struct inode *inode, ··· 70 67 struct qnx6_long_filename *lf; 71 68 struct super_block *s = inode->i_sb; 72 69 struct qnx6_sb_info *sbi = QNX6_SB(s); 73 - struct page *page; 70 + struct folio *folio; 74 71 int lf_size; 75 72 76 73 if (de->de_size != 0xff) { ··· 79 76 pr_err("invalid direntry 
size (%i).\n", de->de_size); 80 77 return 0; 81 78 } 82 - lf = qnx6_longname(s, de, &page); 79 + lf = qnx6_longname(s, de, &folio); 83 80 if (IS_ERR(lf)) { 84 81 pr_err("Error reading longname\n"); 85 82 return 0; ··· 90 87 if (lf_size > QNX6_LONG_NAME_MAX) { 91 88 pr_debug("file %s\n", lf->lf_fname); 92 89 pr_err("Filename too long (%i)\n", lf_size); 93 - qnx6_put_page(page); 90 + folio_release_kmap(folio, lf); 94 91 return 0; 95 92 } 96 93 ··· 103 100 pr_debug("qnx6_readdir:%.*s inode:%u\n", 104 101 lf_size, lf->lf_fname, de_inode); 105 102 if (!dir_emit(ctx, lf->lf_fname, lf_size, de_inode, DT_UNKNOWN)) { 106 - qnx6_put_page(page); 103 + folio_release_kmap(folio, lf); 107 104 return 0; 108 105 } 109 106 110 - qnx6_put_page(page); 107 + folio_release_kmap(folio, lf); 111 108 /* success */ 112 109 return 1; 113 110 } ··· 120 117 loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1); 121 118 unsigned long npages = dir_pages(inode); 122 119 unsigned long n = pos >> PAGE_SHIFT; 123 - unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE; 120 + unsigned offset = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE; 124 121 bool done = false; 125 122 126 123 ctx->pos = pos; 127 124 if (ctx->pos >= inode->i_size) 128 125 return 0; 129 126 130 - for ( ; !done && n < npages; n++, start = 0) { 131 - struct page *page = qnx6_get_page(inode, n); 132 - int limit = last_entry(inode, n); 127 + for ( ; !done && n < npages; n++, offset = 0) { 133 128 struct qnx6_dir_entry *de; 134 - int i = start; 129 + struct folio *folio; 130 + char *kaddr = qnx6_get_folio(inode, n, &folio); 131 + char *limit; 135 132 136 - if (IS_ERR(page)) { 133 + if (IS_ERR(kaddr)) { 137 134 pr_err("%s(): read failed\n", __func__); 138 135 ctx->pos = (n + 1) << PAGE_SHIFT; 139 - return PTR_ERR(page); 136 + return PTR_ERR(kaddr); 140 137 } 141 - de = ((struct qnx6_dir_entry *)page_address(page)) + start; 142 - for (; i < limit; i++, de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) { 138 + de = (struct qnx6_dir_entry *)(kaddr + 
offset); 139 + limit = kaddr + last_entry(inode, n); 140 + for (; (char *)de < limit; de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) { 143 141 int size = de->de_size; 144 142 u32 no_inode = fs32_to_cpu(sbi, de->de_inode); 145 143 ··· 168 164 } 169 165 } 170 166 } 171 - qnx6_put_page(page); 167 + folio_release_kmap(folio, kaddr); 172 168 } 173 169 return 0; 174 170 } ··· 181 177 { 182 178 struct super_block *s = dir->i_sb; 183 179 struct qnx6_sb_info *sbi = QNX6_SB(s); 184 - struct page *page; 180 + struct folio *folio; 185 181 int thislen; 186 - struct qnx6_long_filename *lf = qnx6_longname(s, de, &page); 182 + struct qnx6_long_filename *lf = qnx6_longname(s, de, &folio); 187 183 188 184 if (IS_ERR(lf)) 189 185 return 0; 190 186 191 187 thislen = fs16_to_cpu(sbi, lf->lf_size); 192 188 if (len != thislen) { 193 - qnx6_put_page(page); 189 + folio_release_kmap(folio, lf); 194 190 return 0; 195 191 } 196 192 if (memcmp(name, lf->lf_fname, len) == 0) { 197 - qnx6_put_page(page); 193 + folio_release_kmap(folio, lf); 198 194 return fs32_to_cpu(sbi, de->de_inode); 199 195 } 200 - qnx6_put_page(page); 196 + folio_release_kmap(folio, lf); 201 197 return 0; 202 198 } 203 199 ··· 214 210 } 215 211 216 212 217 - unsigned qnx6_find_entry(int len, struct inode *dir, const char *name, 218 - struct page **res_page) 213 + unsigned qnx6_find_ino(int len, struct inode *dir, const char *name) 219 214 { 220 215 struct super_block *s = dir->i_sb; 221 216 struct qnx6_inode_info *ei = QNX6_I(dir); 222 - struct page *page = NULL; 217 + struct folio *folio; 223 218 unsigned long start, n; 224 219 unsigned long npages = dir_pages(dir); 225 220 unsigned ino; 226 221 struct qnx6_dir_entry *de; 227 222 struct qnx6_long_dir_entry *lde; 228 - 229 - *res_page = NULL; 230 223 231 224 if (npages == 0) 232 225 return 0; ··· 233 232 n = start; 234 233 235 234 do { 236 - page = qnx6_get_page(dir, n); 237 - if (!IS_ERR(page)) { 235 + de = qnx6_get_folio(dir, n, &folio); 236 + if (!IS_ERR(de)) { 238 237 int limit 
= last_entry(dir, n); 239 238 int i; 240 239 241 - de = (struct qnx6_dir_entry *)page_address(page); 242 240 for (i = 0; i < limit; i++, de++) { 243 241 if (len <= QNX6_SHORT_NAME_MAX) { 244 242 /* short filename */ ··· 256 256 } else 257 257 pr_err("undefined filename size in inode.\n"); 258 258 } 259 - qnx6_put_page(page); 259 + folio_release_kmap(folio, de - i); 260 260 } 261 261 262 262 if (++n >= npages) ··· 265 265 return 0; 266 266 267 267 found: 268 - *res_page = page; 269 268 ei->i_dir_start_lookup = n; 269 + folio_release_kmap(folio, de); 270 270 return ino; 271 271 } 272 272
+12 -13
fs/qnx6/inode.c
··· 184 184 struct qnx6_dir_entry *dir_entry; 185 185 struct inode *root = d_inode(s->s_root); 186 186 struct address_space *mapping = root->i_mapping; 187 - struct page *page = read_mapping_page(mapping, 0, NULL); 188 - if (IS_ERR(page)) 187 + struct folio *folio = read_mapping_folio(mapping, 0, NULL); 188 + 189 + if (IS_ERR(folio)) 189 190 return "error reading root directory"; 190 - kmap(page); 191 - dir_entry = page_address(page); 191 + dir_entry = kmap_local_folio(folio, 0); 192 192 for (i = 0; i < 2; i++) { 193 193 /* maximum 3 bytes - due to match_root limitation */ 194 194 if (strncmp(dir_entry[i].de_fname, match_root[i], 3)) 195 195 error = 1; 196 196 } 197 - qnx6_put_page(page); 197 + folio_release_kmap(folio, dir_entry); 198 198 if (error) 199 199 return "error reading root directory."; 200 200 return NULL; ··· 518 518 struct inode *inode; 519 519 struct qnx6_inode_info *ei; 520 520 struct address_space *mapping; 521 - struct page *page; 521 + struct folio *folio; 522 522 u32 n, offs; 523 523 524 524 inode = iget_locked(sb, ino); ··· 538 538 return ERR_PTR(-EIO); 539 539 } 540 540 n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS); 541 - offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS); 542 541 mapping = sbi->inodes->i_mapping; 543 - page = read_mapping_page(mapping, n, NULL); 544 - if (IS_ERR(page)) { 542 + folio = read_mapping_folio(mapping, n, NULL); 543 + if (IS_ERR(folio)) { 545 544 pr_err("major problem: unable to read inode from dev %s\n", 546 545 sb->s_id); 547 546 iget_failed(inode); 548 - return ERR_CAST(page); 547 + return ERR_CAST(folio); 549 548 } 550 - kmap(page); 551 - raw_inode = ((struct qnx6_inode_entry *)page_address(page)) + offs; 549 + offs = offset_in_folio(folio, (ino - 1) << QNX6_INODE_SIZE_BITS); 550 + raw_inode = kmap_local_folio(folio, offs); 552 551 553 552 inode->i_mode = fs16_to_cpu(sbi, raw_inode->di_mode); 554 553 i_uid_write(inode, (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid)); ··· 577 578 inode->i_mapping->a_ops = 
&qnx6_aops; 578 579 } else 579 580 init_special_inode(inode, inode->i_mode, 0); 580 - qnx6_put_page(page); 581 + folio_release_kmap(folio, raw_inode); 581 582 unlock_new_inode(inode); 582 583 return inode; 583 584 }
+1 -3
fs/qnx6/namei.c
··· 17 17 unsigned int flags) 18 18 { 19 19 unsigned ino; 20 - struct page *page; 21 20 struct inode *foundinode = NULL; 22 21 const char *name = dentry->d_name.name; 23 22 int len = dentry->d_name.len; ··· 24 25 if (len > QNX6_LONG_NAME_MAX) 25 26 return ERR_PTR(-ENAMETOOLONG); 26 27 27 - ino = qnx6_find_entry(len, dir, name, &page); 28 + ino = qnx6_find_ino(len, dir, name); 28 29 if (ino) { 29 30 foundinode = qnx6_iget(dir->i_sb, ino); 30 - qnx6_put_page(page); 31 31 if (IS_ERR(foundinode)) 32 32 pr_debug("lookup->iget -> error %ld\n", 33 33 PTR_ERR(foundinode));
+1 -8
fs/qnx6/qnx6.h
··· 126 126 extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s, 127 127 int silent); 128 128 129 - static inline void qnx6_put_page(struct page *page) 130 - { 131 - kunmap(page); 132 - put_page(page); 133 - } 134 - 135 - extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name, 136 - struct page **res_page); 129 + unsigned qnx6_find_ino(int len, struct inode *dir, const char *name);
+28 -29
fs/reiserfs/inode.c
··· 2178 2178 unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1); 2179 2179 struct buffer_head *bh; 2180 2180 struct buffer_head *head; 2181 - struct page *page; 2181 + struct folio *folio; 2182 2182 int error; 2183 2183 2184 2184 /* ··· 2190 2190 if ((offset & (blocksize - 1)) == 0) { 2191 2191 return -ENOENT; 2192 2192 } 2193 - page = grab_cache_page(inode->i_mapping, index); 2194 - error = -ENOMEM; 2195 - if (!page) { 2196 - goto out; 2197 - } 2193 + folio = __filemap_get_folio(inode->i_mapping, index, 2194 + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, 2195 + mapping_gfp_mask(inode->i_mapping)); 2196 + if (IS_ERR(folio)) 2197 + return PTR_ERR(folio); 2198 2198 /* start within the page of the last block in the file */ 2199 2199 start = (offset / blocksize) * blocksize; 2200 2200 2201 - error = __block_write_begin(page, start, offset - start, 2201 + error = __block_write_begin(folio, start, offset - start, 2202 2202 reiserfs_get_block_create_0); 2203 2203 if (error) 2204 2204 goto unlock; 2205 2205 2206 - head = page_buffers(page); 2206 + head = folio_buffers(folio); 2207 2207 bh = head; 2208 2208 do { 2209 2209 if (pos >= start) { ··· 2226 2226 goto unlock; 2227 2227 } 2228 2228 *bh_result = bh; 2229 - *page_result = page; 2229 + *page_result = &folio->page; 2230 2230 2231 - out: 2232 2231 return error; 2233 2232 2234 2233 unlock: 2235 - unlock_page(page); 2236 - put_page(page); 2234 + folio_unlock(folio); 2235 + folio_put(folio); 2237 2236 return error; 2238 2237 } 2239 2238 ··· 2735 2736 static int reiserfs_write_begin(struct file *file, 2736 2737 struct address_space *mapping, 2737 2738 loff_t pos, unsigned len, 2738 - struct page **pagep, void **fsdata) 2739 + struct folio **foliop, void **fsdata) 2739 2740 { 2740 2741 struct inode *inode; 2741 - struct page *page; 2742 + struct folio *folio; 2742 2743 pgoff_t index; 2743 2744 int ret; 2744 2745 int old_ref = 0; 2745 2746 2746 2747 inode = mapping->host; 2747 2748 index = pos >> PAGE_SHIFT; 2748 - page = 
grab_cache_page_write_begin(mapping, index); 2749 - if (!page) 2750 - return -ENOMEM; 2751 - *pagep = page; 2749 + folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, 2750 + mapping_gfp_mask(mapping)); 2751 + if (IS_ERR(folio)) 2752 + return PTR_ERR(folio); 2753 + *foliop = folio; 2752 2754 2753 2755 reiserfs_wait_on_write_block(inode->i_sb); 2754 - fix_tail_page_for_writing(page); 2756 + fix_tail_page_for_writing(&folio->page); 2755 2757 if (reiserfs_transaction_running(inode->i_sb)) { 2756 2758 struct reiserfs_transaction_handle *th; 2757 2759 th = (struct reiserfs_transaction_handle *)current-> ··· 2762 2762 old_ref = th->t_refcount; 2763 2763 th->t_refcount++; 2764 2764 } 2765 - ret = __block_write_begin(page, pos, len, reiserfs_get_block); 2765 + ret = __block_write_begin(folio, pos, len, reiserfs_get_block); 2766 2766 if (ret && reiserfs_transaction_running(inode->i_sb)) { 2767 2767 struct reiserfs_transaction_handle *th = current->journal_info; 2768 2768 /* ··· 2792 2792 } 2793 2793 } 2794 2794 if (ret) { 2795 - unlock_page(page); 2796 - put_page(page); 2795 + folio_unlock(folio); 2796 + folio_put(folio); 2797 2797 /* Truncate allocated blocks */ 2798 2798 reiserfs_truncate_failed_write(inode); 2799 2799 } ··· 2822 2822 th->t_refcount++; 2823 2823 } 2824 2824 2825 - ret = __block_write_begin(page, from, len, reiserfs_get_block); 2825 + ret = __block_write_begin(page_folio(page), from, len, reiserfs_get_block); 2826 2826 if (ret && reiserfs_transaction_running(inode->i_sb)) { 2827 2827 struct reiserfs_transaction_handle *th = current->journal_info; 2828 2828 /* ··· 2862 2862 2863 2863 static int reiserfs_write_end(struct file *file, struct address_space *mapping, 2864 2864 loff_t pos, unsigned len, unsigned copied, 2865 - struct page *page, void *fsdata) 2865 + struct folio *folio, void *fsdata) 2866 2866 { 2867 - struct folio *folio = page_folio(page); 2868 - struct inode *inode = page->mapping->host; 2867 + struct inode *inode = 
folio->mapping->host; 2869 2868 int ret = 0; 2870 2869 int update_sd = 0; 2871 2870 struct reiserfs_transaction_handle *th; ··· 2886 2887 } 2887 2888 flush_dcache_folio(folio); 2888 2889 2889 - reiserfs_commit_page(inode, page, start, start + copied); 2890 + reiserfs_commit_page(inode, &folio->page, start, start + copied); 2890 2891 2891 2892 /* 2892 2893 * generic_commit_write does this for us, but does not update the ··· 2941 2942 out: 2942 2943 if (locked) 2943 2944 reiserfs_write_unlock(inode->i_sb); 2944 - unlock_page(page); 2945 - put_page(page); 2945 + folio_unlock(folio); 2946 + folio_put(folio); 2946 2947 2947 2948 if (pos + len > inode->i_size) 2948 2949 reiserfs_truncate_failed_write(inode);
+58 -28
fs/squashfs/file.c
··· 494 494 } 495 495 496 496 static int squashfs_readahead_fragment(struct page **page, 497 - unsigned int pages, unsigned int expected) 497 + unsigned int pages, unsigned int expected, loff_t start) 498 498 { 499 499 struct inode *inode = page[0]->mapping->host; 500 500 struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, 501 501 squashfs_i(inode)->fragment_block, 502 502 squashfs_i(inode)->fragment_size); 503 503 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 504 - unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; 505 - int error = buffer->error; 504 + int i, bytes, copied; 505 + struct squashfs_page_actor *actor; 506 + unsigned int offset; 507 + void *addr; 508 + struct page *last_page; 506 509 507 - if (error) 510 + if (buffer->error) 508 511 goto out; 509 512 510 - expected += squashfs_i(inode)->fragment_offset; 513 + actor = squashfs_page_actor_init_special(msblk, page, pages, 514 + expected, start); 515 + if (!actor) 516 + goto out; 511 517 512 - for (n = 0; n < pages; n++) { 513 - unsigned int base = (page[n]->index & mask) << PAGE_SHIFT; 514 - unsigned int offset = base + squashfs_i(inode)->fragment_offset; 518 + squashfs_actor_nobuff(actor); 519 + addr = squashfs_first_page(actor); 515 520 516 - if (expected > offset) { 517 - unsigned int avail = min_t(unsigned int, expected - 518 - offset, PAGE_SIZE); 521 + for (copied = offset = 0; offset < expected; offset += PAGE_SIZE) { 522 + int avail = min_t(int, expected - offset, PAGE_SIZE); 519 523 520 - squashfs_fill_page(page[n], buffer, offset, avail); 524 + if (!IS_ERR(addr)) { 525 + bytes = squashfs_copy_data(addr, buffer, offset + 526 + squashfs_i(inode)->fragment_offset, avail); 527 + 528 + if (bytes != avail) 529 + goto failed; 521 530 } 522 531 523 - unlock_page(page[n]); 524 - put_page(page[n]); 532 + copied += avail; 533 + addr = squashfs_next_page(actor); 525 534 } 535 + 536 + last_page = squashfs_page_actor_free(actor); 537 + 538 + if (copied == 
expected && !IS_ERR(last_page)) { 539 + /* Last page (if present) may have trailing bytes not filled */ 540 + bytes = copied % PAGE_SIZE; 541 + if (bytes && last_page) 542 + memzero_page(last_page, bytes, PAGE_SIZE - bytes); 543 + 544 + for (i = 0; i < pages; i++) { 545 + flush_dcache_page(page[i]); 546 + SetPageUptodate(page[i]); 547 + } 548 + } 549 + 550 + for (i = 0; i < pages; i++) { 551 + unlock_page(page[i]); 552 + put_page(page[i]); 553 + } 554 + 555 + squashfs_cache_put(buffer); 556 + return 0; 557 + 558 + failed: 559 + squashfs_page_actor_free(actor); 526 560 527 561 out: 528 562 squashfs_cache_put(buffer); 529 - return error; 563 + return 1; 530 564 } 531 565 532 566 static void squashfs_readahead(struct readahead_control *ractl) ··· 585 551 return; 586 552 587 553 for (;;) { 588 - pgoff_t index; 589 554 int res, bsize; 590 555 u64 block = 0; 591 556 unsigned int expected; ··· 603 570 if (readahead_pos(ractl) >= i_size_read(inode)) 604 571 goto skip_pages; 605 572 606 - index = pages[0]->index >> shift; 607 - 608 - if ((pages[nr_pages - 1]->index >> shift) != index) 609 - goto skip_pages; 610 - 611 - if (index == file_end && squashfs_i(inode)->fragment_block != 612 - SQUASHFS_INVALID_BLK) { 573 + if (start >> msblk->block_log == file_end && 574 + squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) { 613 575 res = squashfs_readahead_fragment(pages, nr_pages, 614 - expected); 576 + expected, start); 615 577 if (res) 616 578 goto skip_pages; 617 579 continue; 618 580 } 619 581 620 - bsize = read_blocklist(inode, index, &block); 582 + bsize = read_blocklist(inode, start >> msblk->block_log, &block); 621 583 if (bsize == 0) 622 584 goto skip_pages; 623 585 624 586 actor = squashfs_page_actor_init_special(msblk, pages, nr_pages, 625 - expected); 587 + expected, start); 626 588 if (!actor) 627 589 goto skip_pages; 628 590 ··· 625 597 626 598 last_page = squashfs_page_actor_free(actor); 627 599 628 - if (res == expected) { 600 + if (res == expected && 
!IS_ERR(last_page)) { 629 601 int bytes; 630 602 631 603 /* Last page (if present) may have trailing bytes not filled */ 632 604 bytes = res % PAGE_SIZE; 633 - if (index == file_end && bytes && last_page) 605 + if (start >> msblk->block_log == file_end && bytes && last_page) 634 606 memzero_page(last_page, bytes, 635 607 PAGE_SIZE - bytes); 636 608 ··· 644 616 unlock_page(pages[i]); 645 617 put_page(pages[i]); 646 618 } 619 + 620 + start += readahead_batch_length(ractl); 647 621 } 648 622 649 623 kfree(pages);
+10 -9
fs/squashfs/file_direct.c
··· 23 23 int expected) 24 24 25 25 { 26 + struct folio *folio = page_folio(target_page); 26 27 struct inode *inode = target_page->mapping->host; 27 28 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 28 - 29 29 loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT; 30 30 int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; 31 - loff_t start_index = target_page->index & ~mask; 31 + loff_t start_index = folio->index & ~mask; 32 32 loff_t end_index = start_index | mask; 33 33 int i, n, pages, bytes, res = -ENOMEM; 34 - struct page **page; 34 + struct page **page, *last_page; 35 35 struct squashfs_page_actor *actor; 36 36 void *pageaddr; 37 37 ··· 46 46 47 47 /* Try to grab all the pages covered by the Squashfs block */ 48 48 for (i = 0, n = start_index; n <= end_index; n++) { 49 - page[i] = (n == target_page->index) ? target_page : 49 + page[i] = (n == folio->index) ? target_page : 50 50 grab_cache_page_nowait(target_page->mapping, n); 51 51 52 52 if (page[i] == NULL) ··· 67 67 * Create a "page actor" which will kmap and kunmap the 68 68 * page cache pages appropriately within the decompressor 69 69 */ 70 - actor = squashfs_page_actor_init_special(msblk, page, pages, expected); 70 + actor = squashfs_page_actor_init_special(msblk, page, pages, expected, 71 + start_index << PAGE_SHIFT); 71 72 if (actor == NULL) 72 73 goto out; 73 74 74 75 /* Decompress directly into the page cache buffers */ 75 76 res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor); 76 77 77 - squashfs_page_actor_free(actor); 78 + last_page = squashfs_page_actor_free(actor); 78 79 79 80 if (res < 0) 80 81 goto mark_errored; 81 82 82 - if (res != expected) { 83 + if (res != expected || IS_ERR(last_page)) { 83 84 res = -EIO; 84 85 goto mark_errored; 85 86 } 86 87 87 88 /* Last page (if present) may have trailing bytes not filled */ 88 89 bytes = res % PAGE_SIZE; 89 - if (page[pages - 1]->index == end_index && bytes) { 90 - pageaddr = kmap_local_page(page[pages - 1]); 90 + if 
(end_index == file_end && last_page && bytes) { 91 + pageaddr = kmap_local_page(last_page); 91 92 memset(pageaddr + bytes, 0, PAGE_SIZE - bytes); 92 93 kunmap_local(pageaddr); 93 94 }
+8 -3
fs/squashfs/page_actor.c
··· 60 60 } 61 61 62 62 /* Implementation of page_actor for decompressing directly into page cache. */ 63 + static loff_t page_next_index(struct squashfs_page_actor *actor) 64 + { 65 + return page_folio(actor->page[actor->next_page])->index; 66 + } 67 + 63 68 static void *handle_next_page(struct squashfs_page_actor *actor) 64 69 { 65 70 int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT; ··· 73 68 return NULL; 74 69 75 70 if ((actor->next_page == actor->pages) || 76 - (actor->next_index != actor->page[actor->next_page]->index)) { 71 + (actor->next_index != page_next_index(actor))) { 77 72 actor->next_index++; 78 73 actor->returned_pages++; 79 74 actor->last_page = NULL; ··· 108 103 } 109 104 110 105 struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk, 111 - struct page **page, int pages, int length) 106 + struct page **page, int pages, int length, loff_t start_index) 112 107 { 113 108 struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL); 114 109 ··· 130 125 actor->pages = pages; 131 126 actor->next_page = 0; 132 127 actor->returned_pages = 0; 133 - actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1); 128 + actor->next_index = start_index >> PAGE_SHIFT; 134 129 actor->pageaddr = NULL; 135 130 actor->last_page = NULL; 136 131 actor->alloc_buffer = msblk->decompressor->alloc_buffer;
+4 -2
fs/squashfs/page_actor.h
··· 29 29 int pages, int length); 30 30 extern struct squashfs_page_actor *squashfs_page_actor_init_special( 31 31 struct squashfs_sb_info *msblk, 32 - struct page **page, int pages, int length); 32 + struct page **page, int pages, int length, 33 + loff_t start_index); 33 34 static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor) 34 35 { 35 - struct page *last_page = actor->last_page; 36 + struct page *last_page = actor->next_page == actor->pages ? actor->last_page : ERR_PTR(-EIO); 36 37 37 38 kfree(actor->tmp_buffer); 38 39 kfree(actor); 40 + 39 41 return last_page; 40 42 } 41 43 static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
+77 -81
fs/sysv/dir.c
··· 28 28 .fsync = generic_file_fsync, 29 29 }; 30 30 31 - static void dir_commit_chunk(struct page *page, loff_t pos, unsigned len) 31 + static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len) 32 32 { 33 - struct address_space *mapping = page->mapping; 33 + struct address_space *mapping = folio->mapping; 34 34 struct inode *dir = mapping->host; 35 35 36 - block_write_end(NULL, mapping, pos, len, len, page, NULL); 36 + block_write_end(NULL, mapping, pos, len, len, folio, NULL); 37 37 if (pos+len > dir->i_size) { 38 38 i_size_write(dir, pos+len); 39 39 mark_inode_dirty(dir); 40 40 } 41 - unlock_page(page); 41 + folio_unlock(folio); 42 42 } 43 43 44 44 static int sysv_handle_dirsync(struct inode *dir) ··· 52 52 } 53 53 54 54 /* 55 - * Calls to dir_get_page()/unmap_and_put_page() must be nested according to the 55 + * Calls to dir_get_folio()/folio_release_kmap() must be nested according to the 56 56 * rules documented in mm/highmem.rst. 57 57 * 58 - * NOTE: sysv_find_entry() and sysv_dotdot() act as calls to dir_get_page() 58 + * NOTE: sysv_find_entry() and sysv_dotdot() act as calls to dir_get_folio() 59 59 * and must be treated accordingly for nesting purposes. 
60 60 */ 61 - static void *dir_get_page(struct inode *dir, unsigned long n, struct page **p) 61 + static void *dir_get_folio(struct inode *dir, unsigned long n, 62 + struct folio **foliop) 62 63 { 63 - struct address_space *mapping = dir->i_mapping; 64 - struct page *page = read_mapping_page(mapping, n, NULL); 65 - if (IS_ERR(page)) 66 - return ERR_CAST(page); 67 - *p = page; 68 - return kmap_local_page(page); 64 + struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL); 65 + 66 + if (IS_ERR(folio)) 67 + return ERR_CAST(folio); 68 + *foliop = folio; 69 + return kmap_local_folio(folio, 0); 69 70 } 70 71 71 72 static int sysv_readdir(struct file *file, struct dir_context *ctx) ··· 88 87 for ( ; n < npages; n++, offset = 0) { 89 88 char *kaddr, *limit; 90 89 struct sysv_dir_entry *de; 91 - struct page *page; 90 + struct folio *folio; 92 91 93 - kaddr = dir_get_page(inode, n, &page); 92 + kaddr = dir_get_folio(inode, n, &folio); 94 93 if (IS_ERR(kaddr)) 95 94 continue; 96 95 de = (struct sysv_dir_entry *)(kaddr+offset); ··· 104 103 if (!dir_emit(ctx, name, strnlen(name,SYSV_NAMELEN), 105 104 fs16_to_cpu(SYSV_SB(sb), de->inode), 106 105 DT_UNKNOWN)) { 107 - unmap_and_put_page(page, kaddr); 106 + folio_release_kmap(folio, kaddr); 108 107 return 0; 109 108 } 110 109 } 111 - unmap_and_put_page(page, kaddr); 110 + folio_release_kmap(folio, kaddr); 112 111 } 113 112 return 0; 114 113 } ··· 127 126 /* 128 127 * sysv_find_entry() 129 128 * 130 - * finds an entry in the specified directory with the wanted name. It 131 - * returns the cache buffer in which the entry was found, and the entry 132 - * itself (as a parameter - res_dir). It does NOT read the inode of the 129 + * finds an entry in the specified directory with the wanted name. 130 + * It does NOT read the inode of the 133 131 * entry - you'll have to do that yourself if you want to. 134 132 * 135 - * On Success unmap_and_put_page() should be called on *res_page. 
133 + * On Success folio_release_kmap() should be called on *foliop. 136 134 * 137 - * sysv_find_entry() acts as a call to dir_get_page() and must be treated 135 + * sysv_find_entry() acts as a call to dir_get_folio() and must be treated 138 136 * accordingly for nesting purposes. 139 137 */ 140 - struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_page) 138 + struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct folio **foliop) 141 139 { 142 140 const char * name = dentry->d_name.name; 143 141 int namelen = dentry->d_name.len; 144 142 struct inode * dir = d_inode(dentry->d_parent); 145 143 unsigned long start, n; 146 144 unsigned long npages = dir_pages(dir); 147 - struct page *page = NULL; 148 145 struct sysv_dir_entry *de; 149 - 150 - *res_page = NULL; 151 146 152 147 start = SYSV_I(dir)->i_dir_start_lookup; 153 148 if (start >= npages) ··· 151 154 n = start; 152 155 153 156 do { 154 - char *kaddr = dir_get_page(dir, n, &page); 157 + char *kaddr = dir_get_folio(dir, n, foliop); 155 158 156 159 if (!IS_ERR(kaddr)) { 157 160 de = (struct sysv_dir_entry *)kaddr; 158 - kaddr += PAGE_SIZE - SYSV_DIRSIZE; 161 + kaddr += folio_size(*foliop) - SYSV_DIRSIZE; 159 162 for ( ; (char *) de <= kaddr ; de++) { 160 163 if (!de->inode) 161 164 continue; ··· 163 166 name, de->name)) 164 167 goto found; 165 168 } 166 - unmap_and_put_page(page, kaddr); 169 + folio_release_kmap(*foliop, kaddr); 167 170 } 168 171 169 172 if (++n >= npages) ··· 174 177 175 178 found: 176 179 SYSV_I(dir)->i_dir_start_lookup = n; 177 - *res_page = page; 178 180 return de; 179 181 } 180 182 ··· 182 186 struct inode *dir = d_inode(dentry->d_parent); 183 187 const char * name = dentry->d_name.name; 184 188 int namelen = dentry->d_name.len; 185 - struct page *page = NULL; 189 + struct folio *folio = NULL; 186 190 struct sysv_dir_entry * de; 187 191 unsigned long npages = dir_pages(dir); 188 192 unsigned long n; ··· 192 196 193 197 /* We take care of directory 
expansion in the same loop */ 194 198 for (n = 0; n <= npages; n++) { 195 - kaddr = dir_get_page(dir, n, &page); 199 + kaddr = dir_get_folio(dir, n, &folio); 196 200 if (IS_ERR(kaddr)) 197 201 return PTR_ERR(kaddr); 198 202 de = (struct sysv_dir_entry *)kaddr; ··· 202 206 goto got_it; 203 207 err = -EEXIST; 204 208 if (namecompare(namelen, SYSV_NAMELEN, name, de->name)) 205 - goto out_page; 209 + goto out_folio; 206 210 de++; 207 211 } 208 - unmap_and_put_page(page, kaddr); 212 + folio_release_kmap(folio, kaddr); 209 213 } 210 214 BUG(); 211 215 return -EINVAL; 212 216 213 217 got_it: 214 - pos = page_offset(page) + offset_in_page(de); 215 - lock_page(page); 216 - err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE); 218 + pos = folio_pos(folio) + offset_in_folio(folio, de); 219 + folio_lock(folio); 220 + err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE); 217 221 if (err) 218 222 goto out_unlock; 219 223 memcpy (de->name, name, namelen); 220 224 memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2); 221 225 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); 222 - dir_commit_chunk(page, pos, SYSV_DIRSIZE); 226 + dir_commit_chunk(folio, pos, SYSV_DIRSIZE); 223 227 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); 224 228 mark_inode_dirty(dir); 225 229 err = sysv_handle_dirsync(dir); 226 - out_page: 227 - unmap_and_put_page(page, kaddr); 230 + out_folio: 231 + folio_release_kmap(folio, kaddr); 228 232 return err; 229 233 out_unlock: 230 - unlock_page(page); 231 - goto out_page; 234 + folio_unlock(folio); 235 + goto out_folio; 232 236 } 233 237 234 - int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page) 238 + int sysv_delete_entry(struct sysv_dir_entry *de, struct folio *folio) 235 239 { 236 - struct inode *inode = page->mapping->host; 237 - loff_t pos = page_offset(page) + offset_in_page(de); 240 + struct inode *inode = folio->mapping->host; 241 + loff_t pos = folio_pos(folio) + offset_in_folio(folio, de); 238 242 int err; 239 243 
240 - lock_page(page); 241 - err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE); 244 + folio_lock(folio); 245 + err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE); 242 246 if (err) { 243 - unlock_page(page); 247 + folio_unlock(folio); 244 248 return err; 245 249 } 246 250 de->inode = 0; 247 - dir_commit_chunk(page, pos, SYSV_DIRSIZE); 251 + dir_commit_chunk(folio, pos, SYSV_DIRSIZE); 248 252 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 249 253 mark_inode_dirty(inode); 250 254 return sysv_handle_dirsync(inode); ··· 252 256 253 257 int sysv_make_empty(struct inode *inode, struct inode *dir) 254 258 { 255 - struct page *page = grab_cache_page(inode->i_mapping, 0); 259 + struct folio *folio = filemap_grab_folio(inode->i_mapping, 0); 256 260 struct sysv_dir_entry * de; 257 - char *base; 261 + char *kaddr; 258 262 int err; 259 263 260 - if (!page) 261 - return -ENOMEM; 262 - err = sysv_prepare_chunk(page, 0, 2 * SYSV_DIRSIZE); 264 + if (IS_ERR(folio)) 265 + return PTR_ERR(folio); 266 + err = sysv_prepare_chunk(folio, 0, 2 * SYSV_DIRSIZE); 263 267 if (err) { 264 - unlock_page(page); 268 + folio_unlock(folio); 265 269 goto fail; 266 270 } 267 - base = kmap_local_page(page); 268 - memset(base, 0, PAGE_SIZE); 271 + kaddr = kmap_local_folio(folio, 0); 272 + memset(kaddr, 0, folio_size(folio)); 269 273 270 - de = (struct sysv_dir_entry *) base; 274 + de = (struct sysv_dir_entry *)kaddr; 271 275 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); 272 276 strcpy(de->name,"."); 273 277 de++; 274 278 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), dir->i_ino); 275 279 strcpy(de->name,".."); 276 280 277 - kunmap_local(base); 278 - dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE); 281 + kunmap_local(kaddr); 282 + dir_commit_chunk(folio, 0, 2 * SYSV_DIRSIZE); 279 283 err = sysv_handle_dirsync(inode); 280 284 fail: 281 - put_page(page); 285 + folio_put(folio); 282 286 return err; 283 287 } 284 288 ··· 288 292 int sysv_empty_dir(struct inode * inode) 289 293 { 290 294 
struct super_block *sb = inode->i_sb; 291 - struct page *page = NULL; 295 + struct folio *folio = NULL; 292 296 unsigned long i, npages = dir_pages(inode); 293 297 char *kaddr; 294 298 295 299 for (i = 0; i < npages; i++) { 296 300 struct sysv_dir_entry *de; 297 301 298 - kaddr = dir_get_page(inode, i, &page); 302 + kaddr = dir_get_folio(inode, i, &folio); 299 303 if (IS_ERR(kaddr)) 300 304 continue; 301 305 302 306 de = (struct sysv_dir_entry *)kaddr; 303 - kaddr += PAGE_SIZE-SYSV_DIRSIZE; 307 + kaddr += folio_size(folio) - SYSV_DIRSIZE; 304 308 305 309 for ( ;(char *)de <= kaddr; de++) { 306 310 if (!de->inode) ··· 317 321 if (de->name[1] != '.' || de->name[2]) 318 322 goto not_empty; 319 323 } 320 - unmap_and_put_page(page, kaddr); 324 + folio_release_kmap(folio, kaddr); 321 325 } 322 326 return 1; 323 327 324 328 not_empty: 325 - unmap_and_put_page(page, kaddr); 329 + folio_release_kmap(folio, kaddr); 326 330 return 0; 327 331 } 328 332 329 333 /* Releases the page */ 330 - int sysv_set_link(struct sysv_dir_entry *de, struct page *page, 331 - struct inode *inode) 334 + int sysv_set_link(struct sysv_dir_entry *de, struct folio *folio, 335 + struct inode *inode) 332 336 { 333 - struct inode *dir = page->mapping->host; 334 - loff_t pos = page_offset(page) + offset_in_page(de); 337 + struct inode *dir = folio->mapping->host; 338 + loff_t pos = folio_pos(folio) + offset_in_folio(folio, de); 335 339 int err; 336 340 337 - lock_page(page); 338 - err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE); 341 + folio_lock(folio); 342 + err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE); 339 343 if (err) { 340 - unlock_page(page); 344 + folio_unlock(folio); 341 345 return err; 342 346 } 343 347 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); 344 - dir_commit_chunk(page, pos, SYSV_DIRSIZE); 348 + dir_commit_chunk(folio, pos, SYSV_DIRSIZE); 345 349 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); 346 350 mark_inode_dirty(dir); 347 351 return 
sysv_handle_dirsync(inode); 348 352 } 349 353 350 354 /* 351 - * Calls to dir_get_page()/unmap_and_put_page() must be nested according to the 355 + * Calls to dir_get_folio()/folio_release_kmap() must be nested according to the 352 356 * rules documented in mm/highmem.rst. 353 357 * 354 - * sysv_dotdot() acts as a call to dir_get_page() and must be treated 358 + * sysv_dotdot() acts as a call to dir_get_folio() and must be treated 355 359 * accordingly for nesting purposes. 356 360 */ 357 - struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct page **p) 361 + struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct folio **foliop) 358 362 { 359 - struct sysv_dir_entry *de = dir_get_page(dir, 0, p); 363 + struct sysv_dir_entry *de = dir_get_folio(dir, 0, foliop); 360 364 361 365 if (IS_ERR(de)) 362 366 return NULL; ··· 366 370 367 371 ino_t sysv_inode_by_name(struct dentry *dentry) 368 372 { 369 - struct page *page; 370 - struct sysv_dir_entry *de = sysv_find_entry (dentry, &page); 373 + struct folio *folio; 374 + struct sysv_dir_entry *de = sysv_find_entry (dentry, &folio); 371 375 ino_t res = 0; 372 376 373 377 if (de) { 374 378 res = fs16_to_cpu(SYSV_SB(dentry->d_sb), de->inode); 375 - unmap_and_put_page(page, de); 379 + folio_release_kmap(folio, de); 376 380 } 377 381 return res; 378 382 }
+4 -4
fs/sysv/itree.c
··· 466 466 return block_read_full_folio(folio, get_block); 467 467 } 468 468 469 - int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len) 469 + int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len) 470 470 { 471 - return __block_write_begin(page, pos, len, get_block); 471 + return __block_write_begin(folio, pos, len, get_block); 472 472 } 473 473 474 474 static void sysv_write_failed(struct address_space *mapping, loff_t to) ··· 483 483 484 484 static int sysv_write_begin(struct file *file, struct address_space *mapping, 485 485 loff_t pos, unsigned len, 486 - struct page **pagep, void **fsdata) 486 + struct folio **foliop, void **fsdata) 487 487 { 488 488 int ret; 489 489 490 - ret = block_write_begin(mapping, pos, len, pagep, get_block); 490 + ret = block_write_begin(mapping, pos, len, foliop, get_block); 491 491 if (unlikely(ret)) 492 492 sysv_write_failed(mapping, pos + len); 493 493
+16 -16
fs/sysv/namei.c
··· 151 151 static int sysv_unlink(struct inode * dir, struct dentry * dentry) 152 152 { 153 153 struct inode * inode = d_inode(dentry); 154 - struct page * page; 154 + struct folio *folio; 155 155 struct sysv_dir_entry * de; 156 156 int err; 157 157 158 - de = sysv_find_entry(dentry, &page); 158 + de = sysv_find_entry(dentry, &folio); 159 159 if (!de) 160 160 return -ENOENT; 161 161 162 - err = sysv_delete_entry(de, page); 162 + err = sysv_delete_entry(de, folio); 163 163 if (!err) { 164 164 inode_set_ctime_to_ts(inode, inode_get_ctime(dir)); 165 165 inode_dec_link_count(inode); 166 166 } 167 - unmap_and_put_page(page, de); 167 + folio_release_kmap(folio, de); 168 168 return err; 169 169 } 170 170 ··· 194 194 { 195 195 struct inode * old_inode = d_inode(old_dentry); 196 196 struct inode * new_inode = d_inode(new_dentry); 197 - struct page * dir_page = NULL; 197 + struct folio *dir_folio; 198 198 struct sysv_dir_entry * dir_de = NULL; 199 - struct page * old_page; 199 + struct folio *old_folio; 200 200 struct sysv_dir_entry * old_de; 201 201 int err = -ENOENT; 202 202 203 203 if (flags & ~RENAME_NOREPLACE) 204 204 return -EINVAL; 205 205 206 - old_de = sysv_find_entry(old_dentry, &old_page); 206 + old_de = sysv_find_entry(old_dentry, &old_folio); 207 207 if (!old_de) 208 208 goto out; 209 209 210 210 if (S_ISDIR(old_inode->i_mode)) { 211 211 err = -EIO; 212 - dir_de = sysv_dotdot(old_inode, &dir_page); 212 + dir_de = sysv_dotdot(old_inode, &dir_folio); 213 213 if (!dir_de) 214 214 goto out_old; 215 215 } 216 216 217 217 if (new_inode) { 218 - struct page * new_page; 218 + struct folio *new_folio; 219 219 struct sysv_dir_entry * new_de; 220 220 221 221 err = -ENOTEMPTY; ··· 223 223 goto out_dir; 224 224 225 225 err = -ENOENT; 226 - new_de = sysv_find_entry(new_dentry, &new_page); 226 + new_de = sysv_find_entry(new_dentry, &new_folio); 227 227 if (!new_de) 228 228 goto out_dir; 229 - err = sysv_set_link(new_de, new_page, old_inode); 230 - unmap_and_put_page(new_page, 
new_de); 229 + err = sysv_set_link(new_de, new_folio, old_inode); 230 + folio_release_kmap(new_folio, new_de); 231 231 if (err) 232 232 goto out_dir; 233 233 inode_set_ctime_current(new_inode); ··· 242 242 inode_inc_link_count(new_dir); 243 243 } 244 244 245 - err = sysv_delete_entry(old_de, old_page); 245 + err = sysv_delete_entry(old_de, old_folio); 246 246 if (err) 247 247 goto out_dir; 248 248 249 249 mark_inode_dirty(old_inode); 250 250 251 251 if (dir_de) { 252 - err = sysv_set_link(dir_de, dir_page, new_dir); 252 + err = sysv_set_link(dir_de, dir_folio, new_dir); 253 253 if (!err) 254 254 inode_dec_link_count(old_dir); 255 255 } 256 256 257 257 out_dir: 258 258 if (dir_de) 259 - unmap_and_put_page(dir_page, dir_de); 259 + folio_release_kmap(dir_folio, dir_de); 260 260 out_old: 261 - unmap_and_put_page(old_page, old_de); 261 + folio_release_kmap(old_folio, old_de); 262 262 out: 263 263 return err; 264 264 }
+10 -10
fs/sysv/sysv.h
··· 133 133 extern unsigned long sysv_count_free_blocks(struct super_block *); 134 134 135 135 /* itree.c */ 136 - extern void sysv_truncate(struct inode *); 137 - extern int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len); 136 + void sysv_truncate(struct inode *); 137 + int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len); 138 138 139 139 /* inode.c */ 140 140 extern struct inode *sysv_iget(struct super_block *, unsigned int); ··· 148 148 149 149 150 150 /* dir.c */ 151 - extern struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct page **); 152 - extern int sysv_add_link(struct dentry *, struct inode *); 153 - extern int sysv_delete_entry(struct sysv_dir_entry *, struct page *); 154 - extern int sysv_make_empty(struct inode *, struct inode *); 155 - extern int sysv_empty_dir(struct inode *); 156 - extern int sysv_set_link(struct sysv_dir_entry *, struct page *, 151 + struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct folio **); 152 + int sysv_add_link(struct dentry *, struct inode *); 153 + int sysv_delete_entry(struct sysv_dir_entry *, struct folio *); 154 + int sysv_make_empty(struct inode *, struct inode *); 155 + int sysv_empty_dir(struct inode *); 156 + int sysv_set_link(struct sysv_dir_entry *, struct folio *, 157 157 struct inode *); 158 - extern struct sysv_dir_entry *sysv_dotdot(struct inode *, struct page **); 159 - extern ino_t sysv_inode_by_name(struct dentry *); 158 + struct sysv_dir_entry *sysv_dotdot(struct inode *, struct folio **); 159 + ino_t sysv_inode_by_name(struct dentry *); 160 160 161 161 162 162 extern const struct inode_operations sysv_file_inode_operations;
+6 -7
fs/ubifs/file.c
··· 211 211 } 212 212 213 213 static int write_begin_slow(struct address_space *mapping, 214 - loff_t pos, unsigned len, struct page **pagep) 214 + loff_t pos, unsigned len, struct folio **foliop) 215 215 { 216 216 struct inode *inode = mapping->host; 217 217 struct ubifs_info *c = inode->i_sb->s_fs_info; ··· 298 298 ubifs_release_dirty_inode_budget(c, ui); 299 299 } 300 300 301 - *pagep = &folio->page; 301 + *foliop = folio; 302 302 return 0; 303 303 } 304 304 ··· 414 414 */ 415 415 static int ubifs_write_begin(struct file *file, struct address_space *mapping, 416 416 loff_t pos, unsigned len, 417 - struct page **pagep, void **fsdata) 417 + struct folio **foliop, void **fsdata) 418 418 { 419 419 struct inode *inode = mapping->host; 420 420 struct ubifs_info *c = inode->i_sb->s_fs_info; ··· 483 483 folio_unlock(folio); 484 484 folio_put(folio); 485 485 486 - return write_begin_slow(mapping, pos, len, pagep); 486 + return write_begin_slow(mapping, pos, len, foliop); 487 487 } 488 488 489 489 /* ··· 492 492 * with @ui->ui_mutex locked if we are appending pages, and unlocked 493 493 * otherwise. This is an optimization (slightly hacky though). 494 494 */ 495 - *pagep = &folio->page; 495 + *foliop = folio; 496 496 return 0; 497 497 } 498 498 ··· 524 524 525 525 static int ubifs_write_end(struct file *file, struct address_space *mapping, 526 526 loff_t pos, unsigned len, unsigned copied, 527 - struct page *page, void *fsdata) 527 + struct folio *folio, void *fsdata) 528 528 { 529 - struct folio *folio = page_folio(page); 530 529 struct inode *inode = mapping->host; 531 530 struct ubifs_inode *ui = ubifs_inode(inode); 532 531 struct ubifs_info *c = inode->i_sb->s_fs_info;
+1 -1
fs/udf/file.c
··· 62 62 end = size & ~PAGE_MASK; 63 63 else 64 64 end = PAGE_SIZE; 65 - err = __block_write_begin(&folio->page, 0, end, udf_get_block); 65 + err = __block_write_begin(folio, 0, end, udf_get_block); 66 66 if (err) { 67 67 folio_unlock(folio); 68 68 ret = vmf_fs_error(err);
+5 -7
fs/udf/inode.c
··· 246 246 247 247 static int udf_write_begin(struct file *file, struct address_space *mapping, 248 248 loff_t pos, unsigned len, 249 - struct page **pagep, void **fsdata) 249 + struct folio **foliop, void **fsdata) 250 250 { 251 251 struct udf_inode_info *iinfo = UDF_I(file_inode(file)); 252 252 struct folio *folio; 253 253 int ret; 254 254 255 255 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { 256 - ret = block_write_begin(mapping, pos, len, pagep, 256 + ret = block_write_begin(mapping, pos, len, foliop, 257 257 udf_get_block); 258 258 if (unlikely(ret)) 259 259 udf_write_failed(mapping, pos + len); ··· 265 265 mapping_gfp_mask(mapping)); 266 266 if (IS_ERR(folio)) 267 267 return PTR_ERR(folio); 268 - *pagep = &folio->page; 268 + *foliop = folio; 269 269 if (!folio_test_uptodate(folio)) 270 270 udf_adinicb_read_folio(folio); 271 271 return 0; ··· 273 273 274 274 static int udf_write_end(struct file *file, struct address_space *mapping, 275 275 loff_t pos, unsigned len, unsigned copied, 276 - struct page *page, void *fsdata) 276 + struct folio *folio, void *fsdata) 277 277 { 278 278 struct inode *inode = file_inode(file); 279 - struct folio *folio; 280 279 loff_t last_pos; 281 280 282 281 if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) 283 - return generic_write_end(file, mapping, pos, len, copied, page, 282 + return generic_write_end(file, mapping, pos, len, copied, folio, 284 283 fsdata); 285 - folio = page_folio(page); 286 284 last_pos = pos + copied; 287 285 if (last_pos > inode->i_size) 288 286 i_size_write(inode, last_pos);
+106 -125
fs/ufs/dir.c
··· 42 42 return !memcmp(name, de->d_name, len); 43 43 } 44 44 45 - static void ufs_commit_chunk(struct page *page, loff_t pos, unsigned len) 45 + static void ufs_commit_chunk(struct folio *folio, loff_t pos, unsigned len) 46 46 { 47 - struct address_space *mapping = page->mapping; 47 + struct address_space *mapping = folio->mapping; 48 48 struct inode *dir = mapping->host; 49 49 50 50 inode_inc_iversion(dir); 51 - block_write_end(NULL, mapping, pos, len, len, page, NULL); 51 + block_write_end(NULL, mapping, pos, len, len, folio, NULL); 52 52 if (pos+len > dir->i_size) { 53 53 i_size_write(dir, pos+len); 54 54 mark_inode_dirty(dir); 55 55 } 56 - unlock_page(page); 56 + folio_unlock(folio); 57 57 } 58 58 59 59 static int ufs_handle_dirsync(struct inode *dir) ··· 66 66 return err; 67 67 } 68 68 69 - static inline void ufs_put_page(struct page *page) 70 - { 71 - kunmap(page); 72 - put_page(page); 73 - } 74 - 75 69 ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr) 76 70 { 77 71 ino_t res = 0; 78 72 struct ufs_dir_entry *de; 79 - struct page *page; 73 + struct folio *folio; 80 74 81 - de = ufs_find_entry(dir, qstr, &page); 75 + de = ufs_find_entry(dir, qstr, &folio); 82 76 if (de) { 83 77 res = fs32_to_cpu(dir->i_sb, de->d_ino); 84 - ufs_put_page(page); 78 + folio_release_kmap(folio, de); 85 79 } 86 80 return res; 87 81 } ··· 83 89 84 90 /* Releases the page */ 85 91 void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, 86 - struct page *page, struct inode *inode, 92 + struct folio *folio, struct inode *inode, 87 93 bool update_times) 88 94 { 89 - loff_t pos = page_offset(page) + 90 - (char *) de - (char *) page_address(page); 95 + loff_t pos = folio_pos(folio) + offset_in_folio(folio, de); 91 96 unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen); 92 97 int err; 93 98 94 - lock_page(page); 95 - err = ufs_prepare_chunk(page, pos, len); 99 + folio_lock(folio); 100 + err = ufs_prepare_chunk(folio, pos, len); 96 101 BUG_ON(err); 97 102 98 103 
de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino); 99 104 ufs_set_de_type(dir->i_sb, de, inode->i_mode); 100 105 101 - ufs_commit_chunk(page, pos, len); 102 - ufs_put_page(page); 106 + ufs_commit_chunk(folio, pos, len); 107 + folio_release_kmap(folio, de); 103 108 if (update_times) 104 109 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); 105 110 mark_inode_dirty(dir); 106 111 ufs_handle_dirsync(dir); 107 112 } 108 113 109 - 110 - static bool ufs_check_page(struct page *page) 114 + static bool ufs_check_folio(struct folio *folio, char *kaddr) 111 115 { 112 - struct inode *dir = page->mapping->host; 116 + struct inode *dir = folio->mapping->host; 113 117 struct super_block *sb = dir->i_sb; 114 - char *kaddr = page_address(page); 115 118 unsigned offs, rec_len; 116 - unsigned limit = PAGE_SIZE; 119 + unsigned limit = folio_size(folio); 117 120 const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1; 118 121 struct ufs_dir_entry *p; 119 122 char *error; 120 123 121 - if ((dir->i_size >> PAGE_SHIFT) == page->index) { 122 - limit = dir->i_size & ~PAGE_MASK; 124 + if (dir->i_size < folio_pos(folio) + limit) { 125 + limit = offset_in_folio(folio, dir->i_size); 123 126 if (limit & chunk_mask) 124 127 goto Ebadsize; 125 128 if (!limit) ··· 141 150 if (offs != limit) 142 151 goto Eend; 143 152 out: 144 - SetPageChecked(page); 153 + folio_set_checked(folio); 145 154 return true; 146 155 147 156 /* Too bad, we had an error */ 148 157 149 158 Ebadsize: 150 - ufs_error(sb, "ufs_check_page", 159 + ufs_error(sb, __func__, 151 160 "size of directory #%lu is not a multiple of chunk size", 152 161 dir->i_ino 153 162 ); ··· 167 176 Einumber: 168 177 error = "inode out of bounds"; 169 178 bad_entry: 170 - ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - " 171 - "offset=%lu, rec_len=%d, name_len=%d", 172 - dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, 179 + ufs_error(sb, __func__, "bad entry in directory #%lu: %s - " 180 + "offset=%llu, 
rec_len=%d, name_len=%d", 181 + dir->i_ino, error, folio_pos(folio) + offs, 173 182 rec_len, ufs_get_de_namlen(sb, p)); 174 183 goto fail; 175 184 Eend: 176 185 p = (struct ufs_dir_entry *)(kaddr + offs); 177 186 ufs_error(sb, __func__, 178 187 "entry in directory #%lu spans the page boundary" 179 - "offset=%lu", 180 - dir->i_ino, (page->index<<PAGE_SHIFT)+offs); 188 + "offset=%llu", 189 + dir->i_ino, folio_pos(folio) + offs); 181 190 fail: 182 191 return false; 183 192 } 184 193 185 - static struct page *ufs_get_page(struct inode *dir, unsigned long n) 194 + static void *ufs_get_folio(struct inode *dir, unsigned long n, 195 + struct folio **foliop) 186 196 { 187 197 struct address_space *mapping = dir->i_mapping; 188 - struct page *page = read_mapping_page(mapping, n, NULL); 189 - if (!IS_ERR(page)) { 190 - kmap(page); 191 - if (unlikely(!PageChecked(page))) { 192 - if (!ufs_check_page(page)) 193 - goto fail; 194 - } 198 + struct folio *folio = read_mapping_folio(mapping, n, NULL); 199 + void *kaddr; 200 + 201 + if (IS_ERR(folio)) 202 + return ERR_CAST(folio); 203 + kaddr = kmap_local_folio(folio, 0); 204 + if (unlikely(!folio_test_checked(folio))) { 205 + if (!ufs_check_folio(folio, kaddr)) 206 + goto fail; 195 207 } 196 - return page; 208 + *foliop = folio; 209 + return kaddr; 197 210 198 211 fail: 199 - ufs_put_page(page); 212 + folio_release_kmap(folio, kaddr); 200 213 return ERR_PTR(-EIO); 201 214 } 202 215 ··· 226 231 fs16_to_cpu(sb, p->d_reclen)); 227 232 } 228 233 229 - struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p) 234 + struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct folio **foliop) 230 235 { 231 - struct page *page = ufs_get_page(dir, 0); 232 - struct ufs_dir_entry *de = NULL; 236 + struct ufs_dir_entry *de = ufs_get_folio(dir, 0, foliop); 233 237 234 - if (!IS_ERR(page)) { 235 - de = ufs_next_entry(dir->i_sb, 236 - (struct ufs_dir_entry *)page_address(page)); 237 - *p = page; 238 - } 239 - return de; 238 + if 
(!IS_ERR(de)) 239 + return ufs_next_entry(dir->i_sb, de); 240 + 241 + return NULL; 240 242 } 241 243 242 244 /* ··· 245 253 * Entry is guaranteed to be valid. 246 254 */ 247 255 struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr, 248 - struct page **res_page) 256 + struct folio **foliop) 249 257 { 250 258 struct super_block *sb = dir->i_sb; 251 259 const unsigned char *name = qstr->name; ··· 253 261 unsigned reclen = UFS_DIR_REC_LEN(namelen); 254 262 unsigned long start, n; 255 263 unsigned long npages = dir_pages(dir); 256 - struct page *page = NULL; 257 264 struct ufs_inode_info *ui = UFS_I(dir); 258 265 struct ufs_dir_entry *de; 259 266 ··· 261 270 if (npages == 0 || namelen > UFS_MAXNAMLEN) 262 271 goto out; 263 272 264 - /* OFFSET_CACHE */ 265 - *res_page = NULL; 266 - 267 273 start = ui->i_dir_start_lookup; 268 274 269 275 if (start >= npages) 270 276 start = 0; 271 277 n = start; 272 278 do { 273 - char *kaddr; 274 - page = ufs_get_page(dir, n); 275 - if (!IS_ERR(page)) { 276 - kaddr = page_address(page); 277 - de = (struct ufs_dir_entry *) kaddr; 279 + char *kaddr = ufs_get_folio(dir, n, foliop); 280 + 281 + if (!IS_ERR(kaddr)) { 282 + de = (struct ufs_dir_entry *)kaddr; 278 283 kaddr += ufs_last_byte(dir, n) - reclen; 279 284 while ((char *) de <= kaddr) { 280 285 if (ufs_match(sb, namelen, name, de)) 281 286 goto found; 282 287 de = ufs_next_entry(sb, de); 283 288 } 284 - ufs_put_page(page); 289 + folio_release_kmap(*foliop, kaddr); 285 290 } 286 291 if (++n >= npages) 287 292 n = 0; ··· 286 299 return NULL; 287 300 288 301 found: 289 - *res_page = page; 290 302 ui->i_dir_start_lookup = n; 291 303 return de; 292 304 } ··· 302 316 unsigned reclen = UFS_DIR_REC_LEN(namelen); 303 317 const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize; 304 318 unsigned short rec_len, name_len; 305 - struct page *page = NULL; 319 + struct folio *folio = NULL; 306 320 struct ufs_dir_entry *de; 307 321 unsigned long npages = 
dir_pages(dir); 308 322 unsigned long n; 309 - char *kaddr; 310 323 loff_t pos; 311 324 int err; 312 325 ··· 313 328 314 329 /* 315 330 * We take care of directory expansion in the same loop. 316 - * This code plays outside i_size, so it locks the page 331 + * This code plays outside i_size, so it locks the folio 317 332 * to protect that region. 318 333 */ 319 334 for (n = 0; n <= npages; n++) { 335 + char *kaddr = ufs_get_folio(dir, n, &folio); 320 336 char *dir_end; 321 337 322 - page = ufs_get_page(dir, n); 323 - err = PTR_ERR(page); 324 - if (IS_ERR(page)) 325 - goto out; 326 - lock_page(page); 327 - kaddr = page_address(page); 338 + if (IS_ERR(kaddr)) 339 + return PTR_ERR(kaddr); 340 + folio_lock(folio); 328 341 dir_end = kaddr + ufs_last_byte(dir, n); 329 342 de = (struct ufs_dir_entry *)kaddr; 330 - kaddr += PAGE_SIZE - reclen; 343 + kaddr += folio_size(folio) - reclen; 331 344 while ((char *)de <= kaddr) { 332 345 if ((char *)de == dir_end) { 333 346 /* We hit i_size */ ··· 352 369 goto got_it; 353 370 de = (struct ufs_dir_entry *) ((char *) de + rec_len); 354 371 } 355 - unlock_page(page); 356 - ufs_put_page(page); 372 + folio_unlock(folio); 373 + folio_release_kmap(folio, kaddr); 357 374 } 358 375 BUG(); 359 376 return -EINVAL; 360 377 361 378 got_it: 362 - pos = page_offset(page) + 363 - (char*)de - (char*)page_address(page); 364 - err = ufs_prepare_chunk(page, pos, rec_len); 379 + pos = folio_pos(folio) + offset_in_folio(folio, de); 380 + err = ufs_prepare_chunk(folio, pos, rec_len); 365 381 if (err) 366 382 goto out_unlock; 367 383 if (de->d_ino) { ··· 377 395 de->d_ino = cpu_to_fs32(sb, inode->i_ino); 378 396 ufs_set_de_type(sb, de, inode->i_mode); 379 397 380 - ufs_commit_chunk(page, pos, rec_len); 398 + ufs_commit_chunk(folio, pos, rec_len); 381 399 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); 382 400 383 401 mark_inode_dirty(dir); 384 402 err = ufs_handle_dirsync(dir); 385 403 /* OFFSET_CACHE */ 386 404 out_put: 387 - 
ufs_put_page(page); 388 - out: 405 + folio_release_kmap(folio, de); 389 406 return err; 390 407 out_unlock: 391 - unlock_page(page); 408 + folio_unlock(folio); 392 409 goto out_put; 393 410 } 394 411 ··· 425 444 return 0; 426 445 427 446 for ( ; n < npages; n++, offset = 0) { 428 - char *kaddr, *limit; 429 447 struct ufs_dir_entry *de; 448 + struct folio *folio; 449 + char *kaddr = ufs_get_folio(inode, n, &folio); 450 + char *limit; 430 451 431 - struct page *page = ufs_get_page(inode, n); 432 - 433 - if (IS_ERR(page)) { 452 + if (IS_ERR(kaddr)) { 434 453 ufs_error(sb, __func__, 435 454 "bad page in #%lu", 436 455 inode->i_ino); 437 456 ctx->pos += PAGE_SIZE - offset; 438 - return -EIO; 457 + return PTR_ERR(kaddr); 439 458 } 440 - kaddr = page_address(page); 441 459 if (unlikely(need_revalidate)) { 442 460 if (offset) { 443 461 offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask); ··· 462 482 ufs_get_de_namlen(sb, de), 463 483 fs32_to_cpu(sb, de->d_ino), 464 484 d_type)) { 465 - ufs_put_page(page); 485 + folio_release_kmap(folio, de); 466 486 return 0; 467 487 } 468 488 } 469 489 ctx->pos += fs16_to_cpu(sb, de->d_reclen); 470 490 } 471 - ufs_put_page(page); 491 + folio_release_kmap(folio, kaddr); 472 492 } 473 493 return 0; 474 494 } ··· 479 499 * previous entry. 
480 500 */ 481 501 int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir, 482 - struct page * page) 502 + struct folio *folio) 483 503 { 484 504 struct super_block *sb = inode->i_sb; 485 - char *kaddr = page_address(page); 486 - unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1); 487 - unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen); 505 + size_t from, to; 506 + char *kaddr; 488 507 loff_t pos; 489 - struct ufs_dir_entry *pde = NULL; 490 - struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from); 508 + struct ufs_dir_entry *de, *pde = NULL; 491 509 int err; 492 510 493 511 UFSD("ENTER\n"); 512 + 513 + from = offset_in_folio(folio, dir); 514 + to = from + fs16_to_cpu(sb, dir->d_reclen); 515 + kaddr = (char *)dir - from; 516 + from &= ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1); 517 + de = (struct ufs_dir_entry *) (kaddr + from); 494 518 495 519 UFSD("ino %u, reclen %u, namlen %u, name %s\n", 496 520 fs32_to_cpu(sb, de->d_ino), ··· 512 528 de = ufs_next_entry(sb, de); 513 529 } 514 530 if (pde) 515 - from = (char*)pde - (char*)page_address(page); 516 - 517 - pos = page_offset(page) + from; 518 - lock_page(page); 519 - err = ufs_prepare_chunk(page, pos, to - from); 531 + from = offset_in_folio(folio, pde); 532 + pos = folio_pos(folio) + from; 533 + folio_lock(folio); 534 + err = ufs_prepare_chunk(folio, pos, to - from); 520 535 BUG_ON(err); 521 536 if (pde) 522 537 pde->d_reclen = cpu_to_fs16(sb, to - from); 523 538 dir->d_ino = 0; 524 - ufs_commit_chunk(page, pos, to - from); 539 + ufs_commit_chunk(folio, pos, to - from); 525 540 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 526 541 mark_inode_dirty(inode); 527 542 err = ufs_handle_dirsync(inode); 528 543 out: 529 - ufs_put_page(page); 544 + folio_release_kmap(folio, kaddr); 530 545 UFSD("EXIT\n"); 531 546 return err; 532 547 } ··· 534 551 { 535 552 struct super_block * sb = dir->i_sb; 536 553 struct address_space *mapping = 
inode->i_mapping; 537 - struct page *page = grab_cache_page(mapping, 0); 554 + struct folio *folio = filemap_grab_folio(mapping, 0); 538 555 const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize; 539 556 struct ufs_dir_entry * de; 540 - char *base; 541 557 int err; 558 + char *kaddr; 542 559 543 - if (!page) 544 - return -ENOMEM; 560 + if (IS_ERR(folio)) 561 + return PTR_ERR(folio); 545 562 546 - err = ufs_prepare_chunk(page, 0, chunk_size); 563 + err = ufs_prepare_chunk(folio, 0, chunk_size); 547 564 if (err) { 548 - unlock_page(page); 565 + folio_unlock(folio); 549 566 goto fail; 550 567 } 551 568 552 - kmap(page); 553 - base = (char*)page_address(page); 554 - memset(base, 0, PAGE_SIZE); 569 + kaddr = kmap_local_folio(folio, 0); 570 + memset(kaddr, 0, folio_size(folio)); 555 571 556 - de = (struct ufs_dir_entry *) base; 572 + de = (struct ufs_dir_entry *)kaddr; 557 573 558 574 de->d_ino = cpu_to_fs32(sb, inode->i_ino); 559 575 ufs_set_de_type(sb, de, inode->i_mode); ··· 566 584 de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1)); 567 585 ufs_set_de_namlen(sb, de, 2); 568 586 strcpy (de->d_name, ".."); 569 - kunmap(page); 587 + kunmap_local(kaddr); 570 588 571 - ufs_commit_chunk(page, 0, chunk_size); 589 + ufs_commit_chunk(folio, 0, chunk_size); 572 590 err = ufs_handle_dirsync(inode); 573 591 fail: 574 - put_page(page); 592 + folio_put(folio); 575 593 return err; 576 594 } 577 595 ··· 581 599 int ufs_empty_dir(struct inode * inode) 582 600 { 583 601 struct super_block *sb = inode->i_sb; 584 - struct page *page = NULL; 602 + struct folio *folio; 603 + char *kaddr; 585 604 unsigned long i, npages = dir_pages(inode); 586 605 587 606 for (i = 0; i < npages; i++) { 588 - char *kaddr; 589 607 struct ufs_dir_entry *de; 590 - page = ufs_get_page(inode, i); 591 608 592 - if (IS_ERR(page)) 609 + kaddr = ufs_get_folio(inode, i, &folio); 610 + if (IS_ERR(kaddr)) 593 611 continue; 594 612 595 - kaddr = page_address(page); 596 613 de = (struct 
ufs_dir_entry *)kaddr; 597 614 kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1); 598 615 ··· 618 637 } 619 638 de = ufs_next_entry(sb, de); 620 639 } 621 - ufs_put_page(page); 640 + folio_release_kmap(folio, kaddr); 622 641 } 623 642 return 1; 624 643 625 644 not_empty: 626 - ufs_put_page(page); 645 + folio_release_kmap(folio, kaddr); 627 646 return 0; 628 647 } 629 648
+6 -6
fs/ufs/inode.c
··· 479 479 return block_read_full_folio(folio, ufs_getfrag_block); 480 480 } 481 481 482 - int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len) 482 + int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len) 483 483 { 484 - return __block_write_begin(page, pos, len, ufs_getfrag_block); 484 + return __block_write_begin(folio, pos, len, ufs_getfrag_block); 485 485 } 486 486 487 487 static void ufs_truncate_blocks(struct inode *); ··· 498 498 499 499 static int ufs_write_begin(struct file *file, struct address_space *mapping, 500 500 loff_t pos, unsigned len, 501 - struct page **pagep, void **fsdata) 501 + struct folio **foliop, void **fsdata) 502 502 { 503 503 int ret; 504 504 505 - ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block); 505 + ret = block_write_begin(mapping, pos, len, foliop, ufs_getfrag_block); 506 506 if (unlikely(ret)) 507 507 ufs_write_failed(mapping, pos + len); 508 508 ··· 511 511 512 512 static int ufs_write_end(struct file *file, struct address_space *mapping, 513 513 loff_t pos, unsigned len, unsigned copied, 514 - struct page *page, void *fsdata) 514 + struct folio *folio, void *fsdata) 515 515 { 516 516 int ret; 517 517 518 - ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 518 + ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); 519 519 if (ret < len) 520 520 ufs_write_failed(mapping, pos + len); 521 521 return ret;
+17 -22
fs/ufs/namei.c
··· 209 209 { 210 210 struct inode * inode = d_inode(dentry); 211 211 struct ufs_dir_entry *de; 212 - struct page *page; 212 + struct folio *folio; 213 213 int err = -ENOENT; 214 214 215 - de = ufs_find_entry(dir, &dentry->d_name, &page); 215 + de = ufs_find_entry(dir, &dentry->d_name, &folio); 216 216 if (!de) 217 217 goto out; 218 218 219 - err = ufs_delete_entry(dir, de, page); 219 + err = ufs_delete_entry(dir, de, folio); 220 220 if (err) 221 221 goto out; 222 222 ··· 249 249 { 250 250 struct inode *old_inode = d_inode(old_dentry); 251 251 struct inode *new_inode = d_inode(new_dentry); 252 - struct page *dir_page = NULL; 252 + struct folio *dir_folio = NULL; 253 253 struct ufs_dir_entry * dir_de = NULL; 254 - struct page *old_page; 254 + struct folio *old_folio; 255 255 struct ufs_dir_entry *old_de; 256 256 int err = -ENOENT; 257 257 258 258 if (flags & ~RENAME_NOREPLACE) 259 259 return -EINVAL; 260 260 261 - old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page); 261 + old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_folio); 262 262 if (!old_de) 263 263 goto out; 264 264 265 265 if (S_ISDIR(old_inode->i_mode)) { 266 266 err = -EIO; 267 - dir_de = ufs_dotdot(old_inode, &dir_page); 267 + dir_de = ufs_dotdot(old_inode, &dir_folio); 268 268 if (!dir_de) 269 269 goto out_old; 270 270 } 271 271 272 272 if (new_inode) { 273 - struct page *new_page; 273 + struct folio *new_folio; 274 274 struct ufs_dir_entry *new_de; 275 275 276 276 err = -ENOTEMPTY; ··· 278 278 goto out_dir; 279 279 280 280 err = -ENOENT; 281 - new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); 281 + new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_folio); 282 282 if (!new_de) 283 283 goto out_dir; 284 - ufs_set_link(new_dir, new_de, new_page, old_inode, 1); 284 + ufs_set_link(new_dir, new_de, new_folio, old_inode, 1); 285 285 inode_set_ctime_current(new_inode); 286 286 if (dir_de) 287 287 drop_nlink(new_inode); ··· 300 300 */ 301 301 
inode_set_ctime_current(old_inode); 302 302 303 - ufs_delete_entry(old_dir, old_de, old_page); 303 + ufs_delete_entry(old_dir, old_de, old_folio); 304 304 mark_inode_dirty(old_inode); 305 305 306 306 if (dir_de) { 307 307 if (old_dir != new_dir) 308 - ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0); 309 - else { 310 - kunmap(dir_page); 311 - put_page(dir_page); 312 - } 308 + ufs_set_link(old_inode, dir_de, dir_folio, new_dir, 0); 309 + else 310 + folio_release_kmap(dir_folio, new_dir); 313 311 inode_dec_link_count(old_dir); 314 312 } 315 313 return 0; 316 314 317 315 318 316 out_dir: 319 - if (dir_de) { 320 - kunmap(dir_page); 321 - put_page(dir_page); 322 - } 317 + if (dir_de) 318 + folio_release_kmap(dir_folio, dir_de); 323 319 out_old: 324 - kunmap(old_page); 325 - put_page(old_page); 320 + folio_release_kmap(old_folio, old_de); 326 321 out: 327 322 return err; 328 323 }
+11 -9
fs/ufs/ufs.h
··· 99 99 100 100 /* dir.c */ 101 101 extern const struct inode_operations ufs_dir_inode_operations; 102 - extern int ufs_add_link (struct dentry *, struct inode *); 103 - extern ino_t ufs_inode_by_name(struct inode *, const struct qstr *); 104 - extern int ufs_make_empty(struct inode *, struct inode *); 105 - extern struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, struct page **); 106 - extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *); 107 - extern int ufs_empty_dir (struct inode *); 108 - extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **); 109 - extern void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, 110 - struct page *page, struct inode *inode, bool update_times); 102 + 103 + int ufs_add_link(struct dentry *, struct inode *); 104 + ino_t ufs_inode_by_name(struct inode *, const struct qstr *); 105 + int ufs_make_empty(struct inode *, struct inode *); 106 + struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, 107 + struct folio **); 108 + int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct folio *); 109 + int ufs_empty_dir(struct inode *); 110 + struct ufs_dir_entry *ufs_dotdot(struct inode *, struct folio **); 111 + void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, 112 + struct folio *folio, struct inode *inode, bool update_times); 111 113 112 114 /* file.c */ 113 115 extern const struct inode_operations ufs_file_inode_operations;
+3 -3
fs/ufs/util.h
··· 250 250 } 251 251 } 252 252 253 - extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *); 254 - extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t); 255 - extern int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len); 253 + dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *); 254 + void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t); 255 + int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len); 256 256 257 257 /* 258 258 * These functions manipulate ufs buffers
+12 -12
fs/vboxsf/file.c
··· 300 300 301 301 static int vboxsf_write_end(struct file *file, struct address_space *mapping, 302 302 loff_t pos, unsigned int len, unsigned int copied, 303 - struct page *page, void *fsdata) 303 + struct folio *folio, void *fsdata) 304 304 { 305 305 struct inode *inode = mapping->host; 306 306 struct vboxsf_handle *sf_handle = file->private_data; 307 - unsigned int from = pos & ~PAGE_MASK; 307 + size_t from = offset_in_folio(folio, pos); 308 308 u32 nwritten = len; 309 309 u8 *buf; 310 310 int err; 311 311 312 - /* zero the stale part of the page if we did a short copy */ 313 - if (!PageUptodate(page) && copied < len) 314 - zero_user(page, from + copied, len - copied); 312 + /* zero the stale part of the folio if we did a short copy */ 313 + if (!folio_test_uptodate(folio) && copied < len) 314 + folio_zero_range(folio, from + copied, len - copied); 315 315 316 - buf = kmap(page); 316 + buf = kmap(&folio->page); 317 317 err = vboxsf_write(sf_handle->root, sf_handle->handle, 318 318 pos, &nwritten, buf + from); 319 - kunmap(page); 319 + kunmap(&folio->page); 320 320 321 321 if (err) { 322 322 nwritten = 0; ··· 326 326 /* mtime changed */ 327 327 VBOXSF_I(inode)->force_restat = 1; 328 328 329 - if (!PageUptodate(page) && nwritten == PAGE_SIZE) 330 - SetPageUptodate(page); 329 + if (!folio_test_uptodate(folio) && nwritten == folio_size(folio)) 330 + folio_mark_uptodate(folio); 331 331 332 332 pos += nwritten; 333 333 if (pos > inode->i_size) 334 334 i_size_write(inode, pos); 335 335 336 336 out: 337 - unlock_page(page); 338 - put_page(page); 337 + folio_unlock(folio); 338 + folio_put(folio); 339 339 340 340 return nwritten; 341 341 } ··· 343 343 /* 344 344 * Note simple_write_begin does not read the page from disk on partial writes 345 345 * this is ok since vboxsf_write_end only writes the written parts of the 346 - * page and it does not call SetPageUptodate for partial writes. 346 + * page and it does not call folio_mark_uptodate for partial writes. 
347 347 */ 348 348 const struct address_space_operations vboxsf_reg_aops = { 349 349 .read_folio = vboxsf_read_folio,
+7 -7
include/linux/buffer_head.h
··· 257 257 int block_read_full_folio(struct folio *, get_block_t *); 258 258 bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); 259 259 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 260 - struct page **pagep, get_block_t *get_block); 261 - int __block_write_begin(struct page *page, loff_t pos, unsigned len, 260 + struct folio **foliop, get_block_t *get_block); 261 + int __block_write_begin(struct folio *folio, loff_t pos, unsigned len, 262 262 get_block_t *get_block); 263 263 int block_write_end(struct file *, struct address_space *, 264 - loff_t, unsigned, unsigned, 265 - struct page *, void *); 264 + loff_t, unsigned len, unsigned copied, 265 + struct folio *, void *); 266 266 int generic_write_end(struct file *, struct address_space *, 267 - loff_t, unsigned, unsigned, 268 - struct page *, void *); 267 + loff_t, unsigned len, unsigned copied, 268 + struct folio *, void *); 269 269 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to); 270 270 int cont_write_begin(struct file *, struct address_space *, loff_t, 271 - unsigned, struct page **, void **, 271 + unsigned, struct folio **, void **, 272 272 get_block_t *, loff_t *); 273 273 int generic_cont_expand_simple(struct inode *inode, loff_t size); 274 274 void block_commit_write(struct page *page, unsigned int from, unsigned int to);
+3 -3
include/linux/fs.h
··· 408 408 409 409 int (*write_begin)(struct file *, struct address_space *mapping, 410 410 loff_t pos, unsigned len, 411 - struct page **pagep, void **fsdata); 411 + struct folio **foliop, void **fsdata); 412 412 int (*write_end)(struct file *, struct address_space *mapping, 413 413 loff_t pos, unsigned len, unsigned copied, 414 - struct page *page, void *fsdata); 414 + struct folio *folio, void *fsdata); 415 415 416 416 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ 417 417 sector_t (*bmap)(struct address_space *, sector_t); ··· 3363 3363 extern int simple_empty(struct dentry *); 3364 3364 extern int simple_write_begin(struct file *file, struct address_space *mapping, 3365 3365 loff_t pos, unsigned len, 3366 - struct page **pagep, void **fsdata); 3366 + struct folio **foliop, void **fsdata); 3367 3367 extern const struct address_space_operations ram_aops; 3368 3368 extern int always_delete_dentry(const struct dentry *); 3369 3369 extern struct inode *alloc_anon_inode(struct super_block *);
+2 -4
mm/filemap.c
··· 3987 3987 ssize_t written = 0; 3988 3988 3989 3989 do { 3990 - struct page *page; 3991 3990 struct folio *folio; 3992 3991 size_t offset; /* Offset into folio */ 3993 3992 size_t bytes; /* Bytes to write to folio */ ··· 4016 4017 } 4017 4018 4018 4019 status = a_ops->write_begin(file, mapping, pos, bytes, 4019 - &page, &fsdata); 4020 + &folio, &fsdata); 4020 4021 if (unlikely(status < 0)) 4021 4022 break; 4022 4023 4023 - folio = page_folio(page); 4024 4024 offset = offset_in_folio(folio, pos); 4025 4025 if (bytes > folio_size(folio) - offset) 4026 4026 bytes = folio_size(folio) - offset; ··· 4031 4033 flush_dcache_folio(folio); 4032 4034 4033 4035 status = a_ops->write_end(file, mapping, pos, bytes, copied, 4034 - page, fsdata); 4036 + folio, fsdata); 4035 4037 if (unlikely(status != copied)) { 4036 4038 iov_iter_revert(i, copied - max(status, 0L)); 4037 4039 if (unlikely(status < 0))
+5 -6
mm/shmem.c
··· 2878 2878 static int 2879 2879 shmem_write_begin(struct file *file, struct address_space *mapping, 2880 2880 loff_t pos, unsigned len, 2881 - struct page **pagep, void **fsdata) 2881 + struct folio **foliop, void **fsdata) 2882 2882 { 2883 2883 struct inode *inode = mapping->host; 2884 2884 struct shmem_inode_info *info = SHMEM_I(inode); ··· 2899 2899 if (ret) 2900 2900 return ret; 2901 2901 2902 - *pagep = folio_file_page(folio, index); 2903 - if (PageHWPoison(*pagep)) { 2902 + if (folio_test_hwpoison(folio) || 2903 + (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { 2904 2904 folio_unlock(folio); 2905 2905 folio_put(folio); 2906 - *pagep = NULL; 2907 2906 return -EIO; 2908 2907 } 2909 2908 2909 + *foliop = folio; 2910 2910 return 0; 2911 2911 } 2912 2912 2913 2913 static int 2914 2914 shmem_write_end(struct file *file, struct address_space *mapping, 2915 2915 loff_t pos, unsigned len, unsigned copied, 2916 - struct page *page, void *fsdata) 2916 + struct folio *folio, void *fsdata) 2917 2917 { 2918 - struct folio *folio = page_folio(page); 2919 2918 struct inode *inode = mapping->host; 2920 2919 2921 2920 if (pos + copied > inode->i_size)