Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'pull-write-one-page' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs write_one_page removal from Al Viro:
"write_one_page series"

* tag 'pull-write-one-page' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
mm,jfs: move write_one_page/folio_write_one to jfs
ocfs2: don't use write_one_page in ocfs2_duplicate_clusters_by_page
ufs: don't flush page immediately for DIRSYNC directories

+58 -65
+34 -5
fs/jfs/jfs_metapage.c
··· 691 691 unlock_page(mp->page); 692 692 } 693 693 694 + static int metapage_write_one(struct page *page) 695 + { 696 + struct folio *folio = page_folio(page); 697 + struct address_space *mapping = folio->mapping; 698 + struct writeback_control wbc = { 699 + .sync_mode = WB_SYNC_ALL, 700 + .nr_to_write = folio_nr_pages(folio), 701 + }; 702 + int ret = 0; 703 + 704 + BUG_ON(!folio_test_locked(folio)); 705 + 706 + folio_wait_writeback(folio); 707 + 708 + if (folio_clear_dirty_for_io(folio)) { 709 + folio_get(folio); 710 + ret = metapage_writepage(page, &wbc); 711 + if (ret == 0) 712 + folio_wait_writeback(folio); 713 + folio_put(folio); 714 + } else { 715 + folio_unlock(folio); 716 + } 717 + 718 + if (!ret) 719 + ret = filemap_check_errors(mapping); 720 + return ret; 721 + } 722 + 694 723 void force_metapage(struct metapage *mp) 695 724 { 696 725 struct page *page = mp->page; ··· 729 700 get_page(page); 730 701 lock_page(page); 731 702 set_page_dirty(page); 732 - if (write_one_page(page)) 733 - jfs_error(mp->sb, "write_one_page() failed\n"); 703 + if (metapage_write_one(page)) 704 + jfs_error(mp->sb, "metapage_write_one() failed\n"); 734 705 clear_bit(META_forcewrite, &mp->flag); 735 706 put_page(page); 736 707 } ··· 775 746 set_page_dirty(page); 776 747 if (test_bit(META_sync, &mp->flag)) { 777 748 clear_bit(META_sync, &mp->flag); 778 - if (write_one_page(page)) 779 - jfs_error(mp->sb, "write_one_page() failed\n"); 780 - lock_page(page); /* write_one_page unlocks the page */ 749 + if (metapage_write_one(page)) 750 + jfs_error(mp->sb, "metapage_write_one() failed\n"); 751 + lock_page(page); 781 752 } 782 753 } else if (mp->lsn) /* discard_metapage doesn't remove it */ 783 754 remove_from_logsync(mp);
+5 -4
fs/ocfs2/refcounttree.c
··· 2952 2952 */ 2953 2953 if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) { 2954 2954 if (PageDirty(page)) { 2955 - /* 2956 - * write_on_page will unlock the page on return 2957 - */ 2958 - ret = write_one_page(page); 2955 + unlock_page(page); 2956 + put_page(page); 2957 + 2958 + ret = filemap_write_and_wait_range(mapping, 2959 + offset, map_end - 1); 2959 2960 goto retry; 2960 2961 } 2961 2962 }
+19 -10
fs/ufs/dir.c
··· 42 42 return !memcmp(name, de->d_name, len); 43 43 } 44 44 45 - static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len) 45 + static void ufs_commit_chunk(struct page *page, loff_t pos, unsigned len) 46 46 { 47 47 struct address_space *mapping = page->mapping; 48 48 struct inode *dir = mapping->host; 49 - int err = 0; 50 49 51 50 inode_inc_iversion(dir); 52 51 block_write_end(NULL, mapping, pos, len, len, page, NULL); ··· 53 54 i_size_write(dir, pos+len); 54 55 mark_inode_dirty(dir); 55 56 } 56 - if (IS_DIRSYNC(dir)) 57 - err = write_one_page(page); 58 - else 59 - unlock_page(page); 57 + unlock_page(page); 58 + } 59 + 60 + static int ufs_handle_dirsync(struct inode *dir) 61 + { 62 + int err; 63 + 64 + err = filemap_write_and_wait(dir->i_mapping); 65 + if (!err) 66 + err = sync_inode_metadata(dir, 1); 60 67 return err; 61 68 } 62 69 ··· 104 99 de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino); 105 100 ufs_set_de_type(dir->i_sb, de, inode->i_mode); 106 101 107 - err = ufs_commit_chunk(page, pos, len); 102 + ufs_commit_chunk(page, pos, len); 108 103 ufs_put_page(page); 109 104 if (update_times) 110 105 dir->i_mtime = dir->i_ctime = current_time(dir); 111 106 mark_inode_dirty(dir); 107 + ufs_handle_dirsync(dir); 112 108 } 113 109 114 110 ··· 396 390 de->d_ino = cpu_to_fs32(sb, inode->i_ino); 397 391 ufs_set_de_type(sb, de, inode->i_mode); 398 392 399 - err = ufs_commit_chunk(page, pos, rec_len); 393 + ufs_commit_chunk(page, pos, rec_len); 400 394 dir->i_mtime = dir->i_ctime = current_time(dir); 401 395 402 396 mark_inode_dirty(dir); 397 + err = ufs_handle_dirsync(dir); 403 398 /* OFFSET_CACHE */ 404 399 out_put: 405 400 ufs_put_page(page); ··· 538 531 if (pde) 539 532 pde->d_reclen = cpu_to_fs16(sb, to - from); 540 533 dir->d_ino = 0; 541 - err = ufs_commit_chunk(page, pos, to - from); 534 + ufs_commit_chunk(page, pos, to - from); 542 535 inode->i_ctime = inode->i_mtime = current_time(inode); 543 536 mark_inode_dirty(inode); 537 + err = ufs_handle_dirsync(inode); 544 538 out: 545 539 ufs_put_page(page); 546 540 UFSD("EXIT\n"); ··· 587 579 strcpy (de->d_name, ".."); 588 580 kunmap(page); 589 581 590 - err = ufs_commit_chunk(page, 0, chunk_size); 582 + ufs_commit_chunk(page, 0, chunk_size); 583 + err = ufs_handle_dirsync(inode); 591 584 fail: 592 585 put_page(page); 593 586 return err;
-6
include/linux/pagemap.h
··· 1066 1066 bool folio_clear_dirty_for_io(struct folio *folio); 1067 1067 bool clear_page_dirty_for_io(struct page *page); 1068 1068 void folio_invalidate(struct folio *folio, size_t offset, size_t length); 1069 - int __must_check folio_write_one(struct folio *folio); 1070 - static inline int __must_check write_one_page(struct page *page) 1071 - { 1072 - return folio_write_one(page_folio(page)); 1073 - } 1074 - 1075 1069 int __set_page_dirty_nobuffers(struct page *page); 1076 1070 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); 1077 1071
-40
mm/page-writeback.c
··· 2583 2583 return ret; 2584 2584 } 2585 2585 2586 - /** 2587 - * folio_write_one - write out a single folio and wait on I/O. 2588 - * @folio: The folio to write. 2589 - * 2590 - * The folio must be locked by the caller and will be unlocked upon return. 2591 - * 2592 - * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this 2593 - * function returns. 2594 - * 2595 - * Return: %0 on success, negative error code otherwise 2596 - */ 2597 - int folio_write_one(struct folio *folio) 2598 - { 2599 - struct address_space *mapping = folio->mapping; 2600 - int ret = 0; 2601 - struct writeback_control wbc = { 2602 - .sync_mode = WB_SYNC_ALL, 2603 - .nr_to_write = folio_nr_pages(folio), 2604 - }; 2605 - 2606 - BUG_ON(!folio_test_locked(folio)); 2607 - 2608 - folio_wait_writeback(folio); 2609 - 2610 - if (folio_clear_dirty_for_io(folio)) { 2611 - folio_get(folio); 2612 - ret = mapping->a_ops->writepage(&folio->page, &wbc); 2613 - if (ret == 0) 2614 - folio_wait_writeback(folio); 2615 - folio_put(folio); 2616 - } else { 2617 - folio_unlock(folio); 2618 - } 2619 - 2620 - if (!ret) 2621 - ret = filemap_check_errors(mapping); 2622 - return ret; 2623 - } 2624 - EXPORT_SYMBOL(folio_write_one); 2625 - 2626 2586 /* 2627 2587 * For address_spaces which do not use buffers nor write back. 2628 2588 */