Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fs: Convert __set_page_dirty_buffers to block_dirty_folio

Convert all callers; mostly this is just changing the aops to point
at it, but a few implementations need a little more work.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs

+76 -85
+1 -1
block/fops.c
··· 429 429 } 430 430 431 431 const struct address_space_operations def_blk_aops = { 432 - .set_page_dirty = __set_page_dirty_buffers, 432 + .dirty_folio = block_dirty_folio, 433 433 .invalidate_folio = block_invalidate_folio, 434 434 .readpage = blkdev_readpage, 435 435 .readahead = blkdev_readahead,
+1 -1
fs/adfs/inode.c
··· 73 73 } 74 74 75 75 static const struct address_space_operations adfs_aops = { 76 - .set_page_dirty = __set_page_dirty_buffers, 76 + .dirty_folio = block_dirty_folio, 77 77 .invalidate_folio = block_invalidate_folio, 78 78 .readpage = adfs_readpage, 79 79 .writepage = adfs_writepage,
+2 -2
fs/affs/file.c
··· 453 453 } 454 454 455 455 const struct address_space_operations affs_aops = { 456 - .set_page_dirty = __set_page_dirty_buffers, 456 + .dirty_folio = block_dirty_folio, 457 457 .invalidate_folio = block_invalidate_folio, 458 458 .readpage = affs_readpage, 459 459 .writepage = affs_writepage, ··· 835 835 } 836 836 837 837 const struct address_space_operations affs_aops_ofs = { 838 - .set_page_dirty = __set_page_dirty_buffers, 838 + .dirty_folio = block_dirty_folio, 839 839 .invalidate_folio = block_invalidate_folio, 840 840 .readpage = affs_readpage_ofs, 841 841 //.writepage = affs_writepage_ofs,
+1 -1
fs/bfs/file.c
··· 188 188 } 189 189 190 190 const struct address_space_operations bfs_aops = { 191 - .set_page_dirty = __set_page_dirty_buffers, 191 + .dirty_folio = block_dirty_folio, 192 192 .invalidate_folio = block_invalidate_folio, 193 193 .readpage = bfs_readpage, 194 194 .writepage = bfs_writepage,
+15 -18
fs/buffer.c
··· 613 613 * FIXME: may need to call ->reservepage here as well. That's rather up to the 614 614 * address_space though. 615 615 */ 616 - int __set_page_dirty_buffers(struct page *page) 616 + bool block_dirty_folio(struct address_space *mapping, struct folio *folio) 617 617 { 618 - int newly_dirty; 619 - struct address_space *mapping = page_mapping(page); 620 - 621 - if (unlikely(!mapping)) 622 - return !TestSetPageDirty(page); 618 + struct buffer_head *head; 619 + bool newly_dirty; 623 620 624 621 spin_lock(&mapping->private_lock); 625 - if (page_has_buffers(page)) { 626 - struct buffer_head *head = page_buffers(page); 622 + head = folio_buffers(folio); 623 + if (head) { 627 624 struct buffer_head *bh = head; 628 625 629 626 do { ··· 632 635 * Lock out page's memcg migration to keep PageDirty 633 636 * synchronized with per-memcg dirty page counters. 634 637 */ 635 - lock_page_memcg(page); 636 - newly_dirty = !TestSetPageDirty(page); 638 + folio_memcg_lock(folio); 639 + newly_dirty = !folio_test_set_dirty(folio); 637 640 spin_unlock(&mapping->private_lock); 638 641 639 642 if (newly_dirty) 640 - __set_page_dirty(page, mapping, 1); 643 + __folio_mark_dirty(folio, mapping, 1); 641 644 642 - unlock_page_memcg(page); 645 + folio_memcg_unlock(folio); 643 646 644 647 if (newly_dirty) 645 648 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 646 649 647 650 return newly_dirty; 648 651 } 649 - EXPORT_SYMBOL(__set_page_dirty_buffers); 652 + EXPORT_SYMBOL(block_dirty_folio); 650 653 651 654 /* 652 655 * Write out and wait upon a list of buffers. ··· 1545 1548 1546 1549 /* 1547 1550 * We attach and possibly dirty the buffers atomically wrt 1548 - * __set_page_dirty_buffers() via private_lock. try_to_free_buffers 1551 + * block_dirty_folio() via private_lock. try_to_free_buffers 1549 1552 * is already excluded via the page lock. 
1550 1553 */ 1551 1554 void create_empty_buffers(struct page *page, ··· 1720 1723 (1 << BH_Dirty)|(1 << BH_Uptodate)); 1721 1724 1722 1725 /* 1723 - * Be very careful. We have no exclusion from __set_page_dirty_buffers 1726 + * Be very careful. We have no exclusion from block_dirty_folio 1724 1727 * here, and the (potentially unmapped) buffers may become dirty at 1725 1728 * any time. If a buffer becomes dirty here after we've inspected it 1726 1729 * then we just miss that fact, and the page stays dirty. 1727 1730 * 1728 - * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; 1731 + * Buffers outside i_size may be dirtied by block_dirty_folio; 1729 1732 * handle that here by just cleaning them. 1730 1733 */ 1731 1734 ··· 3179 3182 * 3180 3183 * The same applies to regular filesystem pages: if all the buffers are 3181 3184 * clean then we set the page clean and proceed. To do that, we require 3182 - * total exclusion from __set_page_dirty_buffers(). That is obtained with 3185 + * total exclusion from block_dirty_folio(). That is obtained with 3183 3186 * private_lock. 3184 3187 * 3185 3188 * try_to_free_buffers() is non-blocking. ··· 3246 3249 * the page also. 3247 3250 * 3248 3251 * private_lock must be held over this entire operation in order 3249 - * to synchronise against __set_page_dirty_buffers and prevent the 3252 + * to synchronise against block_dirty_folio and prevent the 3250 3253 * dirty bit from being lost. 3251 3254 */ 3252 3255 if (ret)
+1 -1
fs/ecryptfs/mmap.c
··· 545 545 * feedback. 546 546 */ 547 547 #ifdef CONFIG_BLOCK 548 - .set_page_dirty = __set_page_dirty_buffers, 548 + .dirty_folio = block_dirty_folio, 549 549 .invalidate_folio = block_invalidate_folio, 550 550 #endif 551 551 .writepage = ecryptfs_writepage,
+1 -1
fs/exfat/inode.c
··· 490 490 } 491 491 492 492 static const struct address_space_operations exfat_aops = { 493 - .set_page_dirty = __set_page_dirty_buffers, 493 + .dirty_folio = block_dirty_folio, 494 494 .invalidate_folio = block_invalidate_folio, 495 495 .readpage = exfat_readpage, 496 496 .readahead = exfat_readahead,
+4 -4
fs/ext2/inode.c
··· 967 967 } 968 968 969 969 const struct address_space_operations ext2_aops = { 970 - .set_page_dirty = __set_page_dirty_buffers, 971 - .invalidate_folio = block_invalidate_folio, 970 + .dirty_folio = block_dirty_folio, 971 + .invalidate_folio = block_invalidate_folio, 972 972 .readpage = ext2_readpage, 973 973 .readahead = ext2_readahead, 974 974 .writepage = ext2_writepage, ··· 983 983 }; 984 984 985 985 const struct address_space_operations ext2_nobh_aops = { 986 - .set_page_dirty = __set_page_dirty_buffers, 987 - .invalidate_folio = block_invalidate_folio, 986 + .dirty_folio = block_dirty_folio, 987 + .invalidate_folio = block_invalidate_folio, 988 988 .readpage = ext2_readpage, 989 989 .readahead = ext2_readahead, 990 990 .writepage = ext2_nobh_writepage,
+6 -6
fs/ext4/inode.c
··· 3560 3560 return filemap_dirty_folio(mapping, folio); 3561 3561 } 3562 3562 3563 - static int ext4_set_page_dirty(struct page *page) 3563 + static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio) 3564 3564 { 3565 - WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page)); 3566 - WARN_ON_ONCE(!page_has_buffers(page)); 3567 - return __set_page_dirty_buffers(page); 3565 + WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio)); 3566 + WARN_ON_ONCE(!folio_buffers(folio)); 3567 + return block_dirty_folio(mapping, folio); 3568 3568 } 3569 3569 3570 3570 static int ext4_iomap_swap_activate(struct swap_info_struct *sis, ··· 3581 3581 .writepages = ext4_writepages, 3582 3582 .write_begin = ext4_write_begin, 3583 3583 .write_end = ext4_write_end, 3584 - .set_page_dirty = ext4_set_page_dirty, 3584 + .dirty_folio = ext4_dirty_folio, 3585 3585 .bmap = ext4_bmap, 3586 3586 .invalidate_folio = ext4_invalidate_folio, 3587 3587 .releasepage = ext4_releasepage, ··· 3616 3616 .writepages = ext4_writepages, 3617 3617 .write_begin = ext4_da_write_begin, 3618 3618 .write_end = ext4_da_write_end, 3619 - .set_page_dirty = ext4_set_page_dirty, 3619 + .dirty_folio = ext4_dirty_folio, 3620 3620 .bmap = ext4_bmap, 3621 3621 .invalidate_folio = ext4_invalidate_folio, 3622 3622 .releasepage = ext4_releasepage,
+1 -1
fs/fat/inode.c
··· 342 342 } 343 343 344 344 static const struct address_space_operations fat_aops = { 345 - .set_page_dirty = __set_page_dirty_buffers, 345 + .dirty_folio = block_dirty_folio, 346 346 .invalidate_folio = block_invalidate_folio, 347 347 .readpage = fat_readpage, 348 348 .readahead = fat_readahead,
+5 -11
fs/gfs2/aops.c
··· 606 606 gfs2_trans_end(sdp); 607 607 } 608 608 609 - /** 610 - * jdata_set_page_dirty - Page dirtying function 611 - * @page: The page to dirty 612 - * 613 - * Returns: 1 if it dirtyed the page, or 0 otherwise 614 - */ 615 - 616 - static int jdata_set_page_dirty(struct page *page) 609 + static bool jdata_dirty_folio(struct address_space *mapping, 610 + struct folio *folio) 617 611 { 618 612 if (current->journal_info) 619 - SetPageChecked(page); 620 - return __set_page_dirty_buffers(page); 613 + folio_set_checked(folio); 614 + return block_dirty_folio(mapping, folio); 621 615 } 622 616 623 617 /** ··· 789 795 .writepages = gfs2_jdata_writepages, 790 796 .readpage = gfs2_readpage, 791 797 .readahead = gfs2_readahead, 792 - .set_page_dirty = jdata_set_page_dirty, 798 + .dirty_folio = jdata_dirty_folio, 793 799 .bmap = gfs2_bmap, 794 800 .invalidate_folio = gfs2_invalidate_folio, 795 801 .releasepage = gfs2_releasepage,
+2 -2
fs/gfs2/meta_io.c
··· 89 89 } 90 90 91 91 const struct address_space_operations gfs2_meta_aops = { 92 - .set_page_dirty = __set_page_dirty_buffers, 92 + .dirty_folio = block_dirty_folio, 93 93 .invalidate_folio = block_invalidate_folio, 94 94 .writepage = gfs2_aspace_writepage, 95 95 .releasepage = gfs2_releasepage, 96 96 }; 97 97 98 98 const struct address_space_operations gfs2_rgrp_aops = { 99 - .set_page_dirty = __set_page_dirty_buffers, 99 + .dirty_folio = block_dirty_folio, 100 100 .invalidate_folio = block_invalidate_folio, 101 101 .writepage = gfs2_aspace_writepage, 102 102 .releasepage = gfs2_releasepage,
+2 -2
fs/hfs/inode.c
··· 159 159 } 160 160 161 161 const struct address_space_operations hfs_btree_aops = { 162 - .set_page_dirty = __set_page_dirty_buffers, 162 + .dirty_folio = block_dirty_folio, 163 163 .invalidate_folio = block_invalidate_folio, 164 164 .readpage = hfs_readpage, 165 165 .writepage = hfs_writepage, ··· 170 170 }; 171 171 172 172 const struct address_space_operations hfs_aops = { 173 - .set_page_dirty = __set_page_dirty_buffers, 173 + .dirty_folio = block_dirty_folio, 174 174 .invalidate_folio = block_invalidate_folio, 175 175 .readpage = hfs_readpage, 176 176 .writepage = hfs_writepage,
+2 -2
fs/hfsplus/inode.c
··· 156 156 } 157 157 158 158 const struct address_space_operations hfsplus_btree_aops = { 159 - .set_page_dirty = __set_page_dirty_buffers, 159 + .dirty_folio = block_dirty_folio, 160 160 .invalidate_folio = block_invalidate_folio, 161 161 .readpage = hfsplus_readpage, 162 162 .writepage = hfsplus_writepage, ··· 167 167 }; 168 168 169 169 const struct address_space_operations hfsplus_aops = { 170 - .set_page_dirty = __set_page_dirty_buffers, 170 + .dirty_folio = block_dirty_folio, 171 171 .invalidate_folio = block_invalidate_folio, 172 172 .readpage = hfsplus_readpage, 173 173 .writepage = hfsplus_writepage,
+1 -1
fs/hpfs/file.c
··· 245 245 } 246 246 247 247 const struct address_space_operations hpfs_aops = { 248 - .set_page_dirty = __set_page_dirty_buffers, 248 + .dirty_folio = block_dirty_folio, 249 249 .invalidate_folio = block_invalidate_folio, 250 250 .readpage = hpfs_readpage, 251 251 .writepage = hpfs_writepage,
+1 -1
fs/jfs/inode.c
··· 357 357 } 358 358 359 359 const struct address_space_operations jfs_aops = { 360 - .set_page_dirty = __set_page_dirty_buffers, 360 + .dirty_folio = block_dirty_folio, 361 361 .invalidate_folio = block_invalidate_folio, 362 362 .readpage = jfs_readpage, 363 363 .readahead = jfs_readahead,
+1 -1
fs/minix/inode.c
··· 442 442 } 443 443 444 444 static const struct address_space_operations minix_aops = { 445 - .set_page_dirty = __set_page_dirty_buffers, 445 + .dirty_folio = block_dirty_folio, 446 446 .invalidate_folio = block_invalidate_folio, 447 447 .readpage = minix_readpage, 448 448 .writepage = minix_writepage,
+1 -1
fs/mpage.c
··· 504 504 if (!buffer_mapped(bh)) { 505 505 /* 506 506 * unmapped dirty buffers are created by 507 - * __set_page_dirty_buffers -> mmapped data 507 + * block_dirty_folio -> mmapped data 508 508 */ 509 509 if (buffer_dirty(bh)) 510 510 goto confused;
+2 -2
fs/nilfs2/mdt.c
··· 434 434 435 435 436 436 static const struct address_space_operations def_mdt_aops = { 437 - .set_page_dirty = __set_page_dirty_buffers, 438 - .invalidate_folio = block_invalidate_folio, 437 + .dirty_folio = block_dirty_folio, 438 + .invalidate_folio = block_invalidate_folio, 439 439 .writepage = nilfs_mdt_write_page, 440 440 }; 441 441
+6 -6
fs/ntfs/aops.c
··· 593 593 iblock = initialized_size >> blocksize_bits; 594 594 595 595 /* 596 - * Be very careful. We have no exclusion from __set_page_dirty_buffers 596 + * Be very careful. We have no exclusion from block_dirty_folio 597 597 * here, and the (potentially unmapped) buffers may become dirty at 598 598 * any time. If a buffer becomes dirty here after we've inspected it 599 599 * then we just miss that fact, and the page stays dirty. 600 600 * 601 - * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; 601 + * Buffers outside i_size may be dirtied by block_dirty_folio; 602 602 * handle that here by just cleaning them. 603 603 */ 604 604 ··· 653 653 // Update initialized size in the attribute and 654 654 // in the inode. 655 655 // Again, for each page do: 656 - // __set_page_dirty_buffers(); 656 + // block_dirty_folio(); 657 657 // put_page() 658 658 // We don't need to wait on the writes. 659 659 // Update iblock. ··· 1654 1654 .readpage = ntfs_readpage, 1655 1655 #ifdef NTFS_RW 1656 1656 .writepage = ntfs_writepage, 1657 - .set_page_dirty = __set_page_dirty_buffers, 1657 + .dirty_folio = block_dirty_folio, 1658 1658 #endif /* NTFS_RW */ 1659 1659 .bmap = ntfs_bmap, 1660 1660 .migratepage = buffer_migrate_page, ··· 1669 1669 .readpage = ntfs_readpage, 1670 1670 #ifdef NTFS_RW 1671 1671 .writepage = ntfs_writepage, 1672 - .set_page_dirty = __set_page_dirty_buffers, 1672 + .dirty_folio = block_dirty_folio, 1673 1673 #endif /* NTFS_RW */ 1674 1674 .migratepage = buffer_migrate_page, 1675 1675 .is_partially_uptodate = block_is_partially_uptodate, ··· 1746 1746 set_buffer_dirty(bh); 1747 1747 } while ((bh = bh->b_this_page) != head); 1748 1748 spin_unlock(&mapping->private_lock); 1749 - __set_page_dirty_nobuffers(page); 1749 + block_dirty_folio(mapping, page_folio(page)); 1750 1750 if (unlikely(buffers_to_free)) { 1751 1751 do { 1752 1752 bh = buffers_to_free->b_this_page;
+1 -1
fs/ntfs3/inode.c
··· 1950 1950 .write_end = ntfs_write_end, 1951 1951 .direct_IO = ntfs_direct_IO, 1952 1952 .bmap = ntfs_bmap, 1953 - .set_page_dirty = __set_page_dirty_buffers, 1953 + .dirty_folio = block_dirty_folio, 1954 1954 }; 1955 1955 1956 1956 const struct address_space_operations ntfs_aops_cmpr = {
+1 -1
fs/ocfs2/aops.c
··· 2453 2453 } 2454 2454 2455 2455 const struct address_space_operations ocfs2_aops = { 2456 - .set_page_dirty = __set_page_dirty_buffers, 2456 + .dirty_folio = block_dirty_folio, 2457 2457 .readpage = ocfs2_readpage, 2458 2458 .readahead = ocfs2_readahead, 2459 2459 .writepage = ocfs2_writepage,
+1 -1
fs/omfs/file.c
··· 372 372 }; 373 373 374 374 const struct address_space_operations omfs_aops = { 375 - .set_page_dirty = __set_page_dirty_buffers, 375 + .dirty_folio = block_dirty_folio, 376 376 .invalidate_folio = block_invalidate_folio, 377 377 .readpage = omfs_readpage, 378 378 .readahead = omfs_readahead,
+7 -7
fs/reiserfs/inode.c
··· 3201 3201 return; 3202 3202 } 3203 3203 3204 - static int reiserfs_set_page_dirty(struct page *page) 3204 + static bool reiserfs_dirty_folio(struct address_space *mapping, 3205 + struct folio *folio) 3205 3206 { 3206 - struct inode *inode = page->mapping->host; 3207 - if (reiserfs_file_data_log(inode)) { 3208 - SetPageChecked(page); 3209 - return __set_page_dirty_nobuffers(page); 3207 + if (reiserfs_file_data_log(mapping->host)) { 3208 + folio_set_checked(folio); 3209 + return filemap_dirty_folio(mapping, folio); 3210 3210 } 3211 - return __set_page_dirty_buffers(page); 3211 + return block_dirty_folio(mapping, folio); 3212 3212 } 3213 3213 3214 3214 /* ··· 3435 3435 .write_end = reiserfs_write_end, 3436 3436 .bmap = reiserfs_aop_bmap, 3437 3437 .direct_IO = reiserfs_direct_IO, 3438 - .set_page_dirty = reiserfs_set_page_dirty, 3438 + .dirty_folio = reiserfs_dirty_folio, 3439 3439 };
+1 -1
fs/sysv/itree.c
··· 495 495 } 496 496 497 497 const struct address_space_operations sysv_aops = { 498 - .set_page_dirty = __set_page_dirty_buffers, 498 + .dirty_folio = block_dirty_folio, 499 499 .invalidate_folio = block_invalidate_folio, 500 500 .readpage = sysv_readpage, 501 501 .writepage = sysv_writepage,
+1 -1
fs/udf/file.c
··· 125 125 } 126 126 127 127 const struct address_space_operations udf_adinicb_aops = { 128 - .set_page_dirty = __set_page_dirty_buffers, 128 + .dirty_folio = block_dirty_folio, 129 129 .invalidate_folio = block_invalidate_folio, 130 130 .readpage = udf_adinicb_readpage, 131 131 .writepage = udf_adinicb_writepage,
+1 -1
fs/udf/inode.c
··· 235 235 } 236 236 237 237 const struct address_space_operations udf_aops = { 238 - .set_page_dirty = __set_page_dirty_buffers, 238 + .dirty_folio = block_dirty_folio, 239 239 .invalidate_folio = block_invalidate_folio, 240 240 .readpage = udf_readpage, 241 241 .readahead = udf_readahead,
+1 -1
fs/ufs/inode.c
··· 526 526 } 527 527 528 528 const struct address_space_operations ufs_aops = { 529 - .set_page_dirty = __set_page_dirty_buffers, 529 + .dirty_folio = block_dirty_folio, 530 530 .invalidate_folio = block_invalidate_folio, 531 531 .readpage = ufs_readpage, 532 532 .writepage = ufs_writepage,
+1 -1
include/linux/buffer_head.h
··· 397 397 return __bread_gfp(bdev, block, size, __GFP_MOVABLE); 398 398 } 399 399 400 - extern int __set_page_dirty_buffers(struct page *page); 400 + bool block_dirty_folio(struct address_space *mapping, struct folio *folio); 401 401 402 402 #else /* CONFIG_BLOCK */ 403 403
+2 -2
mm/filemap.c
··· 72 72 * Lock ordering: 73 73 * 74 74 * ->i_mmap_rwsem (truncate_pagecache) 75 - * ->private_lock (__free_pte->__set_page_dirty_buffers) 75 + * ->private_lock (__free_pte->block_dirty_folio) 76 76 * ->swap_lock (exclusive_swap_page, others) 77 77 * ->i_pages lock 78 78 * ··· 115 115 * ->memcg->move_lock (page_remove_rmap->lock_page_memcg) 116 116 * bdi.wb->list_lock (zap_pte_range->set_page_dirty) 117 117 * ->inode->i_lock (zap_pte_range->set_page_dirty) 118 - * ->private_lock (zap_pte_range->__set_page_dirty_buffers) 118 + * ->private_lock (zap_pte_range->block_dirty_folio) 119 119 * 120 120 * ->i_mmap_rwsem 121 121 * ->tasklist_lock (memory_failure, collect_procs_ao)
+1 -1
mm/page-writeback.c
··· 2530 2530 * This is also sometimes used by filesystems which use buffer_heads when 2531 2531 * a single buffer is being dirtied: we want to set the folio dirty in 2532 2532 * that case, but not all the buffers. This is a "bottom-up" dirtying, 2533 - * whereas __set_page_dirty_buffers() is a "top-down" dirtying. 2533 + * whereas block_dirty_folio() is a "top-down" dirtying. 2534 2534 * 2535 2535 * The caller must ensure this doesn't race with truncation. Most will 2536 2536 * simply hold the folio lock, but e.g. zap_pte_range() calls with the
+2 -2
mm/rmap.c
··· 31 31 * mm->page_table_lock or pte_lock 32 32 * swap_lock (in swap_duplicate, swap_info_get) 33 33 * mmlist_lock (in mmput, drain_mmlist and others) 34 - * mapping->private_lock (in __set_page_dirty_buffers) 35 - * lock_page_memcg move_lock (in __set_page_dirty_buffers) 34 + * mapping->private_lock (in block_dirty_folio) 35 + * folio_memcg_lock move_lock (in block_dirty_folio) 36 36 * i_pages lock (widely used) 37 37 * lruvec->lru_lock (in folio_lruvec_lock_irq) 38 38 * inode->i_lock (in set_page_dirty's __mark_inode_dirty)