Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: stop passing a writeback_control structure to shmem_writeout

shmem_writeout only needs the swap_iocb cookie and the split folio list.
Pass those explicitly and remove the now unused list member from struct
writeback_control.

Link: https://lkml.kernel.org/r/20250610054959.2057526-3-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Christoph Hellwig; committed by Andrew Morton.
44b1b073 86c4a946

+26 -31
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -317,7 +317,7 @@
 		if (folio_mapped(folio))
 			folio_redirty_for_writepage(&wbc, folio);
 		else
-			error = shmem_writeout(folio, &wbc);
+			error = shmem_writeout(folio, NULL, NULL);
 	}
 }
+1 -8
drivers/gpu/drm/ttm/ttm_backup.c
@@ -112,15 +112,8 @@
 
 	if (writeback && !folio_mapped(to_folio) &&
 	    folio_clear_dirty_for_io(to_folio)) {
-		struct writeback_control wbc = {
-			.sync_mode = WB_SYNC_NONE,
-			.nr_to_write = SWAP_CLUSTER_MAX,
-			.range_start = 0,
-			.range_end = LLONG_MAX,
-			.for_reclaim = 1,
-		};
 		folio_set_reclaim(to_folio);
-		ret = shmem_writeout(to_folio, &wbc);
+		ret = shmem_writeout(to_folio, NULL, NULL);
 		if (!folio_test_writeback(to_folio))
 			folio_clear_reclaim(to_folio);
 		/*
+4 -1
include/linux/shmem_fs.h
@@ -11,6 +11,8 @@
 #include <linux/fs_parser.h>
 #include <linux/userfaultfd_k.h>
 
+struct swap_iocb;
+
 /* inode in-kernel data */
 
 #ifdef CONFIG_TMPFS_QUOTA
@@ -109,7 +107,8 @@
 void shmem_unlock_mapping(struct address_space *mapping);
 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 					 pgoff_t index, gfp_t gfp_mask);
-int shmem_writeout(struct folio *folio, struct writeback_control *wbc);
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+		struct list_head *folio_list);
 void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 int shmem_unuse(unsigned int type);
-3
include/linux/writeback.h
@@ -79,9 +79,6 @@
 	 */
 	struct swap_iocb **swap_plug;
 
-	/* Target list for splitting a large folio */
-	struct list_head *list;
-
 	/* internal fields used by the ->writepages implementation: */
 	struct folio_batch fbatch;
 	pgoff_t index;
+15 -11
mm/shmem.c
@@ -1540,11 +1540,13 @@
 /**
  * shmem_writeout - Write the folio to swap
  * @folio: The folio to write
- * @wbc: How writeback is to be done
+ * @plug: swap plug
+ * @folio_list: list to put back folios on split
  *
  * Move the folio from the page cache to the swap cache.
  */
-int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+		struct list_head *folio_list)
 {
 	struct address_space *mapping = folio->mapping;
 	struct inode *inode = mapping->host;
@@ -1555,9 +1553,6 @@
 	pgoff_t index;
 	int nr_pages;
 	bool split = false;
-
-	if (WARN_ON_ONCE(!wbc->for_reclaim))
-		goto redirty;
 
 	if ((info->flags & VM_LOCKED) || sbinfo->noswap)
 		goto redirty;
@@ -1582,7 +1583,7 @@
 try_split:
 		/* Ensure the subpages are still dirty */
 		folio_test_set_dirty(folio);
-		if (split_folio_to_list(folio, wbc->list))
+		if (split_folio_to_list(folio, folio_list))
 			goto redirty;
 		folio_clear_dirty(folio);
 	}
@@ -1635,13 +1636,21 @@
 		list_add(&info->swaplist, &shmem_swaplist);
 
 	if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
+		struct writeback_control wbc = {
+			.sync_mode = WB_SYNC_NONE,
+			.nr_to_write = SWAP_CLUSTER_MAX,
+			.range_start = 0,
+			.range_end = LLONG_MAX,
+			.for_reclaim = 1,
+			.swap_plug = plug,
+		};
 		shmem_recalc_inode(inode, 0, nr_pages);
 		swap_shmem_alloc(folio->swap, nr_pages);
 		shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap));
 
 		mutex_unlock(&shmem_swaplist_mutex);
 		BUG_ON(folio_mapped(folio));
-		return swap_writeout(folio, wbc);
+		return swap_writeout(folio, &wbc);
 	}
 	if (!info->swapped)
 		list_del_init(&info->swaplist);
@@ -1658,10 +1651,7 @@
 	goto try_split;
 redirty:
 	folio_mark_dirty(folio);
-	if (wbc->for_reclaim)
-		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
-	folio_unlock(folio);
-	return 0;
+	return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
 }
 EXPORT_SYMBOL_GPL(shmem_writeout);
 
+5 -7
mm/vmscan.c
@@ -669,15 +669,13 @@
 
 	/*
 	 * The large shmem folio can be split if CONFIG_THP_SWAP is not enabled
-	 * or we failed to allocate contiguous swap entries.
+	 * or we failed to allocate contiguous swap entries, in which case
+	 * the split out folios get added back to folio_list.
 	 */
-	if (shmem_mapping(mapping)) {
-		if (folio_test_large(folio))
-			wbc.list = folio_list;
-		res = shmem_writeout(folio, &wbc);
-	} else {
+	if (shmem_mapping(mapping))
+		res = shmem_writeout(folio, plug, folio_list);
+	else
 		res = swap_writeout(folio, &wbc);
-	}
 
 	if (res < 0)
 		handle_write_error(mapping, folio, res);