Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/swap: convert delete_from_swap_cache() to take a folio

All but one caller already has a folio, so convert it to use a folio.

Link: https://lkml.kernel.org/r/20220617175020.717127-22-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Matthew Wilcox (Oracle) and committed by
akpm
75fa68a5 b98c359f

+16 -15
+3 -2
mm/memory-failure.c
@@ -1007,13 +1007,14 @@
 
 static int me_swapcache_clean(struct page_state *ps, struct page *p)
 {
+	struct folio *folio = page_folio(p);
 	int ret;
 
-	delete_from_swap_cache(p);
+	delete_from_swap_cache(folio);
 
 	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
-	unlock_page(p);
+	folio_unlock(folio);
 
 	if (has_extra_refcount(ps, p, false))
 		ret = MF_FAILED;
+2 -2
mm/shmem.c
@@ -1691,7 +1691,7 @@
 		return;
 
 	folio_wait_writeback(folio);
-	delete_from_swap_cache(&folio->page);
+	delete_from_swap_cache(folio);
 	spin_lock_irq(&info->lock);
 	/*
 	 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks won't
@@ -1789,7 +1789,7 @@
 	if (sgp == SGP_WRITE)
 		folio_mark_accessed(folio);
 
-	delete_from_swap_cache(&folio->page);
+	delete_from_swap_cache(folio);
 	folio_mark_dirty(folio);
 	swap_free(swap);
 
+2 -2
mm/swap.h
@@ -38,7 +38,7 @@
 			     gfp_t gfp, void **shadowp);
 void __delete_from_swap_cache(struct page *page,
 			      swp_entry_t entry, void *shadow);
-void delete_from_swap_cache(struct page *page);
+void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
 				  unsigned long end);
 void free_swap_cache(struct page *page);
@@ -140,7 +140,7 @@
 {
 }
 
-static inline void delete_from_swap_cache(struct page *page)
+static inline void delete_from_swap_cache(struct folio *folio)
 {
 }
 
+8 -8
mm/swap_state.c
@@ -222,22 +222,22 @@
 }
 
 /*
- * This must be called only on pages that have
+ * This must be called only on folios that have
  * been verified to be in the swap cache and locked.
- * It will never put the page into the free list,
- * the caller has a reference on the page.
+ * It will never put the folio into the free list,
+ * the caller has a reference on the folio.
  */
-void delete_from_swap_cache(struct page *page)
+void delete_from_swap_cache(struct folio *folio)
 {
-	swp_entry_t entry = { .val = page_private(page) };
+	swp_entry_t entry = folio_swap_entry(folio);
 	struct address_space *address_space = swap_address_space(entry);
 
 	xa_lock_irq(&address_space->i_pages);
-	__delete_from_swap_cache(page, entry, NULL);
+	__delete_from_swap_cache(&folio->page, entry, NULL);
 	xa_unlock_irq(&address_space->i_pages);
 
-	put_swap_page(page, entry);
-	page_ref_sub(page, thp_nr_pages(page));
+	put_swap_page(&folio->page, entry);
+	folio_ref_sub(folio, folio_nr_pages(folio));
 }
 
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
+1 -1
mm/swapfile.c
@@ -1617,7 +1617,7 @@
 	if (pm_suspended_storage())
 		return 0;
 
-	delete_from_swap_cache(&folio->page);
+	delete_from_swap_cache(folio);
 	folio_set_dirty(folio);
 	return 1;
 }