
mm/swap: convert __delete_from_swap_cache() to a folio

All callers now have a folio, so convert the entire function to operate
on folios.

Link: https://lkml.kernel.org/r/20220617175020.717127-23-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Matthew Wilcox (Oracle), committed by akpm
ceff9d33 75fa68a5
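
For callers that still hold a bare struct page, the usual bridge to the new signature is page_folio(). A minimal hypothetical sketch (example_delete() and its shape are illustrative only, not part of this patch; __delete_from_swap_cache() expects the caller to hold the xarray lock, as the call sites below show):

/* Hypothetical caller: bridge from a page to the folio-taking
 * signature via page_folio(). Locking mirrors delete_from_swap_cache()
 * in the mm/swap_state.c hunk below.
 */
static void example_delete(struct page *page, swp_entry_t entry)
{
	struct folio *folio = page_folio(page);
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);
}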

3 files changed, 16 insertions(+), 15 deletions(-)
mm/swap.h (+2 -2)
@@ -36,7 +36,7 @@
 void *get_shadow_from_swap_cache(swp_entry_t entry);
 int add_to_swap_cache(struct page *page, swp_entry_t entry,
		      gfp_t gfp, void **shadowp);
-void __delete_from_swap_cache(struct page *page,
+void __delete_from_swap_cache(struct folio *folio,
			      swp_entry_t entry, void *shadow);
 void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
@@ -135,7 +135,7 @@
	return -1;
 }
 
-static inline void __delete_from_swap_cache(struct page *page,
+static inline void __delete_from_swap_cache(struct folio *folio,
		swp_entry_t entry, void *shadow)
 {
 }
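
The two hunks above track the header's CONFIG_SWAP split: the first updates the real declaration, the second the no-op stub compiled when swap is disabled, so the two must change in lockstep or !CONFIG_SWAP builds stop compiling. Schematically (a sketch of the header's structure, assuming the usual #ifdef layout, not a verbatim excerpt):

#ifdef CONFIG_SWAP
void __delete_from_swap_cache(struct folio *folio,
			      swp_entry_t entry, void *shadow);
#else /* CONFIG_SWAP */
static inline void __delete_from_swap_cache(struct folio *folio,
		swp_entry_t entry, void *shadow)
{
}
#endif /* CONFIG_SWAP */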
mm/swap_state.c (+13 -12)
@@ -133,29 +133,30 @@
 }
 
 /*
- * This must be called only on pages that have
+ * This must be called only on folios that have
  * been verified to be in the swap cache.
  */
-void __delete_from_swap_cache(struct page *page,
+void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
 {
	struct address_space *address_space = swap_address_space(entry);
-	int i, nr = thp_nr_pages(page);
+	int i;
+	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);
 
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
-	VM_BUG_ON_PAGE(PageWriteback(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
+	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
 
	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
-		VM_BUG_ON_PAGE(entry != page, entry);
-		set_page_private(page + i, 0);
+		VM_BUG_ON_FOLIO(entry != folio, folio);
+		set_page_private(folio_page(folio, i), 0);
		xas_next(&xas);
	}
-	ClearPageSwapCache(page);
+	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
-	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
-	__mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
+	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
 }
@@ -234,7 +233,7 @@
	struct address_space *address_space = swap_address_space(entry);
 
	xa_lock_irq(&address_space->i_pages);
-	__delete_from_swap_cache(&folio->page, entry, NULL);
+	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);
 
	put_swap_page(&folio->page, entry);
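
Two details in the first hunk are easy to miss: nr widens from int to long because folio_nr_pages() returns long, and the per-subpage private clear moves from raw pointer arithmetic to folio_page(). A minimal sketch of the equivalence, assuming a contiguous folio (which folio_page() handles for the caller):

	/* Old and new subpage walks resolve to the same struct page for
	 * contiguous folios; folio_page(folio, i) keeps the lookup
	 * behind the folio API instead of open-coding page + i.
	 */
	for (i = 0; i < folio_nr_pages(folio); i++) {
		struct page *subpage = folio_page(folio, i);
		/* was: set_page_private(page + i, 0); */
		set_page_private(subpage, 0);
	}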
mm/vmscan.c (+1 -1)
@@ -1329,7 +1329,7 @@
		mem_cgroup_swapout(folio, swap);
		if (reclaimed && !mapping_exiting(mapping))
			shadow = workingset_eviction(folio, target_memcg);
-		__delete_from_swap_cache(&folio->page, swap, shadow);
+		__delete_from_swap_cache(folio, swap, shadow);
		xa_unlock_irq(&mapping->i_pages);
		put_swap_page(&folio->page, swap);
	} else {
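
Note that put_swap_page() on the line after each converted call still takes a struct page, which is why &folio->page survives there; that interface is presumably left for a separate conversion later in the series.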