Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm/swap: convert put_swap_page() to put_swap_folio()

With all callers now using a folio, we can convert this function.

Link: https://lkml.kernel.org/r/20220902194653.1739778-14-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Matthew Wilcox (Oracle), committed by Andrew Morton
Commit 4081f744 (parent a4c366f0)
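The conversion is purely a type change at the call sites: every caller already has a folio, so instead of passing &folio->page it now passes the folio itself. A minimal sketch of a caller before and after (hypothetical helper name, for illustration only; not part of this patch):

/*
 * Hypothetical caller, for illustration only -- not part of this patch.
 * Assumes the declarations from <linux/swap.h> shown in the diff below.
 */
static void example_drop_swap_ref(struct folio *folio, swp_entry_t entry)
{
	/* Before this patch: put_swap_page(&folio->page, entry); */
	put_swap_folio(folio, entry);
}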

6 files changed, 10 insertions(+), 10 deletions(-)
include/linux/swap.h (+2 -2)
@@ -491,7 +491,7 @@
 extern void si_swapinfo(struct sysinfo *);
 swp_entry_t folio_alloc_swap(struct folio *folio);
 bool folio_free_swap(struct folio *folio);
-extern void put_swap_page(struct page *page, swp_entry_t entry);
+void put_swap_folio(struct folio *folio, swp_entry_t entry);
 extern swp_entry_t get_swap_page_of_type(int);
 extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
@@ -576,7 +576,7 @@
 {
 }

-static inline void put_swap_page(struct page *page, swp_entry_t swp)
+static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
 {
 }

mm/shmem.c (+1 -1)
@@ -1424,7 +1424,7 @@
 	}

 	mutex_unlock(&shmem_swaplist_mutex);
-	put_swap_page(&folio->page, swap);
+	put_swap_folio(folio, swap);
 redirty:
 	folio_mark_dirty(folio);
 	if (wbc->for_reclaim)
mm/swap_slots.c (+1 -1)
@@ -343,7 +343,7 @@
 	get_swap_pages(1, &entry, 1);
 out:
 	if (mem_cgroup_try_charge_swap(folio, entry)) {
-		put_swap_page(&folio->page, entry);
+		put_swap_folio(folio, entry);
 		entry.val = 0;
 	}
 	return entry;
mm/swap_state.c (+3 -3)
@@ -218,7 +218,7 @@
 	return true;

 fail:
-	put_swap_page(&folio->page, entry);
+	put_swap_folio(folio, entry);
 	return false;
 }

@@ -237,7 +237,7 @@
 	__delete_from_swap_cache(folio, entry, NULL);
 	xa_unlock_irq(&address_space->i_pages);

-	put_swap_page(&folio->page, entry);
+	put_swap_folio(folio, entry);
 	folio_ref_sub(folio, folio_nr_pages(folio));
 }

@@ -498,7 +498,7 @@
 	return &folio->page;

 fail_unlock:
-	put_swap_page(&folio->page, entry);
+	put_swap_folio(folio, entry);
 	folio_unlock(folio);
 	folio_put(folio);
 	return NULL;
mm/swapfile.c (+2 -2)
@@ -1332,7 +1332,7 @@
 /*
  * Called after dropping swapcache to decrease refcnt to swap entries.
  */
-void put_swap_page(struct page *page, swp_entry_t entry)
+void put_swap_folio(struct folio *folio, swp_entry_t entry)
 {
 	unsigned long offset = swp_offset(entry);
 	unsigned long idx = offset / SWAPFILE_CLUSTER;
@@ -1341,7 +1341,7 @@
 	unsigned char *map;
 	unsigned int i, free_entries = 0;
 	unsigned char val;
-	int size = swap_entry_size(thp_nr_pages(page));
+	int size = swap_entry_size(folio_nr_pages(folio));

 	si = _swap_info_get(entry);
 	if (!si)
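The only change inside the function body is the size calculation, where folio_nr_pages(folio) replaces thp_nr_pages(page). For the head page of a folio both helpers report the same number of base pages, so swap_entry_size() sees the same value; a small illustrative sketch of that equivalence (hypothetical helper name, not kernel code):

/*
 * Illustration only, not part of this patch: for the head page of a
 * folio, thp_nr_pages() and folio_nr_pages() return the same count of
 * base pages (1 for an order-0 folio, 2^order for a large folio), so
 * the value passed to swap_entry_size() is unchanged by this conversion.
 */
static bool example_nr_pages_unchanged(struct folio *folio)
{
	return thp_nr_pages(&folio->page) == folio_nr_pages(folio);
}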
mm/vmscan.c (+1 -1)
@@ -1352,7 +1352,7 @@
 		mem_cgroup_swapout(folio, swap);
 		__delete_from_swap_cache(folio, swap, shadow);
 		xa_unlock_irq(&mapping->i_pages);
-		put_swap_page(&folio->page, swap);
+		put_swap_folio(folio, swap);
 	} else {
 		void (*free_folio)(struct folio *);
