
mm/swap: convert add_to_swap_cache() to take a folio

With all callers using folios, we can convert add_to_swap_cache() to take
a folio and use it throughout.

Link: https://lkml.kernel.org/r/20220902194653.1739778-13-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Matthew Wilcox (Oracle), committed by Andrew Morton
a4c366f0 a0d3374b
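
The shape of the change, before the diffs: callers stop flattening the folio to its head page, and the implementation moves to folio helpers throughout. A minimal before/after sketch of the calling convention, using the add_to_swap() call site from mm/swap_state.c below (excerpted statements, not a complete function):

	/* Before: the caller flattened the folio to its head page. */
	err = add_to_swap_cache(&folio->page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);

	/*
	 * After: the folio is passed directly; internally, compound_order()
	 * and thp_nr_pages() become folio_order() and folio_nr_pages(), and
	 * the VM_BUG_ON_PAGE() assertions become VM_BUG_ON_FOLIO().
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);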

3 files changed, 20 insertions(+), 20 deletions(-)
mm/shmem.c (+1, -1)

@@ -1406,7 +1406,7 @@
 	if (list_empty(&info->swaplist))
 		list_add(&info->swaplist, &shmem_swaplist);
 
-	if (add_to_swap_cache(&folio->page, swap,
+	if (add_to_swap_cache(folio, swap,
 			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
 			NULL) == 0) {
 		spin_lock_irq(&info->lock);
mm/swap.h (+2, -2)

@@ -32,7 +32,7 @@
 void show_swap_cache_info(void);
 bool add_to_swap(struct folio *folio);
 void *get_shadow_from_swap_cache(swp_entry_t entry);
-int add_to_swap_cache(struct page *page, swp_entry_t entry,
+int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 		      gfp_t gfp, void **shadowp);
 void __delete_from_swap_cache(struct folio *folio,
 			      swp_entry_t entry, void *shadow);
@@ -122,7 +122,7 @@
 	return NULL;
 }
 
-static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
+static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 					gfp_t gfp_mask, void **shadowp)
 {
 	return -1;
mm/swap_state.c (+17, -17)

@@ -85,21 +85,21 @@
  * add_to_swap_cache resembles filemap_add_folio on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-int add_to_swap_cache(struct page *page, swp_entry_t entry,
+int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 			gfp_t gfp, void **shadowp)
 {
 	struct address_space *address_space = swap_address_space(entry);
 	pgoff_t idx = swp_offset(entry);
-	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
-	unsigned long i, nr = thp_nr_pages(page);
+	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
+	unsigned long i, nr = folio_nr_pages(folio);
 	void *old;
 
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(PageSwapCache(page), page);
-	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
+	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
 
-	page_ref_add(page, nr);
-	SetPageSwapCache(page);
+	folio_ref_add(folio, nr);
+	folio_set_swapcache(folio);
 
 	do {
 		xas_lock_irq(&xas);
@@ -107,19 +107,19 @@
 		if (xas_error(&xas))
 			goto unlock;
 		for (i = 0; i < nr; i++) {
-			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
 			old = xas_load(&xas);
 			if (xa_is_value(old)) {
 				if (shadowp)
 					*shadowp = old;
 			}
-			set_page_private(page + i, entry.val + i);
-			xas_store(&xas, page);
+			set_page_private(folio_page(folio, i), entry.val + i);
+			xas_store(&xas, folio);
 			xas_next(&xas);
 		}
 		address_space->nrpages += nr;
-		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
-		__mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
+		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp));
@@ -127,7 +127,7 @@
 	if (!xas_error(&xas))
 		return 0;
 
-	ClearPageSwapCache(page);
-	page_ref_sub(page, nr);
+	folio_clear_swapcache(folio);
+	folio_ref_sub(folio, nr);
 	return xas_error(&xas);
 }
@@ -194,7 +194,7 @@
 	/*
 	 * Add it to the swap cache.
 	 */
-	err = add_to_swap_cache(&folio->page, entry,
+	err = add_to_swap_cache(folio, entry,
 			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
 	if (err)
 		/*
@@ -484,7 +484,7 @@
 		goto fail_unlock;
 
 	/* May fail (-ENOMEM) if XArray node allocation failed. */
-	if (add_to_swap_cache(&folio->page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
 		goto fail_unlock;
 
 	mem_cgroup_swapin_uncharge_swap(entry);
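
Worth noting in the converted body: swap entries remain per-page state, so the store loop still tags each constituent page of a large folio individually via set_page_private(folio_page(folio, i), entry.val + i); only the flag, refcount, and statistics updates move to folio granularity.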