Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm/swap: convert __read_swap_cache_async() to use a folio

Remove a few hidden (and one visible) calls to compound_head().

Link: https://lkml.kernel.org/r/20220902194653.1739778-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
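
The compound_head() calls removed by this patch are mostly invisible at the call sites: page-based helpers such as find_get_page(), __SetPageLocked() and put_page() resolve a possible tail page to its head page internally on every call, whereas a struct folio by definition never points at a tail page, so the folio variants can skip that step. The following is a minimal, self-contained userspace model of that indirection; struct page, struct folio and the helpers below are drastically simplified stand-ins for illustration, not the kernel definitions:

#include <stdio.h>

/*
 * Minimal userspace model of the kernel's compound_head() indirection.
 * Everything here is a simplified stand-in, not real kernel code.
 */
struct page {
	unsigned long compound_head;	/* tail page: head pointer | 1 */
	unsigned long flags;
};

struct folio {
	struct page page;	/* a folio never refers to a tail page */
};

/* Page-based helpers must resolve a possible tail page on every call. */
static struct page *compound_head(struct page *page)
{
	unsigned long head = page->compound_head;

	if (head & 1)
		return (struct page *)(head - 1);
	return page;
}

/* Modeled on a page flag test: note the hidden compound_head() call. */
static int PageSwapBacked(struct page *page)
{
	return compound_head(page)->flags != 0;
}

/* The folio variant can use the pointer directly. */
static int folio_test_swapbacked(struct folio *folio)
{
	return folio->page.flags != 0;
}

int main(void)
{
	struct page pages[2];
	struct folio *folio = (struct folio *)&pages[0];

	pages[0].compound_head = 0;	/* head page */
	pages[0].flags = 1;		/* "swap backed" */
	pages[1].compound_head = (unsigned long)&pages[0] | 1;	/* tail */
	pages[1].flags = 0;

	printf("page helper (via tail page): %d\n", PageSwapBacked(&pages[1]));
	printf("folio helper (no extra hop): %d\n", folio_test_swapbacked(folio));
	return 0;
}

The real kernel uses the same pointer-tagging trick: a tail page stores the address of its head page with bit 0 set in page->compound_head, and every PF_NO_TAIL-style page operation pays that check.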

Authored by Matthew Wilcox (Oracle), committed by Andrew Morton · a0d3374b bdb0ed54

+19 -19
mm/swap_state.c
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -411,7 +411,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			bool *new_page_allocated)
 {
 	struct swap_info_struct *si;
-	struct page *page;
+	struct folio *folio;
 	void *shadow = NULL;
 
 	*new_page_allocated = false;
@@ -426,11 +426,11 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		si = get_swap_device(entry);
 		if (!si)
 			return NULL;
-		page = find_get_page(swap_address_space(entry),
-				swp_offset(entry));
+		folio = filemap_get_folio(swap_address_space(entry),
+				swp_offset(entry));
 		put_swap_device(si);
-		if (page)
-			return page;
+		if (folio)
+			return folio_file_page(folio, swp_offset(entry));
 
 		/*
 		 * Just skip read ahead for unused swap slot.
@@ -448,8 +448,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
 		 * cause any racers to loop around until we add it to cache.
 		 */
-		page = alloc_page_vma(gfp_mask, vma, addr);
-		if (!page)
+		folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
+		if (!folio)
 			return NULL;
 
 		/*
@@ -459,7 +459,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		if (!err)
 			break;
 
-		put_page(page);
+		folio_put(folio);
 		if (err != -EEXIST)
 			return NULL;
 
@@ -477,30 +477,30 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	 * The swap entry is ours to swap in. Prepare the new page.
 	 */
 
-	__SetPageLocked(page);
-	__SetPageSwapBacked(page);
+	__folio_set_locked(folio);
+	__folio_set_swapbacked(folio);
 
-	if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
+	if (mem_cgroup_swapin_charge_page(&folio->page, NULL, gfp_mask, entry))
 		goto fail_unlock;
 
 	/* May fail (-ENOMEM) if XArray node allocation failed. */
-	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+	if (add_to_swap_cache(&folio->page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
 		goto fail_unlock;
 
 	mem_cgroup_swapin_uncharge_swap(entry);
 
 	if (shadow)
-		workingset_refault(page_folio(page), shadow);
+		workingset_refault(folio, shadow);
 
-	/* Caller will initiate read into locked page */
-	lru_cache_add(page);
+	/* Caller will initiate read into locked folio */
+	folio_add_lru(folio);
 	*new_page_allocated = true;
-	return page;
+	return &folio->page;
 
 fail_unlock:
-	put_swap_page(page, entry);
-	unlock_page(page);
-	put_page(page);
+	put_swap_page(&folio->page, entry);
+	folio_unlock(folio);
+	folio_put(folio);
 	return NULL;
 }
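
For context on the *new_page_allocated out-parameter: the usual caller pattern looks roughly like the condensed sketch below, paraphrased from read_swap_cache_async() in the same file as it stood around this series (treat the exact argument list as approximate, not authoritative):

/* Condensed sketch, not a verbatim copy of mm/swap_state.c. */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr,
		bool do_poll, struct swap_iocb **plug)
{
	bool page_allocated;
	struct page *page = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_allocated);

	/*
	 * Only the task that actually added the page to the swap cache
	 * starts the read; racing tasks get the cached page back and
	 * must not issue a second I/O.
	 */
	if (page_allocated)
		swap_readpage(page, do_poll, plug);
	return page;
}

Note that __read_swap_cache_async() still returns struct page * (via folio_file_page() and &folio->page) rather than struct folio *: the conversion is deliberately incremental, keeping the function's external interface stable so its callers can be converted separately.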