Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: remove PageSwapCache

This flag is now only used on folios, so we can remove all the page
accessors and reword the comments that refer to them.

Link: https://lkml.kernel.org/r/20240821193445.2294269-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Matthew Wilcox (Oracle) and committed by Andrew Morton.
32f51ead 6f394ee9

+22 -24
+1 -1
include/linux/mm_types.h
··· 109 109 /** 110 110 * @private: Mapping-private opaque data. 111 111 * Usually used for buffer_heads if PagePrivate. 112 - * Used for swp_entry_t if PageSwapCache. 112 + * Used for swp_entry_t if swapcache flag set. 113 113 * Indicates order in the buddy system if PageBuddy. 114 114 */ 115 115 unsigned long private;
+3 -8
include/linux/page-flags.h
··· 574 574 test_bit(PG_swapcache, const_folio_flags(folio, 0)); 575 575 } 576 576 577 - static __always_inline bool PageSwapCache(const struct page *page) 578 - { 579 - return folio_test_swapcache(page_folio(page)); 580 - } 581 - 582 - SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) 583 - CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) 577 + FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE) 578 + FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE) 584 579 #else 585 - PAGEFLAG_FALSE(SwapCache, swapcache) 580 + FOLIO_FLAG_FALSE(swapcache) 586 581 #endif 587 582 588 583 PAGEFLAG(Unevictable, unevictable, PF_HEAD)
+10 -9
mm/ksm.c
··· 909 909 */ 910 910 while (!folio_try_get(folio)) { 911 911 /* 912 - * Another check for page->mapping != expected_mapping would 913 - * work here too. We have chosen the !PageSwapCache test to 914 - * optimize the common case, when the page is or is about to 915 - * be freed: PageSwapCache is cleared (under spin_lock_irq) 916 - * in the ref_freeze section of __remove_mapping(); but Anon 917 - * folio->mapping reset to NULL later, in free_pages_prepare(). 912 + * Another check for folio->mapping != expected_mapping 913 + * would work here too. We have chosen to test the 914 + * swapcache flag to optimize the common case, when the 915 + * folio is or is about to be freed: the swapcache flag 916 + * is cleared (under spin_lock_irq) in the ref_freeze 917 + * section of __remove_mapping(); but anon folio->mapping 918 + * is reset to NULL later, in free_pages_prepare(). 918 919 */ 919 920 if (!folio_test_swapcache(folio)) 920 921 goto stale; ··· 946 945 947 946 stale: 948 947 /* 949 - * We come here from above when page->mapping or !PageSwapCache 948 + * We come here from above when folio->mapping or the swapcache flag 950 949 * suggests that the node is stale; but it might be under migration. 951 950 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(), 952 951 * before checking whether node->kpfn has been changed. ··· 1453 1452 goto out; 1454 1453 1455 1454 /* 1456 - * We need the page lock to read a stable PageSwapCache in 1455 + * We need the folio lock to read a stable swapcache flag in 1457 1456 * write_protect_page(). 
We use trylock_page() instead of 1458 1457 * lock_page() because we don't want to wait here - we 1459 1458 * prefer to continue scanning and merging different pages, ··· 3124 3123 * newfolio->mapping was set in advance; now we need smp_wmb() 3125 3124 * to make sure that the new stable_node->kpfn is visible 3126 3125 * to ksm_get_folio() before it can see that folio->mapping 3127 - * has gone stale (or that folio_test_swapcache has been cleared). 3126 + * has gone stale (or that the swapcache flag has been cleared). 3128 3127 */ 3129 3128 smp_wmb(); 3130 3129 folio_set_stable_node(folio, NULL);
+2 -1
mm/migrate.c
··· 639 639 folio_migrate_ksm(newfolio, folio); 640 640 /* 641 641 * Please do not reorder this without considering how mm/ksm.c's 642 - * ksm_get_folio() depends upon ksm_migrate_page() and PageSwapCache(). 642 + * ksm_get_folio() depends upon ksm_migrate_page() and the 643 + * swapcache flag. 643 644 */ 644 645 if (folio_test_swapcache(folio)) 645 646 folio_clear_swapcache(folio);
+6 -5
mm/shmem.c
··· 502 502 * Sometimes, before we decide whether to proceed or to fail, we must check 503 503 * that an entry was not already brought back from swap by a racing thread. 504 504 * 505 - * Checking page is not enough: by the time a SwapCache page is locked, it 506 - * might be reused, and again be SwapCache, using the same swap as before. 505 + * Checking folio is not enough: by the time a swapcache folio is locked, it 506 + * might be reused, and again be swapcache, using the same swap as before. 507 507 */ 508 508 static bool shmem_confirm_swap(struct address_space *mapping, 509 509 pgoff_t index, swp_entry_t swap) ··· 1965 1965 1966 1966 if (unlikely(error)) { 1967 1967 /* 1968 - * Is this possible? I think not, now that our callers check 1969 - * both PageSwapCache and page_private after getting page lock; 1970 - * but be defensive. Reverse old to newpage for clear and free. 1968 + * Is this possible? I think not, now that our callers 1969 + * check both the swapcache flag and folio->private 1970 + * after getting the folio lock; but be defensive. 1971 + * Reverse old to newpage for clear and free. 1971 1972 */ 1972 1973 old = new; 1973 1974 } else {