Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: migrate: convert migrate_misplaced_page() to migrate_misplaced_folio()

At present, NUMA balancing only supports base pages and PMD-mapped THP, but
we will expand it to support migration of large folios/pte-mapped THP in the
future. It is better to make migrate_misplaced_page() take a folio instead
of a page, and to rename it to migrate_misplaced_folio(). This is a
preparatory change that also removes several compound_head() calls.

Link: https://lkml.kernel.org/r/20230913095131.2426871-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Kefeng Wang and committed by
Andrew Morton
73eab3ca 2ac9e99f

+25 -22
+2 -2
include/linux/migrate.h
··· 142 142 } 143 143 144 144 #ifdef CONFIG_NUMA_BALANCING 145 - int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, 145 + int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, 146 146 int node); 147 147 #else 148 - static inline int migrate_misplaced_page(struct page *page, 148 + static inline int migrate_misplaced_folio(struct folio *folio, 149 149 struct vm_area_struct *vma, int node) 150 150 { 151 151 return -EAGAIN; /* can't migrate now */
+1 -1
mm/huge_memory.c
··· 1567 1567 spin_unlock(vmf->ptl); 1568 1568 writable = false; 1569 1569 1570 - migrated = migrate_misplaced_page(page, vma, target_nid); 1570 + migrated = migrate_misplaced_folio(page_folio(page), vma, target_nid); 1571 1571 if (migrated) { 1572 1572 flags |= TNF_MIGRATED; 1573 1573 page_nid = target_nid;
+1 -1
mm/memory.c
··· 4812 4812 writable = false; 4813 4813 4814 4814 /* Migrate to the requested node */ 4815 - if (migrate_misplaced_page(page, vma, target_nid)) { 4815 + if (migrate_misplaced_folio(page_folio(page), vma, target_nid)) { 4816 4816 page_nid = target_nid; 4817 4817 flags |= TNF_MIGRATED; 4818 4818 } else {
+21 -18
mm/migrate.c
··· 2516 2516 } 2517 2517 2518 2518 /* 2519 - * Attempt to migrate a misplaced page to the specified destination 2519 + * Attempt to migrate a misplaced folio to the specified destination 2520 2520 * node. Caller is expected to have an elevated reference count on 2521 - * the page that will be dropped by this function before returning. 2521 + * the folio that will be dropped by this function before returning. 2522 2522 */ 2523 - int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, 2524 - int node) 2523 + int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, 2524 + int node) 2525 2525 { 2526 2526 pg_data_t *pgdat = NODE_DATA(node); 2527 2527 int isolated; 2528 2528 int nr_remaining; 2529 2529 unsigned int nr_succeeded; 2530 2530 LIST_HEAD(migratepages); 2531 - int nr_pages = thp_nr_pages(page); 2531 + int nr_pages = folio_nr_pages(folio); 2532 2532 2533 2533 /* 2534 - * Don't migrate file pages that are mapped in multiple processes 2534 + * Don't migrate file folios that are mapped in multiple processes 2535 2535 * with execute permissions as they are probably shared libraries. 2536 + * To check if the folio is shared, ideally we want to make sure 2537 + * every page is mapped to the same process. Doing that is very 2538 + * expensive, so check the estimated mapcount of the folio instead. 2536 2539 */ 2537 - if (page_mapcount(page) != 1 && page_is_file_lru(page) && 2540 + if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) && 2538 2541 (vma->vm_flags & VM_EXEC)) 2539 2542 goto out; 2540 2543 2541 2544 /* 2542 - * Also do not migrate dirty pages as not all filesystems can move 2543 - * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles. 2545 + * Also do not migrate dirty folios as not all filesystems can move 2546 + * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles. 
2544 2547 */ 2545 - if (page_is_file_lru(page) && PageDirty(page)) 2548 + if (folio_is_file_lru(folio) && folio_test_dirty(folio)) 2546 2549 goto out; 2547 2550 2548 - isolated = numamigrate_isolate_folio(pgdat, page_folio(page)); 2551 + isolated = numamigrate_isolate_folio(pgdat, folio); 2549 2552 if (!isolated) 2550 2553 goto out; 2551 2554 2552 - list_add(&page->lru, &migratepages); 2555 + list_add(&folio->lru, &migratepages); 2553 2556 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio, 2554 2557 NULL, node, MIGRATE_ASYNC, 2555 2558 MR_NUMA_MISPLACED, &nr_succeeded); 2556 2559 if (nr_remaining) { 2557 2560 if (!list_empty(&migratepages)) { 2558 - list_del(&page->lru); 2559 - mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 2560 - page_is_file_lru(page), -nr_pages); 2561 - putback_lru_page(page); 2561 + list_del(&folio->lru); 2562 + node_stat_mod_folio(folio, NR_ISOLATED_ANON + 2563 + folio_is_file_lru(folio), -nr_pages); 2564 + folio_putback_lru(folio); 2562 2565 } 2563 2566 isolated = 0; 2564 2567 } 2565 2568 if (nr_succeeded) { 2566 2569 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded); 2567 - if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node)) 2570 + if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node)) 2568 2571 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS, 2569 2572 nr_succeeded); 2570 2573 } ··· 2575 2572 return isolated; 2576 2573 2577 2574 out: 2578 - put_page(page); 2575 + folio_put(folio); 2579 2576 return 0; 2580 2577 } 2581 2578 #endif /* CONFIG_NUMA_BALANCING */