Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/gup: introduce check_and_migrate_movable_folios()

This helper is the folio equivalent of check_and_migrate_movable_pages().
Therefore, all the rules that apply to check_and_migrate_movable_pages()
also apply to this one. Currently, this helper is only used by
memfd_pin_folios().

This patch also includes changes to rename and convert the internal
functions collect_longterm_unpinnable_pages() and
migrate_longterm_unpinnable_pages() to work on folios. As a result,
check_and_migrate_movable_pages() is now a wrapper around
check_and_migrate_movable_folios().

Link: https://lkml.kernel.org/r/20240624063952.1572359-3-vivek.kasireddy@intel.com
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Dave Airlie <airlied@redhat.com>
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dongwon Kim <dongwon.kim@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Junxiao Chang <junxiao.chang@intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Vivek Kasireddy and committed by
Andrew Morton
53ba78de 6cc04054

+78 -48
+78 -48
mm/gup.c
··· 2441 2441 2442 2442 #ifdef CONFIG_MIGRATION 2443 2443 /* 2444 - * Returns the number of collected pages. Return value is always >= 0. 2444 + * Returns the number of collected folios. Return value is always >= 0. 2445 2445 */ 2446 - static unsigned long collect_longterm_unpinnable_pages( 2447 - struct list_head *movable_page_list, 2448 - unsigned long nr_pages, 2449 - struct page **pages) 2446 + static unsigned long collect_longterm_unpinnable_folios( 2447 + struct list_head *movable_folio_list, 2448 + unsigned long nr_folios, 2449 + struct folio **folios) 2450 2450 { 2451 2451 unsigned long i, collected = 0; 2452 2452 struct folio *prev_folio = NULL; 2453 2453 bool drain_allow = true; 2454 2454 2455 - for (i = 0; i < nr_pages; i++) { 2456 - struct folio *folio = page_folio(pages[i]); 2455 + for (i = 0; i < nr_folios; i++) { 2456 + struct folio *folio = folios[i]; 2457 2457 2458 2458 if (folio == prev_folio) 2459 2459 continue; ··· 2468 2468 continue; 2469 2469 2470 2470 if (folio_test_hugetlb(folio)) { 2471 - isolate_hugetlb(folio, movable_page_list); 2471 + isolate_hugetlb(folio, movable_folio_list); 2472 2472 continue; 2473 2473 } 2474 2474 ··· 2480 2480 if (!folio_isolate_lru(folio)) 2481 2481 continue; 2482 2482 2483 - list_add_tail(&folio->lru, movable_page_list); 2483 + list_add_tail(&folio->lru, movable_folio_list); 2484 2484 node_stat_mod_folio(folio, 2485 2485 NR_ISOLATED_ANON + folio_is_file_lru(folio), 2486 2486 folio_nr_pages(folio)); ··· 2490 2490 } 2491 2491 2492 2492 /* 2493 - * Unpins all pages and migrates device coherent pages and movable_page_list. 2494 - * Returns -EAGAIN if all pages were successfully migrated or -errno for failure 2495 - * (or partial success). 2493 + * Unpins all folios and migrates device coherent folios and movable_folio_list. 2494 + * Returns -EAGAIN if all folios were successfully migrated or -errno for 2495 + * failure (or partial success). 
2496 2496 */ 2497 - static int migrate_longterm_unpinnable_pages( 2498 - struct list_head *movable_page_list, 2499 - unsigned long nr_pages, 2500 - struct page **pages) 2497 + static int migrate_longterm_unpinnable_folios( 2498 + struct list_head *movable_folio_list, 2499 + unsigned long nr_folios, 2500 + struct folio **folios) 2501 2501 { 2502 2502 int ret; 2503 2503 unsigned long i; 2504 2504 2505 - for (i = 0; i < nr_pages; i++) { 2506 - struct folio *folio = page_folio(pages[i]); 2505 + for (i = 0; i < nr_folios; i++) { 2506 + struct folio *folio = folios[i]; 2507 2507 2508 2508 if (folio_is_device_coherent(folio)) { 2509 2509 /* 2510 - * Migration will fail if the page is pinned, so convert 2511 - * the pin on the source page to a normal reference. 2510 + * Migration will fail if the folio is pinned, so 2511 + * convert the pin on the source folio to a normal 2512 + * reference. 2512 2513 */ 2513 - pages[i] = NULL; 2514 + folios[i] = NULL; 2514 2515 folio_get(folio); 2515 2516 gup_put_folio(folio, 1, FOLL_PIN); 2516 2517 ··· 2524 2523 } 2525 2524 2526 2525 /* 2527 - * We can't migrate pages with unexpected references, so drop 2526 + * We can't migrate folios with unexpected references, so drop 2528 2527 * the reference obtained by __get_user_pages_locked(). 2529 - * Migrating pages have been added to movable_page_list after 2528 + * Migrating folios have been added to movable_folio_list after 2530 2529 * calling folio_isolate_lru() which takes a reference so the 2531 - * page won't be freed if it's migrating. 2530 + * folio won't be freed if it's migrating. 
2532 2531 */ 2533 - unpin_user_page(pages[i]); 2534 - pages[i] = NULL; 2532 + unpin_folio(folios[i]); 2533 + folios[i] = NULL; 2535 2534 } 2536 2535 2537 - if (!list_empty(movable_page_list)) { 2536 + if (!list_empty(movable_folio_list)) { 2538 2537 struct migration_target_control mtc = { 2539 2538 .nid = NUMA_NO_NODE, 2540 2539 .gfp_mask = GFP_USER | __GFP_NOWARN, 2541 2540 .reason = MR_LONGTERM_PIN, 2542 2541 }; 2543 2542 2544 - if (migrate_pages(movable_page_list, alloc_migration_target, 2543 + if (migrate_pages(movable_folio_list, alloc_migration_target, 2545 2544 NULL, (unsigned long)&mtc, MIGRATE_SYNC, 2546 2545 MR_LONGTERM_PIN, NULL)) { 2547 2546 ret = -ENOMEM; ··· 2549 2548 } 2550 2549 } 2551 2550 2552 - putback_movable_pages(movable_page_list); 2551 + putback_movable_pages(movable_folio_list); 2553 2552 2554 2553 return -EAGAIN; 2555 2554 2556 2555 err: 2557 - for (i = 0; i < nr_pages; i++) 2558 - if (pages[i]) 2559 - unpin_user_page(pages[i]); 2560 - putback_movable_pages(movable_page_list); 2556 + unpin_folios(folios, nr_folios); 2557 + putback_movable_pages(movable_folio_list); 2561 2558 2562 2559 return ret; 2563 2560 } 2564 2561 2565 2562 /* 2566 - * Check whether all pages are *allowed* to be pinned. Rather confusingly, all 2567 - * pages in the range are required to be pinned via FOLL_PIN, before calling 2568 - * this routine. 2563 + * Check whether all folios are *allowed* to be pinned indefinitely (longterm). 2564 + * Rather confusingly, all folios in the range are required to be pinned via 2565 + * FOLL_PIN, before calling this routine. 2569 2566 * 2570 - * If any pages in the range are not allowed to be pinned, then this routine 2571 - * will migrate those pages away, unpin all the pages in the range and return 2567 + * If any folios in the range are not allowed to be pinned, then this routine 2568 + * will migrate those folios away, unpin all the folios in the range and return 2572 2569 * -EAGAIN. 
The caller should re-pin the entire range with FOLL_PIN and then 2573 2570 * call this routine again. 2574 2571 * 2575 2572 * If an error other than -EAGAIN occurs, this indicates a migration failure. 2576 2573 * The caller should give up, and propagate the error back up the call stack. 2577 2574 * 2578 - * If everything is OK and all pages in the range are allowed to be pinned, then 2579 - * this routine leaves all pages pinned and returns zero for success. 2575 + * If everything is OK and all folios in the range are allowed to be pinned, 2576 + * then this routine leaves all folios pinned and returns zero for success. 2577 + */ 2578 + static long check_and_migrate_movable_folios(unsigned long nr_folios, 2579 + struct folio **folios) 2580 + { 2581 + unsigned long collected; 2582 + LIST_HEAD(movable_folio_list); 2583 + 2584 + collected = collect_longterm_unpinnable_folios(&movable_folio_list, 2585 + nr_folios, folios); 2586 + if (!collected) 2587 + return 0; 2588 + 2589 + return migrate_longterm_unpinnable_folios(&movable_folio_list, 2590 + nr_folios, folios); 2591 + } 2592 + 2593 + /* 2594 + * This routine just converts all the pages in the @pages array to folios and 2595 + * calls check_and_migrate_movable_folios() to do the heavy lifting. 2596 + * 2597 + * Please see the check_and_migrate_movable_folios() documentation for details. 
2580 2598 */ 2581 2599 static long check_and_migrate_movable_pages(unsigned long nr_pages, 2582 2600 struct page **pages) 2583 2601 { 2584 - unsigned long collected; 2585 - LIST_HEAD(movable_page_list); 2602 + struct folio **folios; 2603 + long i, ret; 2586 2604 2587 - collected = collect_longterm_unpinnable_pages(&movable_page_list, 2588 - nr_pages, pages); 2589 - if (!collected) 2590 - return 0; 2605 + folios = kmalloc_array(nr_pages, sizeof(*folios), GFP_KERNEL); 2606 + if (!folios) 2607 + return -ENOMEM; 2591 2608 2592 - return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages, 2593 - pages); 2609 + for (i = 0; i < nr_pages; i++) 2610 + folios[i] = page_folio(pages[i]); 2611 + 2612 + ret = check_and_migrate_movable_folios(nr_pages, folios); 2613 + 2614 + kfree(folios); 2615 + return ret; 2594 2616 } 2595 2617 #else 2596 2618 static long check_and_migrate_movable_pages(unsigned long nr_pages, 2597 2619 struct page **pages) 2620 + { 2621 + return 0; 2622 + } 2623 + 2624 + static long check_and_migrate_movable_folios(unsigned long nr_folios, 2625 + struct folio **folios) 2598 2626 { 2599 2627 return 0; 2600 2628 }