Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/hugetlb: convert alloc_surplus_huge_page() to folios

Change alloc_surplus_huge_page() to alloc_surplus_hugetlb_folio() and
update its callers.

Link: https://lkml.kernel.org/r/20230113223057.173292-5-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Sidhartha Kumar and committed by Andrew Morton.
3a740e8b a36f1e90

+14 -13
mm/hugetlb.c
@@ -2378,8 +2378,8 @@
 /*
  * Allocates a fresh surplus page from the page allocator.
  */
-static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
-					int nid, nodemask_t *nmask)
+static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
+				gfp_t gfp_mask, int nid, nodemask_t *nmask)
 {
 	struct folio *folio = NULL;
@@ -2416,7 +2416,7 @@
 out_unlock:
 	spin_unlock_irq(&hugetlb_lock);
 
-	return &folio->page;
+	return folio;
 }
 
 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
@@ -2449,7 +2449,7 @@
 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
 		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	struct mempolicy *mpol;
 	gfp_t gfp_mask = htlb_alloc_mask(h);
 	int nid;
@@ -2460,13 +2460,13 @@
 		gfp_t gfp = gfp_mask | __GFP_NOWARN;
 
 		gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
-		page = alloc_surplus_huge_page(h, gfp, nid, nodemask);
+		folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
 
 		/* Fallback to all nodes if page==NULL */
 		nodemask = NULL;
 	}
 
-	if (!page)
-		page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
+	if (!folio)
+		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
 	mpol_cond_put(mpol);
-	return page;
+	return &folio->page;
 }
 
 /* page migration callback function */
@@ -2518,6 +2518,7 @@
 	__must_hold(&hugetlb_lock)
 {
 	LIST_HEAD(surplus_list);
+	struct folio *folio;
 	struct page *page, *tmp;
 	int ret;
 	long i;
@@ -2538,13 +2537,13 @@
 retry:
 	spin_unlock_irq(&hugetlb_lock);
 	for (i = 0; i < needed; i++) {
-		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
+		folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
 				NUMA_NO_NODE, NULL);
-		if (!page) {
+		if (!folio) {
 			alloc_ok = false;
 			break;
 		}
-		list_add(&page->lru, &surplus_list);
+		list_add(&folio->lru, &surplus_list);
 		cond_resched();
 	}
 	allocated += i;
@@ -3497,7 +3496,7 @@
 	 * First take pages out of surplus state. Then make up the
 	 * remaining difference by allocating fresh huge pages.
 	 *
-	 * We might race with alloc_surplus_huge_page() here and be unable
+	 * We might race with alloc_surplus_hugetlb_folio() here and be unable
 	 * to convert a surplus huge page to a normal huge page. That is
 	 * not critical, though, it just means the overall size of the
 	 * pool might be one hugepage larger than it needs to be, but
@@ -3540,7 +3539,7 @@
 	 * By placing pages into the surplus state independent of the
 	 * overcommit value, we are allowing the surplus pool size to
 	 * exceed overcommit. There are few sane options here. Since
-	 * alloc_surplus_huge_page() is checking the global counter,
+	 * alloc_surplus_hugetlb_folio() is checking the global counter,
 	 * though, we'll note that we're not allowed to exceed surplus
 	 * and won't grow the pool anywhere else. Not until one of the
 	 * sysctls are changed, or the surplus pages go out of use.