Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm/hugetlb: increase use of folios in alloc_huge_page()

Change hugetlb_cgroup_commit_charge{,_rsvd}(), dequeue_huge_page_vma() and
alloc_buddy_huge_page_with_mpol() to use folios, so that alloc_huge_page()
operates on folios throughout and only converts back to a page at its return.

Link: https://lkml.kernel.org/r/20230113223057.173292-6-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
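
Editor's note: at this point in the series alloc_huge_page() still returns a
struct page * to its callers, which is why the function ends with
&folio->page even though it works on folios internally. A minimal,
illustrative caller-side sketch (not part of this patch; variable names are
placeholders) of how the returned page relates to the folio:

	struct page *page;
	struct folio *folio;

	page = alloc_huge_page(vma, addr, avoid_reserve);
	if (IS_ERR(page))
		return PTR_ERR(page);	/* e.g. -ENOMEM or -ENOSPC */

	/* page_folio() recovers the folio that the helpers already built */
	folio = page_folio(page);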

Authored by Sidhartha Kumar, committed by Andrew Morton
ff7d853b 3a740e8b

3 files changed: +22 -27
include/linux/hugetlb_cgroup.h (+4 -4)

···
                                struct hugetlb_cgroup **ptr);
 extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                          struct hugetlb_cgroup *h_cg,
-                                         struct page *page);
+                                         struct folio *folio);
 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                               struct hugetlb_cgroup *h_cg,
-                                              struct page *page);
+                                              struct folio *folio);
 extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
                                           struct folio *folio);
 extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
···

 static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                                 struct hugetlb_cgroup *h_cg,
-                                                struct page *page)
+                                                struct folio *folio)
 {
 }

 static inline void
 hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                   struct hugetlb_cgroup *h_cg,
-                                  struct page *page)
+                                  struct folio *folio)
 {
 }
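With the folio-based prototypes above, the charge/commit pairing used by
alloc_huge_page() looks roughly like the sketch below (simplified and
illustrative only; error unwinding and the *_rsvd reservation variant are
omitted, and the surrounding variables come from the caller):

	struct hugetlb_cgroup *h_cg = NULL;
	int idx = hstate_index(h);

	/* reserve the cgroup charge up front ... */
	if (hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg))
		return ERR_PTR(-ENOSPC);

	/* ... and commit it against the folio once one has been allocated */
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);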
mm/hugetlb.c (+16 -17)

···
        return h->free_huge_pages - h->resv_huge_pages;
 }

-static struct page *dequeue_huge_page_vma(struct hstate *h,
+static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve,
                                long chg)
···
        }

        mpol_cond_put(mpol);
-       return &folio->page;
+       return folio;

 err:
        return NULL;
···
  * Use the VMA's mpolicy to allocate a huge page from the buddy.
  */
 static
-struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
+struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
                struct vm_area_struct *vma, unsigned long addr)
 {
        struct folio *folio = NULL;
···
        if (!folio)
                folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
        mpol_cond_put(mpol);
-       return &folio->page;
+       return folio;
 }

 /* page migration callback function */
···
 {
        struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
-       struct page *page;
        struct folio *folio;
        long map_chg, map_commit;
        long gbl_chg;
···
         * from the global free pool (global change).  gbl_chg == 0 indicates
         * a reservation exists for the allocation.
         */
-       page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
-       if (!page) {
+       folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
+       if (!folio) {
                spin_unlock_irq(&hugetlb_lock);
-               page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
-               if (!page)
+               folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+               if (!folio)
                        goto out_uncharge_cgroup;
                spin_lock_irq(&hugetlb_lock);
                if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
-                       SetHPageRestoreReserve(page);
+                       folio_set_hugetlb_restore_reserve(folio);
                        h->resv_huge_pages--;
                }
-               list_add(&page->lru, &h->hugepage_activelist);
-               set_page_refcounted(page);
+               list_add(&folio->lru, &h->hugepage_activelist);
+               folio_ref_unfreeze(folio, 1);
                /* Fall through */
        }
-       folio = page_folio(page);
-       hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+
+       hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
        /* If allocation is not consuming a reservation, also store the
         * hugetlb_cgroup pointer on the page.
         */
        if (deferred_reserve) {
                hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
-                                                 h_cg, page);
+                                                 h_cg, folio);
        }

        spin_unlock_irq(&hugetlb_lock);

-       hugetlb_set_page_subpool(page, spool);
+       hugetlb_set_folio_subpool(folio, spool);

        map_commit = vma_commit_reservation(h, vma, addr);
        if (unlikely(map_chg > map_commit)) {
···
                hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
                                pages_per_huge_page(h), folio);
        }
-       return page;
+       return &folio->page;

 out_uncharge_cgroup:
        hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
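Condensed, the allocation path now stays on folios from dequeue through the
cgroup commit and converts back to a page only at the return. A sketch
distilled from the hunks above (not verbatim kernel code; locking, the
reservation accounting and most error paths are elided):

	folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
	if (!folio) {
		/* no free huge folio: fall back to the buddy allocator */
		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
		if (!folio)
			goto out_uncharge_cgroup;
		list_add(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);	/* raise the frozen refcount to 1 */
	}
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
	hugetlb_set_folio_subpool(folio, spool);
	return &folio->page;	/* callers still expect a struct page * */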
mm/hugetlb_cgroup.c (+2 -6)

···

 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                   struct hugetlb_cgroup *h_cg,
-                                  struct page *page)
+                                  struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
-
        __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
 }

 void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                        struct hugetlb_cgroup *h_cg,
-                                       struct page *page)
+                                       struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
-
        __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
 }