
mm/hugetlb: convert dequeue_hugetlb_page functions to folios

dequeue_huge_page_node_exact() is changed to dequeue_hugetlb_folio_node_exact()
and dequeue_huge_page_nodemask() is changed to dequeue_hugetlb_folio_nodemask().
Update their callers to pass in a folio.
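For callers the change looks roughly like this (a minimal sketch, not code from
the patch; example_dequeue_page() and its argument setup are hypothetical,
standing in for logic such as alloc_huge_page_nodemask()):

	static struct page *example_dequeue_page(struct hstate *h, gfp_t gfp_mask,
						 int nid, nodemask_t *nmask)
	{
		struct folio *folio;

		/* The dequeue helpers now return a folio instead of a page. */
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nmask);
		if (!folio)
			return NULL;

		/* Interfaces that still expect a page convert at the boundary;
		 * the folio's embedded first page is the huge page's head page. */
		return &folio->page;
	}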

Link: https://lkml.kernel.org/r/20230113223057.173292-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Sidhartha Kumar and committed by Andrew Morton
a36f1e90 6f6956cf

+30 -26
mm/hugetlb.c
@@ -1282,32 +1282,33 @@
 	folio_set_hugetlb_freed(folio);
 }
 
-static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
+static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
+				int nid)
 {
-	struct page *page;
+	struct folio *folio;
 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
 
 	lockdep_assert_held(&hugetlb_lock);
-	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
-		if (pin && !is_longterm_pinnable_page(page))
+	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
+		if (pin && !folio_is_longterm_pinnable(folio))
 			continue;
 
-		if (PageHWPoison(page))
+		if (folio_test_hwpoison(folio))
 			continue;
 
-		list_move(&page->lru, &h->hugepage_activelist);
-		set_page_refcounted(page);
-		ClearHPageFreed(page);
+		list_move(&folio->lru, &h->hugepage_activelist);
+		folio_ref_unfreeze(folio, 1);
+		folio_clear_hugetlb_freed(folio);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
-		return page;
+		return folio;
 	}
 
 	return NULL;
 }
 
-static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
-						nodemask_t *nmask)
+static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
+						int nid, nodemask_t *nmask)
 {
 	unsigned int cpuset_mems_cookie;
 	struct zonelist *zonelist;
@@ -1321,7 +1320,7 @@
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
-		struct page *page;
+		struct folio *folio;
 
 		if (!cpuset_zone_allowed(zone, gfp_mask))
 			continue;
@@ -1333,9 +1332,9 @@
 			continue;
 		node = zone_to_nid(zone);
 
-		page = dequeue_huge_page_node_exact(h, node);
-		if (page)
-			return page;
+		folio = dequeue_hugetlb_folio_node_exact(h, node);
+		if (folio)
+			return folio;
 	}
 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
@@ -1353,7 +1352,7 @@
 		unsigned long address, int avoid_reserve,
 		long chg)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	struct mempolicy *mpol;
 	gfp_t gfp_mask;
 	nodemask_t *nodemask;
@@ -1375,22 +1374,24 @@
 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
 
 	if (mpol_is_preferred_many(mpol)) {
-		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+							nid, nodemask);
 
 		/* Fallback to all nodes if page==NULL */
 		nodemask = NULL;
 	}
 
-	if (!page)
-		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+	if (!folio)
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+							nid, nodemask);
 
-	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
-		SetHPageRestoreReserve(page);
+	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
+		folio_set_hugetlb_restore_reserve(folio);
 		h->resv_huge_pages--;
 	}
 
 	mpol_cond_put(mpol);
-	return page;
+	return &folio->page;
 
 err:
 	return NULL;
@@ -2478,12 +2475,13 @@
 {
 	spin_lock_irq(&hugetlb_lock);
 	if (available_huge_pages(h)) {
-		struct page *page;
+		struct folio *folio;
 
-		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
-		if (page) {
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+						preferred_nid, nmask);
+		if (folio) {
 			spin_unlock_irq(&hugetlb_lock);
-			return page;
+			return &folio->page;
 		}
 	}
 	spin_unlock_irq(&hugetlb_lock);
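One detail worth noting in the dequeue path above: free hugetlb pages sit on the
free list with a frozen (zero) refcount, so the folio version replaces
set_page_refcounted() with folio_ref_unfreeze(folio, 1) when moving a folio to
the active list. A condensed sketch of that step (example_activate() is a
hypothetical helper, not code from the patch):

	static void example_activate(struct hstate *h, struct folio *folio, int nid)
	{
		lockdep_assert_held(&hugetlb_lock);

		list_move(&folio->lru, &h->hugepage_activelist);
		/* Refcount was frozen at zero while on the free list;
		 * re-initialize it to 1 as the folio becomes active. */
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}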