Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: hugetlb_vmemmap: convert page to folio

There are still some places that have not been converted to folios; this
patch converts all of them to folios. This patch also does some trivial
cleanup to fix code style problems.

Link: https://lkml.kernel.org/r/20231127084645.27017-5-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Muchun Song and committed by
Andrew Morton
ebc20dca be035a2a

+25 -26
+25 -26
mm/hugetlb_vmemmap.c
··· 280 280 * Return: %0 on success, negative error code otherwise. 281 281 */ 282 282 static int vmemmap_remap_split(unsigned long start, unsigned long end, 283 - unsigned long reuse) 283 + unsigned long reuse) 284 284 { 285 285 int ret; 286 286 struct vmemmap_remap_walk walk = { ··· 447 447 static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON); 448 448 core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0); 449 449 450 - static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio, unsigned long flags) 450 + static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, 451 + struct folio *folio, unsigned long flags) 451 452 { 452 453 int ret; 453 - struct page *head = &folio->page; 454 - unsigned long vmemmap_start = (unsigned long)head, vmemmap_end; 454 + unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end; 455 455 unsigned long vmemmap_reuse; 456 456 457 - VM_WARN_ON_ONCE(!PageHuge(head)); 457 + VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio); 458 458 if (!folio_test_hugetlb_vmemmap_optimized(folio)) 459 459 return 0; 460 460 ··· 517 517 list_for_each_entry_safe(folio, t_folio, folio_list, lru) { 518 518 if (folio_test_hugetlb_vmemmap_optimized(folio)) { 519 519 ret = __hugetlb_vmemmap_restore_folio(h, folio, 520 - VMEMMAP_REMAP_NO_TLB_FLUSH); 520 + VMEMMAP_REMAP_NO_TLB_FLUSH); 521 521 if (ret) 522 522 break; 523 523 restored++; ··· 535 535 } 536 536 537 537 /* Return true iff a HugeTLB whose vmemmap should and can be optimized. 
*/ 538 - static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head) 538 + static bool vmemmap_should_optimize_folio(const struct hstate *h, struct folio *folio) 539 539 { 540 - if (HPageVmemmapOptimized((struct page *)head)) 540 + if (folio_test_hugetlb_vmemmap_optimized(folio)) 541 541 return false; 542 542 543 543 if (!READ_ONCE(vmemmap_optimize_enabled)) ··· 550 550 } 551 551 552 552 static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h, 553 - struct folio *folio, 554 - struct list_head *vmemmap_pages, 555 - unsigned long flags) 553 + struct folio *folio, 554 + struct list_head *vmemmap_pages, 555 + unsigned long flags) 556 556 { 557 557 int ret = 0; 558 - struct page *head = &folio->page; 559 - unsigned long vmemmap_start = (unsigned long)head, vmemmap_end; 558 + unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end; 560 559 unsigned long vmemmap_reuse; 561 560 562 - VM_WARN_ON_ONCE(!PageHuge(head)); 563 - if (!vmemmap_should_optimize(h, head)) 561 + VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio); 562 + if (!vmemmap_should_optimize_folio(h, folio)) 564 563 return ret; 565 564 566 565 static_branch_inc(&hugetlb_optimize_vmemmap_key); ··· 587 588 * the caller. 
588 589 */ 589 590 ret = vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse, 590 - vmemmap_pages, flags); 591 + vmemmap_pages, flags); 591 592 if (ret) { 592 593 static_branch_dec(&hugetlb_optimize_vmemmap_key); 593 594 folio_clear_hugetlb_vmemmap_optimized(folio); ··· 614 615 free_vmemmap_page_list(&vmemmap_pages); 615 616 } 616 617 617 - static int hugetlb_vmemmap_split(const struct hstate *h, struct page *head) 618 + static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio) 618 619 { 619 - unsigned long vmemmap_start = (unsigned long)head, vmemmap_end; 620 + unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end; 620 621 unsigned long vmemmap_reuse; 621 622 622 - if (!vmemmap_should_optimize(h, head)) 623 + if (!vmemmap_should_optimize_folio(h, folio)) 623 624 return 0; 624 625 625 626 vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h); ··· 639 640 LIST_HEAD(vmemmap_pages); 640 641 641 642 list_for_each_entry(folio, folio_list, lru) { 642 - int ret = hugetlb_vmemmap_split(h, &folio->page); 643 + int ret = hugetlb_vmemmap_split_folio(h, folio); 643 644 644 645 /* 645 646 * Spliting the PMD requires allocating a page, thus lets fail ··· 654 655 flush_tlb_all(); 655 656 656 657 list_for_each_entry(folio, folio_list, lru) { 657 - int ret = __hugetlb_vmemmap_optimize_folio(h, folio, 658 - &vmemmap_pages, 659 - VMEMMAP_REMAP_NO_TLB_FLUSH); 658 + int ret; 659 + 660 + ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, 661 + VMEMMAP_REMAP_NO_TLB_FLUSH); 660 662 661 663 /* 662 664 * Pages to be freed may have been accumulated. If we ··· 671 671 flush_tlb_all(); 672 672 free_vmemmap_page_list(&vmemmap_pages); 673 673 INIT_LIST_HEAD(&vmemmap_pages); 674 - __hugetlb_vmemmap_optimize_folio(h, folio, 675 - &vmemmap_pages, 676 - VMEMMAP_REMAP_NO_TLB_FLUSH); 674 + __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, 675 + VMEMMAP_REMAP_NO_TLB_FLUSH); 677 676 } 678 677 } 679 678