Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/hugetlb: convert __update_and_free_page() to folios

Change __update_and_free_page() to __update_and_free_hugetlb_folio() by
changing its callers to pass in a folio.

Link: https://lkml.kernel.org/r/20230113223057.173292-3-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Sidhartha Kumar and committed by Andrew Morton
Commit: 6f6956cf (parent: 6aa3a920)

Diffstat: +6 -6
mm/hugetlb.c
··· 1698 1698 enqueue_hugetlb_folio(h, folio); 1699 1699 } 1700 1700 1701 - static void __update_and_free_page(struct hstate *h, struct page *page) 1701 + static void __update_and_free_hugetlb_folio(struct hstate *h, 1702 + struct folio *folio) 1702 1703 { 1703 1704 int i; 1704 - struct folio *folio = page_folio(page); 1705 1705 struct page *subpage; 1706 1706 1707 1707 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) ··· 1714 1714 if (folio_test_hugetlb_raw_hwp_unreliable(folio)) 1715 1715 return; 1716 1716 1717 - if (hugetlb_vmemmap_restore(h, page)) { 1717 + if (hugetlb_vmemmap_restore(h, &folio->page)) { 1718 1718 spin_lock_irq(&hugetlb_lock); 1719 1719 /* 1720 1720 * If we cannot allocate vmemmap pages, just refuse to free the ··· 1750 1750 destroy_compound_gigantic_folio(folio, huge_page_order(h)); 1751 1751 free_gigantic_folio(folio, huge_page_order(h)); 1752 1752 } else { 1753 - __free_pages(page, huge_page_order(h)); 1753 + __free_pages(&folio->page, huge_page_order(h)); 1754 1754 } 1755 1755 } 1756 1756 ··· 1790 1790 */ 1791 1791 h = size_to_hstate(page_size(page)); 1792 1792 1793 - __update_and_free_page(h, page); 1793 + __update_and_free_hugetlb_folio(h, page_folio(page)); 1794 1794 1795 1795 cond_resched(); 1796 1796 } ··· 1807 1807 bool atomic) 1808 1808 { 1809 1809 if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) { 1810 - __update_and_free_page(h, &folio->page); 1810 + __update_and_free_hugetlb_folio(h, folio); 1811 1811 return; 1812 1812 } 1813 1813