mm: Make copy_huge_page() always available

Rewrite copy_huge_page() and move it into mm/util.c so it's always
available. Fixes an exposure of uninitialised memory on configurations
with HUGETLB and UFFD enabled and MIGRATION disabled.

Fixes: 8cc5fcbb5be8 ("mm, hugetlb: fix racy resv_huge_pages underflow on UFFDIO_COPY")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Matthew Wilcox (Oracle) and committed by Linus Torvalds. Commit 79789db0, parent efdb6720.

---
 include/linux/migrate.h |  5 -----
 include/linux/mm.h      |  1 +
 mm/migrate.c            | 48 ------------------------------------------------
 mm/util.c               | 10 ++++++++++
 4 files changed, 11 insertions(+), 53 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -51,7 +51,6 @@
 				  struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page, int extra_count);
-extern void copy_huge_page(struct page *dst, struct page *src);
 #else
 
 static inline void putback_movable_pages(struct list_head *l) {}
@@ -75,10 +74,6 @@
 		struct page *newpage, struct page *page)
 {
 	return -ENOSYS;
-}
-
-static inline void copy_huge_page(struct page *dst, struct page *src)
-{
 }
 #endif /* CONFIG_MIGRATION */
 
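The deleted stub is the heart of the bug. With CONFIG_MIGRATION=n this header reduced copy_huge_page() to an empty inline, so the hugetlb UFFDIO_COPY fallback added by the commit named in the Fixes: tag "copied" into a freshly allocated huge page without writing a single byte, then mapped it for userspace. A condensed view of the pre-patch header, simplified from the removed lines rather than quoted verbatim:

#ifdef CONFIG_MIGRATION
extern void copy_huge_page(struct page *dst, struct page *src);
#else
static inline void copy_huge_page(struct page *dst, struct page *src)
{
	/* compiles to nothing: dst keeps whatever stale data it held */
}
#endif

Because the stub satisfied the compiler and linker, the misconfiguration was silent: nothing at build time hinted that a HUGETLB+UFFD kernel without MIGRATION had no working copy routine.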
diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -906,6 +906,7 @@
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
+void copy_huge_page(struct page *dst, struct page *src);
 
 /*
  * Compound pages have a destructor function. Provide a
diff --git a/mm/migrate.c b/mm/migrate.c
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -537,54 +537,6 @@
 }
 
 /*
- * Gigantic pages are so large that we do not guarantee that page++ pointer
- * arithmetic will work across the entire page. We need something more
- * specialized.
- */
-static void __copy_gigantic_page(struct page *dst, struct page *src,
-				int nr_pages)
-{
-	int i;
-	struct page *dst_base = dst;
-	struct page *src_base = src;
-
-	for (i = 0; i < nr_pages; ) {
-		cond_resched();
-		copy_highpage(dst, src);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
-	}
-}
-
-void copy_huge_page(struct page *dst, struct page *src)
-{
-	int i;
-	int nr_pages;
-
-	if (PageHuge(src)) {
-		/* hugetlbfs page */
-		struct hstate *h = page_hstate(src);
-		nr_pages = pages_per_huge_page(h);
-
-		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
-			__copy_gigantic_page(dst, src, nr_pages);
-			return;
-		}
-	} else {
-		/* thp page */
-		BUG_ON(!PageTransHuge(src));
-		nr_pages = thp_nr_pages(src);
-	}
-
-	for (i = 0; i < nr_pages; i++) {
-		cond_resched();
-		copy_highpage(dst + i, src + i);
-	}
-}
-
-/*
  * Copy the page to its new location
  */
 void migrate_page_states(struct page *newpage, struct page *page)
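The removed __copy_gigantic_page() existed because, as its comment says, page++ arithmetic is not guaranteed to work across an entire gigantic page: with classic sparsemem (no vmemmap) the struct page array is only contiguous within a MAX_ORDER block, so the walk must re-derive the pointer from the pfn at each block boundary. For reference, the iterator it relied on was mem_map_next() from mm/internal.h; reproduced here from memory as a sketch, not a verbatim quote:

static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int i)
{
	/* crossing into the next MAX_ORDER block: go via the pfn */
	if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + i;

		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;	/* still inside the block: plain increment */
}

Note also that the old copy_huge_page() had to classify its argument via PageHuge()/PageTransHuge(), hence the BUG_ON() for anything unexpected; the replacement below needs no such classification.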
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -731,6 +731,16 @@
 }
 EXPORT_SYMBOL_GPL(__page_mapcount);
 
+void copy_huge_page(struct page *dst, struct page *src)
+{
+	unsigned i, nr = compound_nr(src);
+
+	for (i = 0; i < nr; i++) {
+		cond_resched();
+		copy_highpage(nth_page(dst, i), nth_page(src, i));
+	}
+}
+
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
 int sysctl_overcommit_ratio __read_mostly = 50;
 unsigned long sysctl_overcommit_kbytes __read_mostly;
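The rewrite collapses the old three cases (hugetlb, gigantic, THP) into one loop: compound_nr(src) yields the number of subpages for any compound page, and nth_page() performs exactly the block-boundary-safe stepping that __copy_gigantic_page() open-coded. Roughly, as these helpers were defined in include/linux/mm.h around this release (again quoted from memory, so treat as a sketch):

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
/* struct pages may be discontiguous: index via the pfn */
#define nth_page(page, n)	pfn_to_page(page_to_pfn((page)) + (n))
#else
/* contiguous (or virtually contiguous) mem_map: plain arithmetic */
#define nth_page(page, n)	((page) + (n))
#endif

static inline unsigned long compound_nr(struct page *page)
{
	return 1UL << compound_order(page);	/* 1 for an order-0 page */
}

On the common vmemmap configurations nth_page() costs nothing over dst + i, and the per-subpage cond_resched() keeps the loop preemptible: a 1GB gigantic page is 262,144 4KB subpages, too much copying to do without rescheduling points.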
··· 731 731 } 732 732 EXPORT_SYMBOL_GPL(__page_mapcount); 733 733 734 + void copy_huge_page(struct page *dst, struct page *src) 735 + { 736 + unsigned i, nr = compound_nr(src); 737 + 738 + for (i = 0; i < nr; i++) { 739 + cond_resched(); 740 + copy_highpage(nth_page(dst, i), nth_page(src, i)); 741 + } 742 + } 743 + 734 744 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; 735 745 int sysctl_overcommit_ratio __read_mostly = 50; 736 746 unsigned long sysctl_overcommit_kbytes __read_mostly;