Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

madvise: convert madvise_free_pte_range() to use a folio

Saves a lot of calls to compound_head().

Link: https://lkml.kernel.org/r/20220902194653.1739778-44-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Matthew Wilcox (Oracle); committed by Andrew Morton.
98b211d6 2fad3d14

+25 -24 (25 lines added, 24 lines removed)
mm/madvise.c
···
601 601 	struct vm_area_struct *vma = walk->vma;
602 602 	spinlock_t *ptl;
603 603 	pte_t *orig_pte, *pte, ptent;
    604 +	struct folio *folio;
604 605 	struct page *page;
605 606 	int nr_swap = 0;
606 607 	unsigned long next;
···
646 645 		page = vm_normal_page(vma, addr, ptent);
647 646 		if (!page || is_zone_device_page(page))
648 647 			continue;
    648 +		folio = page_folio(page);
649 649 
650 650 		/*
651     -		 * If pmd isn't transhuge but the page is THP and
    651 +		 * If pmd isn't transhuge but the folio is large and
652 652 		 * is owned by only this process, split it and
653 653 		 * deactivate all pages.
654 654 		 */
655     -		if (PageTransCompound(page)) {
656     -			if (page_mapcount(page) != 1)
    655 +		if (folio_test_large(folio)) {
    656 +			if (folio_mapcount(folio) != 1)
657 657 				goto out;
658     -			get_page(page);
659     -			if (!trylock_page(page)) {
660     -				put_page(page);
    658 +			folio_get(folio);
    659 +			if (!folio_trylock(folio)) {
    660 +				folio_put(folio);
661 661 				goto out;
662 662 			}
663 663 			pte_unmap_unlock(orig_pte, ptl);
664     -			if (split_huge_page(page)) {
665     -				unlock_page(page);
666     -				put_page(page);
    664 +			if (split_folio(folio)) {
    665 +				folio_unlock(folio);
    666 +				folio_put(folio);
667 667 				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
668 668 				goto out;
669 669 			}
670     -			unlock_page(page);
671     -			put_page(page);
    670 +			folio_unlock(folio);
    671 +			folio_put(folio);
672 672 			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
673 673 			pte--;
674 674 			addr -= PAGE_SIZE;
675 675 			continue;
676 676 		}
677 677 
678     -		VM_BUG_ON_PAGE(PageTransCompound(page), page);
679     -
680     -		if (PageSwapCache(page) || PageDirty(page)) {
681     -			if (!trylock_page(page))
    678 +		if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
    679 +			if (!folio_trylock(folio))
682 680 				continue;
683 681 			/*
684     -			 * If page is shared with others, we couldn't clear
685     -			 * PG_dirty of the page.
    682 +			 * If folio is shared with others, we mustn't clear
    683 +			 * the folio's dirty flag.
686 684 			 */
687     -			if (page_mapcount(page) != 1) {
688     -				unlock_page(page);
    685 +			if (folio_mapcount(folio) != 1) {
    686 +				folio_unlock(folio);
689 687 				continue;
690 688 			}
691 689 
692     -			if (PageSwapCache(page) && !try_to_free_swap(page)) {
693     -				unlock_page(page);
    690 +			if (folio_test_swapcache(folio) &&
    691 +			    !folio_free_swap(folio)) {
    692 +				folio_unlock(folio);
694 693 				continue;
695 694 			}
696 695 
697     -			ClearPageDirty(page);
698     -			unlock_page(page);
    696 +			folio_clear_dirty(folio);
    697 +			folio_unlock(folio);
699 698 		}
700 699 
701 700 		if (pte_young(ptent) || pte_dirty(ptent)) {
···
713 712 			set_pte_at(mm, addr, pte, ptent);
714 713 			tlb_remove_tlb_entry(tlb, pte, addr);
715 714 		}
716     -		mark_page_lazyfree(page);
    715 +		mark_page_lazyfree(&folio->page);
717 716 	}
718 717 out:
719 718 	if (nr_swap) {