Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: hugetlb: cleanup using page_huge_active()

Now we have easy access to hugepages' activeness, so the existing helpers
for getting this information can be cleaned up.

[akpm@linux-foundation.org: s/PageHugeActive/page_huge_active/]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Hugh Dickins <hughd@google.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Naoya Horiguchi, committed by Linus Torvalds
7e1f049e bcc54222

+13 -40
-2
include/linux/hugetlb.h
··· 84 84 int dequeue_hwpoisoned_huge_page(struct page *page); 85 85 bool isolate_huge_page(struct page *page, struct list_head *list); 86 86 void putback_active_hugepage(struct page *page); 87 - bool is_hugepage_active(struct page *page); 88 87 void free_huge_page(struct page *page); 89 88 90 89 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE ··· 151 152 return false; 152 153 } 153 154 #define putback_active_hugepage(p) do {} while (0) 154 - #define is_hugepage_active(x) false 155 155 156 156 static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 157 157 unsigned long address, unsigned long end, pgprot_t newprot)
+7
include/linux/page-flags.h
··· 470 470 #ifdef CONFIG_HUGETLB_PAGE 471 471 int PageHuge(struct page *page); 472 472 int PageHeadHuge(struct page *page); 473 + bool page_huge_active(struct page *page); 473 474 #else 474 475 TESTPAGEFLAG_FALSE(Huge) 475 476 TESTPAGEFLAG_FALSE(HeadHuge) 477 + 478 + static inline bool page_huge_active(struct page *page) 479 + { 480 + return 0; 481 + } 476 482 #endif 483 + 477 484 478 485 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 479 486 /*
+5 -37
mm/hugetlb.c
··· 3896 3896 3897 3897 #ifdef CONFIG_MEMORY_FAILURE 3898 3898 3899 - /* Should be called in hugetlb_lock */ 3900 - static int is_hugepage_on_freelist(struct page *hpage) 3901 - { 3902 - struct page *page; 3903 - struct page *tmp; 3904 - struct hstate *h = page_hstate(hpage); 3905 - int nid = page_to_nid(hpage); 3906 - 3907 - list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru) 3908 - if (page == hpage) 3909 - return 1; 3910 - return 0; 3911 - } 3912 - 3913 3899 /* 3914 3900 * This function is called from memory failure code. 3915 3901 * Assume the caller holds page lock of the head page. ··· 3907 3921 int ret = -EBUSY; 3908 3922 3909 3923 spin_lock(&hugetlb_lock); 3910 - if (is_hugepage_on_freelist(hpage)) { 3924 + /* 3925 + * Just checking !page_huge_active is not enough, because that could be 3926 + * an isolated/hwpoisoned hugepage (which have >0 refcount). 3927 + */ 3928 + if (!page_huge_active(hpage) && !page_count(hpage)) { 3911 3929 /* 3912 3930 * Hwpoisoned hugepage isn't linked to activelist or freelist, 3913 3931 * but dangling hpage->lru can trigger list-debug warnings ··· 3954 3964 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); 3955 3965 spin_unlock(&hugetlb_lock); 3956 3966 put_page(page); 3957 - } 3958 - 3959 - bool is_hugepage_active(struct page *page) 3960 - { 3961 - VM_BUG_ON_PAGE(!PageHuge(page), page); 3962 - /* 3963 - * This function can be called for a tail page because the caller, 3964 - * scan_movable_pages, scans through a given pfn-range which typically 3965 - * covers one memory block. In systems using gigantic hugepage (1GB 3966 - * for x86_64,) a hugepage is larger than a memory block, and we don't 3967 - * support migrating such large hugepages for now, so return false 3968 - * when called for tail pages. 3969 - */ 3970 - if (PageTail(page)) 3971 - return false; 3972 - /* 3973 - * Refcount of a hwpoisoned hugepages is 1, but they are not active, 3974 - * so we should return false for them. 
3975 - */ 3976 - if (unlikely(PageHWPoison(page))) 3977 - return false; 3978 - return page_count(page) > 0; 3979 3967 }
+1 -1
mm/memory_hotplug.c
··· 1373 1373 if (PageLRU(page)) 1374 1374 return pfn; 1375 1375 if (PageHuge(page)) { 1376 - if (is_hugepage_active(page)) 1376 + if (page_huge_active(page)) 1377 1377 return pfn; 1378 1378 else 1379 1379 pfn = round_up(pfn + 1,