mm: khugepaged: recalculate min_free_kbytes after memory hotplug as expected by khugepaged

When memory is hotplug added or removed, min_free_kbytes should be
recalculated based on what khugepaged expects. Currently after
hotplug, min_free_kbytes is reset to a lower default, and the higher
value set when THP is enabled is lost.

This change restores min_free_kbytes as expected for THP consumers.

[vijayb@linux.microsoft.com: v5]
Link: https://lkml.kernel.org/r/1601398153-5517-1-git-send-email-vijayb@linux.microsoft.com

Fixes: f000565adb77 ("thp: set recommended min free kbytes")
Signed-off-by: Vijay Balakrishna <vijayb@linux.microsoft.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Allen Pais <apais@microsoft.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: <stable@vger.kernel.org>
Link: https://lkml.kernel.org/r/1600305709-2319-2-git-send-email-vijayb@linux.microsoft.com
Link: https://lkml.kernel.org/r/1600204258-13683-1-git-send-email-vijayb@linux.microsoft.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by Vijay Balakrishna and committed by Linus Torvalds 4aab2be0 8b7b2eb1

Changed files
+19 -2
include
linux
mm
+5
include/linux/khugepaged.h
··· 15 15 extern void __khugepaged_exit(struct mm_struct *mm); 16 16 extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, 17 17 unsigned long vm_flags); 18 + extern void khugepaged_min_free_kbytes_update(void); 18 19 #ifdef CONFIG_SHMEM 19 20 extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr); 20 21 #else ··· 84 83 } 85 84 static inline void collapse_pte_mapped_thp(struct mm_struct *mm, 86 85 unsigned long addr) 86 + { 87 + } 88 + 89 + static inline void khugepaged_min_free_kbytes_update(void) 87 90 { 88 91 } 89 92 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+11 -2
mm/khugepaged.c
··· 56 56 #define CREATE_TRACE_POINTS 57 57 #include <trace/events/huge_memory.h> 58 58 59 + static struct task_struct *khugepaged_thread __read_mostly; 60 + static DEFINE_MUTEX(khugepaged_mutex); 61 + 59 62 /* default scan 8*512 pte (or vmas) every 30 second */ 60 63 static unsigned int khugepaged_pages_to_scan __read_mostly; 61 64 static unsigned int khugepaged_pages_collapsed; ··· 2307 2304 2308 2305 int start_stop_khugepaged(void) 2309 2306 { 2310 - static struct task_struct *khugepaged_thread __read_mostly; 2311 - static DEFINE_MUTEX(khugepaged_mutex); 2312 2307 int err = 0; 2313 2308 2314 2309 mutex_lock(&khugepaged_mutex); ··· 2332 2331 fail: 2333 2332 mutex_unlock(&khugepaged_mutex); 2334 2333 return err; 2334 + } 2335 + 2336 + void khugepaged_min_free_kbytes_update(void) 2337 + { 2338 + mutex_lock(&khugepaged_mutex); 2339 + if (khugepaged_enabled() && khugepaged_thread) 2340 + set_recommended_min_free_kbytes(); 2341 + mutex_unlock(&khugepaged_mutex); 2335 2342 }
+3
mm/page_alloc.c
··· 69 69 #include <linux/nmi.h> 70 70 #include <linux/psi.h> 71 71 #include <linux/padata.h> 72 + #include <linux/khugepaged.h> 72 73 73 74 #include <asm/sections.h> 74 75 #include <asm/tlbflush.h> ··· 7904 7903 setup_min_unmapped_ratio(); 7905 7904 setup_min_slab_ratio(); 7906 7905 #endif 7906 + 7907 + khugepaged_min_free_kbytes_update(); 7907 7908 7908 7909 return 0; 7909 7910 }