Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Topics: kernel, os, linux
1// SPDX-License-Identifier: GPL-2.0
2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4#include <linux/mm.h>
5#include <linux/sched.h>
6#include <linux/sched/mm.h>
7#include <linux/sched/coredump.h>
8#include <linux/mmu_notifier.h>
9#include <linux/rmap.h>
10#include <linux/swap.h>
11#include <linux/mm_inline.h>
12#include <linux/kthread.h>
13#include <linux/khugepaged.h>
14#include <linux/freezer.h>
15#include <linux/mman.h>
16#include <linux/hashtable.h>
17#include <linux/userfaultfd_k.h>
18#include <linux/page_idle.h>
19#include <linux/page_table_check.h>
20#include <linux/swapops.h>
21#include <linux/shmem_fs.h>
22
23#include <asm/tlb.h>
24#include <asm/pgalloc.h>
25#include "internal.h"
26#include "mm_slot.h"
27
28enum scan_result {
29 SCAN_FAIL,
30 SCAN_SUCCEED,
31 SCAN_PMD_NULL,
32 SCAN_PMD_NONE,
33 SCAN_PMD_MAPPED,
34 SCAN_EXCEED_NONE_PTE,
35 SCAN_EXCEED_SWAP_PTE,
36 SCAN_EXCEED_SHARED_PTE,
37 SCAN_PTE_NON_PRESENT,
38 SCAN_PTE_UFFD_WP,
39 SCAN_PTE_MAPPED_HUGEPAGE,
40 SCAN_PAGE_RO,
41 SCAN_LACK_REFERENCED_PAGE,
42 SCAN_PAGE_NULL,
43 SCAN_SCAN_ABORT,
44 SCAN_PAGE_COUNT,
45 SCAN_PAGE_LRU,
46 SCAN_PAGE_LOCK,
47 SCAN_PAGE_ANON,
48 SCAN_PAGE_COMPOUND,
49 SCAN_ANY_PROCESS,
50 SCAN_VMA_NULL,
51 SCAN_VMA_CHECK,
52 SCAN_ADDRESS_RANGE,
53 SCAN_DEL_PAGE_LRU,
54 SCAN_ALLOC_HUGE_PAGE_FAIL,
55 SCAN_CGROUP_CHARGE_FAIL,
56 SCAN_TRUNCATED,
57 SCAN_PAGE_HAS_PRIVATE,
58};
59
60#define CREATE_TRACE_POINTS
61#include <trace/events/huge_memory.h>
62
63static struct task_struct *khugepaged_thread __read_mostly;
64static DEFINE_MUTEX(khugepaged_mutex);
65
66/* default: scan 8*512 ptes (or vmas) every 30 seconds */
67static unsigned int khugepaged_pages_to_scan __read_mostly;
68static unsigned int khugepaged_pages_collapsed;
69static unsigned int khugepaged_full_scans;
70static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
71/* during fragmentation poll the hugepage allocator once every minute */
72static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
73static unsigned long khugepaged_sleep_expire;
74static DEFINE_SPINLOCK(khugepaged_mm_lock);
75static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
76/*
77 * By default, collapse hugepages if there is at least one pte mapped the
78 * way it would have been mapped had the vma been large enough at
79 * page-fault time.
80 *
81 * Note that these are only respected if collapse was initiated by khugepaged.
82 */
83static unsigned int khugepaged_max_ptes_none __read_mostly;
84static unsigned int khugepaged_max_ptes_swap __read_mostly;
85static unsigned int khugepaged_max_ptes_shared __read_mostly;
86
87#define MM_SLOTS_HASH_BITS 10
88static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
89
90static struct kmem_cache *mm_slot_cache __read_mostly;
91
92#define MAX_PTE_MAPPED_THP 8
93
94struct collapse_control {
95 bool is_khugepaged;
96
97 /* Num pages scanned per node */
98 u32 node_load[MAX_NUMNODES];
99
100 /* nodemask for allocation fallback */
101 nodemask_t alloc_nmask;
102};
103
104/**
105 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
106 * @slot: hash lookup from mm to mm_slot
107 * @nr_pte_mapped_thp: number of pte mapped THP
108 * @pte_mapped_thp: address array corresponding pte mapped THP
109 */
110struct khugepaged_mm_slot {
111 struct mm_slot slot;
112
113 /* pte-mapped THP in this mm */
114 int nr_pte_mapped_thp;
115 unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
116};
117
118/**
119 * struct khugepaged_scan - cursor for scanning
120 * @mm_head: the head of the mm list to scan
121 * @mm_slot: the current mm_slot we are scanning
122 * @address: the next address inside that to be scanned
123 *
124 * There is only the one khugepaged_scan instance of this cursor structure.
125 */
126struct khugepaged_scan {
127 struct list_head mm_head;
128 struct khugepaged_mm_slot *mm_slot;
129 unsigned long address;
130};
131
132static struct khugepaged_scan khugepaged_scan = {
133 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
134};
135
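/*
 * Sysfs tunables. With CONFIG_SYSFS these attributes are grouped under a
 * "khugepaged" directory (typically
 * /sys/kernel/mm/transparent_hugepage/khugepaged/) and control how
 * aggressively khugepaged scans and collapses pages. For example, assuming
 * that path, an administrator could raise the scan rate with:
 *
 *	echo 8192 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *	echo 5000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 */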
136#ifdef CONFIG_SYSFS
137static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
138 struct kobj_attribute *attr,
139 char *buf)
140{
141 return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
142}
143
144static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
145 struct kobj_attribute *attr,
146 const char *buf, size_t count)
147{
148 unsigned int msecs;
149 int err;
150
151 err = kstrtouint(buf, 10, &msecs);
152 if (err)
153 return -EINVAL;
154
155 khugepaged_scan_sleep_millisecs = msecs;
156 khugepaged_sleep_expire = 0;
157 wake_up_interruptible(&khugepaged_wait);
158
159 return count;
160}
161static struct kobj_attribute scan_sleep_millisecs_attr =
162 __ATTR_RW(scan_sleep_millisecs);
163
164static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
165 struct kobj_attribute *attr,
166 char *buf)
167{
168 return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
169}
170
171static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
172 struct kobj_attribute *attr,
173 const char *buf, size_t count)
174{
175 unsigned int msecs;
176 int err;
177
178 err = kstrtouint(buf, 10, &msecs);
179 if (err)
180 return -EINVAL;
181
182 khugepaged_alloc_sleep_millisecs = msecs;
183 khugepaged_sleep_expire = 0;
184 wake_up_interruptible(&khugepaged_wait);
185
186 return count;
187}
188static struct kobj_attribute alloc_sleep_millisecs_attr =
189 __ATTR_RW(alloc_sleep_millisecs);
190
191static ssize_t pages_to_scan_show(struct kobject *kobj,
192 struct kobj_attribute *attr,
193 char *buf)
194{
195 return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
196}
197static ssize_t pages_to_scan_store(struct kobject *kobj,
198 struct kobj_attribute *attr,
199 const char *buf, size_t count)
200{
201 unsigned int pages;
202 int err;
203
204 err = kstrtouint(buf, 10, &pages);
205 if (err || !pages)
206 return -EINVAL;
207
208 khugepaged_pages_to_scan = pages;
209
210 return count;
211}
212static struct kobj_attribute pages_to_scan_attr =
213 __ATTR_RW(pages_to_scan);
214
215static ssize_t pages_collapsed_show(struct kobject *kobj,
216 struct kobj_attribute *attr,
217 char *buf)
218{
219 return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
220}
221static struct kobj_attribute pages_collapsed_attr =
222 __ATTR_RO(pages_collapsed);
223
224static ssize_t full_scans_show(struct kobject *kobj,
225 struct kobj_attribute *attr,
226 char *buf)
227{
228 return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
229}
230static struct kobj_attribute full_scans_attr =
231 __ATTR_RO(full_scans);
232
233static ssize_t defrag_show(struct kobject *kobj,
234 struct kobj_attribute *attr, char *buf)
235{
236 return single_hugepage_flag_show(kobj, attr, buf,
237 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
238}
239static ssize_t defrag_store(struct kobject *kobj,
240 struct kobj_attribute *attr,
241 const char *buf, size_t count)
242{
243 return single_hugepage_flag_store(kobj, attr, buf, count,
244 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
245}
246static struct kobj_attribute khugepaged_defrag_attr =
247 __ATTR_RW(defrag);
248
249/*
250 * max_ptes_none controls if khugepaged should collapse hugepages over
251 * any unmapped ptes in turn potentially increasing the memory
252 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
253 * reduce the available free memory in the system as it
254 * runs. Increasing max_ptes_none will instead potentially reduce the
255 * free memory in the system during the khugepaged scan.
256 */
257static ssize_t max_ptes_none_show(struct kobject *kobj,
258 struct kobj_attribute *attr,
259 char *buf)
260{
261 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
262}
263static ssize_t max_ptes_none_store(struct kobject *kobj,
264 struct kobj_attribute *attr,
265 const char *buf, size_t count)
266{
267 int err;
268 unsigned long max_ptes_none;
269
270 err = kstrtoul(buf, 10, &max_ptes_none);
271 if (err || max_ptes_none > HPAGE_PMD_NR - 1)
272 return -EINVAL;
273
274 khugepaged_max_ptes_none = max_ptes_none;
275
276 return count;
277}
278static struct kobj_attribute khugepaged_max_ptes_none_attr =
279 __ATTR_RW(max_ptes_none);
280
281static ssize_t max_ptes_swap_show(struct kobject *kobj,
282 struct kobj_attribute *attr,
283 char *buf)
284{
285 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
286}
287
288static ssize_t max_ptes_swap_store(struct kobject *kobj,
289 struct kobj_attribute *attr,
290 const char *buf, size_t count)
291{
292 int err;
293 unsigned long max_ptes_swap;
294
295 err = kstrtoul(buf, 10, &max_ptes_swap);
296 if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
297 return -EINVAL;
298
299 khugepaged_max_ptes_swap = max_ptes_swap;
300
301 return count;
302}
303
304static struct kobj_attribute khugepaged_max_ptes_swap_attr =
305 __ATTR_RW(max_ptes_swap);
306
307static ssize_t max_ptes_shared_show(struct kobject *kobj,
308 struct kobj_attribute *attr,
309 char *buf)
310{
311 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
312}
313
314static ssize_t max_ptes_shared_store(struct kobject *kobj,
315 struct kobj_attribute *attr,
316 const char *buf, size_t count)
317{
318 int err;
319 unsigned long max_ptes_shared;
320
321 err = kstrtoul(buf, 10, &max_ptes_shared);
322 if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
323 return -EINVAL;
324
325 khugepaged_max_ptes_shared = max_ptes_shared;
326
327 return count;
328}
329
330static struct kobj_attribute khugepaged_max_ptes_shared_attr =
331 __ATTR_RW(max_ptes_shared);
332
333static struct attribute *khugepaged_attr[] = {
334 &khugepaged_defrag_attr.attr,
335 &khugepaged_max_ptes_none_attr.attr,
336 &khugepaged_max_ptes_swap_attr.attr,
337 &khugepaged_max_ptes_shared_attr.attr,
338 &pages_to_scan_attr.attr,
339 &pages_collapsed_attr.attr,
340 &full_scans_attr.attr,
341 &scan_sleep_millisecs_attr.attr,
342 &alloc_sleep_millisecs_attr.attr,
343 NULL,
344};
345
346struct attribute_group khugepaged_attr_group = {
347 .attrs = khugepaged_attr,
348 .name = "khugepaged",
349};
350#endif /* CONFIG_SYSFS */
351
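/*
 * Handle MADV_HUGEPAGE / MADV_NOHUGEPAGE for a vma: adjust VM_HUGEPAGE and
 * VM_NOHUGEPAGE in *vm_flags and, for MADV_HUGEPAGE, register the mm with
 * khugepaged right away rather than waiting for a page fault.
 */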
352int hugepage_madvise(struct vm_area_struct *vma,
353 unsigned long *vm_flags, int advice)
354{
355 switch (advice) {
356 case MADV_HUGEPAGE:
357#ifdef CONFIG_S390
358 /*
359 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
360 * can't handle this properly after s390_enable_sie, so we simply
361 * ignore the madvise to prevent qemu from causing a SIGSEGV.
362 */
363 if (mm_has_pgste(vma->vm_mm))
364 return 0;
365#endif
366 *vm_flags &= ~VM_NOHUGEPAGE;
367 *vm_flags |= VM_HUGEPAGE;
368 /*
369 * If the vma becomes eligible for khugepaged to scan,
370 * register it here without waiting for a page fault that
371 * may not happen any time soon.
372 */
373 khugepaged_enter_vma(vma, *vm_flags);
374 break;
375 case MADV_NOHUGEPAGE:
376 *vm_flags &= ~VM_HUGEPAGE;
377 *vm_flags |= VM_NOHUGEPAGE;
378 /*
379 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
380 * this vma even if we leave the mm registered in khugepaged if
381 * it got registered before VM_NOHUGEPAGE was set.
382 */
383 break;
384 }
385
386 return 0;
387}
388
389int __init khugepaged_init(void)
390{
391 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
392 sizeof(struct khugepaged_mm_slot),
393 __alignof__(struct khugepaged_mm_slot),
394 0, NULL);
395 if (!mm_slot_cache)
396 return -ENOMEM;
397
398 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
399 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
400 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
401 khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
402
403 return 0;
404}
405
406void __init khugepaged_destroy(void)
407{
408 kmem_cache_destroy(mm_slot_cache);
409}
410
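/* True once mm_users has dropped to zero, i.e. the owning process is exiting. */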
411static inline int hpage_collapse_test_exit(struct mm_struct *mm)
412{
413 return atomic_read(&mm->mm_users) == 0;
414}
415
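/*
 * Register @mm with khugepaged: allocate and hash an mm_slot, add it to the
 * tail of the scan list and take an mm_count reference. Wake khugepaged if
 * the scan list was previously empty.
 */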
416void __khugepaged_enter(struct mm_struct *mm)
417{
418 struct khugepaged_mm_slot *mm_slot;
419 struct mm_slot *slot;
420 int wakeup;
421
422 mm_slot = mm_slot_alloc(mm_slot_cache);
423 if (!mm_slot)
424 return;
425
426 slot = &mm_slot->slot;
427
428 /* __khugepaged_exit() must not run from under us */
429 VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
430 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
431 mm_slot_free(mm_slot_cache, mm_slot);
432 return;
433 }
434
435 spin_lock(&khugepaged_mm_lock);
436 mm_slot_insert(mm_slots_hash, mm, slot);
437 /*
438 * Insert just behind the scanning cursor, to let the area settle
439 * down a little.
440 */
441 wakeup = list_empty(&khugepaged_scan.mm_head);
442 list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
443 spin_unlock(&khugepaged_mm_lock);
444
445 mmgrab(mm);
446 if (wakeup)
447 wake_up_interruptible(&khugepaged_wait);
448}
449
450void khugepaged_enter_vma(struct vm_area_struct *vma,
451 unsigned long vm_flags)
452{
453 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
454 hugepage_flags_enabled()) {
455 if (hugepage_vma_check(vma, vm_flags, false, false, true))
456 __khugepaged_enter(vma->vm_mm);
457 }
458}
459
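/*
 * Unregister @mm from khugepaged. If khugepaged is not currently scanning
 * this mm, drop its slot and mm_count reference right away; otherwise take
 * and release mmap_lock in write mode so khugepaged cannot still be working
 * on this mm's pagetables when they are torn down.
 */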
460void __khugepaged_exit(struct mm_struct *mm)
461{
462 struct khugepaged_mm_slot *mm_slot;
463 struct mm_slot *slot;
464 int free = 0;
465
466 spin_lock(&khugepaged_mm_lock);
467 slot = mm_slot_lookup(mm_slots_hash, mm);
468 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
469 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
470 hash_del(&slot->hash);
471 list_del(&slot->mm_node);
472 free = 1;
473 }
474 spin_unlock(&khugepaged_mm_lock);
475
476 if (free) {
477 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
478 mm_slot_free(mm_slot_cache, mm_slot);
479 mmdrop(mm);
480 } else if (mm_slot) {
481 /*
482 * This is required to serialize against
483 * hpage_collapse_test_exit() (which is guaranteed to run
484 * under mmap_lock read mode). Stop here (once we return, all
485 * pagetables will be destroyed) until khugepaged has finished
486 * working on the pagetables under the mmap_lock.
487 */
488 mmap_write_lock(mm);
489 mmap_write_unlock(mm);
490 }
491}
492
493static void release_pte_page(struct page *page)
494{
495 mod_node_page_state(page_pgdat(page),
496 NR_ISOLATED_ANON + page_is_file_lru(page),
497 -compound_nr(page));
498 unlock_page(page);
499 putback_lru_page(page);
500}
501
502static void release_pte_pages(pte_t *pte, pte_t *_pte,
503 struct list_head *compound_pagelist)
504{
505 struct page *page, *tmp;
506
507 while (--_pte >= pte) {
508 pte_t pteval = *_pte;
509
510 page = pte_page(pteval);
511 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
512 !PageCompound(page))
513 release_pte_page(page);
514 }
515
516 list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
517 list_del(&page->lru);
518 release_pte_page(page);
519 }
520}
521
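/*
 * A page is only safe to collapse if its refcount matches its mapcount (plus
 * one reference per subpage while it sits in the swap cache); anything higher
 * means someone else, e.g. GUP, holds an extra pin.
 */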
522static bool is_refcount_suitable(struct page *page)
523{
524 int expected_refcount;
525
526 expected_refcount = total_mapcount(page);
527 if (PageSwapCache(page))
528 expected_refcount += compound_nr(page);
529
530 return page_count(page) == expected_refcount;
531}
532
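/*
 * Walk the HPAGE_PMD_NR ptes starting at @pte, enforcing the max_ptes_none
 * and max_ptes_shared limits for khugepaged, and lock and isolate each mapped
 * page from the LRU so it cannot be freed or split while it is copied. On
 * failure, every page isolated so far is unlocked and put back.
 */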
533static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
534 unsigned long address,
535 pte_t *pte,
536 struct collapse_control *cc,
537 struct list_head *compound_pagelist)
538{
539 struct page *page = NULL;
540 pte_t *_pte;
541 int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
542 bool writable = false;
543
544 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
545 _pte++, address += PAGE_SIZE) {
546 pte_t pteval = *_pte;
547 if (pte_none(pteval) || (pte_present(pteval) &&
548 is_zero_pfn(pte_pfn(pteval)))) {
549 ++none_or_zero;
550 if (!userfaultfd_armed(vma) &&
551 (!cc->is_khugepaged ||
552 none_or_zero <= khugepaged_max_ptes_none)) {
553 continue;
554 } else {
555 result = SCAN_EXCEED_NONE_PTE;
556 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
557 goto out;
558 }
559 }
560 if (!pte_present(pteval)) {
561 result = SCAN_PTE_NON_PRESENT;
562 goto out;
563 }
564 page = vm_normal_page(vma, address, pteval);
565 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
566 result = SCAN_PAGE_NULL;
567 goto out;
568 }
569
570 VM_BUG_ON_PAGE(!PageAnon(page), page);
571
572 if (page_mapcount(page) > 1) {
573 ++shared;
574 if (cc->is_khugepaged &&
575 shared > khugepaged_max_ptes_shared) {
576 result = SCAN_EXCEED_SHARED_PTE;
577 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
578 goto out;
579 }
580 }
581
582 if (PageCompound(page)) {
583 struct page *p;
584 page = compound_head(page);
585
586 /*
587 * Check if we have dealt with the compound page
588 * already
589 */
590 list_for_each_entry(p, compound_pagelist, lru) {
591 if (page == p)
592 goto next;
593 }
594 }
595
596 /*
597 * We can do it before isolate_lru_page because the
598 * page can't be freed from under us. NOTE: PG_lock
599 * is needed to serialize against split_huge_page
600 * when invoked from the VM.
601 */
602 if (!trylock_page(page)) {
603 result = SCAN_PAGE_LOCK;
604 goto out;
605 }
606
607 /*
608 * Check if the page has any GUP (or other external) pins.
609 *
610 * The page table that maps the page has been already unlinked
611 * from the page table tree and this process cannot get
612 * an additional pin on the page.
613 *
614 * New pins can come later if the page is shared across fork,
615 * but not from this process. The other process cannot write to
616 * the page, only trigger CoW.
617 */
618 if (!is_refcount_suitable(page)) {
619 unlock_page(page);
620 result = SCAN_PAGE_COUNT;
621 goto out;
622 }
623
624 /*
625 * Isolate the page to avoid collapsing a hugepage
626 * currently in use by the VM.
627 */
628 if (isolate_lru_page(page)) {
629 unlock_page(page);
630 result = SCAN_DEL_PAGE_LRU;
631 goto out;
632 }
633 mod_node_page_state(page_pgdat(page),
634 NR_ISOLATED_ANON + page_is_file_lru(page),
635 compound_nr(page));
636 VM_BUG_ON_PAGE(!PageLocked(page), page);
637 VM_BUG_ON_PAGE(PageLRU(page), page);
638
639 if (PageCompound(page))
640 list_add_tail(&page->lru, compound_pagelist);
641next:
642 /*
643 * If collapse was initiated by khugepaged, check that there is
644 * enough young ptes to justify collapsing the page.
645 */
646 if (cc->is_khugepaged &&
647 (pte_young(pteval) || page_is_young(page) ||
648 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
649 address)))
650 referenced++;
651
652 if (pte_write(pteval))
653 writable = true;
654 }
655
656 if (unlikely(!writable)) {
657 result = SCAN_PAGE_RO;
658 } else if (unlikely(cc->is_khugepaged && !referenced)) {
659 result = SCAN_LACK_REFERENCED_PAGE;
660 } else {
661 result = SCAN_SUCCEED;
662 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
663 referenced, writable, result);
664 return result;
665 }
666out:
667 release_pte_pages(pte, _pte, compound_pagelist);
668 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
669 referenced, writable, result);
670 return result;
671}
672
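/*
 * Copy the contents of the isolated small pages into @page (the new
 * hugepage), clearing the old ptes and rmap entries along the way, and
 * finally release the old pages back to the LRU.
 */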
673static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
674 struct vm_area_struct *vma,
675 unsigned long address,
676 spinlock_t *ptl,
677 struct list_head *compound_pagelist)
678{
679 struct page *src_page, *tmp;
680 pte_t *_pte;
681 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
682 _pte++, page++, address += PAGE_SIZE) {
683 pte_t pteval = *_pte;
684
685 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
686 clear_user_highpage(page, address);
687 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
688 if (is_zero_pfn(pte_pfn(pteval))) {
689 /*
690 * ptl mostly unnecessary.
691 */
692 spin_lock(ptl);
693 ptep_clear(vma->vm_mm, address, _pte);
694 spin_unlock(ptl);
695 }
696 } else {
697 src_page = pte_page(pteval);
698 copy_user_highpage(page, src_page, address, vma);
699 if (!PageCompound(src_page))
700 release_pte_page(src_page);
701 /*
702 * ptl mostly unnecessary, but preempt has to
703 * be disabled to update the per-cpu stats
704 * inside page_remove_rmap().
705 */
706 spin_lock(ptl);
707 ptep_clear(vma->vm_mm, address, _pte);
708 page_remove_rmap(src_page, vma, false);
709 spin_unlock(ptl);
710 free_page_and_swap_cache(src_page);
711 }
712 }
713
714 list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
715 list_del(&src_page->lru);
716 mod_node_page_state(page_pgdat(src_page),
717 NR_ISOLATED_ANON + page_is_file_lru(src_page),
718 -compound_nr(src_page));
719 unlock_page(src_page);
720 free_swap_cache(src_page);
721 putback_lru_page(src_page);
722 }
723}
724
725static void khugepaged_alloc_sleep(void)
726{
727 DEFINE_WAIT(wait);
728
729 add_wait_queue(&khugepaged_wait, &wait);
730 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
731 schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
732 remove_wait_queue(&khugepaged_wait, &wait);
733}
734
735struct collapse_control khugepaged_collapse_control = {
736 .is_khugepaged = true,
737};
738
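/*
 * Decide whether NUMA placement should abort the scan: with node reclaim
 * enabled, return true if @nid is farther from a node we have already seen
 * pages on than node_reclaim_distance allows.
 */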
739static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
740{
741 int i;
742
743 /*
744 * If node_reclaim_mode is disabled, then no extra effort is made to
745 * allocate memory locally.
746 */
747 if (!node_reclaim_enabled())
748 return false;
749
750 /* If there is a count for this node already, it must be acceptable */
751 if (cc->node_load[nid])
752 return false;
753
754 for (i = 0; i < MAX_NUMNODES; i++) {
755 if (!cc->node_load[i])
756 continue;
757 if (node_distance(nid, i) > node_reclaim_distance)
758 return true;
759 }
760 return false;
761}
762
763#define khugepaged_defrag() \
764 (transparent_hugepage_flags & \
765 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
766
767/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
768static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
769{
770 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
771}
772
773#ifdef CONFIG_NUMA
774static int hpage_collapse_find_target_node(struct collapse_control *cc)
775{
776 int nid, target_node = 0, max_value = 0;
777
778 /* find first node with max normal pages hit */
779 for (nid = 0; nid < MAX_NUMNODES; nid++)
780 if (cc->node_load[nid] > max_value) {
781 max_value = cc->node_load[nid];
782 target_node = nid;
783 }
784
785 for_each_online_node(nid) {
786 if (max_value == cc->node_load[nid])
787 node_set(nid, cc->alloc_nmask);
788 }
789
790 return target_node;
791}
792#else
793static int hpage_collapse_find_target_node(struct collapse_control *cc)
794{
795 return 0;
796}
797#endif
798
799static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
800 nodemask_t *nmask)
801{
802 *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
803 if (unlikely(!*hpage)) {
804 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
805 return false;
806 }
807
808 prep_transhuge_page(*hpage);
809 count_vm_event(THP_COLLAPSE_ALLOC);
810 return true;
811}
812
813/*
814 * If mmap_lock temporarily dropped, revalidate vma
815 * before taking mmap_lock.
816 * Returns enum scan_result value.
817 */
818
819static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
820 bool expect_anon,
821 struct vm_area_struct **vmap,
822 struct collapse_control *cc)
823{
824 struct vm_area_struct *vma;
825
826 if (unlikely(hpage_collapse_test_exit(mm)))
827 return SCAN_ANY_PROCESS;
828
829 *vmap = vma = find_vma(mm, address);
830 if (!vma)
831 return SCAN_VMA_NULL;
832
833 if (!transhuge_vma_suitable(vma, address))
834 return SCAN_ADDRESS_RANGE;
835 if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
836 cc->is_khugepaged))
837 return SCAN_VMA_CHECK;
838 /*
839 * Anon VMA expected; the address may be unmapped and then
840 * remapped to a file after khugepaged re-acquired the mmap_lock.
841 *
842 * hugepage_vma_check may return true for qualified file
843 * vmas.
844 */
845 if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
846 return SCAN_PAGE_ANON;
847 return SCAN_SUCCEED;
848}
849
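/*
 * Look up the pmd covering @address and classify it: missing/bad pmd, none
 * pmd, already PMD-mapped huge page, or a normal pte table suitable for
 * collapse (SCAN_SUCCEED).
 */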
850static int find_pmd_or_thp_or_none(struct mm_struct *mm,
851 unsigned long address,
852 pmd_t **pmd)
853{
854 pmd_t pmde;
855
856 *pmd = mm_find_pmd(mm, address);
857 if (!*pmd)
858 return SCAN_PMD_NULL;
859
860 pmde = pmdp_get_lockless(*pmd);
861
862#ifdef CONFIG_TRANSPARENT_HUGEPAGE
863 /* See comments in pmd_none_or_trans_huge_or_clear_bad() */
864 barrier();
865#endif
866 if (pmd_none(pmde))
867 return SCAN_PMD_NONE;
868 if (pmd_trans_huge(pmde))
869 return SCAN_PMD_MAPPED;
870 if (pmd_bad(pmde))
871 return SCAN_PMD_NULL;
872 return SCAN_SUCCEED;
873}
874
875static int check_pmd_still_valid(struct mm_struct *mm,
876 unsigned long address,
877 pmd_t *pmd)
878{
879 pmd_t *new_pmd;
880 int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
881
882 if (result != SCAN_SUCCEED)
883 return result;
884 if (new_pmd != pmd)
885 return SCAN_FAIL;
886 return SCAN_SUCCEED;
887}
888
889/*
890 * Bring missing pages in from swap, to complete THP collapse.
891 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
892 *
893 * Called and returns without pte mapped or spinlocks held.
894 * Note that on failure (any result other than SCAN_SUCCEED), mmap_lock will have been released.
895 */
896
897static int __collapse_huge_page_swapin(struct mm_struct *mm,
898 struct vm_area_struct *vma,
899 unsigned long haddr, pmd_t *pmd,
900 int referenced)
901{
902 int swapped_in = 0;
903 vm_fault_t ret = 0;
904 unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
905
906 for (address = haddr; address < end; address += PAGE_SIZE) {
907 struct vm_fault vmf = {
908 .vma = vma,
909 .address = address,
910 .pgoff = linear_page_index(vma, haddr),
911 .flags = FAULT_FLAG_ALLOW_RETRY,
912 .pmd = pmd,
913 };
914
915 vmf.pte = pte_offset_map(pmd, address);
916 vmf.orig_pte = *vmf.pte;
917 if (!is_swap_pte(vmf.orig_pte)) {
918 pte_unmap(vmf.pte);
919 continue;
920 }
921 ret = do_swap_page(&vmf);
922
923 /*
924 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
925 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
926 * we do not retry here, so the swap entry will remain in the
927 * pagetable, resulting in a later failure.
928 */
929 if (ret & VM_FAULT_RETRY) {
930 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
931 /* Likely, but not guaranteed, that page lock failed */
932 return SCAN_PAGE_LOCK;
933 }
934 if (ret & VM_FAULT_ERROR) {
935 mmap_read_unlock(mm);
936 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
937 return SCAN_FAIL;
938 }
939 swapped_in++;
940 }
941
942 /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
943 if (swapped_in)
944 lru_add_drain();
945
946 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
947 return SCAN_SUCCEED;
948}
949
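/*
 * Allocate the new hugepage on the node that saw the most scanned pages
 * (falling back to cc->alloc_nmask) and charge it to @mm's memcg. Returns a
 * SCAN_* status.
 */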
950static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
951 struct collapse_control *cc)
952{
953 gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
954 GFP_TRANSHUGE);
955 int node = hpage_collapse_find_target_node(cc);
956
957 if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
958 return SCAN_ALLOC_HUGE_PAGE_FAIL;
959 if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
960 return SCAN_CGROUP_CHARGE_FAIL;
961 count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
962 return SCAN_SUCCEED;
963}
964
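/*
 * Perform the actual anonymous collapse at @address: allocate and charge the
 * hugepage with mmap_lock dropped, revalidate the vma, swap in missing pages
 * if any were seen, then retake mmap_lock in write mode, clear the pmd,
 * isolate and copy the old pages into the hugepage and map it with a huge
 * pmd. Returns a SCAN_* status; mmap_lock is released by the time this
 * returns.
 */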
965static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
966 int referenced, int unmapped,
967 struct collapse_control *cc)
968{
969 LIST_HEAD(compound_pagelist);
970 pmd_t *pmd, _pmd;
971 pte_t *pte;
972 pgtable_t pgtable;
973 struct page *hpage;
974 spinlock_t *pmd_ptl, *pte_ptl;
975 int result = SCAN_FAIL;
976 struct vm_area_struct *vma;
977 struct mmu_notifier_range range;
978
979 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
980
981 /*
982 * Before allocating the hugepage, release the mmap_lock read lock.
983 * The allocation can take potentially a long time if it involves
984 * sync compaction, and we do not need to hold the mmap_lock during
985 * that. We will recheck the vma after taking it again in write mode.
986 */
987 mmap_read_unlock(mm);
988
989 result = alloc_charge_hpage(&hpage, mm, cc);
990 if (result != SCAN_SUCCEED)
991 goto out_nolock;
992
993 mmap_read_lock(mm);
994 result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
995 if (result != SCAN_SUCCEED) {
996 mmap_read_unlock(mm);
997 goto out_nolock;
998 }
999
1000 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1001 if (result != SCAN_SUCCEED) {
1002 mmap_read_unlock(mm);
1003 goto out_nolock;
1004 }
1005
1006 if (unmapped) {
1007 /*
1008 * __collapse_huge_page_swapin will return with mmap_lock
1009 * released when it fails. So we jump to out_nolock directly in
1010 * that case. Continuing to collapse causes inconsistency.
1011 */
1012 result = __collapse_huge_page_swapin(mm, vma, address, pmd,
1013 referenced);
1014 if (result != SCAN_SUCCEED)
1015 goto out_nolock;
1016 }
1017
1018 mmap_read_unlock(mm);
1019 /*
1020 * Prevent all access to the pagetables, with the exception of
1021 * gup_fast (handled later by the ptep_clear_flush) and the VM
1022 * (handled by the anon_vma lock + PG_lock).
1023 */
1024 mmap_write_lock(mm);
1025 result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1026 if (result != SCAN_SUCCEED)
1027 goto out_up_write;
1028 /* check if the pmd is still valid */
1029 result = check_pmd_still_valid(mm, address, pmd);
1030 if (result != SCAN_SUCCEED)
1031 goto out_up_write;
1032
1033 anon_vma_lock_write(vma->anon_vma);
1034
1035 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1036 address, address + HPAGE_PMD_SIZE);
1037 mmu_notifier_invalidate_range_start(&range);
1038
1039 pte = pte_offset_map(pmd, address);
1040 pte_ptl = pte_lockptr(mm, pmd);
1041
1042 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1043 /*
1044 * This removes any huge TLB entry from the CPU so that huge and
1045 * small TLB entries cannot coexist for the same virtual address,
1046 * avoiding the risk of CPU bugs in that area.
1047 *
1048 * Parallel fast GUP is fine since fast GUP will back off when
1049 * it detects PMD is changed.
1050 */
1051 _pmd = pmdp_collapse_flush(vma, address, pmd);
1052 spin_unlock(pmd_ptl);
1053 mmu_notifier_invalidate_range_end(&range);
1054 tlb_remove_table_sync_one();
1055
1056 spin_lock(pte_ptl);
1057 result = __collapse_huge_page_isolate(vma, address, pte, cc,
1058 &compound_pagelist);
1059 spin_unlock(pte_ptl);
1060
1061 if (unlikely(result != SCAN_SUCCEED)) {
1062 pte_unmap(pte);
1063 spin_lock(pmd_ptl);
1064 BUG_ON(!pmd_none(*pmd));
1065 /*
1066 * We can only use set_pmd_at when establishing
1067 * hugepmds and never for establishing regular pmds that
1068 * point to regular pagetables. Use pmd_populate for that.
1069 */
1070 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1071 spin_unlock(pmd_ptl);
1072 anon_vma_unlock_write(vma->anon_vma);
1073 goto out_up_write;
1074 }
1075
1076 /*
1077 * All pages are isolated and locked so anon_vma rmap
1078 * can't run anymore.
1079 */
1080 anon_vma_unlock_write(vma->anon_vma);
1081
1082 __collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
1083 &compound_pagelist);
1084 pte_unmap(pte);
1085 /*
1086 * spin_lock() below is not the equivalent of smp_wmb(), but
1087 * the smp_wmb() inside __SetPageUptodate() can be reused to
1088 * avoid the copy_huge_page writes becoming visible after
1089 * the set_pmd_at() write.
1090 */
1091 __SetPageUptodate(hpage);
1092 pgtable = pmd_pgtable(_pmd);
1093
1094 _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
1095 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1096
1097 spin_lock(pmd_ptl);
1098 BUG_ON(!pmd_none(*pmd));
1099 page_add_new_anon_rmap(hpage, vma, address);
1100 lru_cache_add_inactive_or_unevictable(hpage, vma);
1101 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1102 set_pmd_at(mm, address, pmd, _pmd);
1103 update_mmu_cache_pmd(vma, address, pmd);
1104 spin_unlock(pmd_ptl);
1105
1106 hpage = NULL;
1107
1108 result = SCAN_SUCCEED;
1109out_up_write:
1110 mmap_write_unlock(mm);
1111out_nolock:
1112 if (hpage) {
1113 mem_cgroup_uncharge(page_folio(hpage));
1114 put_page(hpage);
1115 }
1116 trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
1117 return result;
1118}
1119
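/*
 * Scan the HPAGE_PMD_NR ptes at @address and decide whether they are a good
 * candidate for collapse (enough referenced pages, not too many none/swap/
 * shared ptes, no uffd-wp, suitable refcounts). On success this calls
 * collapse_huge_page(), which returns with mmap_lock released, so
 * *mmap_locked is cleared for the caller.
 */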
1120static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1121 struct vm_area_struct *vma,
1122 unsigned long address, bool *mmap_locked,
1123 struct collapse_control *cc)
1124{
1125 pmd_t *pmd;
1126 pte_t *pte, *_pte;
1127 int result = SCAN_FAIL, referenced = 0;
1128 int none_or_zero = 0, shared = 0;
1129 struct page *page = NULL;
1130 unsigned long _address;
1131 spinlock_t *ptl;
1132 int node = NUMA_NO_NODE, unmapped = 0;
1133 bool writable = false;
1134
1135 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1136
1137 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1138 if (result != SCAN_SUCCEED)
1139 goto out;
1140
1141 memset(cc->node_load, 0, sizeof(cc->node_load));
1142 nodes_clear(cc->alloc_nmask);
1143 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1144 for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1145 _pte++, _address += PAGE_SIZE) {
1146 pte_t pteval = *_pte;
1147 if (is_swap_pte(pteval)) {
1148 ++unmapped;
1149 if (!cc->is_khugepaged ||
1150 unmapped <= khugepaged_max_ptes_swap) {
1151 /*
1152 * Always be strict with uffd-wp
1153 * enabled swap entries. Please see
1154 * comment below for pte_uffd_wp().
1155 */
1156 if (pte_swp_uffd_wp(pteval)) {
1157 result = SCAN_PTE_UFFD_WP;
1158 goto out_unmap;
1159 }
1160 continue;
1161 } else {
1162 result = SCAN_EXCEED_SWAP_PTE;
1163 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1164 goto out_unmap;
1165 }
1166 }
1167 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1168 ++none_or_zero;
1169 if (!userfaultfd_armed(vma) &&
1170 (!cc->is_khugepaged ||
1171 none_or_zero <= khugepaged_max_ptes_none)) {
1172 continue;
1173 } else {
1174 result = SCAN_EXCEED_NONE_PTE;
1175 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1176 goto out_unmap;
1177 }
1178 }
1179 if (pte_uffd_wp(pteval)) {
1180 /*
1181 * Don't collapse the page if any of the small
1182 * PTEs are armed with uffd write protection.
1183 * Here we can also mark the new huge pmd as
1184 * write protected if any of the small ones is
1185 * marked, but that could bring unknown
1186 * userfault messages that fall outside of
1187 * the registered range. So, just be simple.
1188 */
1189 result = SCAN_PTE_UFFD_WP;
1190 goto out_unmap;
1191 }
1192 if (pte_write(pteval))
1193 writable = true;
1194
1195 page = vm_normal_page(vma, _address, pteval);
1196 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1197 result = SCAN_PAGE_NULL;
1198 goto out_unmap;
1199 }
1200
1201 if (page_mapcount(page) > 1) {
1202 ++shared;
1203 if (cc->is_khugepaged &&
1204 shared > khugepaged_max_ptes_shared) {
1205 result = SCAN_EXCEED_SHARED_PTE;
1206 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1207 goto out_unmap;
1208 }
1209 }
1210
1211 page = compound_head(page);
1212
1213 /*
1214 * Record which node the original page is from and save this
1215 * information to cc->node_load[].
1216 * Khugepaged will allocate the hugepage from the node that has
1217 * the max hit count.
1218 */
1219 node = page_to_nid(page);
1220 if (hpage_collapse_scan_abort(node, cc)) {
1221 result = SCAN_SCAN_ABORT;
1222 goto out_unmap;
1223 }
1224 cc->node_load[node]++;
1225 if (!PageLRU(page)) {
1226 result = SCAN_PAGE_LRU;
1227 goto out_unmap;
1228 }
1229 if (PageLocked(page)) {
1230 result = SCAN_PAGE_LOCK;
1231 goto out_unmap;
1232 }
1233 if (!PageAnon(page)) {
1234 result = SCAN_PAGE_ANON;
1235 goto out_unmap;
1236 }
1237
1238 /*
1239 * Check if the page has any GUP (or other external) pins.
1240 *
1241 * Here the check may be racy:
1242 * it may see total_mapcount > refcount in some cases.
1243 * But such a case is ephemeral, so we can always retry the
1244 * collapse later. However it may report a false positive if the
1245 * page has excessive GUP pins (i.e. 512). Anyway, the same check
1246 * will be done again later, so the risk seems low.
1247 */
1248 if (!is_refcount_suitable(page)) {
1249 result = SCAN_PAGE_COUNT;
1250 goto out_unmap;
1251 }
1252
1253 /*
1254 * If collapse was initiated by khugepaged, check that there is
1255 * enough young ptes to justify collapsing the page.
1256 */
1257 if (cc->is_khugepaged &&
1258 (pte_young(pteval) || page_is_young(page) ||
1259 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
1260 address)))
1261 referenced++;
1262 }
1263 if (!writable) {
1264 result = SCAN_PAGE_RO;
1265 } else if (cc->is_khugepaged &&
1266 (!referenced ||
1267 (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1268 result = SCAN_LACK_REFERENCED_PAGE;
1269 } else {
1270 result = SCAN_SUCCEED;
1271 }
1272out_unmap:
1273 pte_unmap_unlock(pte, ptl);
1274 if (result == SCAN_SUCCEED) {
1275 result = collapse_huge_page(mm, address, referenced,
1276 unmapped, cc);
1277 /* collapse_huge_page will return with the mmap_lock released */
1278 *mmap_locked = false;
1279 }
1280out:
1281 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1282 none_or_zero, result, unmapped);
1283 return result;
1284}
1285
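/*
 * Called with khugepaged_mm_lock held: if the mm behind @mm_slot has exited,
 * unhash and free the slot and drop the mm_count reference taken in
 * __khugepaged_enter().
 */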
1286static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1287{
1288 struct mm_slot *slot = &mm_slot->slot;
1289 struct mm_struct *mm = slot->mm;
1290
1291 lockdep_assert_held(&khugepaged_mm_lock);
1292
1293 if (hpage_collapse_test_exit(mm)) {
1294 /* free mm_slot */
1295 hash_del(&slot->hash);
1296 list_del(&slot->mm_node);
1297
1298 /*
1299 * Not strictly needed because the mm exited already.
1300 *
1301 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1302 */
1303
1304 /* khugepaged_mm_lock actually not necessary for the below */
1305 mm_slot_free(mm_slot_cache, mm_slot);
1306 mmdrop(mm);
1307 }
1308}
1309
1310#ifdef CONFIG_SHMEM
1311/*
1312 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1313 * khugepaged should try to collapse the page table.
1314 *
1315 * Note that the following race exists:
1316 * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
1317 * emptying A's ->pte_mapped_thp[] array.
1318 * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
1319 * retract_page_tables() finds a VMA in mm_struct A mapping the same extent
1320 * (at virtual address X) and adds an entry (for X) into mm_struct A's
1321 * ->pte_mapped_thp[] array.
1322 * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
1323 * sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
1324 * (for X) into mm_struct A's ->pte_mapped_thp[] array.
1325 * Thus, it's possible the same address is added multiple times for the same
1326 * mm_struct. Should this happen, we'll simply attempt
1327 * collapse_pte_mapped_thp() multiple times for the same address, under the same
1328 * exclusive mmap_lock, and assuming the first call is successful, subsequent
1329 * attempts will return quickly (without grabbing any additional locks) when
1330 * a huge pmd is found in find_pmd_or_thp_or_none(). Since this is a cheap
1331 * check, and since this is a rare occurrence, the cost of preventing this
1332 * "multiple-add" is thought to be more expensive than just handling it, should
1333 * it occur.
1334 */
1335static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1336 unsigned long addr)
1337{
1338 struct khugepaged_mm_slot *mm_slot;
1339 struct mm_slot *slot;
1340 bool ret = false;
1341
1342 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1343
1344 spin_lock(&khugepaged_mm_lock);
1345 slot = mm_slot_lookup(mm_slots_hash, mm);
1346 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
1347 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
1348 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1349 ret = true;
1350 }
1351 spin_unlock(&khugepaged_mm_lock);
1352 return ret;
1353}
1354
1355/* hpage must be locked, and mmap_lock must be held in write */
1356static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1357 pmd_t *pmdp, struct page *hpage)
1358{
1359 struct vm_fault vmf = {
1360 .vma = vma,
1361 .address = addr,
1362 .flags = 0,
1363 .pmd = pmdp,
1364 };
1365
1366 VM_BUG_ON(!PageTransHuge(hpage));
1367 mmap_assert_write_locked(vma->vm_mm);
1368
1369 if (do_set_pmd(&vmf, hpage))
1370 return SCAN_FAIL;
1371
1372 get_page(hpage);
1373 return SCAN_SUCCEED;
1374}
1375
1376/*
1377 * A note about locking:
1378 * Trying to take the page table spinlocks would be useless here because those
1379 * are only used to synchronize:
1380 *
1381 * - modifying terminal entries (ones that point to a data page, not to another
1382 * page table)
1383 * - installing *new* non-terminal entries
1384 *
1385 * Instead, we need roughly the same kind of protection as free_pgtables() or
1386 * mm_take_all_locks() (but only for a single VMA):
1387 * The mmap lock together with this VMA's rmap locks covers all paths towards
1388 * the page table entries we're messing with here, except for hardware page
1389 * table walks and lockless_pages_from_mm().
1390 */
1391static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1392 unsigned long addr, pmd_t *pmdp)
1393{
1394 pmd_t pmd;
1395 struct mmu_notifier_range range;
1396
1397 mmap_assert_write_locked(mm);
1398 if (vma->vm_file)
1399 lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
1400 /*
1401 * All anon_vmas attached to the VMA have the same root and are
1402 * therefore locked by the same lock.
1403 */
1404 if (vma->anon_vma)
1405 lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
1406
1407 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, addr,
1408 addr + HPAGE_PMD_SIZE);
1409 mmu_notifier_invalidate_range_start(&range);
1410 pmd = pmdp_collapse_flush(vma, addr, pmdp);
1411 tlb_remove_table_sync_one();
1412 mmu_notifier_invalidate_range_end(&range);
1413 mm_dec_nr_ptes(mm);
1414 page_table_check_pte_clear_range(mm, addr, pmd);
1415 pte_free(mm, pmd_pgtable(pmd));
1416}
1417
1418/**
1419 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1420 * address haddr.
1421 *
1422 * @mm: process address space where collapse happens
1423 * @addr: THP collapse address
1424 * @install_pmd: If a huge PMD should be installed
1425 *
1426 * This function checks whether all the PTEs in the PMD are pointing to the
1427 * right THP. If so, retract the page table so the THP can refault in with
1428 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
1429 */
1430int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1431 bool install_pmd)
1432{
1433 unsigned long haddr = addr & HPAGE_PMD_MASK;
1434 struct vm_area_struct *vma = vma_lookup(mm, haddr);
1435 struct page *hpage;
1436 pte_t *start_pte, *pte;
1437 pmd_t *pmd;
1438 spinlock_t *ptl;
1439 int count = 0, result = SCAN_FAIL;
1440 int i;
1441
1442 mmap_assert_write_locked(mm);
1443
1444 /* Fast check before locking page if already PMD-mapped */
1445 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1446 if (result == SCAN_PMD_MAPPED)
1447 return result;
1448
1449 if (!vma || !vma->vm_file ||
1450 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1451 return SCAN_VMA_CHECK;
1452
1453 /*
1454 * If we are here, we've succeeded in replacing all the native pages
1455 * in the page cache with a single hugepage. If a mm were to fault-in
1456 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1457 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1458 * analogously elide sysfs THP settings here.
1459 */
1460 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
1461 return SCAN_VMA_CHECK;
1462
1463 /*
1464 * Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings
1465 * that got written to. Without this, we'd have to also lock the
1466 * anon_vma if one exists.
1467 */
1468 if (vma->anon_vma)
1469 return SCAN_VMA_CHECK;
1470
1471 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1472 if (userfaultfd_wp(vma))
1473 return SCAN_PTE_UFFD_WP;
1474
1475 hpage = find_lock_page(vma->vm_file->f_mapping,
1476 linear_page_index(vma, haddr));
1477 if (!hpage)
1478 return SCAN_PAGE_NULL;
1479
1480 if (!PageHead(hpage)) {
1481 result = SCAN_FAIL;
1482 goto drop_hpage;
1483 }
1484
1485 if (compound_order(hpage) != HPAGE_PMD_ORDER) {
1486 result = SCAN_PAGE_COMPOUND;
1487 goto drop_hpage;
1488 }
1489
1490 switch (result) {
1491 case SCAN_SUCCEED:
1492 break;
1493 case SCAN_PMD_NONE:
1494 /*
1495 * In MADV_COLLAPSE path, possible race with khugepaged where
1496 * all pte entries have been removed and pmd cleared. If so,
1497 * skip all the pte checks and just update the pmd mapping.
1498 */
1499 goto maybe_install_pmd;
1500 default:
1501 goto drop_hpage;
1502 }
1503
1504 /*
1505 * We need to lock the mapping so that from here on, only GUP-fast and
1506 * hardware page walks can access the parts of the page tables that
1507 * we're operating on.
1508 * See collapse_and_free_pmd().
1509 */
1510 i_mmap_lock_write(vma->vm_file->f_mapping);
1511
1512 /*
1513 * This spinlock should be unnecessary: Nobody else should be accessing
1514 * the page tables under spinlock protection here, only
1515 * lockless_pages_from_mm() and the hardware page walker can access page
1516 * tables while all the high-level locks are held in write mode.
1517 */
1518 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1519 result = SCAN_FAIL;
1520
1521 /* step 1: check all mapped PTEs are to the right huge page */
1522 for (i = 0, addr = haddr, pte = start_pte;
1523 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1524 struct page *page;
1525
1526 /* empty pte, skip */
1527 if (pte_none(*pte))
1528 continue;
1529
1530 /* page swapped out, abort */
1531 if (!pte_present(*pte)) {
1532 result = SCAN_PTE_NON_PRESENT;
1533 goto abort;
1534 }
1535
1536 page = vm_normal_page(vma, addr, *pte);
1537 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1538 page = NULL;
1539 /*
1540 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1541 * page table, but the new page will not be a subpage of hpage.
1542 */
1543 if (hpage + i != page)
1544 goto abort;
1545 count++;
1546 }
1547
1548 /* step 2: adjust rmap */
1549 for (i = 0, addr = haddr, pte = start_pte;
1550 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1551 struct page *page;
1552
1553 if (pte_none(*pte))
1554 continue;
1555 page = vm_normal_page(vma, addr, *pte);
1556 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1557 goto abort;
1558 page_remove_rmap(page, vma, false);
1559 }
1560
1561 pte_unmap_unlock(start_pte, ptl);
1562
1563 /* step 3: set proper refcount and mm_counters. */
1564 if (count) {
1565 page_ref_sub(hpage, count);
1566 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1567 }
1568
1569 /* step 4: remove pte entries */
1570 collapse_and_free_pmd(mm, vma, haddr, pmd);
1571
1572 i_mmap_unlock_write(vma->vm_file->f_mapping);
1573
1574maybe_install_pmd:
1575 /* step 5: install pmd entry */
1576 result = install_pmd
1577 ? set_huge_pmd(vma, haddr, pmd, hpage)
1578 : SCAN_SUCCEED;
1579
1580drop_hpage:
1581 unlock_page(hpage);
1582 put_page(hpage);
1583 return result;
1584
1585abort:
1586 pte_unmap_unlock(start_pte, ptl);
1587 i_mmap_unlock_write(vma->vm_file->f_mapping);
1588 goto drop_hpage;
1589}
1590
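/*
 * Drain the mm_slot's pte_mapped_thp[] backlog: take mmap_lock via trylock
 * (giving up until the next pass if it is contended) and call
 * collapse_pte_mapped_thp() for each recorded address, then reset the count.
 */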
1591static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
1592{
1593 struct mm_slot *slot = &mm_slot->slot;
1594 struct mm_struct *mm = slot->mm;
1595 int i;
1596
1597 if (likely(mm_slot->nr_pte_mapped_thp == 0))
1598 return;
1599
1600 if (!mmap_write_trylock(mm))
1601 return;
1602
1603 if (unlikely(hpage_collapse_test_exit(mm)))
1604 goto out;
1605
1606 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1607 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
1608
1609out:
1610 mm_slot->nr_pte_mapped_thp = 0;
1611 mmap_write_unlock(mm);
1612}
1613
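/*
 * After a file hugepage has been installed in the page cache, walk every vma
 * mapping @pgoff of @mapping and try to retract the now-redundant pte table:
 * take mmap_lock via trylock, skip vmas with an anon_vma or with uffd-wp
 * armed, and free the pmd's page table. For the MADV_COLLAPSE target mm/addr
 * the huge pmd is also installed. Vmas that cannot be handled now are queued
 * for khugepaged via khugepaged_add_pte_mapped_thp().
 */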
1614static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
1615 struct mm_struct *target_mm,
1616 unsigned long target_addr, struct page *hpage,
1617 struct collapse_control *cc)
1618{
1619 struct vm_area_struct *vma;
1620 int target_result = SCAN_FAIL;
1621
1622 i_mmap_lock_write(mapping);
1623 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1624 int result = SCAN_FAIL;
1625 struct mm_struct *mm = NULL;
1626 unsigned long addr = 0;
1627 pmd_t *pmd;
1628 bool is_target = false;
1629
1630 /*
1631 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1632 * got written to. These VMAs are likely not worth the cost of
1633 * mmap_write_lock(mm), as the PMD mapping is likely to be split
1634 * later.
1635 *
1636 * Note that vma->anon_vma check is racy: it can be set up after
1637 * the check but before we took mmap_lock by the fault path.
1638 * But page lock would prevent establishing any new ptes of the
1639 * page, so we are safe.
1640 *
1641 * An alternative would be to drop the check, but to check that the
1642 * page table is clear before calling pmdp_collapse_flush() under
1643 * ptl. That has a higher chance of recovering the THP for the VMA,
1644 * but also a higher cost. It would also probably require locking
1645 * the anon_vma.
1646 */
1647 if (vma->anon_vma) {
1648 result = SCAN_PAGE_ANON;
1649 goto next;
1650 }
1651 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1652 if (addr & ~HPAGE_PMD_MASK ||
1653 vma->vm_end < addr + HPAGE_PMD_SIZE) {
1654 result = SCAN_VMA_CHECK;
1655 goto next;
1656 }
1657 mm = vma->vm_mm;
1658 is_target = mm == target_mm && addr == target_addr;
1659 result = find_pmd_or_thp_or_none(mm, addr, &pmd);
1660 if (result != SCAN_SUCCEED)
1661 goto next;
1662 /*
1663 * We need exclusive mmap_lock to retract page table.
1664 *
1665 * We use trylock due to lock inversion: we need to acquire
1666 * mmap_lock while holding page lock. Fault path does it in
1667 * reverse order. Trylock is a way to avoid deadlock.
1668 *
1669 * Also, it's not MADV_COLLAPSE's job to collapse other
1670 * mappings - let khugepaged take care of them later.
1671 */
1672 result = SCAN_PTE_MAPPED_HUGEPAGE;
1673 if ((cc->is_khugepaged || is_target) &&
1674 mmap_write_trylock(mm)) {
1675 /*
1676 * When a vma is registered with uffd-wp, we can't
1677 * recycle the pmd pgtable because there can be pte
1678 * markers installed. Skip this vma only, so the rest of the
1679 * mms/vmas can still have the same file mapped hugely; however,
1680 * it will always be mapped with small pages in uffd-wp
1681 * registered ranges.
1682 */
1683 if (hpage_collapse_test_exit(mm)) {
1684 result = SCAN_ANY_PROCESS;
1685 goto unlock_next;
1686 }
1687 if (userfaultfd_wp(vma)) {
1688 result = SCAN_PTE_UFFD_WP;
1689 goto unlock_next;
1690 }
1691 collapse_and_free_pmd(mm, vma, addr, pmd);
1692 if (!cc->is_khugepaged && is_target)
1693 result = set_huge_pmd(vma, addr, pmd, hpage);
1694 else
1695 result = SCAN_SUCCEED;
1696
1697unlock_next:
1698 mmap_write_unlock(mm);
1699 goto next;
1700 }
1701 /*
1702 * Calling context will handle target mm/addr. Otherwise, let
1703 * khugepaged try again later.
1704 */
1705 if (!is_target) {
1706 khugepaged_add_pte_mapped_thp(mm, addr);
1707 continue;
1708 }
1709next:
1710 if (is_target)
1711 target_result = result;
1712 }
1713 i_mmap_unlock_write(mapping);
1714 return target_result;
1715}
1716
1717/**
1718 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1719 *
1720 * @mm: process address space where collapse happens
1721 * @addr: virtual collapse start address
1722 * @file: file that collapse on
1723 * @start: collapse start address
1724 * @cc: collapse context and scratchpad
1725 *
1726 * Basic scheme is simple, details are more complex:
1727 * - allocate and lock a new huge page;
1728 * - scan page cache replacing old pages with the new one
1729 * + swap/gup in pages if necessary;
1730 * + fill in gaps;
1731 * + keep old pages around in case rollback is required;
1732 * - if replacing succeeds:
1733 * + copy data over;
1734 * + free old pages;
1735 * + unlock huge page;
1736 * - if replacing failed;
1737 * + put all pages back and unfreeze them;
1738 * + restore gaps in the page cache;
1739 * + unlock and free huge page;
1740 */
1741static int collapse_file(struct mm_struct *mm, unsigned long addr,
1742 struct file *file, pgoff_t start,
1743 struct collapse_control *cc)
1744{
1745 struct address_space *mapping = file->f_mapping;
1746 struct page *hpage;
1747 pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1748 LIST_HEAD(pagelist);
1749 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1750 int nr_none = 0, result = SCAN_SUCCEED;
1751 bool is_shmem = shmem_file(file);
1752 int nr = 0;
1753
1754 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1755 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1756
1757 result = alloc_charge_hpage(&hpage, mm, cc);
1758 if (result != SCAN_SUCCEED)
1759 goto out;
1760
1761 /*
1762 * Ensure we have slots for all the pages in the range. This is
1763 * almost certainly a no-op because most of the pages must be present
1764 */
1765 do {
1766 xas_lock_irq(&xas);
1767 xas_create_range(&xas);
1768 if (!xas_error(&xas))
1769 break;
1770 xas_unlock_irq(&xas);
1771 if (!xas_nomem(&xas, GFP_KERNEL)) {
1772 result = SCAN_FAIL;
1773 goto out;
1774 }
1775 } while (1);
1776
1777 __SetPageLocked(hpage);
1778 if (is_shmem)
1779 __SetPageSwapBacked(hpage);
1780 hpage->index = start;
1781 hpage->mapping = mapping;
1782
1783 /*
1784 * At this point the hpage is locked and not up-to-date.
1785 * It's safe to insert it into the page cache, because nobody would
1786 * be able to map it or use it in another way until we unlock it.
1787 */
1788
1789 xas_set(&xas, start);
1790 for (index = start; index < end; index++) {
1791 struct page *page = xas_next(&xas);
1792 struct folio *folio;
1793
1794 VM_BUG_ON(index != xas.xa_index);
1795 if (is_shmem) {
1796 if (!page) {
1797 /*
1798 * Stop if extent has been truncated or
1799 * hole-punched, and is now completely
1800 * empty.
1801 */
1802 if (index == start) {
1803 if (!xas_next_entry(&xas, end - 1)) {
1804 result = SCAN_TRUNCATED;
1805 goto xa_locked;
1806 }
1807 xas_set(&xas, index);
1808 }
1809 if (!shmem_charge(mapping->host, 1)) {
1810 result = SCAN_FAIL;
1811 goto xa_locked;
1812 }
1813 xas_store(&xas, hpage);
1814 nr_none++;
1815 continue;
1816 }
1817
1818 if (xa_is_value(page) || !PageUptodate(page)) {
1819 xas_unlock_irq(&xas);
1820 /* swap in or instantiate fallocated page */
1821 if (shmem_get_folio(mapping->host, index,
1822 &folio, SGP_NOALLOC)) {
1823 result = SCAN_FAIL;
1824 goto xa_unlocked;
1825 }
1826 page = folio_file_page(folio, index);
1827 } else if (trylock_page(page)) {
1828 get_page(page);
1829 xas_unlock_irq(&xas);
1830 } else {
1831 result = SCAN_PAGE_LOCK;
1832 goto xa_locked;
1833 }
1834 } else { /* !is_shmem */
1835 if (!page || xa_is_value(page)) {
1836 xas_unlock_irq(&xas);
1837 page_cache_sync_readahead(mapping, &file->f_ra,
1838 file, index,
1839 end - index);
1840 /* drain pagevecs to help isolate_lru_page() */
1841 lru_add_drain();
1842 page = find_lock_page(mapping, index);
1843 if (unlikely(page == NULL)) {
1844 result = SCAN_FAIL;
1845 goto xa_unlocked;
1846 }
1847 } else if (PageDirty(page)) {
1848 /*
1849 * khugepaged only works on read-only fd,
1850 * so this page is dirty because it hasn't
1851 * been flushed since first write. There
1852 * won't be new dirty pages.
1853 *
1854 * Trigger async flush here and hope the
1855 * writeback is done when khugepaged
1856 * revisits this page.
1857 *
1858 * This is a one-off situation. We are not
1859 * forcing writeback in loop.
1860 */
1861 xas_unlock_irq(&xas);
1862 filemap_flush(mapping);
1863 result = SCAN_FAIL;
1864 goto xa_unlocked;
1865 } else if (PageWriteback(page)) {
1866 xas_unlock_irq(&xas);
1867 result = SCAN_FAIL;
1868 goto xa_unlocked;
1869 } else if (trylock_page(page)) {
1870 get_page(page);
1871 xas_unlock_irq(&xas);
1872 } else {
1873 result = SCAN_PAGE_LOCK;
1874 goto xa_locked;
1875 }
1876 }
1877
1878 /*
1879 * The page must be locked, so we can drop the i_pages lock
1880 * without racing with truncate.
1881 */
1882 VM_BUG_ON_PAGE(!PageLocked(page), page);
1883
1884 /* make sure the page is up to date */
1885 if (unlikely(!PageUptodate(page))) {
1886 result = SCAN_FAIL;
1887 goto out_unlock;
1888 }
1889
1890 /*
1891 * If file was truncated then extended, or hole-punched, before
1892 * we locked the first page, then a THP might be there already.
1893 * This will be discovered on the first iteration.
1894 */
1895 if (PageTransCompound(page)) {
1896 struct page *head = compound_head(page);
1897
1898 result = compound_order(head) == HPAGE_PMD_ORDER &&
1899 head->index == start
1900 /* Maybe PMD-mapped */
1901 ? SCAN_PTE_MAPPED_HUGEPAGE
1902 : SCAN_PAGE_COMPOUND;
1903 goto out_unlock;
1904 }
1905
1906 folio = page_folio(page);
1907
1908 if (folio_mapping(folio) != mapping) {
1909 result = SCAN_TRUNCATED;
1910 goto out_unlock;
1911 }
1912
1913 if (!is_shmem && (folio_test_dirty(folio) ||
1914 folio_test_writeback(folio))) {
1915 /*
1916 * khugepaged only works on read-only fd, so this
1917 * page is dirty because it hasn't been flushed
1918 * since first write.
1919 */
1920 result = SCAN_FAIL;
1921 goto out_unlock;
1922 }
1923
1924 if (!folio_isolate_lru(folio)) {
1925 result = SCAN_DEL_PAGE_LRU;
1926 goto out_unlock;
1927 }
1928
1929 if (folio_has_private(folio) &&
1930 !filemap_release_folio(folio, GFP_KERNEL)) {
1931 result = SCAN_PAGE_HAS_PRIVATE;
1932 folio_putback_lru(folio);
1933 goto out_unlock;
1934 }
1935
1936 if (folio_mapped(folio))
1937 try_to_unmap(folio,
1938 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1939
1940 xas_lock_irq(&xas);
1941 xas_set(&xas, index);
1942
1943 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1944
1945 /*
1946 * The page is expected to have page_count() == 3:
1947 * - we hold a pin on it;
1948 * - one reference from page cache;
1949 * - one from isolate_lru_page;
1950 */
1951 if (!page_ref_freeze(page, 3)) {
1952 result = SCAN_PAGE_COUNT;
1953 xas_unlock_irq(&xas);
1954 putback_lru_page(page);
1955 goto out_unlock;
1956 }
1957
1958 /*
1959 * Add the page to the list to be able to undo the collapse if
1960 * something goes wrong.
1961 */
1962 list_add_tail(&page->lru, &pagelist);
1963
1964 /* Finally, replace with the new page. */
1965 xas_store(&xas, hpage);
1966 continue;
1967out_unlock:
1968 unlock_page(page);
1969 put_page(page);
1970 goto xa_unlocked;
1971 }
1972 nr = thp_nr_pages(hpage);
1973
1974 if (is_shmem)
1975 __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
1976 else {
1977 __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
1978 filemap_nr_thps_inc(mapping);
1979 /*
1980 * Paired with smp_mb() in do_dentry_open() to ensure
1981 * i_writecount is up to date and the update to nr_thps is
1982 * visible. Ensures the page cache will be truncated if the
1983 * file is opened writable.
1984 */
1985 smp_mb();
1986 if (inode_is_open_for_write(mapping->host)) {
1987 result = SCAN_FAIL;
1988 __mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
1989 filemap_nr_thps_dec(mapping);
1990 goto xa_locked;
1991 }
1992 }
1993
1994 if (nr_none) {
1995 __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
1996 /* nr_none is always 0 for non-shmem. */
1997 __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
1998 }
1999
2000 /* Join all the small entries into a single multi-index entry */
2001 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2002 xas_store(&xas, hpage);
2003xa_locked:
2004 xas_unlock_irq(&xas);
2005xa_unlocked:
2006
2007 /*
2008 * If collapse is successful, flush must be done now before copying.
2009 * If collapse is unsuccessful, does flush actually need to be done?
2010 * Do it anyway, to clear the state.
2011 */
2012 try_to_unmap_flush();
2013
2014 if (result == SCAN_SUCCEED) {
2015 struct page *page, *tmp;
2016 struct folio *folio;
2017
2018 /*
2019 * Replacing the old pages with the new one has succeeded; now we
2020 * need to copy the content and free the old pages.
2021 */
2022 index = start;
2023 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2024 while (index < page->index) {
2025 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2026 index++;
2027 }
2028 copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
2029 page);
2030 list_del(&page->lru);
2031 page->mapping = NULL;
2032 page_ref_unfreeze(page, 1);
2033 ClearPageActive(page);
2034 ClearPageUnevictable(page);
2035 unlock_page(page);
2036 put_page(page);
2037 index++;
2038 }
2039 while (index < end) {
2040 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2041 index++;
2042 }
2043
2044 folio = page_folio(hpage);
2045 folio_mark_uptodate(folio);
2046 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2047
2048 if (is_shmem)
2049 folio_mark_dirty(folio);
2050 folio_add_lru(folio);
2051
2052 /*
2053 * Remove pte page tables, so we can re-fault the page as huge.
2054 */
2055 result = retract_page_tables(mapping, start, mm, addr, hpage,
2056 cc);
2057 unlock_page(hpage);
2058 hpage = NULL;
2059 } else {
2060 struct page *page;
2061
2062 /* Something went wrong: roll back page cache changes */
2063 xas_lock_irq(&xas);
2064 if (nr_none) {
2065 mapping->nrpages -= nr_none;
2066 shmem_uncharge(mapping->host, nr_none);
2067 }
2068
2069 xas_set(&xas, start);
2070 xas_for_each(&xas, page, end - 1) {
2071 page = list_first_entry_or_null(&pagelist,
2072 struct page, lru);
2073 if (!page || xas.xa_index < page->index) {
2074 if (!nr_none)
2075 break;
2076 nr_none--;
2077 /* Put holes back where they were */
2078 xas_store(&xas, NULL);
2079 continue;
2080 }
2081
2082 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2083
2084 /* Unfreeze the page. */
2085 list_del(&page->lru);
2086 page_ref_unfreeze(page, 2);
2087 xas_store(&xas, page);
2088 xas_pause(&xas);
2089 xas_unlock_irq(&xas);
2090 unlock_page(page);
2091 putback_lru_page(page);
2092 xas_lock_irq(&xas);
2093 }
2094 VM_BUG_ON(nr_none);
2095 xas_unlock_irq(&xas);
2096
2097 hpage->mapping = NULL;
2098 }
2099
2100 if (hpage)
2101 unlock_page(hpage);
2102out:
2103 VM_BUG_ON(!list_empty(&pagelist));
2104 if (hpage) {
2105 mem_cgroup_uncharge(page_folio(hpage));
2106 put_page(hpage);
2107 }
2108
2109 trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
2110 return result;
2111}
2112
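/*
 * Scan one PMD-sized, PMD-aligned extent of the file's page cache starting
 * at @start and decide whether it is worth collapsing. Present pages and
 * swap (value) entries are counted under RCU; the scan bails out early on
 * compound pages, unexpected refcounts, pages off the LRU, NUMA imbalance,
 * or (for khugepaged) when max_ptes_swap is exceeded. If enough pages are
 * present, the actual work is delegated to collapse_file().
 */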
2113static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2114 struct file *file, pgoff_t start,
2115 struct collapse_control *cc)
2116{
2117 struct page *page = NULL;
2118 struct address_space *mapping = file->f_mapping;
2119 XA_STATE(xas, &mapping->i_pages, start);
2120 int present, swap;
2121 int node = NUMA_NO_NODE;
2122 int result = SCAN_SUCCEED;
2123
2124 present = 0;
2125 swap = 0;
2126 memset(cc->node_load, 0, sizeof(cc->node_load));
2127 nodes_clear(cc->alloc_nmask);
2128 rcu_read_lock();
2129 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2130 if (xas_retry(&xas, page))
2131 continue;
2132
2133 if (xa_is_value(page)) {
2134 ++swap;
2135 if (cc->is_khugepaged &&
2136 swap > khugepaged_max_ptes_swap) {
2137 result = SCAN_EXCEED_SWAP_PTE;
2138 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2139 break;
2140 }
2141 continue;
2142 }
2143
2144 /*
2145 * TODO: khugepaged should compact smaller compound pages
2146 * into a PMD sized page
2147 */
2148 if (PageTransCompound(page)) {
2149 struct page *head = compound_head(page);
2150
2151 result = compound_order(head) == HPAGE_PMD_ORDER &&
2152 head->index == start
2153 /* Maybe PMD-mapped */
2154 ? SCAN_PTE_MAPPED_HUGEPAGE
2155 : SCAN_PAGE_COMPOUND;
2156 /*
2157 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2158 * by the caller won't touch the page cache, and so
2159 * it's safe to skip LRU and refcount checks before
2160 * returning.
2161 */
2162 break;
2163 }
2164
2165 node = page_to_nid(page);
2166 if (hpage_collapse_scan_abort(node, cc)) {
2167 result = SCAN_SCAN_ABORT;
2168 break;
2169 }
2170 cc->node_load[node]++;
2171
2172 if (!PageLRU(page)) {
2173 result = SCAN_PAGE_LRU;
2174 break;
2175 }
2176
2177 if (page_count(page) !=
2178 1 + page_mapcount(page) + page_has_private(page)) {
2179 result = SCAN_PAGE_COUNT;
2180 break;
2181 }
2182
2183 /*
2184 * We probably should check if the page is referenced here, but
2185 * nobody would transfer pte_young() to PageReferenced() for us.
2186 * And rmap walk here is just too costly...
2187 */
2188
2189 present++;
2190
2191 if (need_resched()) {
2192 xas_pause(&xas);
2193 cond_resched_rcu();
2194 }
2195 }
2196 rcu_read_unlock();
2197
2198 if (result == SCAN_SUCCEED) {
2199 if (cc->is_khugepaged &&
2200 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2201 result = SCAN_EXCEED_NONE_PTE;
2202 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2203 } else {
2204 result = collapse_file(mm, addr, file, start, cc);
2205 }
2206 }
2207
2208 trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
2209 return result;
2210}
2211#else
2212static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2213 struct file *file, pgoff_t start,
2214 struct collapse_control *cc)
2215{
2216 BUILD_BUG();
2217}
2218
2219static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
2220{
2221}
2222
2223static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
2224 unsigned long addr)
2225{
2226 return false;
2227}
2228#endif
2229
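/*
 * Scan up to @pages worth of ptes starting from the mm tracked in
 * khugepaged_scan, walking its VMAs in HPAGE_PMD_SIZE steps and advancing
 * to the next mm on the list once this one is exhausted or exiting.
 * Entered and exited with khugepaged_mm_lock held, but the lock (and the
 * mmap lock) is dropped while scanning. Returns the progress made; *result
 * holds the status of the last scan or collapse attempt.
 */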
2230static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
2231 struct collapse_control *cc)
2232 __releases(&khugepaged_mm_lock)
2233 __acquires(&khugepaged_mm_lock)
2234{
2235 struct vma_iterator vmi;
2236 struct khugepaged_mm_slot *mm_slot;
2237 struct mm_slot *slot;
2238 struct mm_struct *mm;
2239 struct vm_area_struct *vma;
2240 int progress = 0;
2241
2242 VM_BUG_ON(!pages);
2243 lockdep_assert_held(&khugepaged_mm_lock);
2244 *result = SCAN_FAIL;
2245
2246 if (khugepaged_scan.mm_slot) {
2247 mm_slot = khugepaged_scan.mm_slot;
2248 slot = &mm_slot->slot;
2249 } else {
2250 slot = list_entry(khugepaged_scan.mm_head.next,
2251 struct mm_slot, mm_node);
2252 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2253 khugepaged_scan.address = 0;
2254 khugepaged_scan.mm_slot = mm_slot;
2255 }
2256 spin_unlock(&khugepaged_mm_lock);
2257 khugepaged_collapse_pte_mapped_thps(mm_slot);
2258
2259 mm = slot->mm;
2260 /*
2261 * Don't wait for semaphore (to avoid long wait times). Just move to
2262 * the next mm on the list.
2263 */
2264 vma = NULL;
2265 if (unlikely(!mmap_read_trylock(mm)))
2266 goto breakouterloop_mmap_lock;
2267
2268 progress++;
2269 if (unlikely(hpage_collapse_test_exit(mm)))
2270 goto breakouterloop;
2271
2272 vma_iter_init(&vmi, mm, khugepaged_scan.address);
2273 for_each_vma(vmi, vma) {
2274 unsigned long hstart, hend;
2275
2276 cond_resched();
2277 if (unlikely(hpage_collapse_test_exit(mm))) {
2278 progress++;
2279 break;
2280 }
2281 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
2282skip:
2283 progress++;
2284 continue;
2285 }
2286 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2287 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2288 if (khugepaged_scan.address > hend)
2289 goto skip;
2290 if (khugepaged_scan.address < hstart)
2291 khugepaged_scan.address = hstart;
2292 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2293
2294 while (khugepaged_scan.address < hend) {
2295 bool mmap_locked = true;
2296
2297 cond_resched();
2298 if (unlikely(hpage_collapse_test_exit(mm)))
2299 goto breakouterloop;
2300
2301 VM_BUG_ON(khugepaged_scan.address < hstart ||
2302 khugepaged_scan.address + HPAGE_PMD_SIZE >
2303 hend);
2304 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2305 struct file *file = get_file(vma->vm_file);
2306 pgoff_t pgoff = linear_page_index(vma,
2307 khugepaged_scan.address);
2308
2309 mmap_read_unlock(mm);
2310 *result = hpage_collapse_scan_file(mm,
2311 khugepaged_scan.address,
2312 file, pgoff, cc);
2313 mmap_locked = false;
2314 fput(file);
2315 } else {
2316 *result = hpage_collapse_scan_pmd(mm, vma,
2317 khugepaged_scan.address,
2318 &mmap_locked,
2319 cc);
2320 }
2321 switch (*result) {
2322 case SCAN_PTE_MAPPED_HUGEPAGE: {
2323 pmd_t *pmd;
2324
2325 *result = find_pmd_or_thp_or_none(mm,
2326 khugepaged_scan.address,
2327 &pmd);
2328 if (*result != SCAN_SUCCEED)
2329 break;
2330 if (!khugepaged_add_pte_mapped_thp(mm,
2331 khugepaged_scan.address))
2332 break;
2333 } fallthrough;
2334 case SCAN_SUCCEED:
2335 ++khugepaged_pages_collapsed;
2336 break;
2337 default:
2338 break;
2339 }
2340
2341 /* move to next address */
2342 khugepaged_scan.address += HPAGE_PMD_SIZE;
2343 progress += HPAGE_PMD_NR;
2344 if (!mmap_locked)
2345 /*
2346 * We released mmap_lock so break loop. Note
2347 * that we drop mmap_lock before all hugepage
2348 * allocations, so if allocation fails, we are
2349 * guaranteed to break here and report the
2350 * correct result back to caller.
2351 */
2352 goto breakouterloop_mmap_lock;
2353 if (progress >= pages)
2354 goto breakouterloop;
2355 }
2356 }
2357breakouterloop:
2358 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2359breakouterloop_mmap_lock:
2360
2361 spin_lock(&khugepaged_mm_lock);
2362 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2363 /*
2364 * Release the current mm_slot if this mm is about to die, or
2365 * if we scanned all vmas of this mm.
2366 */
2367 if (hpage_collapse_test_exit(mm) || !vma) {
2368 /*
2369 * Make sure that if mm_users is reaching zero while
2370 * khugepaged runs here, khugepaged_exit will find
2371 * mm_slot not pointing to the exiting mm.
2372 */
2373 if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2374 slot = list_entry(slot->mm_node.next,
2375 struct mm_slot, mm_node);
2376 khugepaged_scan.mm_slot =
2377 mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2378 khugepaged_scan.address = 0;
2379 } else {
2380 khugepaged_scan.mm_slot = NULL;
2381 khugepaged_full_scans++;
2382 }
2383
2384 collect_mm_slot(mm_slot);
2385 }
2386
2387 return progress;
2388}
2389
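/* Work exists iff some mm is queued for scanning and THP is enabled. */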
2390static int khugepaged_has_work(void)
2391{
2392 return !list_empty(&khugepaged_scan.mm_head) &&
2393 hugepage_flags_enabled();
2394}
2395
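/* Wake the idle sleep when an mm is queued or the thread should stop. */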
2396static int khugepaged_wait_event(void)
2397{
2398 return !list_empty(&khugepaged_scan.mm_head) ||
2399 kthread_should_stop();
2400}
2401
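/*
 * One scan pass: keep calling khugepaged_scan_mm_slot() until
 * khugepaged_pages_to_scan ptes have been covered, the head of the mm list
 * has been passed twice, the thread is asked to stop, or huge page
 * allocation fails twice in a row (backing off once via
 * khugepaged_alloc_sleep() in between).
 */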
2402static void khugepaged_do_scan(struct collapse_control *cc)
2403{
2404 unsigned int progress = 0, pass_through_head = 0;
2405 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2406 bool wait = true;
2407 int result = SCAN_SUCCEED;
2408
2409 lru_add_drain_all();
2410
2411 while (true) {
2412 cond_resched();
2413
2414 if (unlikely(kthread_should_stop() || try_to_freeze()))
2415 break;
2416
2417 spin_lock(&khugepaged_mm_lock);
2418 if (!khugepaged_scan.mm_slot)
2419 pass_through_head++;
2420 if (khugepaged_has_work() &&
2421 pass_through_head < 2)
2422 progress += khugepaged_scan_mm_slot(pages - progress,
2423 &result, cc);
2424 else
2425 progress = pages;
2426 spin_unlock(&khugepaged_mm_lock);
2427
2428 if (progress >= pages)
2429 break;
2430
2431 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2432 /*
2433 * If allocation fails the first time, try to sleep for
2434 * a while. If it fails again, cancel the scan.
2435 */
2436 if (!wait)
2437 break;
2438 wait = false;
2439 khugepaged_alloc_sleep();
2440 }
2441 }
2442}
2443
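/* True once the scan sleep has expired or the thread should stop. */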
2444static bool khugepaged_should_wakeup(void)
2445{
2446 return kthread_should_stop() ||
2447 time_after_eq(jiffies, khugepaged_sleep_expire);
2448}
2449
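/*
 * Sleep between scan passes: a freezable, timed sleep of
 * khugepaged_scan_sleep_millisecs while work is queued; otherwise, if THP
 * is enabled, wait until an mm is registered or the thread is told to stop.
 */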
2450static void khugepaged_wait_work(void)
2451{
2452 if (khugepaged_has_work()) {
2453 const unsigned long scan_sleep_jiffies =
2454 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2455
2456 if (!scan_sleep_jiffies)
2457 return;
2458
2459 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2460 wait_event_freezable_timeout(khugepaged_wait,
2461 khugepaged_should_wakeup(),
2462 scan_sleep_jiffies);
2463 return;
2464 }
2465
2466 if (hugepage_flags_enabled())
2467 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2468}
2469
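/*
 * Main loop of the khugepaged kernel thread: alternate scan passes with
 * sleeps until kthread_stop(), then release any mm_slot still held.
 */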
2470static int khugepaged(void *none)
2471{
2472 struct khugepaged_mm_slot *mm_slot;
2473
2474 set_freezable();
2475 set_user_nice(current, MAX_NICE);
2476
2477 while (!kthread_should_stop()) {
2478 khugepaged_do_scan(&khugepaged_collapse_control);
2479 khugepaged_wait_work();
2480 }
2481
2482 spin_lock(&khugepaged_mm_lock);
2483 mm_slot = khugepaged_scan.mm_slot;
2484 khugepaged_scan.mm_slot = NULL;
2485 if (mm_slot)
2486 collect_mm_slot(mm_slot);
2487 spin_unlock(&khugepaged_mm_lock);
2488 return 0;
2489}
2490
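/*
 * Raise min_free_kbytes so that a couple of pageblocks per eligible zone
 * stay free for anti-fragmentation while THP is enabled; otherwise fall
 * back to the default calculation. The watermarks are refreshed either way.
 */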
2491static void set_recommended_min_free_kbytes(void)
2492{
2493 struct zone *zone;
2494 int nr_zones = 0;
2495 unsigned long recommended_min;
2496
2497 if (!hugepage_flags_enabled()) {
2498 calculate_min_free_kbytes();
2499 goto update_wmarks;
2500 }
2501
2502 for_each_populated_zone(zone) {
2503 /*
2504 * We don't need to worry about fragmentation of
2505 * ZONE_MOVABLE since it only has movable pages.
2506 */
2507 if (zone_idx(zone) > gfp_zone(GFP_USER))
2508 continue;
2509
2510 nr_zones++;
2511 }
2512
2513 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2514 recommended_min = pageblock_nr_pages * nr_zones * 2;
2515
2516 /*
2517 * Make sure that on average at least two pageblocks are almost free
2518 * of another type, one for a migratetype to fall back to and a
2519 * second to avoid subsequent fallbacks of other types. There are 3
2520 * MIGRATE_TYPES we care about.
2521 */
2522 recommended_min += pageblock_nr_pages * nr_zones *
2523 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2524
2525 /* don't ever allow reserving more than 5% of the lowmem */
2526 recommended_min = min(recommended_min,
2527 (unsigned long) nr_free_buffer_pages() / 20);
2528 recommended_min <<= (PAGE_SHIFT-10);
2529
2530 if (recommended_min > min_free_kbytes) {
2531 if (user_min_free_kbytes >= 0)
2532 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2533 min_free_kbytes, recommended_min);
2534
2535 min_free_kbytes = recommended_min;
2536 }
2537
2538update_wmarks:
2539 setup_per_zone_wmarks();
2540}
2541
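/*
 * Start or stop the khugepaged thread so that it matches whether THP is
 * enabled, and, on success, refresh the recommended min_free_kbytes.
 * Serialized by khugepaged_mutex.
 */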
2542int start_stop_khugepaged(void)
2543{
2544 int err = 0;
2545
2546 mutex_lock(&khugepaged_mutex);
2547 if (hugepage_flags_enabled()) {
2548 if (!khugepaged_thread)
2549 khugepaged_thread = kthread_run(khugepaged, NULL,
2550 "khugepaged");
2551 if (IS_ERR(khugepaged_thread)) {
2552 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2553 err = PTR_ERR(khugepaged_thread);
2554 khugepaged_thread = NULL;
2555 goto fail;
2556 }
2557
2558 if (!list_empty(&khugepaged_scan.mm_head))
2559 wake_up_interruptible(&khugepaged_wait);
2560 } else if (khugepaged_thread) {
2561 kthread_stop(khugepaged_thread);
2562 khugepaged_thread = NULL;
2563 }
2564 set_recommended_min_free_kbytes();
2565fail:
2566 mutex_unlock(&khugepaged_mutex);
2567 return err;
2568}
2569
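/* Re-apply the recommended min_free_kbytes, but only while khugepaged runs. */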
2570void khugepaged_min_free_kbytes_update(void)
2571{
2572 mutex_lock(&khugepaged_mutex);
2573 if (hugepage_flags_enabled() && khugepaged_thread)
2574 set_recommended_min_free_kbytes();
2575 mutex_unlock(&khugepaged_mutex);
2576}
2577
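/* True if the current task is the khugepaged kernel thread. */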
2578bool current_is_khugepaged(void)
2579{
2580 return kthread_func(current) == khugepaged;
2581}
2582
2583static int madvise_collapse_errno(enum scan_result r)
2584{
2585 /*
2586 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2587 * actionable feedback to the caller, so they may take an appropriate
2588 * fallback measure depending on the nature of the failure.
2589 */
2590 switch (r) {
2591 case SCAN_ALLOC_HUGE_PAGE_FAIL:
2592 return -ENOMEM;
2593 case SCAN_CGROUP_CHARGE_FAIL:
2594 return -EBUSY;
2595 /* Resource temporarily unavailable - trying again might succeed */
2596 case SCAN_PAGE_LOCK:
2597 case SCAN_PAGE_LRU:
2598 case SCAN_DEL_PAGE_LRU:
2599 return -EAGAIN;
2600 /*
2601 * Other: Trying again likely not to succeed / error intrinsic to
2602 * specified memory range. khugepaged likely won't be able to collapse
2603 * either.
2604 */
2605 default:
2606 return -EINVAL;
2607 }
2608}
2609
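/*
 * Synchronous, caller-initiated collapse of the PMD-aligned portion of
 * [start, end) for MADV_COLLAPSE. Each PMD-sized chunk is scanned with the
 * same machinery khugepaged uses, except that cc->is_khugepaged is false so
 * the khugepaged-only scan limits are skipped. Returns 0 only if every
 * chunk ends up (or already was) PMD-mapped; otherwise an errno derived
 * from the last failure via madvise_collapse_errno(). Returns with
 * mmap_lock held, and *prev is cleared if the lock was dropped on the way.
 */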
2610int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2611 unsigned long start, unsigned long end)
2612{
2613 struct collapse_control *cc;
2614 struct mm_struct *mm = vma->vm_mm;
2615 unsigned long hstart, hend, addr;
2616 int thps = 0, last_fail = SCAN_FAIL;
2617 bool mmap_locked = true;
2618
2619 BUG_ON(vma->vm_start > start);
2620 BUG_ON(vma->vm_end < end);
2621
2622 *prev = vma;
2623
2624 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
2625 return -EINVAL;
2626
2627 cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2628 if (!cc)
2629 return -ENOMEM;
2630 cc->is_khugepaged = false;
2631
2632 mmgrab(mm);
2633 lru_add_drain_all();
2634
2635 hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2636 hend = end & HPAGE_PMD_MASK;
2637
2638 for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2639 int result = SCAN_FAIL;
2640
2641 if (!mmap_locked) {
2642 cond_resched();
2643 mmap_read_lock(mm);
2644 mmap_locked = true;
2645 result = hugepage_vma_revalidate(mm, addr, false, &vma,
2646 cc);
2647 if (result != SCAN_SUCCEED) {
2648 last_fail = result;
2649 goto out_nolock;
2650 }
2651
2652 hend = vma->vm_end & HPAGE_PMD_MASK;
2653 }
2654 mmap_assert_locked(mm);
2655 memset(cc->node_load, 0, sizeof(cc->node_load));
2656 nodes_clear(cc->alloc_nmask);
2657 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2658 struct file *file = get_file(vma->vm_file);
2659 pgoff_t pgoff = linear_page_index(vma, addr);
2660
2661 mmap_read_unlock(mm);
2662 mmap_locked = false;
2663 result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2664 cc);
2665 fput(file);
2666 } else {
2667 result = hpage_collapse_scan_pmd(mm, vma, addr,
2668 &mmap_locked, cc);
2669 }
2670 if (!mmap_locked)
2671 *prev = NULL; /* Tell caller we dropped mmap_lock */
2672
2673handle_result:
2674 switch (result) {
2675 case SCAN_SUCCEED:
2676 case SCAN_PMD_MAPPED:
2677 ++thps;
2678 break;
2679 case SCAN_PTE_MAPPED_HUGEPAGE:
2680 BUG_ON(mmap_locked);
2681 BUG_ON(*prev);
2682 mmap_write_lock(mm);
2683 result = collapse_pte_mapped_thp(mm, addr, true);
2684 mmap_write_unlock(mm);
2685 goto handle_result;
2686 /* Whitelisted set of results where continuing is OK */
2687 case SCAN_PMD_NULL:
2688 case SCAN_PTE_NON_PRESENT:
2689 case SCAN_PTE_UFFD_WP:
2690 case SCAN_PAGE_RO:
2691 case SCAN_LACK_REFERENCED_PAGE:
2692 case SCAN_PAGE_NULL:
2693 case SCAN_PAGE_COUNT:
2694 case SCAN_PAGE_LOCK:
2695 case SCAN_PAGE_COMPOUND:
2696 case SCAN_PAGE_LRU:
2697 case SCAN_DEL_PAGE_LRU:
2698 last_fail = result;
2699 break;
2700 default:
2701 last_fail = result;
2702 /* Other error, exit */
2703 goto out_maybelock;
2704 }
2705 }
2706
2707out_maybelock:
2708 /* Caller expects us to hold mmap_lock on return */
2709 if (!mmap_locked)
2710 mmap_read_lock(mm);
2711out_nolock:
2712 mmap_assert_locked(mm);
2713 mmdrop(mm);
2714 kfree(cc);
2715
2716 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2717 : madvise_collapse_errno(last_fail);
2718}