Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Memory Migration functionality - linux/mm/migrate.c
4 *
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 *
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
9 *
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
13 * Christoph Lameter
14 */
15
16#include <linux/migrate.h>
17#include <linux/export.h>
18#include <linux/swap.h>
19#include <linux/swapops.h>
20#include <linux/pagemap.h>
21#include <linux/buffer_head.h>
22#include <linux/mm_inline.h>
23#include <linux/nsproxy.h>
24#include <linux/pagevec.h>
25#include <linux/ksm.h>
26#include <linux/rmap.h>
27#include <linux/topology.h>
28#include <linux/cpu.h>
29#include <linux/cpuset.h>
30#include <linux/writeback.h>
31#include <linux/mempolicy.h>
32#include <linux/vmalloc.h>
33#include <linux/security.h>
34#include <linux/backing-dev.h>
35#include <linux/compaction.h>
36#include <linux/syscalls.h>
37#include <linux/compat.h>
38#include <linux/hugetlb.h>
39#include <linux/hugetlb_cgroup.h>
40#include <linux/gfp.h>
41#include <linux/pfn_t.h>
42#include <linux/memremap.h>
43#include <linux/userfaultfd_k.h>
44#include <linux/balloon_compaction.h>
45#include <linux/page_idle.h>
46#include <linux/page_owner.h>
47#include <linux/sched/mm.h>
48#include <linux/ptrace.h>
49#include <linux/oom.h>
50#include <linux/memory.h>
51#include <linux/random.h>
52#include <linux/sched/sysctl.h>
53#include <linux/memory-tiers.h>
54
55#include <asm/tlbflush.h>
56
57#include <trace/events/migrate.h>
58
59#include "internal.h"
60
61bool isolate_movable_page(struct page *page, isolate_mode_t mode)
62{
63 struct folio *folio = folio_get_nontail_page(page);
64 const struct movable_operations *mops;
65
66 /*
67 * Avoid burning cycles with pages that are yet under __free_pages(),
68 * or just got freed under us.
69 *
 * In case we 'win' a race for a movable page being freed under us and
 * raise its refcount, preventing __free_pages() from doing its job,
 * the folio_put() at the end of this block will take care of
 * releasing this page, thus avoiding a nasty leakage.
74 */
75 if (!folio)
76 goto out;
77
78 if (unlikely(folio_test_slab(folio)))
79 goto out_putfolio;
80 /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
81 smp_rmb();
82 /*
 * Check the movable flag before taking the page lock because
 * we use non-atomic bitops on newly allocated page flags, so
 * unconditionally grabbing the lock would ruin the page owner's use of them.
86 */
87 if (unlikely(!__folio_test_movable(folio)))
88 goto out_putfolio;
89 /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
90 smp_rmb();
91 if (unlikely(folio_test_slab(folio)))
92 goto out_putfolio;
93
94 /*
95 * As movable pages are not isolated from LRU lists, concurrent
96 * compaction threads can race against page migration functions
 * as well as race against a page being released.
98 *
99 * In order to avoid having an already isolated movable page
100 * being (wrongly) re-isolated while it is under migration,
101 * or to avoid attempting to isolate pages being released,
 * let's be sure we have the page lock
103 * before proceeding with the movable page isolation steps.
104 */
105 if (unlikely(!folio_trylock(folio)))
106 goto out_putfolio;
107
108 if (!folio_test_movable(folio) || folio_test_isolated(folio))
109 goto out_no_isolated;
110
111 mops = folio_movable_ops(folio);
112 VM_BUG_ON_FOLIO(!mops, folio);
113
114 if (!mops->isolate_page(&folio->page, mode))
115 goto out_no_isolated;
116
117 /* Driver shouldn't use PG_isolated bit of page->flags */
118 WARN_ON_ONCE(folio_test_isolated(folio));
119 folio_set_isolated(folio);
120 folio_unlock(folio);
121
122 return true;
123
124out_no_isolated:
125 folio_unlock(folio);
126out_putfolio:
127 folio_put(folio);
128out:
129 return false;
130}
131
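/*
 * Hand an isolated non-LRU movable folio back to its driver via the
 * putback_page() callback and clear the isolated flag again.
 */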
132static void putback_movable_folio(struct folio *folio)
133{
134 const struct movable_operations *mops = folio_movable_ops(folio);
135
136 mops->putback_page(&folio->page);
137 folio_clear_isolated(folio);
138}
139
140/*
141 * Put previously isolated pages back onto the appropriate lists
142 * from where they were once taken off for compaction/migration.
143 *
144 * This function shall be used whenever the isolated pageset has been
 * built from lru, balloon, or hugetlbfs pages. See isolate_migratepages_range()
146 * and isolate_hugetlb().
147 */
148void putback_movable_pages(struct list_head *l)
149{
150 struct folio *folio;
151 struct folio *folio2;
152
153 list_for_each_entry_safe(folio, folio2, l, lru) {
154 if (unlikely(folio_test_hugetlb(folio))) {
155 folio_putback_active_hugetlb(folio);
156 continue;
157 }
158 list_del(&folio->lru);
159 /*
 * We isolated a non-lru movable folio so here we can use
 * __PageMovable because an LRU folio's mapping cannot have
 * PAGE_MAPPING_MOVABLE.
163 */
164 if (unlikely(__folio_test_movable(folio))) {
165 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
166 folio_lock(folio);
167 if (folio_test_movable(folio))
168 putback_movable_folio(folio);
169 else
170 folio_clear_isolated(folio);
171 folio_unlock(folio);
172 folio_put(folio);
173 } else {
174 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
175 folio_is_file_lru(folio), -folio_nr_pages(folio));
176 folio_putback_lru(folio);
177 }
178 }
179}
180
181/*
182 * Restore a potential migration pte to a working pte entry
183 */
184static bool remove_migration_pte(struct folio *folio,
185 struct vm_area_struct *vma, unsigned long addr, void *old)
186{
187 DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
188
189 while (page_vma_mapped_walk(&pvmw)) {
190 rmap_t rmap_flags = RMAP_NONE;
191 pte_t pte;
192 swp_entry_t entry;
193 struct page *new;
194 unsigned long idx = 0;
195
196 /* pgoff is invalid for ksm pages, but they are never large */
197 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
198 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
199 new = folio_page(folio, idx);
200
201#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
202 /* PMD-mapped THP migration entry */
203 if (!pvmw.pte) {
204 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205 !folio_test_pmd_mappable(folio), folio);
206 remove_migration_pmd(&pvmw, new);
207 continue;
208 }
209#endif
210
211 folio_get(folio);
212 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
213 if (pte_swp_soft_dirty(*pvmw.pte))
214 pte = pte_mksoft_dirty(pte);
215
216 /*
 * Recheck VMA as permissions may have changed since migration started
218 */
219 entry = pte_to_swp_entry(*pvmw.pte);
220 if (!is_migration_entry_young(entry))
221 pte = pte_mkold(pte);
222 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
223 pte = pte_mkdirty(pte);
224 if (is_writable_migration_entry(entry))
225 pte = maybe_mkwrite(pte, vma);
226 else if (pte_swp_uffd_wp(*pvmw.pte))
227 pte = pte_mkuffd_wp(pte);
228 else
229 pte = pte_wrprotect(pte);
230
231 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
232 rmap_flags |= RMAP_EXCLUSIVE;
233
234 if (unlikely(is_device_private_page(new))) {
235 if (pte_write(pte))
236 entry = make_writable_device_private_entry(
237 page_to_pfn(new));
238 else
239 entry = make_readable_device_private_entry(
240 page_to_pfn(new));
241 pte = swp_entry_to_pte(entry);
242 if (pte_swp_soft_dirty(*pvmw.pte))
243 pte = pte_swp_mksoft_dirty(pte);
244 if (pte_swp_uffd_wp(*pvmw.pte))
245 pte = pte_swp_mkuffd_wp(pte);
246 }
247
248#ifdef CONFIG_HUGETLB_PAGE
249 if (folio_test_hugetlb(folio)) {
250 unsigned int shift = huge_page_shift(hstate_vma(vma));
251
252 pte = pte_mkhuge(pte);
253 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
254 if (folio_test_anon(folio))
255 hugepage_add_anon_rmap(new, vma, pvmw.address,
256 rmap_flags);
257 else
258 page_dup_file_rmap(new, true);
259 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
260 } else
261#endif
262 {
263 if (folio_test_anon(folio))
264 page_add_anon_rmap(new, vma, pvmw.address,
265 rmap_flags);
266 else
267 page_add_file_rmap(new, vma, false);
268 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
269 }
270 if (vma->vm_flags & VM_LOCKED)
271 mlock_drain_local();
272
273 trace_remove_migration_pte(pvmw.address, pte_val(pte),
274 compound_order(new));
275
276 /* No need to invalidate - it was non-present before */
277 update_mmu_cache(vma, pvmw.address, pvmw.pte);
278 }
279
280 return true;
281}
282
283/*
284 * Get rid of all migration entries and replace them by
285 * references to the indicated page.
286 */
287void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
288{
289 struct rmap_walk_control rwc = {
290 .rmap_one = remove_migration_pte,
291 .arg = src,
292 };
293
294 if (locked)
295 rmap_walk_locked(dst, &rwc);
296 else
297 rmap_walk(dst, &rwc);
298}
299
300/*
301 * Something used the pte of a page under migration. We need to
302 * get to the page and wait until migration is finished.
303 * When we return from this function the fault will be retried.
304 */
305void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
306 spinlock_t *ptl)
307{
308 pte_t pte;
309 swp_entry_t entry;
310
311 spin_lock(ptl);
312 pte = *ptep;
313 if (!is_swap_pte(pte))
314 goto out;
315
316 entry = pte_to_swp_entry(pte);
317 if (!is_migration_entry(entry))
318 goto out;
319
320 migration_entry_wait_on_locked(entry, ptep, ptl);
321 return;
322out:
323 pte_unmap_unlock(ptep, ptl);
324}
325
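/*
 * Look up the pte for @address under @pmd and wait for any migration entry
 * found there to be resolved. The pte lock is taken and released by
 * __migration_entry_wait().
 */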
326void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
327 unsigned long address)
328{
329 spinlock_t *ptl = pte_lockptr(mm, pmd);
330 pte_t *ptep = pte_offset_map(pmd, address);
331 __migration_entry_wait(mm, ptep, ptl);
332}
333
334#ifdef CONFIG_HUGETLB_PAGE
335/*
336 * The vma read lock must be held upon entry. Holding that lock prevents either
337 * the pte or the ptl from being freed.
338 *
339 * This function will release the vma lock before returning.
340 */
341void __migration_entry_wait_huge(struct vm_area_struct *vma,
342 pte_t *ptep, spinlock_t *ptl)
343{
344 pte_t pte;
345
346 hugetlb_vma_assert_locked(vma);
347 spin_lock(ptl);
348 pte = huge_ptep_get(ptep);
349
350 if (unlikely(!is_hugetlb_entry_migration(pte))) {
351 spin_unlock(ptl);
352 hugetlb_vma_unlock_read(vma);
353 } else {
354 /*
355 * If migration entry existed, safe to release vma lock
356 * here because the pgtable page won't be freed without the
357 * pgtable lock released. See comment right above pgtable
358 * lock release in migration_entry_wait_on_locked().
359 */
360 hugetlb_vma_unlock_read(vma);
361 migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
362 }
363}
364
365void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
366{
367 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);
368
369 __migration_entry_wait_huge(vma, pte, ptl);
370}
371#endif
372
373#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
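/*
 * Wait for the PMD migration entry at @pmd to be resolved before the fault
 * is retried. The pmd lock is taken here and released either below or by
 * migration_entry_wait_on_locked().
 */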
374void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
375{
376 spinlock_t *ptl;
377
378 ptl = pmd_lock(mm, pmd);
379 if (!is_pmd_migration_entry(*pmd))
380 goto unlock;
381 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
382 return;
383unlock:
384 spin_unlock(ptl);
385}
386#endif
387
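/*
 * The expected folio reference count: one reference held by the migration
 * caller (from isolation), plus, for folios with a mapping, one per base
 * page for the page cache and one more if private data (e.g. buffer heads)
 * is attached.
 */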
388static int folio_expected_refs(struct address_space *mapping,
389 struct folio *folio)
390{
391 int refs = 1;
392 if (!mapping)
393 return refs;
394
395 refs += folio_nr_pages(folio);
396 if (folio_test_private(folio))
397 refs++;
398
399 return refs;
400}
401
402/*
403 * Replace the page in the mapping.
404 *
405 * The number of remaining references must be:
406 * 1 for anonymous pages without a mapping
407 * 2 for pages with a mapping
408 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
409 */
410int folio_migrate_mapping(struct address_space *mapping,
411 struct folio *newfolio, struct folio *folio, int extra_count)
412{
413 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
414 struct zone *oldzone, *newzone;
415 int dirty;
416 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
417 long nr = folio_nr_pages(folio);
418
419 if (!mapping) {
420 /* Anonymous page without mapping */
421 if (folio_ref_count(folio) != expected_count)
422 return -EAGAIN;
423
424 /* No turning back from here */
425 newfolio->index = folio->index;
426 newfolio->mapping = folio->mapping;
427 if (folio_test_swapbacked(folio))
428 __folio_set_swapbacked(newfolio);
429
430 return MIGRATEPAGE_SUCCESS;
431 }
432
433 oldzone = folio_zone(folio);
434 newzone = folio_zone(newfolio);
435
436 xas_lock_irq(&xas);
437 if (!folio_ref_freeze(folio, expected_count)) {
438 xas_unlock_irq(&xas);
439 return -EAGAIN;
440 }
441
442 /*
443 * Now we know that no one else is looking at the folio:
444 * no turning back from here.
445 */
446 newfolio->index = folio->index;
447 newfolio->mapping = folio->mapping;
448 folio_ref_add(newfolio, nr); /* add cache reference */
449 if (folio_test_swapbacked(folio)) {
450 __folio_set_swapbacked(newfolio);
451 if (folio_test_swapcache(folio)) {
452 folio_set_swapcache(newfolio);
453 newfolio->private = folio_get_private(folio);
454 }
455 } else {
456 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
457 }
458
459 /* Move dirty while page refs frozen and newpage not yet exposed */
460 dirty = folio_test_dirty(folio);
461 if (dirty) {
462 folio_clear_dirty(folio);
463 folio_set_dirty(newfolio);
464 }
465
466 xas_store(&xas, newfolio);
467
468 /*
469 * Drop cache reference from old page by unfreezing
470 * to one less reference.
471 * We know this isn't the last reference.
472 */
473 folio_ref_unfreeze(folio, expected_count - nr);
474
475 xas_unlock(&xas);
476 /* Leave irq disabled to prevent preemption while updating stats */
477
478 /*
479 * If moved to a different zone then also account
480 * the page for that zone. Other VM counters will be
481 * taken care of when we establish references to the
482 * new page and drop references to the old page.
483 *
484 * Note that anonymous pages are accounted for
485 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
486 * are mapped to swap space.
487 */
488 if (newzone != oldzone) {
489 struct lruvec *old_lruvec, *new_lruvec;
490 struct mem_cgroup *memcg;
491
492 memcg = folio_memcg(folio);
493 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
494 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
495
496 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
497 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
498 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
499 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
500 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
501 }
502#ifdef CONFIG_SWAP
503 if (folio_test_swapcache(folio)) {
504 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
505 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
506 }
507#endif
508 if (dirty && mapping_can_writeback(mapping)) {
509 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
510 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
511 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
512 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
513 }
514 }
515 local_irq_enable();
516
517 return MIGRATEPAGE_SUCCESS;
518}
519EXPORT_SYMBOL(folio_migrate_mapping);
520
521/*
522 * The expected number of remaining references is the same as that
523 * of folio_migrate_mapping().
524 */
525int migrate_huge_page_move_mapping(struct address_space *mapping,
526 struct folio *dst, struct folio *src)
527{
528 XA_STATE(xas, &mapping->i_pages, folio_index(src));
529 int expected_count;
530
531 xas_lock_irq(&xas);
532 expected_count = 2 + folio_has_private(src);
533 if (!folio_ref_freeze(src, expected_count)) {
534 xas_unlock_irq(&xas);
535 return -EAGAIN;
536 }
537
538 dst->index = src->index;
539 dst->mapping = src->mapping;
540
541 folio_get(dst);
542
543 xas_store(&xas, dst);
544
545 folio_ref_unfreeze(src, expected_count - 1);
546
547 xas_unlock_irq(&xas);
548
549 return MIGRATEPAGE_SUCCESS;
550}
551
552/*
553 * Copy the flags and some other ancillary information
554 */
555void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
556{
557 int cpupid;
558
559 if (folio_test_error(folio))
560 folio_set_error(newfolio);
561 if (folio_test_referenced(folio))
562 folio_set_referenced(newfolio);
563 if (folio_test_uptodate(folio))
564 folio_mark_uptodate(newfolio);
565 if (folio_test_clear_active(folio)) {
566 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
567 folio_set_active(newfolio);
568 } else if (folio_test_clear_unevictable(folio))
569 folio_set_unevictable(newfolio);
570 if (folio_test_workingset(folio))
571 folio_set_workingset(newfolio);
572 if (folio_test_checked(folio))
573 folio_set_checked(newfolio);
574 /*
575 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
 * migration entries. We can still have PG_anon_exclusive set on the
 * effectively unmapped and unreferenced first sub-page of an
 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
579 */
580 if (folio_test_mappedtodisk(folio))
581 folio_set_mappedtodisk(newfolio);
582
583 /* Move dirty on pages not done by folio_migrate_mapping() */
584 if (folio_test_dirty(folio))
585 folio_set_dirty(newfolio);
586
587 if (folio_test_young(folio))
588 folio_set_young(newfolio);
589 if (folio_test_idle(folio))
590 folio_set_idle(newfolio);
591
592 /*
593 * Copy NUMA information to the new page, to prevent over-eager
594 * future migrations of this same page.
595 */
596 cpupid = page_cpupid_xchg_last(&folio->page, -1);
597 /*
 * In memory tiering mode, when migrating between a slow and a fast
 * memory node, reset cpupid, because it is used to record
 * page access time on the slow memory node.
601 */
602 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
603 bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
604 bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
605
606 if (f_toptier != t_toptier)
607 cpupid = -1;
608 }
609 page_cpupid_xchg_last(&newfolio->page, cpupid);
610
611 folio_migrate_ksm(newfolio, folio);
612 /*
613 * Please do not reorder this without considering how mm/ksm.c's
614 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
615 */
616 if (folio_test_swapcache(folio))
617 folio_clear_swapcache(folio);
618 folio_clear_private(folio);
619
620 /* page->private contains hugetlb specific flags */
621 if (!folio_test_hugetlb(folio))
622 folio->private = NULL;
623
624 /*
625 * If any waiters have accumulated on the new page then
626 * wake them up.
627 */
628 if (folio_test_writeback(newfolio))
629 folio_end_writeback(newfolio);
630
631 /*
 * PG_readahead shares the same bit with PG_reclaim. The
 * folio_end_writeback() above may clear PG_readahead mistakenly, so set
 * the bit after that.
635 */
636 if (folio_test_readahead(folio))
637 folio_set_readahead(newfolio);
638
639 folio_copy_owner(newfolio, folio);
640
641 if (!folio_test_hugetlb(folio))
642 mem_cgroup_migrate(folio, newfolio);
643}
644EXPORT_SYMBOL(folio_migrate_flags);
645
646void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
647{
648 folio_copy(newfolio, folio);
649 folio_migrate_flags(newfolio, folio);
650}
651EXPORT_SYMBOL(folio_migrate_copy);
652
653/************************************************************
654 * Migration functions
655 ***********************************************************/
656
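/*
 * Like migrate_folio(), but tolerates @extra_count additional references
 * to @src held by the caller; they are folded into the expected reference
 * count checked by folio_migrate_mapping().
 */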
657int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
658 struct folio *src, enum migrate_mode mode, int extra_count)
659{
660 int rc;
661
662 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
663
664 rc = folio_migrate_mapping(mapping, dst, src, extra_count);
665
666 if (rc != MIGRATEPAGE_SUCCESS)
667 return rc;
668
669 if (mode != MIGRATE_SYNC_NO_COPY)
670 folio_migrate_copy(dst, src);
671 else
672 folio_migrate_flags(dst, src);
673 return MIGRATEPAGE_SUCCESS;
674}
675
676/**
677 * migrate_folio() - Simple folio migration.
678 * @mapping: The address_space containing the folio.
679 * @dst: The folio to migrate the data to.
680 * @src: The folio containing the current data.
681 * @mode: How to migrate the page.
682 *
683 * Common logic to directly migrate a single LRU folio suitable for
684 * folios that do not use PagePrivate/PagePrivate2.
685 *
686 * Folios are locked upon entry and exit.
687 */
688int migrate_folio(struct address_space *mapping, struct folio *dst,
689 struct folio *src, enum migrate_mode mode)
690{
691 return migrate_folio_extra(mapping, dst, src, mode, 0);
692}
693EXPORT_SYMBOL(migrate_folio);
694
695#ifdef CONFIG_BLOCK
696/* Returns true if all buffers are successfully locked */
697static bool buffer_migrate_lock_buffers(struct buffer_head *head,
698 enum migrate_mode mode)
699{
700 struct buffer_head *bh = head;
701
702 /* Simple case, sync compaction */
703 if (mode != MIGRATE_ASYNC) {
704 do {
705 lock_buffer(bh);
706 bh = bh->b_this_page;
707
708 } while (bh != head);
709
710 return true;
711 }
712
713 /* async case, we cannot block on lock_buffer so use trylock_buffer */
714 do {
715 if (!trylock_buffer(bh)) {
716 /*
717 * We failed to lock the buffer and cannot stall in
718 * async migration. Release the taken locks
719 */
720 struct buffer_head *failed_bh = bh;
721 bh = head;
722 while (bh != failed_bh) {
723 unlock_buffer(bh);
724 bh = bh->b_this_page;
725 }
726 return false;
727 }
728
729 bh = bh->b_this_page;
730 } while (bh != head);
731 return true;
732}
733
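/*
 * Common implementation for buffer_migrate_folio() and
 * buffer_migrate_folio_norefs(). With @check_refs set, additionally verify
 * under mapping->private_lock that no buffer heads are still referenced
 * before moving them over to the new folio.
 */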
734static int __buffer_migrate_folio(struct address_space *mapping,
735 struct folio *dst, struct folio *src, enum migrate_mode mode,
736 bool check_refs)
737{
738 struct buffer_head *bh, *head;
739 int rc;
740 int expected_count;
741
742 head = folio_buffers(src);
743 if (!head)
744 return migrate_folio(mapping, dst, src, mode);
745
746 /* Check whether page does not have extra refs before we do more work */
747 expected_count = folio_expected_refs(mapping, src);
748 if (folio_ref_count(src) != expected_count)
749 return -EAGAIN;
750
751 if (!buffer_migrate_lock_buffers(head, mode))
752 return -EAGAIN;
753
754 if (check_refs) {
755 bool busy;
756 bool invalidated = false;
757
758recheck_buffers:
759 busy = false;
760 spin_lock(&mapping->private_lock);
761 bh = head;
762 do {
763 if (atomic_read(&bh->b_count)) {
764 busy = true;
765 break;
766 }
767 bh = bh->b_this_page;
768 } while (bh != head);
769 if (busy) {
770 if (invalidated) {
771 rc = -EAGAIN;
772 goto unlock_buffers;
773 }
774 spin_unlock(&mapping->private_lock);
775 invalidate_bh_lrus();
776 invalidated = true;
777 goto recheck_buffers;
778 }
779 }
780
781 rc = folio_migrate_mapping(mapping, dst, src, 0);
782 if (rc != MIGRATEPAGE_SUCCESS)
783 goto unlock_buffers;
784
785 folio_attach_private(dst, folio_detach_private(src));
786
787 bh = head;
788 do {
789 set_bh_page(bh, &dst->page, bh_offset(bh));
790 bh = bh->b_this_page;
791 } while (bh != head);
792
793 if (mode != MIGRATE_SYNC_NO_COPY)
794 folio_migrate_copy(dst, src);
795 else
796 folio_migrate_flags(dst, src);
797
798 rc = MIGRATEPAGE_SUCCESS;
799unlock_buffers:
800 if (check_refs)
801 spin_unlock(&mapping->private_lock);
802 bh = head;
803 do {
804 unlock_buffer(bh);
805 bh = bh->b_this_page;
806 } while (bh != head);
807
808 return rc;
809}
810
811/**
812 * buffer_migrate_folio() - Migration function for folios with buffers.
813 * @mapping: The address space containing @src.
814 * @dst: The folio to migrate to.
815 * @src: The folio to migrate from.
816 * @mode: How to migrate the folio.
817 *
818 * This function can only be used if the underlying filesystem guarantees
819 * that no other references to @src exist. For example attached buffer
820 * heads are accessed only under the folio lock. If your filesystem cannot
821 * provide this guarantee, buffer_migrate_folio_norefs() may be more
822 * appropriate.
823 *
824 * Return: 0 on success or a negative errno on failure.
825 */
826int buffer_migrate_folio(struct address_space *mapping,
827 struct folio *dst, struct folio *src, enum migrate_mode mode)
828{
829 return __buffer_migrate_folio(mapping, dst, src, mode, false);
830}
831EXPORT_SYMBOL(buffer_migrate_folio);
832
833/**
834 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
835 * @mapping: The address space containing @src.
836 * @dst: The folio to migrate to.
837 * @src: The folio to migrate from.
838 * @mode: How to migrate the folio.
839 *
840 * Like buffer_migrate_folio() except that this variant is more careful
841 * and checks that there are also no buffer head references. This function
842 * is the right one for mappings where buffer heads are directly looked
843 * up and referenced (such as block device mappings).
844 *
845 * Return: 0 on success or a negative errno on failure.
846 */
847int buffer_migrate_folio_norefs(struct address_space *mapping,
848 struct folio *dst, struct folio *src, enum migrate_mode mode)
849{
850 return __buffer_migrate_folio(mapping, dst, src, mode, true);
851}
852EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
853#endif
854
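/**
 * filemap_migrate_folio() - Migration function for folios with private data.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like migrate_folio(), but @src's private data is carried over to @dst
 * before the contents and flags are copied.
 *
 * Return: 0 on success or a negative errno on failure.
 */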
855int filemap_migrate_folio(struct address_space *mapping,
856 struct folio *dst, struct folio *src, enum migrate_mode mode)
857{
858 int ret;
859
860 ret = folio_migrate_mapping(mapping, dst, src, 0);
861 if (ret != MIGRATEPAGE_SUCCESS)
862 return ret;
863
864 if (folio_get_private(src))
865 folio_attach_private(dst, folio_detach_private(src));
866
867 if (mode != MIGRATE_SYNC_NO_COPY)
868 folio_migrate_copy(dst, src);
869 else
870 folio_migrate_flags(dst, src);
871 return MIGRATEPAGE_SUCCESS;
872}
873EXPORT_SYMBOL_GPL(filemap_migrate_folio);
874
875/*
876 * Writeback a folio to clean the dirty state
877 */
878static int writeout(struct address_space *mapping, struct folio *folio)
879{
880 struct writeback_control wbc = {
881 .sync_mode = WB_SYNC_NONE,
882 .nr_to_write = 1,
883 .range_start = 0,
884 .range_end = LLONG_MAX,
885 .for_reclaim = 1
886 };
887 int rc;
888
889 if (!mapping->a_ops->writepage)
890 /* No write method for the address space */
891 return -EINVAL;
892
893 if (!folio_clear_dirty_for_io(folio))
894 /* Someone else already triggered a write */
895 return -EAGAIN;
896
897 /*
898 * A dirty folio may imply that the underlying filesystem has
899 * the folio on some queue. So the folio must be clean for
900 * migration. Writeout may mean we lose the lock and the
901 * folio state is no longer what we checked for earlier.
902 * At this point we know that the migration attempt cannot
903 * be successful.
904 */
905 remove_migration_ptes(folio, folio, false);
906
907 rc = mapping->a_ops->writepage(&folio->page, &wbc);
908
909 if (rc != AOP_WRITEPAGE_ACTIVATE)
910 /* unlocked. Relock */
911 folio_lock(folio);
912
913 return (rc < 0) ? -EIO : -EAGAIN;
914}
915
916/*
917 * Default handling if a filesystem does not provide a migration function.
918 */
919static int fallback_migrate_folio(struct address_space *mapping,
920 struct folio *dst, struct folio *src, enum migrate_mode mode)
921{
922 if (folio_test_dirty(src)) {
923 /* Only writeback folios in full synchronous migration */
924 switch (mode) {
925 case MIGRATE_SYNC:
926 case MIGRATE_SYNC_NO_COPY:
927 break;
928 default:
929 return -EBUSY;
930 }
931 return writeout(mapping, src);
932 }
933
934 /*
935 * Buffers may be managed in a filesystem specific way.
936 * We must have no buffers or drop them.
937 */
938 if (folio_test_private(src) &&
939 !filemap_release_folio(src, GFP_KERNEL))
940 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
941
942 return migrate_folio(mapping, dst, src, mode);
943}
944
945/*
946 * Move a page to a newly allocated page
947 * The page is locked and all ptes have been successfully removed.
948 *
949 * The new page will have replaced the old page if this function
950 * is successful.
951 *
952 * Return value:
953 * < 0 - error code
954 * MIGRATEPAGE_SUCCESS - success
955 */
956static int move_to_new_folio(struct folio *dst, struct folio *src,
957 enum migrate_mode mode)
958{
959 int rc = -EAGAIN;
960 bool is_lru = !__PageMovable(&src->page);
961
962 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
963 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
964
965 if (likely(is_lru)) {
966 struct address_space *mapping = folio_mapping(src);
967
968 if (!mapping)
969 rc = migrate_folio(mapping, dst, src, mode);
970 else if (mapping->a_ops->migrate_folio)
971 /*
972 * Most folios have a mapping and most filesystems
973 * provide a migrate_folio callback. Anonymous folios
974 * are part of swap space which also has its own
975 * migrate_folio callback. This is the most common path
976 * for page migration.
977 */
978 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
979 mode);
980 else
981 rc = fallback_migrate_folio(mapping, dst, src, mode);
982 } else {
983 const struct movable_operations *mops;
984
985 /*
986 * In case of non-lru page, it could be released after
987 * isolation step. In that case, we shouldn't try migration.
988 */
989 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
990 if (!folio_test_movable(src)) {
991 rc = MIGRATEPAGE_SUCCESS;
992 folio_clear_isolated(src);
993 goto out;
994 }
995
996 mops = folio_movable_ops(src);
997 rc = mops->migrate_page(&dst->page, &src->page, mode);
998 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
999 !folio_test_isolated(src));
1000 }
1001
1002 /*
1003 * When successful, old pagecache src->mapping must be cleared before
1004 * src is freed; but stats require that PageAnon be left as PageAnon.
1005 */
1006 if (rc == MIGRATEPAGE_SUCCESS) {
1007 if (__PageMovable(&src->page)) {
1008 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1009
1010 /*
1011 * We clear PG_movable under page_lock so any compactor
1012 * cannot try to migrate this page.
1013 */
1014 folio_clear_isolated(src);
1015 }
1016
1017 /*
 * Anonymous and movable src->mapping will be cleared by
 * free_pages_prepare() so don't reset it here; keeping it
 * lets type checks such as PageAnon() keep working.
1021 */
1022 if (!folio_mapping_flags(src))
1023 src->mapping = NULL;
1024
1025 if (likely(!folio_is_zone_device(dst)))
1026 flush_dcache_folio(dst);
1027 }
1028out:
1029 return rc;
1030}
1031
1032/*
1033 * To record some information during migration, we use some unused
1034 * fields (mapping and private) of struct folio of the newly allocated
1035 * destination folio. This is safe because nobody is using them
1036 * except us.
1037 */
1038union migration_ptr {
1039 struct anon_vma *anon_vma;
1040 struct address_space *mapping;
1041};
1042static void __migrate_folio_record(struct folio *dst,
1043 unsigned long page_was_mapped,
1044 struct anon_vma *anon_vma)
1045{
1046 union migration_ptr ptr = { .anon_vma = anon_vma };
1047 dst->mapping = ptr.mapping;
1048 dst->private = (void *)page_was_mapped;
1049}
1050
1051static void __migrate_folio_extract(struct folio *dst,
1052 int *page_was_mappedp,
1053 struct anon_vma **anon_vmap)
1054{
1055 union migration_ptr ptr = { .mapping = dst->mapping };
1056 *anon_vmap = ptr.anon_vma;
1057 *page_was_mappedp = (unsigned long)dst->private;
1058 dst->mapping = NULL;
1059 dst->private = NULL;
1060}
1061
1062/* Restore the source folio to the original state upon failure */
1063static void migrate_folio_undo_src(struct folio *src,
1064 int page_was_mapped,
1065 struct anon_vma *anon_vma,
1066 bool locked,
1067 struct list_head *ret)
1068{
1069 if (page_was_mapped)
1070 remove_migration_ptes(src, src, false);
1071 /* Drop an anon_vma reference if we took one */
1072 if (anon_vma)
1073 put_anon_vma(anon_vma);
1074 if (locked)
1075 folio_unlock(src);
1076 if (ret)
1077 list_move_tail(&src->lru, ret);
1078}
1079
1080/* Restore the destination folio to the original state upon failure */
1081static void migrate_folio_undo_dst(struct folio *dst,
1082 bool locked,
1083 free_page_t put_new_page,
1084 unsigned long private)
1085{
1086 if (locked)
1087 folio_unlock(dst);
1088 if (put_new_page)
1089 put_new_page(&dst->page, private);
1090 else
1091 folio_put(dst);
1092}
1093
1094/* Cleanup src folio upon migration success */
1095static void migrate_folio_done(struct folio *src,
1096 enum migrate_reason reason)
1097{
1098 /*
 * Compaction can also migrate non-LRU pages, which are
 * not accounted to NR_ISOLATED_*. They can be recognized
 * as __PageMovable.
1102 */
1103 if (likely(!__folio_test_movable(src)))
1104 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1105 folio_is_file_lru(src), -folio_nr_pages(src));
1106
1107 if (reason != MR_MEMORY_FAILURE)
1108 /* We release the page in page_handle_poison. */
1109 folio_put(src);
1110}
1111
1112/* Obtain the lock on page, remove all ptes. */
1113static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
1114 unsigned long private, struct folio *src,
1115 struct folio **dstp, enum migrate_mode mode,
1116 enum migrate_reason reason, struct list_head *ret)
1117{
1118 struct folio *dst;
1119 int rc = -EAGAIN;
1120 struct page *newpage = NULL;
1121 int page_was_mapped = 0;
1122 struct anon_vma *anon_vma = NULL;
1123 bool is_lru = !__PageMovable(&src->page);
1124 bool locked = false;
1125 bool dst_locked = false;
1126
1127 if (folio_ref_count(src) == 1) {
1128 /* Folio was freed from under us. So we are done. */
1129 folio_clear_active(src);
1130 folio_clear_unevictable(src);
1131 /* free_pages_prepare() will clear PG_isolated. */
1132 list_del(&src->lru);
1133 migrate_folio_done(src, reason);
1134 return MIGRATEPAGE_SUCCESS;
1135 }
1136
1137 newpage = get_new_page(&src->page, private);
1138 if (!newpage)
1139 return -ENOMEM;
1140 dst = page_folio(newpage);
1141 *dstp = dst;
1142
1143 dst->private = NULL;
1144
1145 if (!folio_trylock(src)) {
1146 if (mode == MIGRATE_ASYNC)
1147 goto out;
1148
1149 /*
1150 * It's not safe for direct compaction to call lock_page.
1151 * For example, during page readahead pages are added locked
1152 * to the LRU. Later, when the IO completes the pages are
1153 * marked uptodate and unlocked. However, the queueing
1154 * could be merging multiple pages for one bio (e.g.
1155 * mpage_readahead). If an allocation happens for the
1156 * second or third page, the process can end up locking
1157 * the same page twice and deadlocking. Rather than
1158 * trying to be clever about what pages can be locked,
1159 * avoid the use of lock_page for direct compaction
1160 * altogether.
1161 */
1162 if (current->flags & PF_MEMALLOC)
1163 goto out;
1164
1165 folio_lock(src);
1166 }
1167 locked = true;
1168
1169 if (folio_test_writeback(src)) {
1170 /*
1171 * Only in the case of a full synchronous migration is it
1172 * necessary to wait for PageWriteback. In the async case,
1173 * the retry loop is too short and in the sync-light case,
1174 * the overhead of stalling is too much
1175 */
1176 switch (mode) {
1177 case MIGRATE_SYNC:
1178 case MIGRATE_SYNC_NO_COPY:
1179 break;
1180 default:
1181 rc = -EBUSY;
1182 goto out;
1183 }
1184 folio_wait_writeback(src);
1185 }
1186
1187 /*
1188 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1189 * we cannot notice that anon_vma is freed while we migrate a page.
1190 * This get_anon_vma() delays freeing anon_vma pointer until the end
 * of migration. File cache pages are no problem because of page_lock():
 * file caches may use write_page() or lock_page() during migration, so
 * only anon pages need this care here.
1194 *
1195 * Only folio_get_anon_vma() understands the subtleties of
1196 * getting a hold on an anon_vma from outside one of its mms.
1197 * But if we cannot get anon_vma, then we won't need it anyway,
1198 * because that implies that the anon page is no longer mapped
1199 * (and cannot be remapped so long as we hold the page lock).
1200 */
1201 if (folio_test_anon(src) && !folio_test_ksm(src))
1202 anon_vma = folio_get_anon_vma(src);
1203
1204 /*
1205 * Block others from accessing the new page when we get around to
1206 * establishing additional references. We are usually the only one
1207 * holding a reference to dst at this point. We used to have a BUG
1208 * here if folio_trylock(dst) fails, but would like to allow for
1209 * cases where there might be a race with the previous use of dst.
1210 * This is much like races on refcount of oldpage: just don't BUG().
1211 */
1212 if (unlikely(!folio_trylock(dst)))
1213 goto out;
1214 dst_locked = true;
1215
1216 if (unlikely(!is_lru)) {
1217 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1218 return MIGRATEPAGE_UNMAP;
1219 }
1220
1221 /*
1222 * Corner case handling:
 * 1. When a new swap-cache page is read in, it is added to the LRU
1224 * and treated as swapcache but it has no rmap yet.
1225 * Calling try_to_unmap() against a src->mapping==NULL page will
1226 * trigger a BUG. So handle it here.
1227 * 2. An orphaned page (see truncate_cleanup_page) might have
1228 * fs-private metadata. The page can be picked up due to memory
1229 * offlining. Everywhere else except page reclaim, the page is
1230 * invisible to the vm, so the page can not be migrated. So try to
1231 * free the metadata, so the page can be freed.
1232 */
1233 if (!src->mapping) {
1234 if (folio_test_private(src)) {
1235 try_to_free_buffers(src);
1236 goto out;
1237 }
1238 } else if (folio_mapped(src)) {
1239 /* Establish migration ptes */
1240 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1241 !folio_test_ksm(src) && !anon_vma, src);
1242 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1243 page_was_mapped = 1;
1244 }
1245
1246 if (!folio_mapped(src)) {
1247 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1248 return MIGRATEPAGE_UNMAP;
1249 }
1250
1251out:
1252 /*
1253 * A folio that has not been unmapped will be restored to
 * the right list unless we want to retry.
1255 */
1256 if (rc == -EAGAIN)
1257 ret = NULL;
1258
1259 migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
1260 migrate_folio_undo_dst(dst, dst_locked, put_new_page, private);
1261
1262 return rc;
1263}
1264
1265/* Migrate the folio to the newly allocated folio in dst. */
1266static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
1267 struct folio *src, struct folio *dst,
1268 enum migrate_mode mode, enum migrate_reason reason,
1269 struct list_head *ret)
1270{
1271 int rc;
1272 int page_was_mapped = 0;
1273 struct anon_vma *anon_vma = NULL;
1274 bool is_lru = !__PageMovable(&src->page);
1275 struct list_head *prev;
1276
1277 __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
1278 prev = dst->lru.prev;
1279 list_del(&dst->lru);
1280
1281 rc = move_to_new_folio(dst, src, mode);
1282 if (rc)
1283 goto out;
1284
1285 if (unlikely(!is_lru))
1286 goto out_unlock_both;
1287
1288 /*
1289 * When successful, push dst to LRU immediately: so that if it
1290 * turns out to be an mlocked page, remove_migration_ptes() will
1291 * automatically build up the correct dst->mlock_count for it.
1292 *
1293 * We would like to do something similar for the old page, when
1294 * unsuccessful, and other cases when a page has been temporarily
1295 * isolated from the unevictable LRU: but this case is the easiest.
1296 */
1297 folio_add_lru(dst);
1298 if (page_was_mapped)
1299 lru_add_drain();
1300
1301 if (page_was_mapped)
1302 remove_migration_ptes(src, dst, false);
1303
1304out_unlock_both:
1305 folio_unlock(dst);
1306 set_page_owner_migrate_reason(&dst->page, reason);
1307 /*
 * If migration is successful, decrease the refcount of dst,
 * which will not free the page because the new page owner holds
 * a reference.
1311 */
1312 folio_put(dst);
1313
1314 /*
1315 * A folio that has been migrated has all references removed
1316 * and will be freed.
1317 */
1318 list_del(&src->lru);
1319 /* Drop an anon_vma reference if we took one */
1320 if (anon_vma)
1321 put_anon_vma(anon_vma);
1322 folio_unlock(src);
1323 migrate_folio_done(src, reason);
1324
1325 return rc;
1326out:
1327 /*
1328 * A folio that has not been migrated will be restored to
 * the right list unless we want to retry.
1330 */
1331 if (rc == -EAGAIN) {
1332 list_add(&dst->lru, prev);
1333 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1334 return rc;
1335 }
1336
1337 migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
1338 migrate_folio_undo_dst(dst, true, put_new_page, private);
1339
1340 return rc;
1341}
1342
1343/*
1344 * Counterpart of unmap_and_move_page() for hugepage migration.
1345 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepages.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference count of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
1356 *
1357 * There is also no race when direct I/O is issued on the page under migration,
1358 * because then pte is replaced with migration swap entry and direct I/O code
1359 * will wait in the page fault for migration to complete.
1360 */
1361static int unmap_and_move_huge_page(new_page_t get_new_page,
1362 free_page_t put_new_page, unsigned long private,
1363 struct page *hpage, int force,
1364 enum migrate_mode mode, int reason,
1365 struct list_head *ret)
1366{
1367 struct folio *dst, *src = page_folio(hpage);
1368 int rc = -EAGAIN;
1369 int page_was_mapped = 0;
1370 struct page *new_hpage;
1371 struct anon_vma *anon_vma = NULL;
1372 struct address_space *mapping = NULL;
1373
1374 if (folio_ref_count(src) == 1) {
1375 /* page was freed from under us. So we are done. */
1376 folio_putback_active_hugetlb(src);
1377 return MIGRATEPAGE_SUCCESS;
1378 }
1379
1380 new_hpage = get_new_page(hpage, private);
1381 if (!new_hpage)
1382 return -ENOMEM;
1383 dst = page_folio(new_hpage);
1384
1385 if (!folio_trylock(src)) {
1386 if (!force)
1387 goto out;
1388 switch (mode) {
1389 case MIGRATE_SYNC:
1390 case MIGRATE_SYNC_NO_COPY:
1391 break;
1392 default:
1393 goto out;
1394 }
1395 folio_lock(src);
1396 }
1397
1398 /*
1399 * Check for pages which are in the process of being freed. Without
1400 * folio_mapping() set, hugetlbfs specific move page routine will not
1401 * be called and we could leak usage counts for subpools.
1402 */
1403 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1404 rc = -EBUSY;
1405 goto out_unlock;
1406 }
1407
1408 if (folio_test_anon(src))
1409 anon_vma = folio_get_anon_vma(src);
1410
1411 if (unlikely(!folio_trylock(dst)))
1412 goto put_anon;
1413
1414 if (folio_mapped(src)) {
1415 enum ttu_flags ttu = 0;
1416
1417 if (!folio_test_anon(src)) {
1418 /*
1419 * In shared mappings, try_to_unmap could potentially
1420 * call huge_pmd_unshare. Because of this, take
1421 * semaphore in write mode here and set TTU_RMAP_LOCKED
1422 * to let lower levels know we have taken the lock.
1423 */
1424 mapping = hugetlb_page_mapping_lock_write(hpage);
1425 if (unlikely(!mapping))
1426 goto unlock_put_anon;
1427
1428 ttu = TTU_RMAP_LOCKED;
1429 }
1430
1431 try_to_migrate(src, ttu);
1432 page_was_mapped = 1;
1433
1434 if (ttu & TTU_RMAP_LOCKED)
1435 i_mmap_unlock_write(mapping);
1436 }
1437
1438 if (!folio_mapped(src))
1439 rc = move_to_new_folio(dst, src, mode);
1440
1441 if (page_was_mapped)
1442 remove_migration_ptes(src,
1443 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1444
1445unlock_put_anon:
1446 folio_unlock(dst);
1447
1448put_anon:
1449 if (anon_vma)
1450 put_anon_vma(anon_vma);
1451
1452 if (rc == MIGRATEPAGE_SUCCESS) {
1453 move_hugetlb_state(src, dst, reason);
1454 put_new_page = NULL;
1455 }
1456
1457out_unlock:
1458 folio_unlock(src);
1459out:
1460 if (rc == MIGRATEPAGE_SUCCESS)
1461 folio_putback_active_hugetlb(src);
1462 else if (rc != -EAGAIN)
1463 list_move_tail(&src->lru, ret);
1464
1465 /*
 * If migration was not successful and there's a freeing callback, use
 * it. Otherwise, folio_putback_active_hugetlb() will drop the reference
 * grabbed during isolation.
1469 */
1470 if (put_new_page)
1471 put_new_page(new_hpage, private);
1472 else
1473 folio_putback_active_hugetlb(dst);
1474
1475 return rc;
1476}
1477
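/*
 * Split @folio under its lock; on success the resulting base folios
 * (including @folio itself) end up on @split_folios.
 */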
1478static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1479{
1480 int rc;
1481
1482 folio_lock(folio);
1483 rc = split_folio_to_list(folio, split_folios);
1484 folio_unlock(folio);
1485 if (!rc)
1486 list_move_tail(&folio->lru, split_folios);
1487
1488 return rc;
1489}
1490
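/*
 * Upper bound on the number of base pages gathered into one
 * migrate_pages_batch() call: one PMD-sized THP worth of base pages when
 * THP is enabled, otherwise 512 base pages.
 */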
1491#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1492#define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
1493#else
1494#define NR_MAX_BATCHED_MIGRATION 512
1495#endif
1496#define NR_MAX_MIGRATE_PAGES_RETRY 10
1497#define NR_MAX_MIGRATE_ASYNC_RETRY 3
1498#define NR_MAX_MIGRATE_SYNC_RETRY \
1499 (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1500
1501struct migrate_pages_stats {
1502 int nr_succeeded; /* Normal and large folios migrated successfully, in
1503 units of base pages */
1504 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1505 units of base pages. Untried folios aren't counted */
1506 int nr_thp_succeeded; /* THP migrated successfully */
1507 int nr_thp_failed; /* THP failed to be migrated */
1508 int nr_thp_split; /* THP split before migrating */
1509};
1510
1511/*
 * Returns the number of hugetlb folios that were not migrated, or an error code
 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or once no migratable hugetlb folios
 * remain (because the list has become empty or no retryable hugetlb folios are
 * left). It is the caller's responsibility to call putback_movable_pages()
 * only if ret != 0.
1517 */
1518static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
1519 free_page_t put_new_page, unsigned long private,
1520 enum migrate_mode mode, int reason,
1521 struct migrate_pages_stats *stats,
1522 struct list_head *ret_folios)
1523{
1524 int retry = 1;
1525 int nr_failed = 0;
1526 int nr_retry_pages = 0;
1527 int pass = 0;
1528 struct folio *folio, *folio2;
1529 int rc, nr_pages;
1530
1531 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1532 retry = 0;
1533 nr_retry_pages = 0;
1534
1535 list_for_each_entry_safe(folio, folio2, from, lru) {
1536 if (!folio_test_hugetlb(folio))
1537 continue;
1538
1539 nr_pages = folio_nr_pages(folio);
1540
1541 cond_resched();
1542
1543 /*
1544 * Migratability of hugepages depends on architectures and
1545 * their size. This check is necessary because some callers
1546 * of hugepage migration like soft offline and memory
1547 * hotremove don't walk through page tables or check whether
1548 * the hugepage is pmd-based or not before kicking migration.
1549 */
1550 if (!hugepage_migration_supported(folio_hstate(folio))) {
1551 nr_failed++;
1552 stats->nr_failed_pages += nr_pages;
1553 list_move_tail(&folio->lru, ret_folios);
1554 continue;
1555 }
1556
1557 rc = unmap_and_move_huge_page(get_new_page,
1558 put_new_page, private,
1559 &folio->page, pass > 2, mode,
1560 reason, ret_folios);
1561 /*
1562 * The rules are:
1563 * Success: hugetlb folio will be put back
1564 * -EAGAIN: stay on the from list
1565 * -ENOMEM: stay on the from list
1566 * Other errno: put on ret_folios list
1567 */
1568 switch(rc) {
1569 case -ENOMEM:
1570 /*
1571 * When memory is low, don't bother to try to migrate
1572 * other folios, just exit.
1573 */
1574 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1575 return -ENOMEM;
1576 case -EAGAIN:
1577 retry++;
1578 nr_retry_pages += nr_pages;
1579 break;
1580 case MIGRATEPAGE_SUCCESS:
1581 stats->nr_succeeded += nr_pages;
1582 break;
1583 default:
1584 /*
1585 * Permanent failure (-EBUSY, etc.):
1586 * unlike -EAGAIN case, the failed folio is
1587 * removed from migration folio list and not
1588 * retried in the next outer loop.
1589 */
1590 nr_failed++;
1591 stats->nr_failed_pages += nr_pages;
1592 break;
1593 }
1594 }
1595 }
1596 /*
1597 * nr_failed is number of hugetlb folios failed to be migrated. After
1598 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1599 * folios as failed.
1600 */
1601 nr_failed += retry;
1602 stats->nr_failed_pages += nr_retry_pages;
1603
1604 return nr_failed;
1605}
1606
1607/*
 * migrate_pages_batch() first unmaps as many folios in the from list as
 * possible, then moves the unmapped folios.
 *
 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
 * lock or bit while we have locked more than one folio, which may cause
 * deadlock (e.g., for the loop device). So, if mode != MIGRATE_ASYNC, the
 * length of the from list must be <= 1.
1615 */
1616static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
1617 free_page_t put_new_page, unsigned long private,
1618 enum migrate_mode mode, int reason, struct list_head *ret_folios,
1619 struct list_head *split_folios, struct migrate_pages_stats *stats,
1620 int nr_pass)
1621{
1622 int retry = 1;
1623 int large_retry = 1;
1624 int thp_retry = 1;
1625 int nr_failed = 0;
1626 int nr_retry_pages = 0;
1627 int nr_large_failed = 0;
1628 int pass = 0;
1629 bool is_large = false;
1630 bool is_thp = false;
1631 struct folio *folio, *folio2, *dst = NULL, *dst2;
1632 int rc, rc_saved = 0, nr_pages;
1633 LIST_HEAD(unmap_folios);
1634 LIST_HEAD(dst_folios);
1635 bool nosplit = (reason == MR_NUMA_MISPLACED);
1636
1637 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1638 !list_empty(from) && !list_is_singular(from));
1639
1640 for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
1641 retry = 0;
1642 large_retry = 0;
1643 thp_retry = 0;
1644 nr_retry_pages = 0;
1645
1646 list_for_each_entry_safe(folio, folio2, from, lru) {
1647 /*
 * Large folio statistics are based on the source large
1649 * folio. Capture required information that might get
1650 * lost during migration.
1651 */
1652 is_large = folio_test_large(folio);
1653 is_thp = is_large && folio_test_pmd_mappable(folio);
1654 nr_pages = folio_nr_pages(folio);
1655
1656 cond_resched();
1657
1658 /*
 * Large folio migration might be unsupported or
 * the allocation might fail, so we should retry
 * on the same folio with the large folio split
 * into normal folios.
1663 *
1664 * Split folios are put in split_folios, and
1665 * we will migrate them after the rest of the
1666 * list is processed.
1667 */
1668 if (!thp_migration_supported() && is_thp) {
1669 nr_large_failed++;
1670 stats->nr_thp_failed++;
1671 if (!try_split_folio(folio, split_folios)) {
1672 stats->nr_thp_split++;
1673 continue;
1674 }
1675 stats->nr_failed_pages += nr_pages;
1676 list_move_tail(&folio->lru, ret_folios);
1677 continue;
1678 }
1679
1680 rc = migrate_folio_unmap(get_new_page, put_new_page, private,
1681 folio, &dst, mode, reason, ret_folios);
1682 /*
1683 * The rules are:
1684 * Success: folio will be freed
1685 * Unmap: folio will be put on unmap_folios list,
1686 * dst folio put on dst_folios list
1687 * -EAGAIN: stay on the from list
1688 * -ENOMEM: stay on the from list
1689 * Other errno: put on ret_folios list
1690 */
1691 switch(rc) {
1692 case -ENOMEM:
1693 /*
1694 * When memory is low, don't bother to try to migrate
1695 * other folios, move unmapped folios, then exit.
1696 */
1697 if (is_large) {
1698 nr_large_failed++;
1699 stats->nr_thp_failed += is_thp;
1700 /* Large folio NUMA faulting doesn't split to retry. */
1701 if (!nosplit) {
1702 int ret = try_split_folio(folio, split_folios);
1703
1704 if (!ret) {
1705 stats->nr_thp_split += is_thp;
1706 break;
1707 } else if (reason == MR_LONGTERM_PIN &&
1708 ret == -EAGAIN) {
1709 /*
1710 * Try again to split large folio to
1711 * mitigate the failure of longterm pinning.
1712 */
1713 large_retry++;
1714 thp_retry += is_thp;
1715 nr_retry_pages += nr_pages;
1716 break;
1717 }
1718 }
1719 } else {
1720 nr_failed++;
1721 }
1722
1723 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1724 /* nr_failed isn't updated for not used */
1725 nr_large_failed += large_retry;
1726 stats->nr_thp_failed += thp_retry;
1727 rc_saved = rc;
1728 if (list_empty(&unmap_folios))
1729 goto out;
1730 else
1731 goto move;
1732 case -EAGAIN:
1733 if (is_large) {
1734 large_retry++;
1735 thp_retry += is_thp;
1736 } else {
1737 retry++;
1738 }
1739 nr_retry_pages += nr_pages;
1740 break;
1741 case MIGRATEPAGE_SUCCESS:
1742 stats->nr_succeeded += nr_pages;
1743 stats->nr_thp_succeeded += is_thp;
1744 break;
1745 case MIGRATEPAGE_UNMAP:
1746 list_move_tail(&folio->lru, &unmap_folios);
1747 list_add_tail(&dst->lru, &dst_folios);
1748 break;
1749 default:
1750 /*
1751 * Permanent failure (-EBUSY, etc.):
1752 * unlike -EAGAIN case, the failed folio is
1753 * removed from migration folio list and not
1754 * retried in the next outer loop.
1755 */
1756 if (is_large) {
1757 nr_large_failed++;
1758 stats->nr_thp_failed += is_thp;
1759 } else {
1760 nr_failed++;
1761 }
1762
1763 stats->nr_failed_pages += nr_pages;
1764 break;
1765 }
1766 }
1767 }
1768 nr_failed += retry;
1769 nr_large_failed += large_retry;
1770 stats->nr_thp_failed += thp_retry;
1771 stats->nr_failed_pages += nr_retry_pages;
1772move:
1773 /* Flush TLBs for all unmapped folios */
1774 try_to_unmap_flush();
1775
1776 retry = 1;
1777 for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
1778 retry = 0;
1779 large_retry = 0;
1780 thp_retry = 0;
1781 nr_retry_pages = 0;
1782
1783 dst = list_first_entry(&dst_folios, struct folio, lru);
1784 dst2 = list_next_entry(dst, lru);
1785 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1786 is_large = folio_test_large(folio);
1787 is_thp = is_large && folio_test_pmd_mappable(folio);
1788 nr_pages = folio_nr_pages(folio);
1789
1790 cond_resched();
1791
1792 rc = migrate_folio_move(put_new_page, private,
1793 folio, dst, mode,
1794 reason, ret_folios);
1795 /*
1796 * The rules are:
1797 * Success: folio will be freed
1798 * -EAGAIN: stay on the unmap_folios list
1799 * Other errno: put on ret_folios list
1800 */
1801 switch(rc) {
1802 case -EAGAIN:
1803 if (is_large) {
1804 large_retry++;
1805 thp_retry += is_thp;
1806 } else {
1807 retry++;
1808 }
1809 nr_retry_pages += nr_pages;
1810 break;
1811 case MIGRATEPAGE_SUCCESS:
1812 stats->nr_succeeded += nr_pages;
1813 stats->nr_thp_succeeded += is_thp;
1814 break;
1815 default:
1816 if (is_large) {
1817 nr_large_failed++;
1818 stats->nr_thp_failed += is_thp;
1819 } else {
1820 nr_failed++;
1821 }
1822
1823 stats->nr_failed_pages += nr_pages;
1824 break;
1825 }
1826 dst = dst2;
1827 dst2 = list_next_entry(dst, lru);
1828 }
1829 }
1830 nr_failed += retry;
1831 nr_large_failed += large_retry;
1832 stats->nr_thp_failed += thp_retry;
1833 stats->nr_failed_pages += nr_retry_pages;
1834
1835 if (rc_saved)
1836 rc = rc_saved;
1837 else
1838 rc = nr_failed + nr_large_failed;
1839out:
1840 /* Cleanup remaining folios */
1841 dst = list_first_entry(&dst_folios, struct folio, lru);
1842 dst2 = list_next_entry(dst, lru);
1843 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1844 int page_was_mapped = 0;
1845 struct anon_vma *anon_vma = NULL;
1846
1847 __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
1848 migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
1849 true, ret_folios);
1850 list_del(&dst->lru);
1851 migrate_folio_undo_dst(dst, true, put_new_page, private);
1852 dst = dst2;
1853 dst2 = list_next_entry(dst, lru);
1854 }
1855
1856 return rc;
1857}
1858
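/*
 * migrate_pages_sync() first tries a batched MIGRATE_ASYNC pass over the
 * whole list, then falls back to migrating each remaining folio one by one
 * in the caller's synchronous @mode.
 */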
1859static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
1860 free_page_t put_new_page, unsigned long private,
1861 enum migrate_mode mode, int reason, struct list_head *ret_folios,
1862 struct list_head *split_folios, struct migrate_pages_stats *stats)
1863{
1864 int rc, nr_failed = 0;
1865 LIST_HEAD(folios);
1866 struct migrate_pages_stats astats;
1867
1868 memset(&astats, 0, sizeof(astats));
1869 /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
1870 rc = migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC,
1871 reason, &folios, split_folios, &astats,
1872 NR_MAX_MIGRATE_ASYNC_RETRY);
1873 stats->nr_succeeded += astats.nr_succeeded;
1874 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1875 stats->nr_thp_split += astats.nr_thp_split;
1876 if (rc < 0) {
1877 stats->nr_failed_pages += astats.nr_failed_pages;
1878 stats->nr_thp_failed += astats.nr_thp_failed;
1879 list_splice_tail(&folios, ret_folios);
1880 return rc;
1881 }
1882 stats->nr_thp_failed += astats.nr_thp_split;
1883 nr_failed += astats.nr_thp_split;
1884 /*
1885	 * Fall back to migrating the failed folios one by one synchronously. All
1886	 * failed folios except split THPs will be retried here, so their earlier
1887	 * failure isn't counted.
1888 */
1889 list_splice_tail_init(&folios, from);
1890 while (!list_empty(from)) {
1891 list_move(from->next, &folios);
1892 rc = migrate_pages_batch(&folios, get_new_page, put_new_page,
1893 private, mode, reason, ret_folios,
1894 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1895 list_splice_tail_init(&folios, ret_folios);
1896 if (rc < 0)
1897 return rc;
1898 nr_failed += rc;
1899 }
1900
1901 return nr_failed;
1902}
1903
1904/*
1905 * migrate_pages - migrate the folios specified in a list to the free folios
1906 *		   supplied as the targets for the page migration
1907 *
1908 * @from: The list of folios to be migrated.
1909 * @get_new_page: The function used to allocate free folios to be used
1910 * as the target of the folio migration.
1911 * @put_new_page: The function used to free target folios if migration
1912 * fails, or NULL if no special handling is necessary.
1913 * @private: Private data to be passed on to get_new_page()
1914 * @mode: The migration mode that specifies the constraints for
1915 * folio migration, if any.
1916 * @reason: The reason for folio migration.
1917 * @ret_succeeded: Set to the number of folios migrated successfully if
1918 * the caller passes a non-NULL pointer.
1919 *
1920 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1921 * are movable any more because the list has become empty or no retryable folios
1922 * exist any more. It is the caller's responsibility to call putback_movable_pages()
1923 * only if ret != 0.
1924 *
1925 * Returns the number of {normal folio, large folio, hugetlb page} units that
1926 * were not migrated, or an error code. Each large folio that had to be split
1927 * is counted as one non-migrated large folio, no matter how many of its
1928 * split folios are migrated successfully.
1929 */
1930int migrate_pages(struct list_head *from, new_page_t get_new_page,
1931 free_page_t put_new_page, unsigned long private,
1932 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1933{
1934 int rc, rc_gather;
1935 int nr_pages;
1936 struct folio *folio, *folio2;
1937 LIST_HEAD(folios);
1938 LIST_HEAD(ret_folios);
1939 LIST_HEAD(split_folios);
1940 struct migrate_pages_stats stats;
1941
1942 trace_mm_migrate_pages_start(mode, reason);
1943
1944 memset(&stats, 0, sizeof(stats));
1945
1946 rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private,
1947 mode, reason, &stats, &ret_folios);
1948 if (rc_gather < 0)
1949 goto out;
1950
1951again:
1952 nr_pages = 0;
1953 list_for_each_entry_safe(folio, folio2, from, lru) {
1954 /* Retried hugetlb folios will be kept in list */
1955 if (folio_test_hugetlb(folio)) {
1956 list_move_tail(&folio->lru, &ret_folios);
1957 continue;
1958 }
1959
1960 nr_pages += folio_nr_pages(folio);
1961 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1962 break;
1963 }
1964 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1965 list_cut_before(&folios, from, &folio2->lru);
1966 else
1967 list_splice_init(from, &folios);
1968 if (mode == MIGRATE_ASYNC)
1969 rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
1970 mode, reason, &ret_folios, &split_folios, &stats,
1971 NR_MAX_MIGRATE_PAGES_RETRY);
1972 else
1973 rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private,
1974 mode, reason, &ret_folios, &split_folios, &stats);
1975 list_splice_tail_init(&folios, &ret_folios);
1976 if (rc < 0) {
1977 rc_gather = rc;
1978 list_splice_tail(&split_folios, &ret_folios);
1979 goto out;
1980 }
1981 if (!list_empty(&split_folios)) {
1982 /*
1983		 * Failure isn't counted here since all split folios of a large folio
1984		 * are already counted as one failure. We also only migrate with
1985		 * minimal effort: force MIGRATE_ASYNC mode and retry only once.
1986 */
1987 migrate_pages_batch(&split_folios, get_new_page, put_new_page, private,
1988 MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1);
1989 list_splice_tail_init(&split_folios, &ret_folios);
1990 }
1991 rc_gather += rc;
1992 if (!list_empty(from))
1993 goto again;
1994out:
1995 /*
1996	 * Put the permanently failed folios back on the migration list; they
1997	 * will be moved to the right list by the caller.
1998 */
1999 list_splice(&ret_folios, from);
2000
2001 /*
2002 * Return 0 in case all split folios of fail-to-migrate large folios
2003 * are migrated successfully.
2004 */
2005 if (list_empty(from))
2006 rc_gather = 0;
2007
2008 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2009 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2010 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2011 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2012 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2013 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2014 stats.nr_thp_succeeded, stats.nr_thp_failed,
2015 stats.nr_thp_split, mode, reason);
2016
2017 if (ret_succeeded)
2018 *ret_succeeded = stats.nr_succeeded;
2019
2020 return rc_gather;
2021}
2022
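/*
 * Default allocation callback for migrate_pages(): @private points to a
 * struct migration_target_control describing the preferred node, nodemask
 * and gfp mask.  A destination of matching type is allocated: a hugetlb
 * folio for hugetlb sources, a THP-sized folio for large sources, and an
 * order-0 page otherwise.
 */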
2023struct page *alloc_migration_target(struct page *page, unsigned long private)
2024{
2025 struct folio *folio = page_folio(page);
2026 struct migration_target_control *mtc;
2027 gfp_t gfp_mask;
2028 unsigned int order = 0;
2029 struct folio *hugetlb_folio = NULL;
2030 struct folio *new_folio = NULL;
2031 int nid;
2032 int zidx;
2033
2034 mtc = (struct migration_target_control *)private;
2035 gfp_mask = mtc->gfp_mask;
2036 nid = mtc->nid;
2037 if (nid == NUMA_NO_NODE)
2038 nid = folio_nid(folio);
2039
2040 if (folio_test_hugetlb(folio)) {
2041 struct hstate *h = folio_hstate(folio);
2042
2043 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2044 hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
2045 mtc->nmask, gfp_mask);
2046 return &hugetlb_folio->page;
2047 }
2048
2049 if (folio_test_large(folio)) {
2050 /*
2051 * clear __GFP_RECLAIM to make the migration callback
2052 * consistent with regular THP allocations.
2053 */
2054 gfp_mask &= ~__GFP_RECLAIM;
2055 gfp_mask |= GFP_TRANSHUGE;
2056 order = folio_order(folio);
2057 }
2058 zidx = zone_idx(folio_zone(folio));
2059 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2060 gfp_mask |= __GFP_HIGHMEM;
2061
2062 new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2063
2064 return &new_folio->page;
2065}
2066
2067#ifdef CONFIG_NUMA
2068
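/*
 * Write @value to @nr consecutive entries of the user status array,
 * starting at index @start.  Returns 0 on success or -EFAULT.
 */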
2069static int store_status(int __user *status, int start, int value, int nr)
2070{
2071 while (nr-- > 0) {
2072 if (put_user(value, status + start))
2073 return -EFAULT;
2074 start++;
2075 }
2076
2077 return 0;
2078}
2079
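/*
 * Migrate all pages on @pagelist to @node.  Pages that could not be
 * migrated are put back where they came from.  Returns 0 on success,
 * the number of pages that failed to migrate, or a negative errno.
 */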
2080static int do_move_pages_to_node(struct mm_struct *mm,
2081 struct list_head *pagelist, int node)
2082{
2083 int err;
2084 struct migration_target_control mtc = {
2085 .nid = node,
2086 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2087 };
2088
2089 err = migrate_pages(pagelist, alloc_migration_target, NULL,
2090 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2091 if (err)
2092 putback_movable_pages(pagelist);
2093 return err;
2094}
2095
2096/*
2097 * Resolves the given address to a struct page, isolates it from the LRU and
2098 * puts it on the given pagelist.
2099 * Returns:
2100 * errno - if the page cannot be found/isolated
2101 * 0 - when it doesn't have to be migrated because it is already on the
2102 * target node
2103 * 1 - when it has been queued
2104 */
2105static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
2106 int node, struct list_head *pagelist, bool migrate_all)
2107{
2108 struct vm_area_struct *vma;
2109 struct page *page;
2110 int err;
2111 bool isolated;
2112
2113 mmap_read_lock(mm);
2114 err = -EFAULT;
2115 vma = vma_lookup(mm, addr);
2116 if (!vma || !vma_migratable(vma))
2117 goto out;
2118
2119 /* FOLL_DUMP to ignore special (like zero) pages */
2120 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2121
2122 err = PTR_ERR(page);
2123 if (IS_ERR(page))
2124 goto out;
2125
2126 err = -ENOENT;
2127 if (!page)
2128 goto out;
2129
2130 if (is_zone_device_page(page))
2131 goto out_putpage;
2132
2133 err = 0;
2134 if (page_to_nid(page) == node)
2135 goto out_putpage;
2136
2137 err = -EACCES;
2138 if (page_mapcount(page) > 1 && !migrate_all)
2139 goto out_putpage;
2140
2141 if (PageHuge(page)) {
2142 if (PageHead(page)) {
2143 isolated = isolate_hugetlb(page_folio(page), pagelist);
2144 err = isolated ? 1 : -EBUSY;
2145 }
2146 } else {
2147 struct page *head;
2148
2149 head = compound_head(page);
2150 isolated = isolate_lru_page(head);
2151 if (!isolated) {
2152 err = -EBUSY;
2153 goto out_putpage;
2154 }
2155
2156 err = 1;
2157 list_add_tail(&head->lru, pagelist);
2158 mod_node_page_state(page_pgdat(head),
2159 NR_ISOLATED_ANON + page_is_file_lru(head),
2160 thp_nr_pages(head));
2161 }
2162out_putpage:
2163 /*
2164	 * Either drop the reference taken by follow_page() now that
2165	 * isolate_lru_page() holds its own, or drop the page ref if
2166	 * the page was not isolated.
2167 */
2168 put_page(page);
2169out:
2170 mmap_read_unlock(mm);
2171 return err;
2172}
2173
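/*
 * Migrate the pages gathered on @pagelist to @node and, on success, store
 * @node in the user @status array for the entries [@start, @i) that were
 * queued.  A positive error return also includes the nr_pages - i entries
 * that were never attempted.
 */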
2174static int move_pages_and_store_status(struct mm_struct *mm, int node,
2175 struct list_head *pagelist, int __user *status,
2176 int start, int i, unsigned long nr_pages)
2177{
2178 int err;
2179
2180 if (list_empty(pagelist))
2181 return 0;
2182
2183 err = do_move_pages_to_node(mm, pagelist, node);
2184 if (err) {
2185 /*
2186		 * A positive err means the number of pages
2187		 * that failed to migrate. Since we are going
2188		 * to abort and return the number of
2189		 * non-migrated pages, we need to include the
2190		 * rest of the nr_pages that have not been
2191		 * attempted as well.
2192 */
2193 if (err > 0)
2194 err += nr_pages - i;
2195 return err;
2196 }
2197 return store_status(status, start, node, i - start);
2198}
2199
2200/*
2201 * Migrate an array of page addresses onto an array of nodes and fill
2202 * the corresponding array of status values.
2203 */
2204static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2205 unsigned long nr_pages,
2206 const void __user * __user *pages,
2207 const int __user *nodes,
2208 int __user *status, int flags)
2209{
2210 int current_node = NUMA_NO_NODE;
2211 LIST_HEAD(pagelist);
2212 int start, i;
2213 int err = 0, err1;
2214
2215 lru_cache_disable();
2216
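	/*
	 * Pages destined for the same node are gathered on pagelist and
	 * migrated in batches: the batch collected so far is flushed via
	 * move_pages_and_store_status() whenever the requested node changes,
	 * a page cannot be queued, or the end of the array is reached.
	 */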
2217 for (i = start = 0; i < nr_pages; i++) {
2218 const void __user *p;
2219 unsigned long addr;
2220 int node;
2221
2222 err = -EFAULT;
2223 if (get_user(p, pages + i))
2224 goto out_flush;
2225 if (get_user(node, nodes + i))
2226 goto out_flush;
2227 addr = (unsigned long)untagged_addr(p);
2228
2229 err = -ENODEV;
2230 if (node < 0 || node >= MAX_NUMNODES)
2231 goto out_flush;
2232 if (!node_state(node, N_MEMORY))
2233 goto out_flush;
2234
2235 err = -EACCES;
2236 if (!node_isset(node, task_nodes))
2237 goto out_flush;
2238
2239 if (current_node == NUMA_NO_NODE) {
2240 current_node = node;
2241 start = i;
2242 } else if (node != current_node) {
2243 err = move_pages_and_store_status(mm, current_node,
2244 &pagelist, status, start, i, nr_pages);
2245 if (err)
2246 goto out;
2247 start = i;
2248 current_node = node;
2249 }
2250
2251 /*
2252 * Errors in the page lookup or isolation are not fatal and we simply
2253		 * report them via the status array.
2254 */
2255 err = add_page_for_migration(mm, addr, current_node,
2256 &pagelist, flags & MPOL_MF_MOVE_ALL);
2257
2258 if (err > 0) {
2259 /* The page is successfully queued for migration */
2260 continue;
2261 }
2262
2263 /*
2264		 * The move_pages() man page does not document an -EEXIST error, so
2265		 * use -EFAULT instead.
2266 */
2267 if (err == -EEXIST)
2268 err = -EFAULT;
2269
2270 /*
2271 * If the page is already on the target node (!err), store the
2272 * node, otherwise, store the err.
2273 */
2274 err = store_status(status, i, err ? : current_node, 1);
2275 if (err)
2276 goto out_flush;
2277
2278 err = move_pages_and_store_status(mm, current_node, &pagelist,
2279 status, start, i, nr_pages);
2280 if (err) {
2281 /* We have accounted for page i */
2282 if (err > 0)
2283 err--;
2284 goto out;
2285 }
2286 current_node = NUMA_NO_NODE;
2287 }
2288out_flush:
2289 /* Make sure we do not overwrite the existing error */
2290 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
2291 status, start, i, nr_pages);
2292 if (err >= 0)
2293 err = err1;
2294out:
2295 lru_cache_enable();
2296 return err;
2297}
2298
2299/*
2300 * Determine the nodes of an array of pages and store them in an array of status values.
2301 */
2302static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2303 const void __user **pages, int *status)
2304{
2305 unsigned long i;
2306
2307 mmap_read_lock(mm);
2308
2309 for (i = 0; i < nr_pages; i++) {
2310 unsigned long addr = (unsigned long)(*pages);
2311 struct vm_area_struct *vma;
2312 struct page *page;
2313 int err = -EFAULT;
2314
2315 vma = vma_lookup(mm, addr);
2316 if (!vma)
2317 goto set_status;
2318
2319 /* FOLL_DUMP to ignore special (like zero) pages */
2320 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2321
2322 err = PTR_ERR(page);
2323 if (IS_ERR(page))
2324 goto set_status;
2325
2326 err = -ENOENT;
2327 if (!page)
2328 goto set_status;
2329
2330 if (!is_zone_device_page(page))
2331 err = page_to_nid(page);
2332
2333 put_page(page);
2334set_status:
2335 *status = err;
2336
2337 pages++;
2338 status++;
2339 }
2340
2341 mmap_read_unlock(mm);
2342}
2343
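/*
 * In a compat (32-bit) syscall the user "pages" array holds compat_uptr_t
 * entries rather than native pointers; convert a chunk of them into native
 * pointers for do_pages_stat_array().
 */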
2344static int get_compat_pages_array(const void __user *chunk_pages[],
2345 const void __user * __user *pages,
2346 unsigned long chunk_nr)
2347{
2348 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2349 compat_uptr_t p;
2350 int i;
2351
2352 for (i = 0; i < chunk_nr; i++) {
2353 if (get_user(p, pages32 + i))
2354 return -EFAULT;
2355 chunk_pages[i] = compat_ptr(p);
2356 }
2357
2358 return 0;
2359}
2360
2361/*
2362 * Determine the nodes of a user array of pages and store them in
2363 * a user array of status values.
2364 */
2365static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2366 const void __user * __user *pages,
2367 int __user *status)
2368{
2369#define DO_PAGES_STAT_CHUNK_NR 16UL
2370 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2371 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2372
2373 while (nr_pages) {
2374 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2375
2376 if (in_compat_syscall()) {
2377 if (get_compat_pages_array(chunk_pages, pages,
2378 chunk_nr))
2379 break;
2380 } else {
2381 if (copy_from_user(chunk_pages, pages,
2382 chunk_nr * sizeof(*chunk_pages)))
2383 break;
2384 }
2385
2386 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2387
2388 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2389 break;
2390
2391 pages += chunk_nr;
2392 status += chunk_nr;
2393 nr_pages -= chunk_nr;
2394 }
2395 return nr_pages ? -EFAULT : 0;
2396}
2397
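/*
 * Look up the mm_struct of the process identified by @pid (or of the current
 * process when @pid is 0), checking ptrace and LSM permissions, and fill
 * @mem_nodes with the task's allowed cpuset nodes.  Returns the mm with a
 * reference held, or an ERR_PTR() on failure.
 */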
2398static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2399{
2400 struct task_struct *task;
2401 struct mm_struct *mm;
2402
2403 /*
2404	 * There is no need to check if the current process has the right to modify
2405	 * the specified process when they are the same.
2406 */
2407 if (!pid) {
2408 mmget(current->mm);
2409 *mem_nodes = cpuset_mems_allowed(current);
2410 return current->mm;
2411 }
2412
2413 /* Find the mm_struct */
2414 rcu_read_lock();
2415 task = find_task_by_vpid(pid);
2416 if (!task) {
2417 rcu_read_unlock();
2418 return ERR_PTR(-ESRCH);
2419 }
2420 get_task_struct(task);
2421
2422 /*
2423 * Check if this process has the right to modify the specified
2424 * process. Use the regular "ptrace_may_access()" checks.
2425 */
2426 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2427 rcu_read_unlock();
2428 mm = ERR_PTR(-EPERM);
2429 goto out;
2430 }
2431 rcu_read_unlock();
2432
2433 mm = ERR_PTR(security_task_movememory(task));
2434 if (IS_ERR(mm))
2435 goto out;
2436 *mem_nodes = cpuset_mems_allowed(task);
2437 mm = get_task_mm(task);
2438out:
2439 put_task_struct(task);
2440 if (!mm)
2441 mm = ERR_PTR(-EINVAL);
2442 return mm;
2443}
2444
2445/*
2446 * Move a list of pages in the address space of the process identified by
2447 * @pid, or of the currently executing process when @pid is 0.
2448 */
2449static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2450 const void __user * __user *pages,
2451 const int __user *nodes,
2452 int __user *status, int flags)
2453{
2454 struct mm_struct *mm;
2455 int err;
2456 nodemask_t task_nodes;
2457
2458 /* Check flags */
2459 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2460 return -EINVAL;
2461
2462 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2463 return -EPERM;
2464
2465 mm = find_mm_struct(pid, &task_nodes);
2466 if (IS_ERR(mm))
2467 return PTR_ERR(mm);
2468
2469 if (nodes)
2470 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2471 nodes, status, flags);
2472 else
2473 err = do_pages_stat(mm, nr_pages, pages, status);
2474
2475 mmput(mm);
2476 return err;
2477}
2478
2479SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2480 const void __user * __user *, pages,
2481 const int __user *, nodes,
2482 int __user *, status, int, flags)
2483{
2484 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2485}
2486
2487#ifdef CONFIG_NUMA_BALANCING
2488/*
2489 * Returns true if this is a safe migration target node for misplaced NUMA
2490 * pages. Currently it only checks the watermarks, which is crude.
2491 */
2492static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2493 unsigned long nr_migrate_pages)
2494{
2495 int z;
2496
2497 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2498 struct zone *zone = pgdat->node_zones + z;
2499
2500 if (!managed_zone(zone))
2501 continue;
2502
2503		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2504 if (!zone_watermark_ok(zone, 0,
2505 high_wmark_pages(zone) +
2506 nr_migrate_pages,
2507 ZONE_MOVABLE, 0))
2508 continue;
2509 return true;
2510 }
2511 return false;
2512}
2513
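/*
 * Allocation callback for NUMA-balancing migration: allocate the destination
 * on the target node only (__GFP_THISNODE) and with lightweight gfp flags,
 * since migrating a misplaced page is purely opportunistic.
 */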
2514static struct page *alloc_misplaced_dst_page(struct page *page,
2515 unsigned long data)
2516{
2517 int nid = (int) data;
2518 int order = compound_order(page);
2519 gfp_t gfp = __GFP_THISNODE;
2520 struct folio *new;
2521
2522 if (order > 0)
2523 gfp |= GFP_TRANSHUGE_LIGHT;
2524 else {
2525 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2526 __GFP_NOWARN;
2527 gfp &= ~__GFP_RECLAIM;
2528 }
2529 new = __folio_alloc_node(gfp, order, nid);
2530
2531 return &new->page;
2532}
2533
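/*
 * Isolate @page from its LRU list in preparation for migrating it to @pgdat.
 * Multiply mapped THPs are skipped, and so are pages whose target node is
 * too close to its watermarks (kswapd is woken instead when memory tiering
 * is enabled).  Returns 1 if the page was isolated, 0 otherwise.
 */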
2534static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2535{
2536 int nr_pages = thp_nr_pages(page);
2537 int order = compound_order(page);
2538
2539 VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
2540
2541 /* Do not migrate THP mapped by multiple processes */
2542 if (PageTransHuge(page) && total_mapcount(page) > 1)
2543 return 0;
2544
2545 /* Avoid migrating to a node that is nearly full */
2546 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2547 int z;
2548
2549 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2550 return 0;
2551 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2552 if (managed_zone(pgdat->node_zones + z))
2553 break;
2554 }
2555 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
2556 return 0;
2557 }
2558
2559 if (!isolate_lru_page(page))
2560 return 0;
2561
2562 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2563 nr_pages);
2564
2565 /*
2566 * Isolating the page has taken another reference, so the
2567 * caller's reference can be safely dropped without the page
2568 * disappearing underneath us during migration.
2569 */
2570 put_page(page);
2571 return 1;
2572}
2573
2574/*
2575 * Attempt to migrate a misplaced page to the specified destination
2576 * node. Caller is expected to have an elevated reference count on
2577 * the page that will be dropped by this function before returning.
2578 */
2579int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2580 int node)
2581{
2582 pg_data_t *pgdat = NODE_DATA(node);
2583 int isolated;
2584 int nr_remaining;
2585 unsigned int nr_succeeded;
2586 LIST_HEAD(migratepages);
2587 int nr_pages = thp_nr_pages(page);
2588
2589 /*
2590 * Don't migrate file pages that are mapped in multiple processes
2591 * with execute permissions as they are probably shared libraries.
2592 */
2593 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2594 (vma->vm_flags & VM_EXEC))
2595 goto out;
2596
2597 /*
2598	 * Also do not migrate dirty pages, as not all filesystems can move
2599	 * dirty pages in MIGRATE_ASYNC mode, which would be a waste of cycles.
2600 */
2601 if (page_is_file_lru(page) && PageDirty(page))
2602 goto out;
2603
2604 isolated = numamigrate_isolate_page(pgdat, page);
2605 if (!isolated)
2606 goto out;
2607
2608 list_add(&page->lru, &migratepages);
2609 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2610 NULL, node, MIGRATE_ASYNC,
2611 MR_NUMA_MISPLACED, &nr_succeeded);
2612 if (nr_remaining) {
2613 if (!list_empty(&migratepages)) {
2614 list_del(&page->lru);
2615 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2616 page_is_file_lru(page), -nr_pages);
2617 putback_lru_page(page);
2618 }
2619 isolated = 0;
2620 }
2621 if (nr_succeeded) {
2622 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2623 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2624 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2625 nr_succeeded);
2626 }
2627 BUG_ON(!list_empty(&migratepages));
2628 return isolated;
2629
2630out:
2631 put_page(page);
2632 return 0;
2633}
2634#endif /* CONFIG_NUMA_BALANCING */
2635#endif /* CONFIG_NUMA */