1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Memory Migration functionality - linux/mm/migrate.c
4 *
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 *
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
9 *
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
13 * Christoph Lameter
14 */
15
16#include <linux/migrate.h>
17#include <linux/export.h>
18#include <linux/swap.h>
19#include <linux/swapops.h>
20#include <linux/pagemap.h>
21#include <linux/buffer_head.h>
22#include <linux/mm_inline.h>
23#include <linux/ksm.h>
24#include <linux/rmap.h>
25#include <linux/topology.h>
26#include <linux/cpu.h>
27#include <linux/cpuset.h>
28#include <linux/writeback.h>
29#include <linux/mempolicy.h>
30#include <linux/vmalloc.h>
31#include <linux/security.h>
32#include <linux/backing-dev.h>
33#include <linux/compaction.h>
34#include <linux/syscalls.h>
35#include <linux/compat.h>
36#include <linux/hugetlb.h>
37#include <linux/gfp.h>
38#include <linux/pfn_t.h>
39#include <linux/page_idle.h>
40#include <linux/page_owner.h>
41#include <linux/sched/mm.h>
42#include <linux/ptrace.h>
43#include <linux/memory.h>
44#include <linux/sched/sysctl.h>
45#include <linux/memory-tiers.h>
46#include <linux/pagewalk.h>
47
48#include <asm/tlbflush.h>
49
50#include <trace/events/migrate.h>
51
52#include "internal.h"
53
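/*
 * Try to isolate a non-LRU movable page for migration: take a reference,
 * check under the page lock that the page is still movable and not already
 * isolated, and ask the driver's isolate_page() callback to isolate it.
 * Returns true on success with the isolated flag set, false otherwise.
 */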
54bool isolate_movable_page(struct page *page, isolate_mode_t mode)
55{
56 struct folio *folio = folio_get_nontail_page(page);
57 const struct movable_operations *mops;
58
	/*
	 * Avoid burning cycles on pages that are still under __free_pages(),
	 * or that just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the folio_put() at the end of this function takes care of
	 * releasing the page, thus avoiding a nasty leak.
	 */
68 if (!folio)
69 goto out;
70
71 if (unlikely(folio_test_slab(folio)))
72 goto out_putfolio;
73 /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
74 smp_rmb();
	/*
	 * Check the movable flag before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags, so
	 * unconditionally grabbing the lock would ruin the page owner's side.
	 */
80 if (unlikely(!__folio_test_movable(folio)))
81 goto out_putfolio;
82 /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
83 smp_rmb();
84 if (unlikely(folio_test_slab(folio)))
85 goto out_putfolio;
86
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we hold the page lock
	 * before proceeding with the movable page isolation steps.
	 */
98 if (unlikely(!folio_trylock(folio)))
99 goto out_putfolio;
100
101 if (!folio_test_movable(folio) || folio_test_isolated(folio))
102 goto out_no_isolated;
103
104 mops = folio_movable_ops(folio);
105 VM_BUG_ON_FOLIO(!mops, folio);
106
107 if (!mops->isolate_page(&folio->page, mode))
108 goto out_no_isolated;
109
110 /* Driver shouldn't use the isolated flag */
111 WARN_ON_ONCE(folio_test_isolated(folio));
112 folio_set_isolated(folio);
113 folio_unlock(folio);
114
115 return true;
116
117out_no_isolated:
118 folio_unlock(folio);
119out_putfolio:
120 folio_put(folio);
121out:
122 return false;
123}
124
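/* Hand an isolated movable folio back to its driver and clear the isolated flag. */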
125static void putback_movable_folio(struct folio *folio)
126{
127 const struct movable_operations *mops = folio_movable_ops(folio);
128
129 mops->putback_page(&folio->page);
130 folio_clear_isolated(folio);
131}
132
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
141void putback_movable_pages(struct list_head *l)
142{
143 struct folio *folio;
144 struct folio *folio2;
145
146 list_for_each_entry_safe(folio, folio2, l, lru) {
147 if (unlikely(folio_test_hugetlb(folio))) {
148 folio_putback_active_hugetlb(folio);
149 continue;
150 }
151 list_del(&folio->lru);
		/*
		 * We isolated a non-LRU movable folio, so here we can use
		 * __folio_test_movable() because an LRU folio's mapping
		 * cannot have PAGE_MAPPING_MOVABLE set.
		 */
157 if (unlikely(__folio_test_movable(folio))) {
158 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
159 folio_lock(folio);
160 if (folio_test_movable(folio))
161 putback_movable_folio(folio);
162 else
163 folio_clear_isolated(folio);
164 folio_unlock(folio);
165 folio_put(folio);
166 } else {
167 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
168 folio_is_file_lru(folio), -folio_nr_pages(folio));
169 folio_putback_lru(folio);
170 }
171 }
172}
173
174/* Must be called with an elevated refcount on the non-hugetlb folio */
175bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
176{
177 bool isolated, lru;
178
179 if (folio_test_hugetlb(folio))
180 return isolate_hugetlb(folio, list);
181
182 lru = !__folio_test_movable(folio);
183 if (lru)
184 isolated = folio_isolate_lru(folio);
185 else
186 isolated = isolate_movable_page(&folio->page,
187 ISOLATE_UNEVICTABLE);
188
189 if (!isolated)
190 return false;
191
192 list_add(&folio->lru, list);
193 if (lru)
194 node_stat_add_folio(folio, NR_ISOLATED_ANON +
195 folio_is_file_lru(folio));
196
197 return true;
198}
199
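/*
 * Called while removing migration entries: if the subpage at @idx contains
 * only zeroes, install a mapping of the shared zeropage instead of remapping
 * the subpage itself. Returns true if the zeropage mapping was installed.
 */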
200static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
201 struct folio *folio,
202 unsigned long idx)
203{
204 struct page *page = folio_page(folio, idx);
205 bool contains_data;
206 pte_t newpte;
207 void *addr;
208
209 VM_BUG_ON_PAGE(PageCompound(page), page);
210 VM_BUG_ON_PAGE(!PageAnon(page), page);
211 VM_BUG_ON_PAGE(!PageLocked(page), page);
212 VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
213
214 if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
215 mm_forbids_zeropage(pvmw->vma->vm_mm))
216 return false;
217
	/*
	 * The pmd entry mapping the old THP was flushed and the pte mapping
	 * this subpage is now non-present. If the subpage contains only
	 * zeroes, map it to the shared zeropage instead.
	 */
223 addr = kmap_local_page(page);
224 contains_data = memchr_inv(addr, 0, PAGE_SIZE);
225 kunmap_local(addr);
226
227 if (contains_data)
228 return false;
229
230 newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
231 pvmw->vma->vm_page_prot));
232 set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
233
234 dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
235 return true;
236}
237
238struct rmap_walk_arg {
239 struct folio *folio;
240 bool map_unused_to_zeropage;
241};
242
243/*
244 * Restore a potential migration pte to a working pte entry
245 */
246static bool remove_migration_pte(struct folio *folio,
247 struct vm_area_struct *vma, unsigned long addr, void *arg)
248{
249 struct rmap_walk_arg *rmap_walk_arg = arg;
250 DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
251
252 while (page_vma_mapped_walk(&pvmw)) {
253 rmap_t rmap_flags = RMAP_NONE;
254 pte_t old_pte;
255 pte_t pte;
256 swp_entry_t entry;
257 struct page *new;
258 unsigned long idx = 0;
259
260 /* pgoff is invalid for ksm pages, but they are never large */
261 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
262 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
263 new = folio_page(folio, idx);
264
265#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
266 /* PMD-mapped THP migration entry */
267 if (!pvmw.pte) {
268 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
269 !folio_test_pmd_mappable(folio), folio);
270 remove_migration_pmd(&pvmw, new);
271 continue;
272 }
273#endif
274 if (rmap_walk_arg->map_unused_to_zeropage &&
275 try_to_map_unused_to_zeropage(&pvmw, folio, idx))
276 continue;
277
278 folio_get(folio);
279 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
280 old_pte = ptep_get(pvmw.pte);
281
282 entry = pte_to_swp_entry(old_pte);
283 if (!is_migration_entry_young(entry))
284 pte = pte_mkold(pte);
285 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
286 pte = pte_mkdirty(pte);
287 if (pte_swp_soft_dirty(old_pte))
288 pte = pte_mksoft_dirty(pte);
289 else
290 pte = pte_clear_soft_dirty(pte);
291
292 if (is_writable_migration_entry(entry))
293 pte = pte_mkwrite(pte, vma);
294 else if (pte_swp_uffd_wp(old_pte))
295 pte = pte_mkuffd_wp(pte);
296
297 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
298 rmap_flags |= RMAP_EXCLUSIVE;
299
300 if (unlikely(is_device_private_page(new))) {
301 if (pte_write(pte))
302 entry = make_writable_device_private_entry(
303 page_to_pfn(new));
304 else
305 entry = make_readable_device_private_entry(
306 page_to_pfn(new));
307 pte = swp_entry_to_pte(entry);
308 if (pte_swp_soft_dirty(old_pte))
309 pte = pte_swp_mksoft_dirty(pte);
310 if (pte_swp_uffd_wp(old_pte))
311 pte = pte_swp_mkuffd_wp(pte);
312 }
313
314#ifdef CONFIG_HUGETLB_PAGE
315 if (folio_test_hugetlb(folio)) {
316 struct hstate *h = hstate_vma(vma);
317 unsigned int shift = huge_page_shift(h);
318 unsigned long psize = huge_page_size(h);
319
320 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
321 if (folio_test_anon(folio))
322 hugetlb_add_anon_rmap(folio, vma, pvmw.address,
323 rmap_flags);
324 else
325 hugetlb_add_file_rmap(folio);
326 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
327 psize);
328 } else
329#endif
330 {
331 if (folio_test_anon(folio))
332 folio_add_anon_rmap_pte(folio, new, vma,
333 pvmw.address, rmap_flags);
334 else
335 folio_add_file_rmap_pte(folio, new, vma);
336 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
337 }
338 if (vma->vm_flags & VM_LOCKED)
339 mlock_drain_local();
340
341 trace_remove_migration_pte(pvmw.address, pte_val(pte),
342 compound_order(new));
343
344 /* No need to invalidate - it was non-present before */
345 update_mmu_cache(vma, pvmw.address, pvmw.pte);
346 }
347
348 return true;
349}
350
351/*
352 * Get rid of all migration entries and replace them by
353 * references to the indicated page.
354 */
355void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
356{
357 struct rmap_walk_arg rmap_walk_arg = {
358 .folio = src,
359 .map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
360 };
361
362 struct rmap_walk_control rwc = {
363 .rmap_one = remove_migration_pte,
364 .arg = &rmap_walk_arg,
365 };
366
367 VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
368
369 if (flags & RMP_LOCKED)
370 rmap_walk_locked(dst, &rwc);
371 else
372 rmap_walk(dst, &rwc);
373}
374
375/*
376 * Something used the pte of a page under migration. We need to
377 * get to the page and wait until migration is finished.
378 * When we return from this function the fault will be retried.
379 */
380void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
381 unsigned long address)
382{
383 spinlock_t *ptl;
384 pte_t *ptep;
385 pte_t pte;
386 swp_entry_t entry;
387
388 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
389 if (!ptep)
390 return;
391
392 pte = ptep_get(ptep);
393 pte_unmap(ptep);
394
395 if (!is_swap_pte(pte))
396 goto out;
397
398 entry = pte_to_swp_entry(pte);
399 if (!is_migration_entry(entry))
400 goto out;
401
402 migration_entry_wait_on_locked(entry, ptl);
403 return;
404out:
405 spin_unlock(ptl);
406}
407
408#ifdef CONFIG_HUGETLB_PAGE
409/*
410 * The vma read lock must be held upon entry. Holding that lock prevents either
411 * the pte or the ptl from being freed.
412 *
413 * This function will release the vma lock before returning.
414 */
415void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
416{
417 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
418 pte_t pte;
419
420 hugetlb_vma_assert_locked(vma);
421 spin_lock(ptl);
422 pte = huge_ptep_get(vma->vm_mm, addr, ptep);
423
424 if (unlikely(!is_hugetlb_entry_migration(pte))) {
425 spin_unlock(ptl);
426 hugetlb_vma_unlock_read(vma);
427 } else {
428 /*
429 * If migration entry existed, safe to release vma lock
430 * here because the pgtable page won't be freed without the
431 * pgtable lock released. See comment right above pgtable
432 * lock release in migration_entry_wait_on_locked().
433 */
434 hugetlb_vma_unlock_read(vma);
435 migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
436 }
437}
438#endif
439
440#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
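/* Like migration_entry_wait(), but for a PMD-level migration entry. */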
441void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
442{
443 spinlock_t *ptl;
444
445 ptl = pmd_lock(mm, pmd);
446 if (!is_pmd_migration_entry(*pmd))
447 goto unlock;
448 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
449 return;
450unlock:
451 spin_unlock(ptl);
452}
453#endif
454
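/*
 * Number of references the caller is expected to hold on @folio for the
 * migration to proceed: one held by the caller, plus one per page for the
 * page cache or swap cache when @mapping is set, plus one more if private
 * data (e.g. buffer heads) is attached.
 */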
455static int folio_expected_refs(struct address_space *mapping,
456 struct folio *folio)
457{
458 int refs = 1;
459 if (!mapping)
460 return refs;
461
462 refs += folio_nr_pages(folio);
463 if (folio_test_private(folio))
464 refs++;
465
466 return refs;
467}
468
469/*
470 * Replace the folio in the mapping.
471 *
472 * The number of remaining references must be:
473 * 1 for anonymous folios without a mapping
474 * 2 for folios with a mapping
475 * 3 for folios with a mapping and PagePrivate/PagePrivate2 set.
476 */
477static int __folio_migrate_mapping(struct address_space *mapping,
478 struct folio *newfolio, struct folio *folio, int expected_count)
479{
480 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
481 struct zone *oldzone, *newzone;
482 int dirty;
483 long nr = folio_nr_pages(folio);
484 long entries, i;
485
486 if (!mapping) {
487 /* Take off deferred split queue while frozen and memcg set */
488 if (folio_test_large(folio) &&
489 folio_test_large_rmappable(folio)) {
490 if (!folio_ref_freeze(folio, expected_count))
491 return -EAGAIN;
492 folio_undo_large_rmappable(folio);
493 folio_ref_unfreeze(folio, expected_count);
494 }
495
496 /* No turning back from here */
497 newfolio->index = folio->index;
498 newfolio->mapping = folio->mapping;
499 if (folio_test_anon(folio) && folio_test_large(folio))
500 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
501 if (folio_test_swapbacked(folio))
502 __folio_set_swapbacked(newfolio);
503
504 return MIGRATEPAGE_SUCCESS;
505 }
506
507 oldzone = folio_zone(folio);
508 newzone = folio_zone(newfolio);
509
510 xas_lock_irq(&xas);
511 if (!folio_ref_freeze(folio, expected_count)) {
512 xas_unlock_irq(&xas);
513 return -EAGAIN;
514 }
515
516 /* Take off deferred split queue while frozen and memcg set */
517 folio_undo_large_rmappable(folio);
518
519 /*
520 * Now we know that no one else is looking at the folio:
521 * no turning back from here.
522 */
523 newfolio->index = folio->index;
524 newfolio->mapping = folio->mapping;
525 if (folio_test_anon(folio) && folio_test_large(folio))
526 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
527 folio_ref_add(newfolio, nr); /* add cache reference */
528 if (folio_test_swapbacked(folio)) {
529 __folio_set_swapbacked(newfolio);
530 if (folio_test_swapcache(folio)) {
531 folio_set_swapcache(newfolio);
532 newfolio->private = folio_get_private(folio);
533 }
534 entries = nr;
535 } else {
536 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
537 entries = 1;
538 }
539
540 /* Move dirty while folio refs frozen and newfolio not yet exposed */
541 dirty = folio_test_dirty(folio);
542 if (dirty) {
543 folio_clear_dirty(folio);
544 folio_set_dirty(newfolio);
545 }
546
547 /* Swap cache still stores N entries instead of a high-order entry */
548 for (i = 0; i < entries; i++) {
549 xas_store(&xas, newfolio);
550 xas_next(&xas);
551 }
552
553 /*
554 * Drop cache reference from old folio by unfreezing
555 * to one less reference.
556 * We know this isn't the last reference.
557 */
558 folio_ref_unfreeze(folio, expected_count - nr);
559
560 xas_unlock(&xas);
561 /* Leave irq disabled to prevent preemption while updating stats */
562
563 /*
564 * If moved to a different zone then also account
565 * the folio for that zone. Other VM counters will be
566 * taken care of when we establish references to the
567 * new folio and drop references to the old folio.
568 *
569 * Note that anonymous folios are accounted for
570 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
571 * are mapped to swap space.
572 */
573 if (newzone != oldzone) {
574 struct lruvec *old_lruvec, *new_lruvec;
575 struct mem_cgroup *memcg;
576
577 memcg = folio_memcg(folio);
578 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
579 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
580
581 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
582 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
583 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
584 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
585 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
586
587 if (folio_test_pmd_mappable(folio)) {
588 __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
589 __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
590 }
591 }
592#ifdef CONFIG_SWAP
593 if (folio_test_swapcache(folio)) {
594 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
595 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
596 }
597#endif
598 if (dirty && mapping_can_writeback(mapping)) {
599 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
600 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
601 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
602 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
603 }
604 }
605 local_irq_enable();
606
607 return MIGRATEPAGE_SUCCESS;
608}
609
610int folio_migrate_mapping(struct address_space *mapping,
611 struct folio *newfolio, struct folio *folio, int extra_count)
612{
613 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
614
615 if (folio_ref_count(folio) != expected_count)
616 return -EAGAIN;
617
618 return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
619}
620EXPORT_SYMBOL(folio_migrate_mapping);
621
622/*
623 * The expected number of remaining references is the same as that
624 * of folio_migrate_mapping().
625 */
626int migrate_huge_page_move_mapping(struct address_space *mapping,
627 struct folio *dst, struct folio *src)
628{
629 XA_STATE(xas, &mapping->i_pages, folio_index(src));
630 int rc, expected_count = folio_expected_refs(mapping, src);
631
632 if (folio_ref_count(src) != expected_count)
633 return -EAGAIN;
634
635 rc = folio_mc_copy(dst, src);
636 if (unlikely(rc))
637 return rc;
638
639 xas_lock_irq(&xas);
640 if (!folio_ref_freeze(src, expected_count)) {
641 xas_unlock_irq(&xas);
642 return -EAGAIN;
643 }
644
645 dst->index = src->index;
646 dst->mapping = src->mapping;
647
648 folio_ref_add(dst, folio_nr_pages(dst));
649
650 xas_store(&xas, dst);
651
652 folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
653
654 xas_unlock_irq(&xas);
655
656 return MIGRATEPAGE_SUCCESS;
657}
658
659/*
660 * Copy the flags and some other ancillary information
661 */
662void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
663{
664 int cpupid;
665
666 if (folio_test_referenced(folio))
667 folio_set_referenced(newfolio);
668 if (folio_test_uptodate(folio))
669 folio_mark_uptodate(newfolio);
670 if (folio_test_clear_active(folio)) {
671 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
672 folio_set_active(newfolio);
673 } else if (folio_test_clear_unevictable(folio))
674 folio_set_unevictable(newfolio);
675 if (folio_test_workingset(folio))
676 folio_set_workingset(newfolio);
677 if (folio_test_checked(folio))
678 folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on an
	 * effectively unmapped and unreferenced first sub-page of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
685 if (folio_test_mappedtodisk(folio))
686 folio_set_mappedtodisk(newfolio);
687
688 /* Move dirty on pages not done by folio_migrate_mapping() */
689 if (folio_test_dirty(folio))
690 folio_set_dirty(newfolio);
691
692 if (folio_test_young(folio))
693 folio_set_young(newfolio);
694 if (folio_test_idle(folio))
695 folio_set_idle(newfolio);
696
697 /*
698 * Copy NUMA information to the new page, to prevent over-eager
699 * future migrations of this same page.
700 */
701 cpupid = folio_xchg_last_cpupid(folio, -1);
	/*
	 * In memory tiering mode, when migrating between a slow and a fast
	 * memory node, reset cpupid, because it is used to record the
	 * page access time on the slow memory node.
	 */
707 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
708 bool f_toptier = node_is_toptier(folio_nid(folio));
709 bool t_toptier = node_is_toptier(folio_nid(newfolio));
710
711 if (f_toptier != t_toptier)
712 cpupid = -1;
713 }
714 folio_xchg_last_cpupid(newfolio, cpupid);
715
716 folio_migrate_ksm(newfolio, folio);
717 /*
718 * Please do not reorder this without considering how mm/ksm.c's
719 * ksm_get_folio() depends upon ksm_migrate_page() and the
720 * swapcache flag.
721 */
722 if (folio_test_swapcache(folio))
723 folio_clear_swapcache(folio);
724 folio_clear_private(folio);
725
726 /* page->private contains hugetlb specific flags */
727 if (!folio_test_hugetlb(folio))
728 folio->private = NULL;
729
730 /*
731 * If any waiters have accumulated on the new page then
732 * wake them up.
733 */
734 if (folio_test_writeback(newfolio))
735 folio_end_writeback(newfolio);
736
	/*
	 * PG_readahead shares the same bit with PG_reclaim. The
	 * folio_end_writeback() above may clear PG_readahead mistakenly,
	 * so set the bit after that.
	 */
742 if (folio_test_readahead(folio))
743 folio_set_readahead(newfolio);
744
745 folio_copy_owner(newfolio, folio);
746 pgalloc_tag_copy(newfolio, folio);
747
748 mem_cgroup_migrate(folio, newfolio);
749}
750EXPORT_SYMBOL(folio_migrate_flags);
751
752/************************************************************
753 * Migration functions
754 ***********************************************************/
755
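/*
 * Common helper for folio migration: check the reference count, copy the
 * data from @src to @dst, replace @src with @dst in the mapping, carry over
 * private data if requested, and finally migrate the folio flags.
 */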
756static int __migrate_folio(struct address_space *mapping, struct folio *dst,
757 struct folio *src, void *src_private,
758 enum migrate_mode mode)
759{
760 int rc, expected_count = folio_expected_refs(mapping, src);
761
762 /* Check whether src does not have extra refs before we do more work */
763 if (folio_ref_count(src) != expected_count)
764 return -EAGAIN;
765
766 rc = folio_mc_copy(dst, src);
767 if (unlikely(rc))
768 return rc;
769
770 rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
771 if (rc != MIGRATEPAGE_SUCCESS)
772 return rc;
773
774 if (src_private)
775 folio_attach_private(dst, folio_detach_private(src));
776
777 folio_migrate_flags(dst, src);
778 return MIGRATEPAGE_SUCCESS;
779}
780
781/**
782 * migrate_folio() - Simple folio migration.
783 * @mapping: The address_space containing the folio.
784 * @dst: The folio to migrate the data to.
785 * @src: The folio containing the current data.
786 * @mode: How to migrate the page.
787 *
788 * Common logic to directly migrate a single LRU folio suitable for
789 * folios that do not use PagePrivate/PagePrivate2.
790 *
791 * Folios are locked upon entry and exit.
792 */
793int migrate_folio(struct address_space *mapping, struct folio *dst,
794 struct folio *src, enum migrate_mode mode)
795{
796 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
797 return __migrate_folio(mapping, dst, src, NULL, mode);
798}
799EXPORT_SYMBOL(migrate_folio);
800
801#ifdef CONFIG_BUFFER_HEAD
802/* Returns true if all buffers are successfully locked */
803static bool buffer_migrate_lock_buffers(struct buffer_head *head,
804 enum migrate_mode mode)
805{
806 struct buffer_head *bh = head;
807 struct buffer_head *failed_bh;
808
809 do {
810 if (!trylock_buffer(bh)) {
811 if (mode == MIGRATE_ASYNC)
812 goto unlock;
813 if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
814 goto unlock;
815 lock_buffer(bh);
816 }
817
818 bh = bh->b_this_page;
819 } while (bh != head);
820
821 return true;
822
823unlock:
824 /* We failed to lock the buffer and cannot stall. */
825 failed_bh = bh;
826 bh = head;
827 while (bh != failed_bh) {
828 unlock_buffer(bh);
829 bh = bh->b_this_page;
830 }
831
832 return false;
833}
834
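/*
 * Migrate a folio that has buffer heads attached. With @check_refs, also
 * verify under the mapping's private lock that no buffer head has an
 * elevated b_count before moving the buffers over to @dst.
 */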
835static int __buffer_migrate_folio(struct address_space *mapping,
836 struct folio *dst, struct folio *src, enum migrate_mode mode,
837 bool check_refs)
838{
839 struct buffer_head *bh, *head;
840 int rc;
841 int expected_count;
842
843 head = folio_buffers(src);
844 if (!head)
845 return migrate_folio(mapping, dst, src, mode);
846
847 /* Check whether page does not have extra refs before we do more work */
848 expected_count = folio_expected_refs(mapping, src);
849 if (folio_ref_count(src) != expected_count)
850 return -EAGAIN;
851
852 if (!buffer_migrate_lock_buffers(head, mode))
853 return -EAGAIN;
854
855 if (check_refs) {
856 bool busy;
857 bool invalidated = false;
858
859recheck_buffers:
860 busy = false;
861 spin_lock(&mapping->i_private_lock);
862 bh = head;
863 do {
864 if (atomic_read(&bh->b_count)) {
865 busy = true;
866 break;
867 }
868 bh = bh->b_this_page;
869 } while (bh != head);
870 if (busy) {
871 if (invalidated) {
872 rc = -EAGAIN;
873 goto unlock_buffers;
874 }
875 spin_unlock(&mapping->i_private_lock);
876 invalidate_bh_lrus();
877 invalidated = true;
878 goto recheck_buffers;
879 }
880 }
881
882 rc = filemap_migrate_folio(mapping, dst, src, mode);
883 if (rc != MIGRATEPAGE_SUCCESS)
884 goto unlock_buffers;
885
886 bh = head;
887 do {
888 folio_set_bh(bh, dst, bh_offset(bh));
889 bh = bh->b_this_page;
890 } while (bh != head);
891
892unlock_buffers:
893 if (check_refs)
894 spin_unlock(&mapping->i_private_lock);
895 bh = head;
896 do {
897 unlock_buffer(bh);
898 bh = bh->b_this_page;
899 } while (bh != head);
900
901 return rc;
902}
903
904/**
905 * buffer_migrate_folio() - Migration function for folios with buffers.
906 * @mapping: The address space containing @src.
907 * @dst: The folio to migrate to.
908 * @src: The folio to migrate from.
909 * @mode: How to migrate the folio.
910 *
911 * This function can only be used if the underlying filesystem guarantees
912 * that no other references to @src exist. For example attached buffer
913 * heads are accessed only under the folio lock. If your filesystem cannot
914 * provide this guarantee, buffer_migrate_folio_norefs() may be more
915 * appropriate.
916 *
917 * Return: 0 on success or a negative errno on failure.
918 */
919int buffer_migrate_folio(struct address_space *mapping,
920 struct folio *dst, struct folio *src, enum migrate_mode mode)
921{
922 return __buffer_migrate_folio(mapping, dst, src, mode, false);
923}
924EXPORT_SYMBOL(buffer_migrate_folio);
925
926/**
927 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
928 * @mapping: The address space containing @src.
929 * @dst: The folio to migrate to.
930 * @src: The folio to migrate from.
931 * @mode: How to migrate the folio.
932 *
933 * Like buffer_migrate_folio() except that this variant is more careful
934 * and checks that there are also no buffer head references. This function
935 * is the right one for mappings where buffer heads are directly looked
936 * up and referenced (such as block device mappings).
937 *
938 * Return: 0 on success or a negative errno on failure.
939 */
940int buffer_migrate_folio_norefs(struct address_space *mapping,
941 struct folio *dst, struct folio *src, enum migrate_mode mode)
942{
943 return __buffer_migrate_folio(mapping, dst, src, mode, true);
944}
945EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
946#endif /* CONFIG_BUFFER_HEAD */
947
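/*
 * Migration function for filesystems that keep private data attached to
 * their folios: the private data is carried over to @dst along with the
 * rest of the folio state.
 */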
948int filemap_migrate_folio(struct address_space *mapping,
949 struct folio *dst, struct folio *src, enum migrate_mode mode)
950{
951 return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
952}
953EXPORT_SYMBOL_GPL(filemap_migrate_folio);
954
955/*
956 * Writeback a folio to clean the dirty state
957 */
958static int writeout(struct address_space *mapping, struct folio *folio)
959{
960 struct writeback_control wbc = {
961 .sync_mode = WB_SYNC_NONE,
962 .nr_to_write = 1,
963 .range_start = 0,
964 .range_end = LLONG_MAX,
965 .for_reclaim = 1
966 };
967 int rc;
968
969 if (!mapping->a_ops->writepage)
970 /* No write method for the address space */
971 return -EINVAL;
972
973 if (!folio_clear_dirty_for_io(folio))
974 /* Someone else already triggered a write */
975 return -EAGAIN;
976
977 /*
978 * A dirty folio may imply that the underlying filesystem has
979 * the folio on some queue. So the folio must be clean for
980 * migration. Writeout may mean we lose the lock and the
981 * folio state is no longer what we checked for earlier.
982 * At this point we know that the migration attempt cannot
983 * be successful.
984 */
985 remove_migration_ptes(folio, folio, 0);
986
987 rc = mapping->a_ops->writepage(&folio->page, &wbc);
988
989 if (rc != AOP_WRITEPAGE_ACTIVATE)
990 /* unlocked. Relock */
991 folio_lock(folio);
992
993 return (rc < 0) ? -EIO : -EAGAIN;
994}
995
996/*
997 * Default handling if a filesystem does not provide a migration function.
998 */
999static int fallback_migrate_folio(struct address_space *mapping,
1000 struct folio *dst, struct folio *src, enum migrate_mode mode)
1001{
1002 if (folio_test_dirty(src)) {
1003 /* Only writeback folios in full synchronous migration */
1004 switch (mode) {
1005 case MIGRATE_SYNC:
1006 break;
1007 default:
1008 return -EBUSY;
1009 }
1010 return writeout(mapping, src);
1011 }
1012
1013 /*
1014 * Buffers may be managed in a filesystem specific way.
1015 * We must have no buffers or drop them.
1016 */
1017 if (!filemap_release_folio(src, GFP_KERNEL))
1018 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
1019
1020 return migrate_folio(mapping, dst, src, mode);
1021}
1022
1023/*
1024 * Move a page to a newly allocated page
1025 * The page is locked and all ptes have been successfully removed.
1026 *
1027 * The new page will have replaced the old page if this function
1028 * is successful.
1029 *
1030 * Return value:
1031 * < 0 - error code
1032 * MIGRATEPAGE_SUCCESS - success
1033 */
1034static int move_to_new_folio(struct folio *dst, struct folio *src,
1035 enum migrate_mode mode)
1036{
1037 int rc = -EAGAIN;
1038 bool is_lru = !__folio_test_movable(src);
1039
1040 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
1041 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
1042
1043 if (likely(is_lru)) {
1044 struct address_space *mapping = folio_mapping(src);
1045
1046 if (!mapping)
1047 rc = migrate_folio(mapping, dst, src, mode);
1048 else if (mapping_inaccessible(mapping))
1049 rc = -EOPNOTSUPP;
1050 else if (mapping->a_ops->migrate_folio)
1051 /*
1052 * Most folios have a mapping and most filesystems
1053 * provide a migrate_folio callback. Anonymous folios
1054 * are part of swap space which also has its own
1055 * migrate_folio callback. This is the most common path
1056 * for page migration.
1057 */
1058 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
1059 mode);
1060 else
1061 rc = fallback_migrate_folio(mapping, dst, src, mode);
1062 } else {
1063 const struct movable_operations *mops;
1064
		/*
		 * A non-LRU page could have been released after the
		 * isolation step. In that case, we shouldn't try migration.
		 */
1069 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1070 if (!folio_test_movable(src)) {
1071 rc = MIGRATEPAGE_SUCCESS;
1072 folio_clear_isolated(src);
1073 goto out;
1074 }
1075
1076 mops = folio_movable_ops(src);
1077 rc = mops->migrate_page(&dst->page, &src->page, mode);
1078 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
1079 !folio_test_isolated(src));
1080 }
1081
1082 /*
1083 * When successful, old pagecache src->mapping must be cleared before
1084 * src is freed; but stats require that PageAnon be left as PageAnon.
1085 */
1086 if (rc == MIGRATEPAGE_SUCCESS) {
1087 if (__folio_test_movable(src)) {
1088 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1089
			/*
			 * We clear PG_movable under the page lock so that
			 * no compactor can try to migrate this page.
			 */
1094 folio_clear_isolated(src);
1095 }
1096
		/*
		 * Anonymous and movable src->mapping will be cleared by
		 * free_pages_prepare(), so don't reset it here; leaving it
		 * keeps type checks such as PageAnon working.
		 */
1102 if (!folio_mapping_flags(src))
1103 src->mapping = NULL;
1104
1105 if (likely(!folio_is_zone_device(dst)))
1106 flush_dcache_folio(dst);
1107 }
1108out:
1109 return rc;
1110}
1111
/*
 * To record some information during migration, we use the otherwise
 * unused private field of the newly allocated destination folio.
 * This is safe because nobody else is using it.
 */
1117enum {
1118 PAGE_WAS_MAPPED = BIT(0),
1119 PAGE_WAS_MLOCKED = BIT(1),
1120 PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1121};
1122
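/*
 * The old page state bits above are stashed in the low bits of dst->private,
 * alongside the anon_vma pointer, by the two helpers below.
 */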
1123static void __migrate_folio_record(struct folio *dst,
1124 int old_page_state,
1125 struct anon_vma *anon_vma)
1126{
1127 dst->private = (void *)anon_vma + old_page_state;
1128}
1129
1130static void __migrate_folio_extract(struct folio *dst,
1131 int *old_page_state,
1132 struct anon_vma **anon_vmap)
1133{
1134 unsigned long private = (unsigned long)dst->private;
1135
1136 *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1137 *old_page_state = private & PAGE_OLD_STATES;
1138 dst->private = NULL;
1139}
1140
1141/* Restore the source folio to the original state upon failure */
1142static void migrate_folio_undo_src(struct folio *src,
1143 int page_was_mapped,
1144 struct anon_vma *anon_vma,
1145 bool locked,
1146 struct list_head *ret)
1147{
1148 if (page_was_mapped)
1149 remove_migration_ptes(src, src, 0);
1150 /* Drop an anon_vma reference if we took one */
1151 if (anon_vma)
1152 put_anon_vma(anon_vma);
1153 if (locked)
1154 folio_unlock(src);
1155 if (ret)
1156 list_move_tail(&src->lru, ret);
1157}
1158
1159/* Restore the destination folio to the original state upon failure */
1160static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1161 free_folio_t put_new_folio, unsigned long private)
1162{
1163 if (locked)
1164 folio_unlock(dst);
1165 if (put_new_folio)
1166 put_new_folio(dst, private);
1167 else
1168 folio_put(dst);
1169}
1170
1171/* Cleanup src folio upon migration success */
1172static void migrate_folio_done(struct folio *src,
1173 enum migrate_reason reason)
1174{
	/*
	 * Compaction can also migrate non-LRU pages, which are not
	 * accounted to NR_ISOLATED_*. They can be recognized via
	 * __folio_test_movable().
	 */
1180 if (likely(!__folio_test_movable(src)))
1181 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1182 folio_is_file_lru(src), -folio_nr_pages(src));
1183
1184 if (reason != MR_MEMORY_FAILURE)
1185 /* We release the page in page_handle_poison. */
1186 folio_put(src);
1187}
1188
1189/* Obtain the lock on page, remove all ptes. */
1190static int migrate_folio_unmap(new_folio_t get_new_folio,
1191 free_folio_t put_new_folio, unsigned long private,
1192 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1193 enum migrate_reason reason, struct list_head *ret)
1194{
1195 struct folio *dst;
1196 int rc = -EAGAIN;
1197 int old_page_state = 0;
1198 struct anon_vma *anon_vma = NULL;
1199 bool is_lru = data_race(!__folio_test_movable(src));
1200 bool locked = false;
1201 bool dst_locked = false;
1202
1203 if (folio_ref_count(src) == 1) {
1204 /* Folio was freed from under us. So we are done. */
1205 folio_clear_active(src);
1206 folio_clear_unevictable(src);
1207 /* free_pages_prepare() will clear PG_isolated. */
1208 list_del(&src->lru);
1209 migrate_folio_done(src, reason);
1210 return MIGRATEPAGE_SUCCESS;
1211 }
1212
1213 dst = get_new_folio(src, private);
1214 if (!dst)
1215 return -ENOMEM;
1216 *dstp = dst;
1217
1218 dst->private = NULL;
1219
1220 if (!folio_trylock(src)) {
1221 if (mode == MIGRATE_ASYNC)
1222 goto out;
1223
1224 /*
1225 * It's not safe for direct compaction to call lock_page.
1226 * For example, during page readahead pages are added locked
1227 * to the LRU. Later, when the IO completes the pages are
1228 * marked uptodate and unlocked. However, the queueing
1229 * could be merging multiple pages for one bio (e.g.
1230 * mpage_readahead). If an allocation happens for the
1231 * second or third page, the process can end up locking
1232 * the same page twice and deadlocking. Rather than
1233 * trying to be clever about what pages can be locked,
1234 * avoid the use of lock_page for direct compaction
1235 * altogether.
1236 */
1237 if (current->flags & PF_MEMALLOC)
1238 goto out;
1239
1240 /*
1241 * In "light" mode, we can wait for transient locks (eg
1242 * inserting a page into the page table), but it's not
1243 * worth waiting for I/O.
1244 */
1245 if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1246 goto out;
1247
1248 folio_lock(src);
1249 }
1250 locked = true;
1251 if (folio_test_mlocked(src))
1252 old_page_state |= PAGE_WAS_MLOCKED;
1253
1254 if (folio_test_writeback(src)) {
1255 /*
1256 * Only in the case of a full synchronous migration is it
1257 * necessary to wait for PageWriteback. In the async case,
1258 * the retry loop is too short and in the sync-light case,
1259 * the overhead of stalling is too much
1260 */
1261 switch (mode) {
1262 case MIGRATE_SYNC:
1263 break;
1264 default:
1265 rc = -EBUSY;
1266 goto out;
1267 }
1268 folio_wait_writeback(src);
1269 }
1270
	/*
	 * After try_to_migrate(), src->mapcount goes down to 0. In that case
	 * we cannot notice that the anon_vma is freed while we migrate the
	 * page, so this folio_get_anon_vma() delays freeing the anon_vma
	 * until the end of migration. File cache pages are not a problem
	 * because they are protected by the page lock during migration, so
	 * only anon pages need this care here.
	 *
	 * Only folio_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
1285 if (folio_test_anon(src) && !folio_test_ksm(src))
1286 anon_vma = folio_get_anon_vma(src);
1287
1288 /*
1289 * Block others from accessing the new page when we get around to
1290 * establishing additional references. We are usually the only one
1291 * holding a reference to dst at this point. We used to have a BUG
1292 * here if folio_trylock(dst) fails, but would like to allow for
1293 * cases where there might be a race with the previous use of dst.
1294 * This is much like races on refcount of oldpage: just don't BUG().
1295 */
1296 if (unlikely(!folio_trylock(dst)))
1297 goto out;
1298 dst_locked = true;
1299
1300 if (unlikely(!is_lru)) {
1301 __migrate_folio_record(dst, old_page_state, anon_vma);
1302 return MIGRATEPAGE_UNMAP;
1303 }
1304
1305 /*
1306 * Corner case handling:
1307 * 1. When a new swap-cache page is read into, it is added to the LRU
1308 * and treated as swapcache but it has no rmap yet.
1309 * Calling try_to_unmap() against a src->mapping==NULL page will
1310 * trigger a BUG. So handle it here.
1311 * 2. An orphaned page (see truncate_cleanup_page) might have
1312 * fs-private metadata. The page can be picked up due to memory
1313 * offlining. Everywhere else except page reclaim, the page is
1314 * invisible to the vm, so the page can not be migrated. So try to
1315 * free the metadata, so the page can be freed.
1316 */
1317 if (!src->mapping) {
1318 if (folio_test_private(src)) {
1319 try_to_free_buffers(src);
1320 goto out;
1321 }
1322 } else if (folio_mapped(src)) {
1323 /* Establish migration ptes */
1324 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1325 !folio_test_ksm(src) && !anon_vma, src);
1326 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1327 old_page_state |= PAGE_WAS_MAPPED;
1328 }
1329
1330 if (!folio_mapped(src)) {
1331 __migrate_folio_record(dst, old_page_state, anon_vma);
1332 return MIGRATEPAGE_UNMAP;
1333 }
1334
1335out:
	/*
	 * A folio that has not been unmapped will be restored to the
	 * right list unless we want to retry.
	 */
1340 if (rc == -EAGAIN)
1341 ret = NULL;
1342
1343 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1344 anon_vma, locked, ret);
1345 migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1346
1347 return rc;
1348}
1349
1350/* Migrate the folio to the newly allocated folio in dst. */
1351static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1352 struct folio *src, struct folio *dst,
1353 enum migrate_mode mode, enum migrate_reason reason,
1354 struct list_head *ret)
1355{
1356 int rc;
1357 int old_page_state = 0;
1358 struct anon_vma *anon_vma = NULL;
1359 bool is_lru = !__folio_test_movable(src);
1360 struct list_head *prev;
1361
1362 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1363 prev = dst->lru.prev;
1364 list_del(&dst->lru);
1365
1366 rc = move_to_new_folio(dst, src, mode);
1367 if (rc)
1368 goto out;
1369
1370 if (unlikely(!is_lru))
1371 goto out_unlock_both;
1372
1373 /*
1374 * When successful, push dst to LRU immediately: so that if it
1375 * turns out to be an mlocked page, remove_migration_ptes() will
1376 * automatically build up the correct dst->mlock_count for it.
1377 *
1378 * We would like to do something similar for the old page, when
1379 * unsuccessful, and other cases when a page has been temporarily
1380 * isolated from the unevictable LRU: but this case is the easiest.
1381 */
1382 folio_add_lru(dst);
1383 if (old_page_state & PAGE_WAS_MLOCKED)
1384 lru_add_drain();
1385
1386 if (old_page_state & PAGE_WAS_MAPPED)
1387 remove_migration_ptes(src, dst, 0);
1388
1389out_unlock_both:
1390 folio_unlock(dst);
1391 set_page_owner_migrate_reason(&dst->page, reason);
	/*
	 * If migration was successful, decrease the refcount of dst,
	 * which will not free the page because the new page owner
	 * increased the refcount.
	 */
1397 folio_put(dst);
1398
1399 /*
1400 * A folio that has been migrated has all references removed
1401 * and will be freed.
1402 */
1403 list_del(&src->lru);
1404 /* Drop an anon_vma reference if we took one */
1405 if (anon_vma)
1406 put_anon_vma(anon_vma);
1407 folio_unlock(src);
1408 migrate_folio_done(src, reason);
1409
1410 return rc;
1411out:
	/*
	 * A folio that has not been migrated will be restored to the
	 * right list unless we want to retry.
	 */
1416 if (rc == -EAGAIN) {
1417 list_add(&dst->lru, prev);
1418 __migrate_folio_record(dst, old_page_state, anon_vma);
1419 return rc;
1420 }
1421
1422 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1423 anon_vma, true, ret);
1424 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1425
1426 return rc;
1427}
1428
/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepages.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference count of the head page is 512 and a bit more).
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on a page under migration,
 * because then the pte is replaced with a migration swap entry and the direct
 * I/O code will wait in the page fault for migration to complete.
 */
1447static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1448 free_folio_t put_new_folio, unsigned long private,
1449 struct folio *src, int force, enum migrate_mode mode,
1450 int reason, struct list_head *ret)
1451{
1452 struct folio *dst;
1453 int rc = -EAGAIN;
1454 int page_was_mapped = 0;
1455 struct anon_vma *anon_vma = NULL;
1456 struct address_space *mapping = NULL;
1457
1458 if (folio_ref_count(src) == 1) {
1459 /* page was freed from under us. So we are done. */
1460 folio_putback_active_hugetlb(src);
1461 return MIGRATEPAGE_SUCCESS;
1462 }
1463
1464 dst = get_new_folio(src, private);
1465 if (!dst)
1466 return -ENOMEM;
1467
1468 if (!folio_trylock(src)) {
1469 if (!force)
1470 goto out;
1471 switch (mode) {
1472 case MIGRATE_SYNC:
1473 break;
1474 default:
1475 goto out;
1476 }
1477 folio_lock(src);
1478 }
1479
1480 /*
1481 * Check for pages which are in the process of being freed. Without
1482 * folio_mapping() set, hugetlbfs specific move page routine will not
1483 * be called and we could leak usage counts for subpools.
1484 */
1485 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1486 rc = -EBUSY;
1487 goto out_unlock;
1488 }
1489
1490 if (folio_test_anon(src))
1491 anon_vma = folio_get_anon_vma(src);
1492
1493 if (unlikely(!folio_trylock(dst)))
1494 goto put_anon;
1495
1496 if (folio_mapped(src)) {
1497 enum ttu_flags ttu = 0;
1498
1499 if (!folio_test_anon(src)) {
1500 /*
1501 * In shared mappings, try_to_unmap could potentially
1502 * call huge_pmd_unshare. Because of this, take
1503 * semaphore in write mode here and set TTU_RMAP_LOCKED
1504 * to let lower levels know we have taken the lock.
1505 */
1506 mapping = hugetlb_folio_mapping_lock_write(src);
1507 if (unlikely(!mapping))
1508 goto unlock_put_anon;
1509
1510 ttu = TTU_RMAP_LOCKED;
1511 }
1512
1513 try_to_migrate(src, ttu);
1514 page_was_mapped = 1;
1515
1516 if (ttu & TTU_RMAP_LOCKED)
1517 i_mmap_unlock_write(mapping);
1518 }
1519
1520 if (!folio_mapped(src))
1521 rc = move_to_new_folio(dst, src, mode);
1522
1523 if (page_was_mapped)
1524 remove_migration_ptes(src,
1525 rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
1526
1527unlock_put_anon:
1528 folio_unlock(dst);
1529
1530put_anon:
1531 if (anon_vma)
1532 put_anon_vma(anon_vma);
1533
1534 if (rc == MIGRATEPAGE_SUCCESS) {
1535 move_hugetlb_state(src, dst, reason);
1536 put_new_folio = NULL;
1537 }
1538
1539out_unlock:
1540 folio_unlock(src);
1541out:
1542 if (rc == MIGRATEPAGE_SUCCESS)
1543 folio_putback_active_hugetlb(src);
1544 else if (rc != -EAGAIN)
1545 list_move_tail(&src->lru, ret);
1546
	/*
	 * If migration was not successful and there's a freeing callback,
	 * use it. Otherwise, folio_putback_active_hugetlb() below will drop
	 * the reference we hold on the destination folio.
	 */
1552 if (put_new_folio)
1553 put_new_folio(dst, private);
1554 else
1555 folio_putback_active_hugetlb(dst);
1556
1557 return rc;
1558}
1559
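/*
 * Lock @folio (only trylock in MIGRATE_ASYNC mode) and try to split it,
 * placing the resulting folios on @split_folios. Returns 0 on success.
 */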
1560static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
1561 enum migrate_mode mode)
1562{
1563 int rc;
1564
1565 if (mode == MIGRATE_ASYNC) {
1566 if (!folio_trylock(folio))
1567 return -EAGAIN;
1568 } else {
1569 folio_lock(folio);
1570 }
1571 rc = split_folio_to_list(folio, split_folios);
1572 folio_unlock(folio);
1573 if (!rc)
1574 list_move_tail(&folio->lru, split_folios);
1575
1576 return rc;
1577}
1578
1579#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1580#define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
1581#else
1582#define NR_MAX_BATCHED_MIGRATION 512
1583#endif
1584#define NR_MAX_MIGRATE_PAGES_RETRY 10
1585#define NR_MAX_MIGRATE_ASYNC_RETRY 3
1586#define NR_MAX_MIGRATE_SYNC_RETRY \
1587 (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1588
1589struct migrate_pages_stats {
1590 int nr_succeeded; /* Normal and large folios migrated successfully, in
1591 units of base pages */
1592 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1593 units of base pages. Untried folios aren't counted */
1594 int nr_thp_succeeded; /* THP migrated successfully */
1595 int nr_thp_failed; /* THP failed to be migrated */
1596 int nr_thp_split; /* THP split before migrating */
1597 int nr_split; /* Large folio (include THP) split before migrating */
1598};
1599
/*
 * Returns the number of hugetlb folios that were not migrated, or an error
 * code after NR_MAX_MIGRATE_PAGES_RETRY attempts, or once no hugetlb folios
 * are movable any more because the list has become empty or no retryable
 * hugetlb folios exist any more. It is the caller's responsibility to call
 * putback_movable_pages() only if ret != 0.
 */
1607static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1608 free_folio_t put_new_folio, unsigned long private,
1609 enum migrate_mode mode, int reason,
1610 struct migrate_pages_stats *stats,
1611 struct list_head *ret_folios)
1612{
1613 int retry = 1;
1614 int nr_failed = 0;
1615 int nr_retry_pages = 0;
1616 int pass = 0;
1617 struct folio *folio, *folio2;
1618 int rc, nr_pages;
1619
1620 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1621 retry = 0;
1622 nr_retry_pages = 0;
1623
1624 list_for_each_entry_safe(folio, folio2, from, lru) {
1625 if (!folio_test_hugetlb(folio))
1626 continue;
1627
1628 nr_pages = folio_nr_pages(folio);
1629
1630 cond_resched();
1631
1632 /*
1633 * Migratability of hugepages depends on architectures and
1634 * their size. This check is necessary because some callers
1635 * of hugepage migration like soft offline and memory
1636 * hotremove don't walk through page tables or check whether
1637 * the hugepage is pmd-based or not before kicking migration.
1638 */
1639 if (!hugepage_migration_supported(folio_hstate(folio))) {
1640 nr_failed++;
1641 stats->nr_failed_pages += nr_pages;
1642 list_move_tail(&folio->lru, ret_folios);
1643 continue;
1644 }
1645
1646 rc = unmap_and_move_huge_page(get_new_folio,
1647 put_new_folio, private,
1648 folio, pass > 2, mode,
1649 reason, ret_folios);
1650 /*
1651 * The rules are:
1652 * Success: hugetlb folio will be put back
1653 * -EAGAIN: stay on the from list
1654 * -ENOMEM: stay on the from list
1655 * Other errno: put on ret_folios list
1656 */
1657 switch(rc) {
1658 case -ENOMEM:
1659 /*
1660 * When memory is low, don't bother to try to migrate
1661 * other folios, just exit.
1662 */
1663 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1664 return -ENOMEM;
1665 case -EAGAIN:
1666 retry++;
1667 nr_retry_pages += nr_pages;
1668 break;
1669 case MIGRATEPAGE_SUCCESS:
1670 stats->nr_succeeded += nr_pages;
1671 break;
1672 default:
1673 /*
1674 * Permanent failure (-EBUSY, etc.):
1675 * unlike -EAGAIN case, the failed folio is
1676 * removed from migration folio list and not
1677 * retried in the next outer loop.
1678 */
1679 nr_failed++;
1680 stats->nr_failed_pages += nr_pages;
1681 break;
1682 }
1683 }
1684 }
1685 /*
1686 * nr_failed is number of hugetlb folios failed to be migrated. After
1687 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1688 * folios as failed.
1689 */
1690 nr_failed += retry;
1691 stats->nr_failed_pages += nr_retry_pages;
1692
1693 return nr_failed;
1694}
1695
/*
 * migrate_pages_batch() first unmaps as many folios on the from list as
 * possible, then moves the unmapped folios.
 *
 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
 * lock or bit while we have locked more than one folio, which may cause a
 * deadlock (e.g., for the loop device). So, if mode != MIGRATE_ASYNC, the
 * length of the from list must be <= 1.
 */
1705static int migrate_pages_batch(struct list_head *from,
1706 new_folio_t get_new_folio, free_folio_t put_new_folio,
1707 unsigned long private, enum migrate_mode mode, int reason,
1708 struct list_head *ret_folios, struct list_head *split_folios,
1709 struct migrate_pages_stats *stats, int nr_pass)
1710{
1711 int retry = 1;
1712 int thp_retry = 1;
1713 int nr_failed = 0;
1714 int nr_retry_pages = 0;
1715 int pass = 0;
1716 bool is_thp = false;
1717 bool is_large = false;
1718 struct folio *folio, *folio2, *dst = NULL, *dst2;
1719 int rc, rc_saved = 0, nr_pages;
1720 LIST_HEAD(unmap_folios);
1721 LIST_HEAD(dst_folios);
1722 bool nosplit = (reason == MR_NUMA_MISPLACED);
1723
1724 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1725 !list_empty(from) && !list_is_singular(from));
1726
1727 for (pass = 0; pass < nr_pass && retry; pass++) {
1728 retry = 0;
1729 thp_retry = 0;
1730 nr_retry_pages = 0;
1731
1732 list_for_each_entry_safe(folio, folio2, from, lru) {
1733 is_large = folio_test_large(folio);
1734 is_thp = is_large && folio_test_pmd_mappable(folio);
1735 nr_pages = folio_nr_pages(folio);
1736
1737 cond_resched();
1738
			/*
			 * The rare folio on the deferred split list should
			 * be split now. It should not count as a failure,
			 * but increment nr_failed because, without doing so,
			 * migrate_pages() may report success with (split but
			 * unmigrated) pages still on its fromlist; whereas it
			 * always reports success when its fromlist is empty.
			 * stats->nr_thp_failed should be increased too,
			 * otherwise a stats inconsistency will happen when
			 * migrate_pages_batch() is called via migrate_pages()
			 * with MIGRATE_SYNC and MIGRATE_ASYNC.
			 *
			 * Only check the flag without removing the folio from
			 * the list, since the folio can be on a local
			 * deferred_split_scan() list and removing it could
			 * corrupt that list. The folio split process below can
			 * handle it with the help of folio_ref_freeze().
			 *
			 * nr_pages > 2 is needed to avoid checking order-1
			 * page cache folios. They exist, in contrast to
			 * non-existent order-1 anonymous folios, and do not
			 * use _deferred_list.
			 */
1762 if (nr_pages > 2 &&
1763 !list_empty(&folio->_deferred_list) &&
1764 folio_test_partially_mapped(folio)) {
1765 if (!try_split_folio(folio, split_folios, mode)) {
1766 nr_failed++;
1767 stats->nr_thp_failed += is_thp;
1768 stats->nr_thp_split += is_thp;
1769 stats->nr_split++;
1770 continue;
1771 }
1772 }
1773
			/*
			 * Large folio migration might be unsupported or
			 * the allocation might fail, so we should retry
			 * on the same folio after splitting the large
			 * folio into normal folios.
			 *
			 * Split folios are put on split_folios, and
			 * we will migrate them after the rest of the
			 * list is processed.
			 */
1784 if (!thp_migration_supported() && is_thp) {
1785 nr_failed++;
1786 stats->nr_thp_failed++;
1787 if (!try_split_folio(folio, split_folios, mode)) {
1788 stats->nr_thp_split++;
1789 stats->nr_split++;
1790 continue;
1791 }
1792 stats->nr_failed_pages += nr_pages;
1793 list_move_tail(&folio->lru, ret_folios);
1794 continue;
1795 }
1796
1797 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1798 private, folio, &dst, mode, reason,
1799 ret_folios);
1800 /*
1801 * The rules are:
1802 * Success: folio will be freed
1803 * Unmap: folio will be put on unmap_folios list,
1804 * dst folio put on dst_folios list
1805 * -EAGAIN: stay on the from list
1806 * -ENOMEM: stay on the from list
1807 * Other errno: put on ret_folios list
1808 */
1809 switch(rc) {
1810 case -ENOMEM:
1811 /*
1812 * When memory is low, don't bother to try to migrate
1813 * other folios, move unmapped folios, then exit.
1814 */
1815 nr_failed++;
1816 stats->nr_thp_failed += is_thp;
1817 /* Large folio NUMA faulting doesn't split to retry. */
1818 if (is_large && !nosplit) {
1819 int ret = try_split_folio(folio, split_folios, mode);
1820
1821 if (!ret) {
1822 stats->nr_thp_split += is_thp;
1823 stats->nr_split++;
1824 break;
1825 } else if (reason == MR_LONGTERM_PIN &&
1826 ret == -EAGAIN) {
1827 /*
1828 * Try again to split large folio to
1829 * mitigate the failure of longterm pinning.
1830 */
1831 retry++;
1832 thp_retry += is_thp;
1833 nr_retry_pages += nr_pages;
1834 /* Undo duplicated failure counting. */
1835 nr_failed--;
1836 stats->nr_thp_failed -= is_thp;
1837 break;
1838 }
1839 }
1840
1841 stats->nr_failed_pages += nr_pages + nr_retry_pages;
				/* nr_failed isn't updated: it won't be used since rc_saved is set */
1843 stats->nr_thp_failed += thp_retry;
1844 rc_saved = rc;
1845 if (list_empty(&unmap_folios))
1846 goto out;
1847 else
1848 goto move;
1849 case -EAGAIN:
1850 retry++;
1851 thp_retry += is_thp;
1852 nr_retry_pages += nr_pages;
1853 break;
1854 case MIGRATEPAGE_SUCCESS:
1855 stats->nr_succeeded += nr_pages;
1856 stats->nr_thp_succeeded += is_thp;
1857 break;
1858 case MIGRATEPAGE_UNMAP:
1859 list_move_tail(&folio->lru, &unmap_folios);
1860 list_add_tail(&dst->lru, &dst_folios);
1861 break;
1862 default:
1863 /*
1864 * Permanent failure (-EBUSY, etc.):
1865 * unlike the -EAGAIN case, the failed folio is
1866 * removed from the migration folio list and not
1867 * retried in the next outer loop.
1868 */
1869 nr_failed++;
1870 stats->nr_thp_failed += is_thp;
1871 stats->nr_failed_pages += nr_pages;
1872 break;
1873 }
1874 }
1875 }
1876 nr_failed += retry;
1877 stats->nr_thp_failed += thp_retry;
1878 stats->nr_failed_pages += nr_retry_pages;
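	/*
	 * Every folio left on unmap_folios at this point has been
	 * successfully unmapped and has its destination folio at the same
	 * position on dst_folios; the two lists are walked in lock step
	 * below. The deferred TLB flush is issued once for the whole
	 * batch before any folio contents are moved.
	 */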
1879move:
1880 /* Flush TLBs for all unmapped folios */
1881 try_to_unmap_flush();
1882
1883 retry = 1;
1884 for (pass = 0; pass < nr_pass && retry; pass++) {
1885 retry = 0;
1886 thp_retry = 0;
1887 nr_retry_pages = 0;
1888
1889 dst = list_first_entry(&dst_folios, struct folio, lru);
1890 dst2 = list_next_entry(dst, lru);
1891 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1892 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1893 nr_pages = folio_nr_pages(folio);
1894
1895 cond_resched();
1896
1897 rc = migrate_folio_move(put_new_folio, private,
1898 folio, dst, mode,
1899 reason, ret_folios);
1900 /*
1901 * The rules are:
1902 * Success: folio will be freed
1903 * -EAGAIN: stay on the unmap_folios list
1904 * Other errno: put on ret_folios list
1905 */
1906 switch (rc) {
1907 case -EAGAIN:
1908 retry++;
1909 thp_retry += is_thp;
1910 nr_retry_pages += nr_pages;
1911 break;
1912 case MIGRATEPAGE_SUCCESS:
1913 stats->nr_succeeded += nr_pages;
1914 stats->nr_thp_succeeded += is_thp;
1915 break;
1916 default:
1917 nr_failed++;
1918 stats->nr_thp_failed += is_thp;
1919 stats->nr_failed_pages += nr_pages;
1920 break;
1921 }
1922 dst = dst2;
1923 dst2 = list_next_entry(dst, lru);
1924 }
1925 }
1926 nr_failed += retry;
1927 stats->nr_thp_failed += thp_retry;
1928 stats->nr_failed_pages += nr_retry_pages;
1929
1930 rc = rc_saved ? : nr_failed;
1931out:
1932 /* Cleanup remaining folios */
1933 dst = list_first_entry(&dst_folios, struct folio, lru);
1934 dst2 = list_next_entry(dst, lru);
1935 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1936 int old_page_state = 0;
1937 struct anon_vma *anon_vma = NULL;
1938
1939 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1940 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1941 anon_vma, true, ret_folios);
1942 list_del(&dst->lru);
1943 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1944 dst = dst2;
1945 dst2 = list_next_entry(dst, lru);
1946 }
1947
1948 return rc;
1949}
1950
1951static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1952 free_folio_t put_new_folio, unsigned long private,
1953 enum migrate_mode mode, int reason,
1954 struct list_head *ret_folios, struct list_head *split_folios,
1955 struct migrate_pages_stats *stats)
1956{
1957 int rc, nr_failed = 0;
1958 LIST_HEAD(folios);
1959 struct migrate_pages_stats astats;
1960
1961 memset(&astats, 0, sizeof(astats));
1962 /* Try to migrate the folios in batch with MIGRATE_ASYNC mode first */
1963 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1964 reason, &folios, split_folios, &astats,
1965 NR_MAX_MIGRATE_ASYNC_RETRY);
1966 stats->nr_succeeded += astats.nr_succeeded;
1967 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1968 stats->nr_thp_split += astats.nr_thp_split;
1969 stats->nr_split += astats.nr_split;
1970 if (rc < 0) {
1971 stats->nr_failed_pages += astats.nr_failed_pages;
1972 stats->nr_thp_failed += astats.nr_thp_failed;
1973 list_splice_tail(&folios, ret_folios);
1974 return rc;
1975 }
1976 stats->nr_thp_failed += astats.nr_thp_split;
1977 /*
1978 * Do not count rc, as pages will be retried below.
1979 * Count nr_split only, since it includes nr_thp_split.
1980 */
1981 nr_failed += astats.nr_split;
1982 /*
1983 * Fall back to migrating all failed folios one by one synchronously. All
1984 * failed folios except split THPs will be retried, so their failure
1985 * isn't counted here.
1986 */
1987 list_splice_tail_init(&folios, from);
1988 while (!list_empty(from)) {
1989 list_move(from->next, &folios);
1990 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1991 private, mode, reason, ret_folios,
1992 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1993 list_splice_tail_init(&folios, ret_folios);
1994 if (rc < 0)
1995 return rc;
1996 nr_failed += rc;
1997 }
1998
1999 return nr_failed;
2000}
2001
2002/*
2003 * migrate_pages - migrate the folios specified in a list, to the free folios
2004 * supplied as the target for the page migration
2005 *
2006 * @from: The list of folios to be migrated.
2007 * @get_new_folio: The function used to allocate free folios to be used
2008 * as the target of the folio migration.
2009 * @put_new_folio: The function used to free target folios if migration
2010 * fails, or NULL if no special handling is necessary.
2011 * @private: Private data to be passed on to get_new_folio()
2012 * @mode: The migration mode that specifies the constraints for
2013 * folio migration, if any.
2014 * @reason: The reason for folio migration.
2015 * @ret_succeeded: Set to the number of folios migrated successfully if
2016 * the caller passes a non-NULL pointer.
2017 *
2018 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
2019 * are movable any more, either because the list has become empty or because no
2020 * retryable folios remain. It is the caller's responsibility to call
2021 * putback_movable_pages() only if ret != 0.
2022 *
2023 * Returns the number of {normal folio, large folio, hugetlb} folios that were
2024 * not migrated, or an error code. The number of large folio splits is counted
2025 * as the number of non-migrated large folios, no matter how many split folios
2026 * of a large folio are migrated successfully.
2027 */
2028int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
2029 free_folio_t put_new_folio, unsigned long private,
2030 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
2031{
2032 int rc, rc_gather;
2033 int nr_pages;
2034 struct folio *folio, *folio2;
2035 LIST_HEAD(folios);
2036 LIST_HEAD(ret_folios);
2037 LIST_HEAD(split_folios);
2038 struct migrate_pages_stats stats;
2039
2040 trace_mm_migrate_pages_start(mode, reason);
2041
2042 memset(&stats, 0, sizeof(stats));
2043
2044 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
2045 mode, reason, &stats, &ret_folios);
2046 if (rc_gather < 0)
2047 goto out;
2048
2049again:
2050 nr_pages = 0;
2051 list_for_each_entry_safe(folio, folio2, from, lru) {
2052 /* Retried hugetlb folios will be kept in the list */
2053 if (folio_test_hugetlb(folio)) {
2054 list_move_tail(&folio->lru, &ret_folios);
2055 continue;
2056 }
2057
2058 nr_pages += folio_nr_pages(folio);
2059 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2060 break;
2061 }
2062 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2063 list_cut_before(&folios, from, &folio2->lru);
2064 else
2065 list_splice_init(from, &folios);
2066 if (mode == MIGRATE_ASYNC)
2067 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2068 private, mode, reason, &ret_folios,
2069 &split_folios, &stats,
2070 NR_MAX_MIGRATE_PAGES_RETRY);
2071 else
2072 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
2073 private, mode, reason, &ret_folios,
2074 &split_folios, &stats);
2075 list_splice_tail_init(&folios, &ret_folios);
2076 if (rc < 0) {
2077 rc_gather = rc;
2078 list_splice_tail(&split_folios, &ret_folios);
2079 goto out;
2080 }
2081 if (!list_empty(&split_folios)) {
2082 /*
2083 * Failure isn't counted here, since all split folios of a large folio
2084 * are already counted as one failure. We also migrate them with only
2085 * minimal effort: force MIGRATE_ASYNC mode and retry once.
2086 */
2087 migrate_pages_batch(&split_folios, get_new_folio,
2088 put_new_folio, private, MIGRATE_ASYNC, reason,
2089 &ret_folios, NULL, &stats, 1);
2090 list_splice_tail_init(&split_folios, &ret_folios);
2091 }
2092 rc_gather += rc;
2093 if (!list_empty(from))
2094 goto again;
2095out:
2096 /*
2097 * Put the permanently failed folios back on the migration list; they
2098 * will be moved to the right list by the caller.
2099 */
2100 list_splice(&ret_folios, from);
2101
2102 /*
2103 * Return 0 in case all split folios of large folios that failed to
2104 * migrate are migrated successfully.
2105 */
2106 if (list_empty(from))
2107 rc_gather = 0;
2108
2109 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2110 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2111 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2112 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2113 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2114 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2115 stats.nr_thp_succeeded, stats.nr_thp_failed,
2116 stats.nr_thp_split, stats.nr_split, mode,
2117 reason);
2118
2119 if (ret_succeeded)
2120 *ret_succeeded = stats.nr_succeeded;
2121
2122 return rc_gather;
2123}
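/*
 * A minimal, illustrative sketch of a migrate_pages() caller; the helper
 * name migrate_list_to_node() is hypothetical, but the body mirrors what
 * do_move_pages_to_node() below does: alloc_migration_target() is used as
 * the allocation callback via a struct migration_target_control, and the
 * folios are put back only when some of them could not be migrated
 * (ret != 0), as required by the comment above:
 *
 *	static int migrate_list_to_node(struct list_head *list, int nid)
 *	{
 *		struct migration_target_control mtc = {
 *			.nid = nid,
 *			.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *			.reason = MR_SYSCALL,
 *		};
 *		int ret;
 *
 *		ret = migrate_pages(list, alloc_migration_target, NULL,
 *				    (unsigned long)&mtc, MIGRATE_SYNC,
 *				    MR_SYSCALL, NULL);
 *		if (ret)
 *			putback_movable_pages(list);
 *		return ret;
 *	}
 */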
2124
2125struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2126{
2127 struct migration_target_control *mtc;
2128 gfp_t gfp_mask;
2129 unsigned int order = 0;
2130 int nid;
2131 int zidx;
2132
2133 mtc = (struct migration_target_control *)private;
2134 gfp_mask = mtc->gfp_mask;
2135 nid = mtc->nid;
2136 if (nid == NUMA_NO_NODE)
2137 nid = folio_nid(src);
2138
2139 if (folio_test_hugetlb(src)) {
2140 struct hstate *h = folio_hstate(src);
2141
2142 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2143 return alloc_hugetlb_folio_nodemask(h, nid,
2144 mtc->nmask, gfp_mask,
2145 htlb_allow_alloc_fallback(mtc->reason));
2146 }
2147
2148 if (folio_test_large(src)) {
2149 /*
2150 * clear __GFP_RECLAIM to make the migration callback
2151 * consistent with regular THP allocations.
2152 */
2153 gfp_mask &= ~__GFP_RECLAIM;
2154 gfp_mask |= GFP_TRANSHUGE;
2155 order = folio_order(src);
2156 }
2157 zidx = zone_idx(folio_zone(src));
2158 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2159 gfp_mask |= __GFP_HIGHMEM;
2160
2161 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2162}
2163
2164#ifdef CONFIG_NUMA
2165
2166static int store_status(int __user *status, int start, int value, int nr)
2167{
2168 while (nr-- > 0) {
2169 if (put_user(value, status + start))
2170 return -EFAULT;
2171 start++;
2172 }
2173
2174 return 0;
2175}
2176
2177static int do_move_pages_to_node(struct list_head *pagelist, int node)
2178{
2179 int err;
2180 struct migration_target_control mtc = {
2181 .nid = node,
2182 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2183 .reason = MR_SYSCALL,
2184 };
2185
2186 err = migrate_pages(pagelist, alloc_migration_target, NULL,
2187 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2188 if (err)
2189 putback_movable_pages(pagelist);
2190 return err;
2191}
2192
2193static int __add_folio_for_migration(struct folio *folio, int node,
2194 struct list_head *pagelist, bool migrate_all)
2195{
2196 if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2197 return -EFAULT;
2198
2199 if (folio_is_zone_device(folio))
2200 return -ENOENT;
2201
2202 if (folio_nid(folio) == node)
2203 return 0;
2204
2205 if (folio_likely_mapped_shared(folio) && !migrate_all)
2206 return -EACCES;
2207
2208 if (folio_test_hugetlb(folio)) {
2209 if (isolate_hugetlb(folio, pagelist))
2210 return 1;
2211 } else if (folio_isolate_lru(folio)) {
2212 list_add_tail(&folio->lru, pagelist);
2213 node_stat_mod_folio(folio,
2214 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2215 folio_nr_pages(folio));
2216 return 1;
2217 }
2218 return -EBUSY;
2219}
2220
2221/*
2222 * Resolves the given address to a struct folio, isolates it from the LRU and
2223 * puts it on the given pagelist.
2224 * Returns:
2225 * errno - if the folio cannot be found/isolated
2226 * 0 - when it doesn't have to be migrated because it is already on the
2227 * target node
2228 * 1 - when it has been queued
2229 */
2230static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
2231 int node, struct list_head *pagelist, bool migrate_all)
2232{
2233 struct vm_area_struct *vma;
2234 struct folio_walk fw;
2235 struct folio *folio;
2236 unsigned long addr;
2237 int err = -EFAULT;
2238
2239 mmap_read_lock(mm);
2240 addr = (unsigned long)untagged_addr_remote(mm, p);
2241
2242 vma = vma_lookup(mm, addr);
2243 if (vma && vma_migratable(vma)) {
2244 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2245 if (folio) {
2246 err = __add_folio_for_migration(folio, node, pagelist,
2247 migrate_all);
2248 folio_walk_end(&fw, vma);
2249 } else {
2250 err = -ENOENT;
2251 }
2252 }
2253 mmap_read_unlock(mm);
2254 return err;
2255}
2256
2257static int move_pages_and_store_status(int node,
2258 struct list_head *pagelist, int __user *status,
2259 int start, int i, unsigned long nr_pages)
2260{
2261 int err;
2262
2263 if (list_empty(pagelist))
2264 return 0;
2265
2266 err = do_move_pages_to_node(pagelist, node);
2267 if (err) {
2268 /*
2269 * A positive err is the number of pages that failed
2270 * to migrate. Since we are going to abort and return
2271 * the number of non-migrated pages, we also need to
2272 * include the rest of the nr_pages that have not been
2273 * attempted yet. E.g. with nr_pages = 10, i = 4 and
2274 * err = 2, the caller sees 2 + (10 - 4) = 8.
2275 */
2276 if (err > 0)
2277 err += nr_pages - i;
2278 return err;
2279 }
2280 return store_status(status, start, node, i - start);
2281}
2282
2283/*
2284 * Migrate an array of page addresses onto an array of nodes and fill in
2285 * the corresponding array of status values.
2286 */
2287static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2288 unsigned long nr_pages,
2289 const void __user * __user *pages,
2290 const int __user *nodes,
2291 int __user *status, int flags)
2292{
2293 compat_uptr_t __user *compat_pages = (void __user *)pages;
2294 int current_node = NUMA_NO_NODE;
2295 LIST_HEAD(pagelist);
2296 int start, i;
2297 int err = 0, err1;
2298
2299 lru_cache_disable();
2300
2301 for (i = start = 0; i < nr_pages; i++) {
2302 const void __user *p;
2303 int node;
2304
2305 err = -EFAULT;
2306 if (in_compat_syscall()) {
2307 compat_uptr_t cp;
2308
2309 if (get_user(cp, compat_pages + i))
2310 goto out_flush;
2311
2312 p = compat_ptr(cp);
2313 } else {
2314 if (get_user(p, pages + i))
2315 goto out_flush;
2316 }
2317 if (get_user(node, nodes + i))
2318 goto out_flush;
2319
2320 err = -ENODEV;
2321 if (node < 0 || node >= MAX_NUMNODES)
2322 goto out_flush;
2323 if (!node_state(node, N_MEMORY))
2324 goto out_flush;
2325
2326 err = -EACCES;
2327 if (!node_isset(node, task_nodes))
2328 goto out_flush;
2329
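		/*
		 * Pages aimed at the same node are batched on pagelist and
		 * migrated in one go: the batch is flushed when the requested
		 * node changes, when a lookup or isolation result has to be
		 * reported for the current page, or when the array ends.
		 * E.g. for nodes = {1, 1, 2}, the first two pages are queued
		 * and moved to node 1 together once node 2 is seen at i = 2.
		 */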
2330 if (current_node == NUMA_NO_NODE) {
2331 current_node = node;
2332 start = i;
2333 } else if (node != current_node) {
2334 err = move_pages_and_store_status(current_node,
2335 &pagelist, status, start, i, nr_pages);
2336 if (err)
2337 goto out;
2338 start = i;
2339 current_node = node;
2340 }
2341
2342 /*
2343 * Errors in the page lookup or isolation are not fatal and we simply
2344 * report them via status
2345 */
2346 err = add_folio_for_migration(mm, p, current_node, &pagelist,
2347 flags & MPOL_MF_MOVE_ALL);
2348
2349 if (err > 0) {
2350 /* The page is successfully queued for migration */
2351 continue;
2352 }
2353
2354 /*
2355 * The move_pages() man page does not have an -EEXIST choice, so
2356 * use -EFAULT instead.
2357 */
2358 if (err == -EEXIST)
2359 err = -EFAULT;
2360
2361 /*
2362 * If the page is already on the target node (!err), store the
2363 * node, otherwise, store the err.
2364 */
2365 err = store_status(status, i, err ? : current_node, 1);
2366 if (err)
2367 goto out_flush;
2368
2369 err = move_pages_and_store_status(current_node, &pagelist,
2370 status, start, i, nr_pages);
2371 if (err) {
2372 /* We have accounted for page i */
2373 if (err > 0)
2374 err--;
2375 goto out;
2376 }
2377 current_node = NUMA_NO_NODE;
2378 }
2379out_flush:
2380 /* Make sure we do not overwrite the existing error */
2381 err1 = move_pages_and_store_status(current_node, &pagelist,
2382 status, start, i, nr_pages);
2383 if (err >= 0)
2384 err = err1;
2385out:
2386 lru_cache_enable();
2387 return err;
2388}
2389
2390/*
2391 * Determine the nodes of an array of pages and store them in an array of status values.
2392 */
2393static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2394 const void __user **pages, int *status)
2395{
2396 unsigned long i;
2397
2398 mmap_read_lock(mm);
2399
2400 for (i = 0; i < nr_pages; i++) {
2401 unsigned long addr = (unsigned long)(*pages);
2402 struct vm_area_struct *vma;
2403 struct folio_walk fw;
2404 struct folio *folio;
2405 int err = -EFAULT;
2406
2407 vma = vma_lookup(mm, addr);
2408 if (!vma)
2409 goto set_status;
2410
2411 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2412 if (folio) {
2413 if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2414 err = -EFAULT;
2415 else if (folio_is_zone_device(folio))
2416 err = -ENOENT;
2417 else
2418 err = folio_nid(folio);
2419 folio_walk_end(&fw, vma);
2420 } else {
2421 err = -ENOENT;
2422 }
2423set_status:
2424 *status = err;
2425
2426 pages++;
2427 status++;
2428 }
2429
2430 mmap_read_unlock(mm);
2431}
2432
2433static int get_compat_pages_array(const void __user *chunk_pages[],
2434 const void __user * __user *pages,
2435 unsigned long chunk_nr)
2436{
2437 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2438 compat_uptr_t p;
2439 int i;
2440
2441 for (i = 0; i < chunk_nr; i++) {
2442 if (get_user(p, pages32 + i))
2443 return -EFAULT;
2444 chunk_pages[i] = compat_ptr(p);
2445 }
2446
2447 return 0;
2448}
2449
2450/*
2451 * Determine the nodes of a user array of pages and store them in
2452 * a user array of status values.
2453 */
2454static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2455 const void __user * __user *pages,
2456 int __user *status)
2457{
2458#define DO_PAGES_STAT_CHUNK_NR 16UL
2459 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2460 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2461
2462 while (nr_pages) {
2463 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2464
2465 if (in_compat_syscall()) {
2466 if (get_compat_pages_array(chunk_pages, pages,
2467 chunk_nr))
2468 break;
2469 } else {
2470 if (copy_from_user(chunk_pages, pages,
2471 chunk_nr * sizeof(*chunk_pages)))
2472 break;
2473 }
2474
2475 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2476
2477 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2478 break;
2479
2480 pages += chunk_nr;
2481 status += chunk_nr;
2482 nr_pages -= chunk_nr;
2483 }
2484 return nr_pages ? -EFAULT : 0;
2485}
2486
2487static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2488{
2489 struct task_struct *task;
2490 struct mm_struct *mm;
2491
2492 /*
2493 * There is no need to check whether the current process has the right to
2494 * modify the specified process when they are the same.
2495 */
2496 if (!pid) {
2497 mmget(current->mm);
2498 *mem_nodes = cpuset_mems_allowed(current);
2499 return current->mm;
2500 }
2501
2502 task = find_get_task_by_vpid(pid);
2503 if (!task)
2504 return ERR_PTR(-ESRCH);
2506
2507 /*
2508 * Check if this process has the right to modify the specified
2509 * process. Use the regular "ptrace_may_access()" checks.
2510 */
2511 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2512 mm = ERR_PTR(-EPERM);
2513 goto out;
2514 }
2515
2516 mm = ERR_PTR(security_task_movememory(task));
2517 if (IS_ERR(mm))
2518 goto out;
2519 *mem_nodes = cpuset_mems_allowed(task);
2520 mm = get_task_mm(task);
2521out:
2522 put_task_struct(task);
2523 if (!mm)
2524 mm = ERR_PTR(-EINVAL);
2525 return mm;
2526}
2527
2528/*
2529 * Move a list of pages in the address space of a target process (the
2530 * calling process when pid is 0).
2531 */
2532static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2533 const void __user * __user *pages,
2534 const int __user *nodes,
2535 int __user *status, int flags)
2536{
2537 struct mm_struct *mm;
2538 int err;
2539 nodemask_t task_nodes;
2540
2541 /* Check flags */
2542 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2543 return -EINVAL;
2544
2545 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2546 return -EPERM;
2547
2548 mm = find_mm_struct(pid, &task_nodes);
2549 if (IS_ERR(mm))
2550 return PTR_ERR(mm);
2551
2552 if (nodes)
2553 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2554 nodes, status, flags);
2555 else
2556 err = do_pages_stat(mm, nr_pages, pages, status);
2557
2558 mmput(mm);
2559 return err;
2560}
2561
2562SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2563 const void __user * __user *, pages,
2564 const int __user *, nodes,
2565 int __user *, status, int, flags)
2566{
2567 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2568}
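/*
 * From userspace this syscall is normally reached through the libnuma
 * wrapper declared in <numaif.h> (link with -lnuma). A minimal,
 * illustrative sketch, assuming buf is a page-aligned buffer that should
 * end up on node 1:
 *
 *	void *pages[1] = { buf };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * On rc == 0, status[0] holds the node the page now resides on, or a
 * negative errno value if it could not be moved. Passing nodes == NULL
 * turns the call into a pure query: the current node of each page is
 * written to status[] without migrating anything, which is the
 * do_pages_stat() path above.
 */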
2569
2570#ifdef CONFIG_NUMA_BALANCING
2571/*
2572 * Returns true if this is a safe migration target node for misplaced NUMA
2573 * pages. Currently it only checks the watermarks, which is crude.
2574 */
2575static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2576 unsigned long nr_migrate_pages)
2577{
2578 int z;
2579
2580 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2581 struct zone *zone = pgdat->node_zones + z;
2582
2583 if (!managed_zone(zone))
2584 continue;
2585
2586 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2587 if (!zone_watermark_ok(zone, 0,
2588 high_wmark_pages(zone) +
2589 nr_migrate_pages,
2590 ZONE_MOVABLE, ALLOC_CMA))
2591 continue;
2592 return true;
2593 }
2594 return false;
2595}
2596
2597static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2598 unsigned long data)
2599{
2600 int nid = (int) data;
2601 int order = folio_order(src);
2602 gfp_t gfp = __GFP_THISNODE;
2603
2604 if (order > 0)
2605 gfp |= GFP_TRANSHUGE_LIGHT;
2606 else {
2607 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2608 __GFP_NOWARN;
2609 gfp &= ~__GFP_RECLAIM;
2610 }
2611 return __folio_alloc_node(gfp, order, nid);
2612}
2613
2614/*
2615 * Prepare for calling migrate_misplaced_folio() by isolating the folio if
2616 * permitted. Must be called with the PTL still held.
2617 */
2618int migrate_misplaced_folio_prepare(struct folio *folio,
2619 struct vm_area_struct *vma, int node)
2620{
2621 int nr_pages = folio_nr_pages(folio);
2622 pg_data_t *pgdat = NODE_DATA(node);
2623
2624 if (folio_is_file_lru(folio)) {
2625 /*
2626 * Do not migrate file folios that are mapped in multiple
2627 * processes with execute permissions as they are probably
2628 * shared libraries.
2629 *
2630 * See folio_likely_mapped_shared() on possible imprecision
2631 * when we cannot easily detect if a folio is shared.
2632 */
2633 if ((vma->vm_flags & VM_EXEC) &&
2634 folio_likely_mapped_shared(folio))
2635 return -EACCES;
2636
2637 /*
2638 * Do not migrate dirty folios, as not all filesystems can move
2639 * dirty folios in MIGRATE_ASYNC mode; trying to do so is a waste
2640 * of cycles.
2641 */
2642 if (folio_test_dirty(folio))
2643 return -EAGAIN;
2644 }
2645
2646 /* Avoid migrating to a node that is nearly full */
2647 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2648 int z;
2649
2650 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2651 return -EAGAIN;
2652 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2653 if (managed_zone(pgdat->node_zones + z))
2654 break;
2655 }
2656
2657 /*
2658 * If there are no managed zones, it should not proceed
2659 * further.
2660 */
2661 if (z < 0)
2662 return -EAGAIN;
2663
2664 wakeup_kswapd(pgdat->node_zones + z, 0,
2665 folio_order(folio), ZONE_MOVABLE);
2666 return -EAGAIN;
2667 }
2668
2669 if (!folio_isolate_lru(folio))
2670 return -EAGAIN;
2671
2672 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2673 nr_pages);
2674 return 0;
2675}
2676
2677/*
2678 * Attempt to migrate a misplaced folio to the specified destination
2679 * node. The caller is expected to have isolated the folio by calling
2680 * migrate_misplaced_folio_prepare(), which will result in an
2681 * elevated reference count on the folio. This function will un-isolate the
2682 * folio and drop that reference before returning.
2683 */
2684int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2685 int node)
2686{
2687 pg_data_t *pgdat = NODE_DATA(node);
2688 int nr_remaining;
2689 unsigned int nr_succeeded;
2690 LIST_HEAD(migratepages);
2691 struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
2692 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2693
2694 list_add(&folio->lru, &migratepages);
2695 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2696 NULL, node, MIGRATE_ASYNC,
2697 MR_NUMA_MISPLACED, &nr_succeeded);
2698 if (nr_remaining && !list_empty(&migratepages))
2699 putback_movable_pages(&migratepages);
2700 if (nr_succeeded) {
2701 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2702 count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
2703 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
2704 && !node_is_toptier(folio_nid(folio))
2705 && node_is_toptier(node))
2706 mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
2707 }
2708 mem_cgroup_put(memcg);
2709 BUG_ON(!list_empty(&migratepages));
2710 return nr_remaining ? -EAGAIN : 0;
2711}
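/*
 * Illustrative sketch of how the two functions above are meant to be used
 * from a NUMA hinting fault handler (the real callers live in the fault
 * paths, e.g. mm/memory.c; folio, vma, ptep, ptl, nid, target_nid and the
 * out_map label below are the caller's own locals):
 *
 *	// still holding the page table lock of the faulting PTE
 *	if (migrate_misplaced_folio_prepare(folio, vma, target_nid))
 *		goto out_map;		// leave the folio where it is
 *	pte_unmap_unlock(ptep, ptl);	// migration may sleep, drop the PTL
 *	if (!migrate_misplaced_folio(folio, vma, target_nid))
 *		nid = target_nid;	// folio now resides on target_nid
 *
 * migrate_misplaced_folio_prepare() must run under the PTL so the folio
 * cannot be unmapped or freed while it is being isolated; the migration
 * itself can sleep and therefore runs only after the PTL is dropped.
 */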
2712#endif /* CONFIG_NUMA_BALANCING */
2713#endif /* CONFIG_NUMA */