// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

/*
 * This function checks whether the range [start_pfn, end_pfn) includes
 * unmovable pages or not. The range must fall within a single pageblock and
 * consequently belong to a single zone.
 *
 * A PageLRU check without isolation or the lru_lock can race, so a
 * MIGRATE_MOVABLE block might include unmovable pages. Likewise, a
 * __PageMovable check without lock_page may miss some movable non-LRU pages
 * under racy conditions. So this function cannot be expected to be exact.
 *
 * Returns a page without holding a reference. If the caller wants to
 * dereference that page (e.g., dumping), it has to make sure that it
 * cannot get removed (e.g., via memory unplug) concurrently.
 */
static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,
                                        int migratetype, int flags)
{
        struct page *page = pfn_to_page(start_pfn);
        struct zone *zone = page_zone(page);
        unsigned long pfn;

        VM_BUG_ON(pageblock_start_pfn(start_pfn) !=
                  pageblock_start_pfn(end_pfn - 1));

        if (is_migrate_cma_page(page)) {
                /*
                 * CMA allocations (alloc_contig_range) really need to
                 * isolate CMA pageblocks even when they are in fact not
                 * movable, so consider them movable here.
                 */
                if (is_migrate_cma(migratetype))
                        return NULL;

                return page;
        }
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                page = pfn_to_page(pfn);

                /*
                 * Both bootmem allocations and memory holes are marked
                 * PG_reserved and are unmovable. We can even have unmovable
                 * allocations inside ZONE_MOVABLE, for example when
                 * specifying "movablecore".
                 */
                if (PageReserved(page))
                        return page;

                /*
                 * If the zone is movable and we have ruled out all reserved
                 * pages then it should be reasonably safe to assume the rest
                 * is movable.
                 */
                if (zone_idx(zone) == ZONE_MOVABLE)
                        continue;

                /*
                 * Hugepages are not on LRU lists, but they're movable.
                 * THPs are on the LRU, but need to be counted as #small pages.
                 * We need not scan over tail pages because we don't
                 * handle each tail page individually in migration.
                 */
                if (PageHuge(page) || PageTransCompound(page)) {
                        struct folio *folio = page_folio(page);
                        unsigned int skip_pages;

                        if (PageHuge(page)) {
                                if (!hugepage_migration_supported(folio_hstate(folio)))
                                        return page;
                        } else if (!folio_test_lru(folio) && !__folio_test_movable(folio)) {
                                return page;
                        }

                        skip_pages = folio_nr_pages(folio) - folio_page_idx(folio, page);
                        pfn += skip_pages - 1;
                        continue;
                }

                /*
                 * We can't use page_count() without pinning the page,
                 * because another CPU can free the compound page.
                 * This check already skips compound tails of THP
                 * because their page->_refcount is zero at all times.
                 */
                if (!page_ref_count(page)) {
                        if (PageBuddy(page))
                                pfn += (1 << buddy_order(page)) - 1;
                        continue;
                }

                /*
                 * The HWPoisoned page may not be in the buddy system, and
                 * page_count() is not 0.
                 */
                if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
                        continue;

                /*
                 * We treat all PageOffline() pages as movable when offlining
                 * to give drivers a chance to decrement their reference count
                 * in MEM_GOING_OFFLINE in order to indicate that these pages
                 * can be offlined as there are no direct references anymore.
                 * For actually unmovable PageOffline() pages where the driver
                 * does not support this, we will fail later when trying to
                 * actually move these pages that still have a reference count
                 * > 0. (false negatives in this function only)
                 */
                if ((flags & MEMORY_OFFLINE) && PageOffline(page))
                        continue;

                if (__PageMovable(page) || PageLRU(page))
                        continue;

                /*
                 * If there are RECLAIMABLE pages, we need to check
                 * them. But for now, memory offlining itself doesn't call
                 * shrink_node_slabs(), and this still needs to be fixed.
                 */
                return page;
        }
        return NULL;
}
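
/*
 * A minimal usage sketch for has_unmovable_pages() (hypothetical caller,
 * not part of this file; set_migratetype_isolate() below is the real
 * user). The range must stay within one pageblock, and zone->lock is
 * held so the buddy/migratetype state stays stable while scanning:
 *
 *      unsigned long start = pageblock_start_pfn(pfn);
 *      unsigned long end = start + pageblock_nr_pages;
 *      struct page *unmovable;
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&zone->lock, flags);
 *      unmovable = has_unmovable_pages(start, end, MIGRATE_MOVABLE, 0);
 *      spin_unlock_irqrestore(&zone->lock, flags);
 *      if (unmovable)
 *              pr_info("pageblock has a (likely) unmovable page\n");
 */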

/*
 * This function sets the pageblock migratetype to MIGRATE_ISOLATE if no
 * unmovable page is present in [start_pfn, end_pfn). The pageblock must
 * intersect with [start_pfn, end_pfn).
 */
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags,
                        unsigned long start_pfn, unsigned long end_pfn)
{
        struct zone *zone = page_zone(page);
        struct page *unmovable;
        unsigned long flags;
        unsigned long check_unmovable_start, check_unmovable_end;

        spin_lock_irqsave(&zone->lock, flags);

        /*
         * We assume the caller intended to SET the migratetype to isolate.
         * If it is already set, then someone else must have raced and
         * set it before us.
         */
        if (is_migrate_isolate_page(page)) {
                spin_unlock_irqrestore(&zone->lock, flags);
                return -EBUSY;
        }

        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         *
         * Pass the intersection of [start_pfn, end_pfn) and the page's pageblock
         * to avoid redundant checks.
         */
        check_unmovable_start = max(page_to_pfn(page), start_pfn);
        check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
                                  end_pfn);
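        /*
         * Worked example (illustrative numbers only, assuming
         * pageblock_nr_pages == 512): for a pageblock starting at pfn 512
         * and a caller range of [600, 2000), the intersection checked here
         * is [600, 1024) rather than the whole pageblock [512, 1024).
         */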

        unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
                        migratetype, isol_flags);
        if (!unmovable) {
                if (!move_freepages_block_isolate(zone, page, MIGRATE_ISOLATE)) {
                        spin_unlock_irqrestore(&zone->lock, flags);
                        return -EBUSY;
                }
                zone->nr_isolate_pageblock++;
                spin_unlock_irqrestore(&zone->lock, flags);
                return 0;
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (isol_flags & REPORT_FAILURE) {
                /*
                 * printk() with zone->lock held will likely trigger a
                 * lockdep splat, so defer it here.
                 */
                dump_page(unmovable, "unmovable page");
        }

        return -EBUSY;
}

static void unset_migratetype_isolate(struct page *page, int migratetype)
{
        struct zone *zone;
        unsigned long flags;
        bool isolated_page = false;
        unsigned int order;
        struct page *buddy;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (!is_migrate_isolate_page(page))
                goto out;

        /*
         * Because a free page with more than pageblock_order in an isolated
         * pageblock is restricted from merging due to the freepage counting
         * problem, it is possible that there is a free buddy page.
         * move_freepages_block() does not handle merging, so we need another
         * approach: isolating and then freeing the page lets these pages
         * get merged.
         */
        if (PageBuddy(page)) {
                order = buddy_order(page);
                if (order >= pageblock_order && order < MAX_PAGE_ORDER) {
                        buddy = find_buddy_page_pfn(page, page_to_pfn(page),
                                                    order, NULL);
                        if (buddy && !is_migrate_isolate_page(buddy)) {
                                isolated_page = !!__isolate_free_page(page, order);
                                /*
                                 * Isolating a free page in an isolated pageblock
                                 * is expected to always work as watermarks don't
                                 * apply here.
                                 */
                                VM_WARN_ON(!isolated_page);
                        }
                }
        }

        /*
         * If we isolate a free page with more than pageblock_order, there
         * should be no free page left in the range, so we can avoid the
         * costly pageblock scanning for freepage moving.
         *
         * We didn't actually touch any of the isolated pages, so place them
         * at the tail of the freelist. This is an optimization for memory
         * onlining - just onlined memory won't immediately be considered for
         * allocation.
         */
        if (!isolated_page) {
                /*
                 * Isolating this block already succeeded, so this
                 * should not fail on zone boundaries.
                 */
                WARN_ON_ONCE(!move_freepages_block_isolate(zone, page, migratetype));
        } else {
                set_pageblock_migratetype(page, migratetype);
                __putback_isolated_page(page, order, migratetype);
        }
        zone->nr_isolate_pageblock--;
out:
        spin_unlock_irqrestore(&zone->lock, flags);
}
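
/*
 * Worked example for the merge case above (illustrative numbers only,
 * assuming pageblock_order == 9 and MAX_PAGE_ORDER == 10): an isolated
 * pageblock may contain a free order-9 page whose order-9 buddy sits in a
 * non-isolated pageblock. __isolate_free_page() pulls the page off the
 * MIGRATE_ISOLATE freelist, and __putback_isolated_page() frees it again
 * after the migratetype is restored, letting the buddy allocator merge
 * the two halves into one order-10 page.
 */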

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page;

                page = pfn_to_online_page(pfn + i);
                if (!page)
                        continue;
                return page;
        }
        return NULL;
}

/**
 * isolate_single_pageblock() -- tries to isolate a pageblock that might be
 * within a free or in-use page.
 * @boundary_pfn: pageblock-aligned pfn that a page might cross
 * @flags: isolation flags
 * @gfp_flags: GFP flags used for migrating pages
 * @isolate_before: isolate the pageblock before the boundary_pfn
 * @skip_isolation: the flag to skip the pageblock isolation in the second
 *                  isolate_single_pageblock() call
 * @migratetype: migrate type to set in error recovery.
 *
 * Free and in-use pages can be as big as MAX_PAGE_ORDER and contain more than one
 * pageblock. When not all pageblocks within a page are isolated at the same
 * time, free page accounting can go wrong. For example, in the case of
 * MAX_PAGE_ORDER = pageblock_order + 1, a MAX_PAGE_ORDER page has two
 * pageblocks.
 * [         MAX_PAGE_ORDER         ]
 * [  pageblock0  |  pageblock1  ]
 * When either pageblock is isolated, if it is a free page, the page is not
 * split into separate migratetype lists as it is supposed to be; if it is an
 * in-use page and freed later, __free_one_page() does not split the free page
 * either. The function handles this by splitting the free page or migrating
 * the in-use page then splitting the free page.
 */
static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
                        gfp_t gfp_flags, bool isolate_before, bool skip_isolation,
                        int migratetype)
{
        unsigned long start_pfn;
        unsigned long isolate_pageblock;
        unsigned long pfn;
        struct zone *zone;
        int ret;

        VM_BUG_ON(!pageblock_aligned(boundary_pfn));

        if (isolate_before)
                isolate_pageblock = boundary_pfn - pageblock_nr_pages;
        else
                isolate_pageblock = boundary_pfn;

        /*
         * Scan from the beginning of the MAX_ORDER_NR_PAGES-aligned range to
         * avoid isolating only a subset of the pageblocks of a free or
         * in-use page that is bigger than a pageblock. Also make sure all
         * to-be-isolated pageblocks are within the same zone.
         */
        zone = page_zone(pfn_to_page(isolate_pageblock));
        start_pfn = max(ALIGN_DOWN(isolate_pageblock, MAX_ORDER_NR_PAGES),
                        zone->zone_start_pfn);

        if (skip_isolation) {
                int mt __maybe_unused = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));

                VM_BUG_ON(!is_migrate_isolate(mt));
        } else {
                ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype,
                                flags, isolate_pageblock, isolate_pageblock + pageblock_nr_pages);

                if (ret)
                        return ret;
        }

        /*
         * Bail out early when the to-be-isolated pageblock does not form
         * a free or in-use page across boundary_pfn:
         *
         * 1. isolate before boundary_pfn: the page after is not online
         * 2. isolate after boundary_pfn: the page before is not online
         *
         * This also ensures correctness. Without it, when isolating after
         * boundary_pfn while [start_pfn, boundary_pfn) is not online,
         * __first_valid_page() would return an unexpected NULL in the for
         * loop below.
         */
        if (isolate_before) {
                if (!pfn_to_online_page(boundary_pfn))
                        return 0;
        } else {
                if (!pfn_to_online_page(boundary_pfn - 1))
                        return 0;
        }

        for (pfn = start_pfn; pfn < boundary_pfn;) {
                struct page *page = __first_valid_page(pfn, boundary_pfn - pfn);

                VM_BUG_ON(!page);
                pfn = page_to_pfn(page);

                if (PageBuddy(page)) {
                        int order = buddy_order(page);

                        /* move_freepages_block_isolate() handled this */
                        VM_WARN_ON_ONCE(pfn + (1 << order) > boundary_pfn);

                        pfn += 1UL << order;
                        continue;
                }

                /*
                 * If a compound page is straddling our block, attempt
                 * to migrate it out of the way.
                 *
                 * We don't have to worry about this creating a large
                 * free page that straddles into our block: gigantic
                 * pages are freed as order-0 chunks, and LRU pages
                 * (currently) do not exceed pageblock_order.
                 *
                 * The block of interest has already been marked
                 * MIGRATE_ISOLATE above, so when migration is done it
                 * will free its pages onto the correct freelists.
                 */
                if (PageCompound(page)) {
                        struct page *head = compound_head(page);
                        unsigned long head_pfn = page_to_pfn(head);
                        unsigned long nr_pages = compound_nr(head);

                        if (head_pfn + nr_pages <= boundary_pfn) {
                                pfn = head_pfn + nr_pages;
                                continue;
                        }

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
                        if (PageHuge(page)) {
                                int page_mt = get_pageblock_migratetype(page);
                                struct compact_control cc = {
                                        .nr_migratepages = 0,
                                        .order = -1,
                                        .zone = page_zone(pfn_to_page(head_pfn)),
                                        .mode = MIGRATE_SYNC,
                                        .ignore_skip_hint = true,
                                        .no_set_skip_hint = true,
                                        .gfp_mask = gfp_flags,
                                        .alloc_contig = true,
                                };
                                INIT_LIST_HEAD(&cc.migratepages);

                                ret = __alloc_contig_migrate_range(&cc, head_pfn,
                                                        head_pfn + nr_pages, page_mt);
                                if (ret)
                                        goto failed;
                                pfn = head_pfn + nr_pages;
                                continue;
                        }

                        /*
                         * These pages are movable too, but they're
                         * not expected to exceed pageblock_order.
                         *
                         * Let us know when they do, so we can add
                         * proper free and split handling for them.
                         */
                        VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
                        VM_WARN_ON_ONCE_PAGE(__PageMovable(page), page);
#endif
                        goto failed;
                }

                pfn++;
        }
        return 0;
failed:
        /* restore the original migratetype */
        if (!skip_isolation)
                unset_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype);
        return -EBUSY;
}
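
/*
 * Worked example for isolate_single_pageblock() (illustrative numbers
 * only, assuming pageblock_nr_pages == 512 and MAX_ORDER_NR_PAGES ==
 * 1024): isolating pageblock [512, 1024) while a free order-10 buddy page
 * spans [0, 1024) would leave pageblock0 [0, 512) accounted on the wrong
 * freelist unless the buddy page is split first. Hence the scan starts at
 * the MAX_ORDER_NR_PAGES-aligned pfn 0, not at 512:
 *
 *      start_pfn = max(ALIGN_DOWN(512, MAX_ORDER_NR_PAGES),
 *                      zone->zone_start_pfn);  // == 0 here
 */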

/**
 * start_isolate_page_range() - mark page range MIGRATE_ISOLATE
 * @start_pfn: The first PFN of the range to be isolated.
 * @end_pfn: The first PFN *after* the range to be isolated.
 * @migratetype: Migrate type to set in error recovery.
 * @flags: The following flags are allowed (they can be combined in
 *         a bit mask):
 *         MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *                          e.g., skip over PageHWPoison() pages
 *                          and PageOffline() pages.
 *         REPORT_FAILURE - report details about the failure to
 *                          isolate the range
 * @gfp_flags: GFP flags used for migrating pages that sit across the
 *             range boundaries.
 *
 * Setting the pageblock migratetype to MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes migrate
 * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally
 * isolate all pages in the range, the caller has to free all pages in the
 * range; test_pages_isolated() can be used to verify that.
 *
 * The function first tries to isolate the pageblocks at the beginning and end
 * of the range, since there might be pages across the range boundaries.
 * Afterwards, it isolates the rest of the range.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), which returns an error. We then
 * clean up by restoring the migration type on pageblocks we may have modified
 * and return -EBUSY to the caller. This prevents two threads from
 * simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * A call to drain_all_pages() after isolation can flush most of them. However
 * in some cases pages might still end up on pcp lists and that would allow
 * for their allocation even when they are in fact isolated already. Depending
 * on how strong of a guarantee the caller needs, zone_pcp_disable/enable()
 * might be used to flush and disable the pcplist before isolation and enable
 * it after unisolation.
 *
 * Return: 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             int migratetype, int flags, gfp_t gfp_flags)
{
        unsigned long pfn;
        struct page *page;
        /* isolation is done at page block granularity */
        unsigned long isolate_start = pageblock_start_pfn(start_pfn);
        unsigned long isolate_end = pageblock_align(end_pfn);
        int ret;
        bool skip_isolation = false;

        /* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
        ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false,
                        skip_isolation, migratetype);
        if (ret)
                return ret;

        if (isolate_start == isolate_end - pageblock_nr_pages)
                skip_isolation = true;

        /* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
        ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true,
                        skip_isolation, migratetype);
        if (ret) {
                unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
                return ret;
        }

        /* skip isolated pageblocks at the beginning and end */
        for (pfn = isolate_start + pageblock_nr_pages;
             pfn < isolate_end - pageblock_nr_pages;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && set_migratetype_isolate(page, migratetype, flags,
                                        start_pfn, end_pfn)) {
                        undo_isolate_page_range(isolate_start, pfn, migratetype);
                        unset_migratetype_isolate(
                                pfn_to_page(isolate_end - pageblock_nr_pages),
                                migratetype);
                        return -EBUSY;
                }
        }
        return 0;
}
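
/*
 * A minimal caller sketch (hypothetical, not part of this file): the
 * alloc_contig_range()-style sequence isolates the range, migrates any
 * in-use pages out of it, verifies the result, and undoes the isolation.
 * zone_pcp_disable()/zone_pcp_enable() are only needed when the caller
 * requires the stronger pcplist guarantee described above:
 *
 *      zone_pcp_disable(zone);
 *      ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
 *                                     0, GFP_KERNEL);
 *      if (!ret) {
 *              // ...migrate in-use pages out of [start_pfn, end_pfn)...
 *              drain_all_pages(zone);
 *              if (test_pages_isolated(start_pfn, end_pfn, 0))
 *                      ret = -EBUSY;
 *              undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 *      }
 *      zone_pcp_enable(zone);
 */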

/**
 * undo_isolate_page_range - undo the effects of start_isolate_page_range()
 * @start_pfn: The first PFN of the isolated range
 * @end_pfn: The first PFN *after* the isolated range
 * @migratetype: New migrate type to set on the range
 *
 * This finds every MIGRATE_ISOLATE pageblock in the given range
 * and switches it to @migratetype.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             int migratetype)
{
        unsigned long pfn;
        struct page *page;
        unsigned long isolate_start = pageblock_start_pfn(start_pfn);
        unsigned long isolate_end = pageblock_align(end_pfn);

        for (pfn = isolate_start;
             pfn < isolate_end;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || !is_migrate_isolate_page(page))
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
}

/*
 * Test whether all pages in the range are free (i.e., isolated).
 * All pages in [start_pfn, end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  int flags)
{
        struct page *page;

        while (pfn < end_pfn) {
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        /*
                         * If the page is on a free list, it has to be on
                         * the correct MIGRATE_ISOLATE freelist. There is no
                         * simple way to verify that as VM_BUG_ON(), though.
                         */
                        pfn += 1 << buddy_order(page);
                else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
                        /* A HWPoisoned page cannot also be PageBuddy */
                        pfn++;
                else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
                         !page_count(page))
                        /*
                         * The responsible driver agreed to skip PageOffline()
                         * pages when offlining memory by dropping its
                         * reference in MEM_GOING_OFFLINE.
                         */
                        pfn++;
                else
                        break;
        }

        return pfn;
}

/**
 * test_pages_isolated - check if pageblocks in range are isolated
 * @start_pfn: The first PFN of the isolated range
 * @end_pfn: The first PFN *after* the isolated range
 * @isol_flags: Testing mode flags
 *
 * This tests whether all pages in the specified range are free.
 *
 * If %MEMORY_OFFLINE is specified in @isol_flags, it will consider
 * poisoned and offlined pages free as well.
 *
 * Caller must ensure the requested range doesn't span zones.
 *
 * Returns 0 if true, -EBUSY if one or more pages are in use.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        int isol_flags)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;
        int ret;

        /*
         * Note: pageblock_nr_pages != MAX_PAGE_ORDER, so chunks of free
         * pages are not necessarily aligned to pageblock_nr_pages.
         * Therefore, check the migratetype of each pageblock first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && !is_migrate_isolate_page(page))
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page) {
                ret = -EBUSY;
                goto out;
        }

        /* Check that all pages are free or marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
        spin_unlock_irqrestore(&zone->lock, flags);

        ret = pfn < end_pfn ? -EBUSY : 0;

out:
        trace_test_pages_isolated(start_pfn, end_pfn, pfn);

        return ret;
}