Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>
#include <linux/padata.h>
#include <linux/khugepaged.h>
#include <linux/buffer_head.h>
#include <linux/delayacct.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"
#include "swap.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 * to allow for optimizations when handing back either fresh pages
 * (memory onlining) or untouched pages (page isolation, free page
 * reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/*
 * Don't poison memory with KASAN (only for the tag-based modes).
 * During boot, all non-reserved memblock memory is exposed to page_alloc.
 * Poisoning all that memory lengthens boot time, especially on systems with
 * large amount of RAM. This flag is used to skip that poisoning.
 * This is only done for the tag-based KASAN modes, as those are able to
 * detect memory corruptions with the memory tags assigned by default.
 * All memory allocated normally after boot gets poisoned as usual.
 */
#define FPI_SKIP_KASAN_POISON	((__force fpi_t)BIT(2))

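/*
 * Illustrative note (not part of the original source): fpi_t flags can be
 * OR-ed together. For example, __free_pages_core() below hands boot pages
 * straight to the tail of the freelist and skips KASAN poisoning via
 * __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON).
 */
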
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up a per-cpu variable with an embedded spinlock.
 * The return value should be used with the equivalent unlock helper.
 */
#define pcpu_spin_lock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock(&_ret->member);					\
	_ret;								\
})

#define pcpu_spin_lock_irqsave(type, member, ptr, flags)		\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock_irqsave(&_ret->member, flags);			\
	_ret;								\
})

#define pcpu_spin_trylock_irqsave(type, member, ptr, flags)		\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	if (!spin_trylock_irqsave(&_ret->member, flags)) {		\
		pcpu_task_unpin();					\
		_ret = NULL;						\
	}								\
	_ret;								\
})

#define pcpu_spin_unlock(member, ptr)					\
({									\
	spin_unlock(&ptr->member);					\
	pcpu_task_unpin();						\
})

#define pcpu_spin_unlock_irqrestore(member, ptr, flags)			\
({									\
	spin_unlock_irqrestore(&ptr->member, flags);			\
	pcpu_task_unpin();						\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_lock(ptr)						\
	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_lock_irqsave(ptr, flags)				\
	pcpu_spin_lock_irqsave(struct per_cpu_pages, lock, ptr, flags)

#define pcp_spin_trylock_irqsave(ptr, flags)				\
	pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, ptr, flags)

#define pcp_spin_unlock(ptr)						\
	pcpu_spin_unlock(lock, ptr)

#define pcp_spin_unlock_irqrestore(ptr, flags)				\
	pcpu_spin_unlock_irqrestore(lock, ptr, flags)
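
/*
 * Usage sketch (illustrative, mirroring the allocator paths later in this
 * file): the trylock variant returns NULL on failure, so callers must
 * check the result before touching the pcp lists.
 *
 *	unsigned long flags;
 *	struct per_cpu_pages *pcp;
 *
 *	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
 *	if (pcp) {
 *		... operate on pcp->lists ...
 *		pcp_spin_unlock_irqrestore(pcp, flags);
 *	}
 */
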
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif /* NUMA */
};
EXPORT_SYMBOL(node_states);

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_high_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);

DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);

static bool _init_on_alloc_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
static int __init early_init_on_alloc(char *buf)
{
	return kstrtobool(buf, &_init_on_alloc_enabled_early);
}
early_param("init_on_alloc", early_init_on_alloc);

static bool _init_on_free_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
static int __init early_init_on_free(char *buf)
{
	return kstrtobool(buf, &_init_on_free_enabled_early);
}
early_param("init_on_free", early_init_on_free);

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}
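
/*
 * Illustrative sketch (not part of the original source): the free path
 * caches the pageblock migratetype before a page goes onto a pcplist,
 * roughly:
 *
 *	migratetype = get_pfnblock_migratetype(page, pfn);
 *	set_pcppage_migratetype(page, migratetype);
 *
 * so freeing from the pcplist can use get_pcppage_migratetype() instead
 * of repeating the pageblock bitmap lookup.
 */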

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */
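
/*
 * Usage sketch (illustrative, roughly how the hibernation core brackets
 * image I/O while holding system_transition_mutex):
 *
 *	lock_system_sleep();
 *	pm_restrict_gfp_mask();
 *	... write or read the hibernation image ...
 *	pm_restore_gfp_mask();
 *	unlock_system_sleep();
 */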

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
#ifdef CONFIG_ZONE_DEVICE
	"Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
	[NULL_COMPOUND_DTOR] = NULL,
	[COMPOUND_PAGE_DTOR] = free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	[HUGETLB_PAGE_DTOR] = free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_boost_factor __read_mostly = 15000;
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
bool mirrored_kernelcore __initdata_memblock;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	if (early_page_ext_enabled())
		return false;
	/*
	 * The static prev_end_pfn contains the end of the previous zone.
	 * No need to protect it: this is called very early in boot, before
	 * smp_init().
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}
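
/*
 * Worked example (illustrative, assuming pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4): within its section, pfn 0x1200 lies in
 * pageblock 0x1200 >> 9 == 9, so its flags start at bit index
 * 9 * 4 == 36 of the pageblock bitmap.
 */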

static __always_inline
unsigned long __get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
	 */
	word = READ_ONCE(bitmap[word_bitidx]);
	return (word >> bitidx) & mask;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, mask);
}

static __always_inline int get_pfnblock_migratetype(const struct page *page,
					unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

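	/*
	 * Lock-free read-modify-write: try_cmpxchg() refreshes @word on
	 * failure, so the loop retries until no other CPU has modified
	 * the bitmap word in the meantime.
	 */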
	word = READ_ONCE(bitmap[word_bitidx]);
	do {
	} while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
	int base = order;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != pageblock_order);
		return NR_LOWORDER_PCP_LISTS;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * base) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex == NR_LOWORDER_PCP_LISTS)
		order = pageblock_order;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

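/*
 * Worked example (illustrative, with MIGRATE_PCPTYPES == 3): an order-2
 * MIGRATE_MOVABLE page maps to pindex 3 * 2 + 1 == 7, and
 * pindex_to_order(7) == 7 / 3 == 2 recovers the order.
 */
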
static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}

static inline void free_the_page(struct page *page, unsigned int order)
{
	if (pcp_allowed_order(order))		/* Via pcp? */
		free_unref_page(page, order);
	else
		__free_pages_ok(page, order, FPI_NONE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

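/*
 * Illustrative layout (not part of the original source): an order-2
 * compound page spans four struct pages:
 *
 *	page[0]: PG_head set ("head page")
 *	page[1]: compound_head == &page[0] | 1; holds compound_dtor,
 *		 compound_order and the compound mapcount
 *	page[2]: compound_head == &page[0] | 1; ->mapping may alias
 *		 deferred_list, see free_tail_pages_check()
 *	page[3]: compound_head == &page[0] | 1
 */
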
void free_compound_page(struct page *page)
{
	mem_cgroup_uncharge(page_folio(page));
	free_the_page(page, compound_order(page));
}

static void prep_compound_head(struct page *page, unsigned int order)
{
	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	atomic_set(compound_mapcount_ptr(page), -1);
	atomic_set(compound_pincount_ptr(page), 0);
}

static void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}

void destroy_large_folio(struct folio *folio)
{
	enum compound_dtor_id dtor = folio_page(folio, 1)->compound_dtor;

	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
	compound_page_dtors[dtor](&folio->page);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

bool _debug_pagealloc_enabled_early __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
EXPORT_SYMBOL(_debug_pagealloc_enabled);

DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static int __init early_debug_pagealloc(char *buf)
{
	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	__SetPageGuard(page);
	INIT_LIST_HEAD(&page->buddy_list);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return;

	__ClearPageGuard(page);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

/*
 * Enable static keys related to various memory debugging and hardening options.
 * Some override others, and depend on early params that are evaluated in the
 * order of appearance. So we need to first gather the full picture of what was
 * enabled, and then make decisions.
 */
void __init init_mem_debugging_and_hardening(void)
{
	bool page_poisoning_requested = false;

#ifdef CONFIG_PAGE_POISONING
	/*
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options is enabled, enable poisoning.
	 */
	if (page_poisoning_enabled() ||
	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
	      debug_pagealloc_enabled())) {
		static_branch_enable(&_page_poisoning_enabled);
		page_poisoning_requested = true;
	}
#endif

	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
	    page_poisoning_requested) {
		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
			"will take precedence over init_on_alloc and init_on_free\n");
		_init_on_alloc_enabled_early = false;
		_init_on_free_enabled_early = false;
	}

	if (_init_on_alloc_enabled_early)
		static_branch_enable(&init_on_alloc);
	else
		static_branch_disable(&init_on_alloc);

	if (_init_on_free_enabled_early)
		static_branch_enable(&init_on_free);
	else
		static_branch_disable(&init_on_free);

	if (IS_ENABLED(CONFIG_KMSAN) &&
	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");

#ifdef CONFIG_DEBUG_PAGEALLOC
	if (!debug_pagealloc_enabled())
		return;

	static_branch_enable(&_debug_pagealloc_enabled);

	if (!debug_guardpage_minorder())
		return;

	static_branch_enable(&_debug_guardpage_enabled);
#endif
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. If that is
 * happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher order page
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_ORDER - 2)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

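/*
 * Worked example (illustrative): the buddy of @pfn at a given order is
 * pfn ^ (1 << order), and a merged pair starts at pfn & buddy_pfn.
 * Freeing an order-2 page at pfn 12 finds its buddy at 12 ^ 4 == 8;
 * if that page is free, the pair merges into an order-3 page at
 * 12 & 8 == 8.
 */
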
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn = 0;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER - 1) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merging between freepages of a
			 * pageblock without fallbacks and a normal pageblock.
			 * Without this, pageblock isolation could cause
			 * incorrect freepage or CMA accounting, or incorrect
			 * HIGHATOMIC accounting.
			 */
			int buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (!migratetype_is_mergeable(migratetype) ||
						!migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}

/**
 * split_free_page() -- split a free page at split_pfn_offset
 * @free_page:		the original free page
 * @order:		the order of the page
 * @split_pfn_offset:	split offset within the page
 *
 * Return -ENOENT if the free page is changed, otherwise 0
 *
 * It is used when the free page crosses two pageblocks with different migratetypes
 * at split_pfn_offset within the page. The split free page will be put into
 * separate migratetype lists afterwards. Otherwise, the function achieves
 * nothing.
 */
int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset)
{
	struct zone *zone = page_zone(free_page);
	unsigned long free_page_pfn = page_to_pfn(free_page);
	unsigned long pfn;
	unsigned long flags;
	int free_page_order;
	int mt;
	int ret = 0;

	if (split_pfn_offset == 0)
		return ret;

	spin_lock_irqsave(&zone->lock, flags);

	if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
		ret = -ENOENT;
		goto out;
	}

	mt = get_pageblock_migratetype(free_page);
	if (likely(!is_migrate_isolate(mt)))
		__mod_zone_freepage_state(zone, -(1UL << order), mt);

	del_page_from_free_list(free_page, zone, order);
	for (pfn = free_page_pfn;
	     pfn < free_page_pfn + (1UL << order);) {
		int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);

		free_page_order = min_t(unsigned int,
					pfn ? __ffs(pfn) : order,
					__fls(split_pfn_offset));
		__free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
				mt, FPI_NONE);
		pfn += 1UL << free_page_order;
		split_pfn_offset -= (1UL << free_page_order);
		/* we have done the first part, now switch to second part */
		if (split_pfn_offset == 0)
			split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
	}
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
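
/*
 * Worked example (illustrative, assuming an order-9 free page at pfn 512
 * and split_pfn_offset == 128): the loop above frees an order-7 chunk at
 * pfn 512, an order-7 chunk at pfn 640 and an order-8 chunk at pfn 768,
 * keeping every chunk buddy-aligned on both sides of the split point.
 */
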
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void free_page_is_bad_report(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline bool free_page_is_bad(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return false;

	/* Something has gone sideways, find it */
	free_page_is_bad_report(page);
	return true;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping may be compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. Deferred memory initialization has not yet completed,
 *    see the explanation below.
 * 2. Skipping poisoning is requested via FPI_SKIP_KASAN_POISON,
 *    see the comment next to it.
 * 3. Skipping poisoning is requested via __GFP_SKIP_KASAN_POISON,
 *    see the comment next to it.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
 * process and cause problems in large memory systems as the deferred pages
 * initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
	return deferred_pages_enabled() ||
	       (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
	       PageSkipKASanPoison(page);
}

static void kernel_init_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++)
		clear_highpage_kasan_tagged(page + i);
	kasan_enable_current();
}

static __always_inline bool free_pages_prepare(struct page *page,
			unsigned int order, bool check_free, fpi_t fpi_flags)
{
	int bad = 0;
	bool init = want_init_on_free();

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmsan_free_page(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/*
		 * Do not let hwpoison pages hit pcplists/buddy
		 * Untie memcg state and reset page's owner
		 */
		if (memcg_kmem_enabled() && PageMemcgKmem(page))
			__memcg_kmem_uncharge_page(page, order);
		reset_page_owner(page, order);
		page_table_check_free(page, order);
		return false;
	}

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound) {
			ClearPageDoubleMap(page);
			ClearPageHasHWPoisoned(page);
		}
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(free_page_is_bad(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);
	if (check_free && free_page_is_bad(page))
		bad++;
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!should_skip_kasan_poison(page, fpi_flags)) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
 * moved from pcp lists to free lists.
 */
static bool free_pcp_prepare(struct page *page, unsigned int order)
{
	return free_pages_prepare(page, order, true, FPI_NONE);
}

/* return true if this page has an inappropriate state */
static bool bulkfree_pcp_prepare(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return free_page_is_bad(page);
	else
		return false;
}
#else
/*
 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
 * moving from pcp lists to free list in order to reduce overhead. With
 * debug_pagealloc enabled, they are checked also immediately when being freed
 * to the pcp lists.
 */
static bool free_pcp_prepare(struct page *page, unsigned int order)
{
	if (debug_pagealloc_enabled_static())
		return free_pages_prepare(page, order, true, FPI_NONE);
	else
		return free_pages_prepare(page, order, false, FPI_NONE);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return free_page_is_bad(page);
}
#endif /* CONFIG_DEBUG_VM */

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	int min_pindex = 0;
	int max_pindex = NR_PCP_LISTS - 1;
	unsigned int order;
	bool isolated_pageblocks;
	struct page *page;

	/*
	 * Ensure proper count is passed which otherwise would get stuck in
	 * the below while (list_empty(list)) loop.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	/* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > max_pindex)
				pindex = min_pindex;
			list = &pcp->lists[pindex];
			if (!list_empty(list))
				break;

			if (pindex == max_pindex)
				max_pindex--;
			if (pindex == min_pindex)
				min_pindex++;
		} while (1);

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		do {
			int mt;

			page = list_last_entry(list, struct page, pcp_list);
			mt = get_pcppage_migratetype(page);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->pcp_list);
			count -= nr_pages;
			pcp->count -= nr_pages;

			if (bulkfree_pcp_prepare(page))
				continue;

			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype, fpi_t fpi_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __meminit init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_spans_pfn(zone, pfn))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			/*
			 * no need for atomic set_bit because the struct
			 * page is not visible yet so nobody should
			 * access it yet.
			 */
			__SetPageReserved(page);
		}
	}
}

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);
	struct zone *zone = page_zone(page);

	if (!free_pages_prepare(page, order, true, fpi_flags))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	__count_vm_events(PGFREE, 1 << order);
}

void __free_pages_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 */
	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
}
1758
1759#ifdef CONFIG_NUMA
1760
1761/*
1762 * During memory init memblocks map pfns to nids. The search is expensive and
1763 * this caches recent lookups. The implementation of __early_pfn_to_nid
1764 * treats start/end as pfns.
1765 */
1766struct mminit_pfnnid_cache {
1767 unsigned long last_start;
1768 unsigned long last_end;
1769 int last_nid;
1770};
1771
1772static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1773
1774/*
1775 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1776 */
1777static int __meminit __early_pfn_to_nid(unsigned long pfn,
1778 struct mminit_pfnnid_cache *state)
1779{
1780 unsigned long start_pfn, end_pfn;
1781 int nid;
1782
1783 if (state->last_start <= pfn && pfn < state->last_end)
1784 return state->last_nid;
1785
1786 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1787 if (nid != NUMA_NO_NODE) {
1788 state->last_start = start_pfn;
1789 state->last_end = end_pfn;
1790 state->last_nid = nid;
1791 }
1792
1793 return nid;
1794}
1795
1796int __meminit early_pfn_to_nid(unsigned long pfn)
1797{
1798 static DEFINE_SPINLOCK(early_pfn_lock);
1799 int nid;
1800
1801 spin_lock(&early_pfn_lock);
1802 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1803 if (nid < 0)
1804 nid = first_online_node;
1805 spin_unlock(&early_pfn_lock);
1806
1807 return nid;
1808}
1809#endif /* CONFIG_NUMA */
1810
1811void __init memblock_free_pages(struct page *page, unsigned long pfn,
1812 unsigned int order)
1813{
1814 if (early_page_uninitialised(pfn))
1815 return;
1816 if (!kmsan_memblock_free_pages(page, order)) {
1817 /* KMSAN will take care of these pages. */
1818 return;
1819 }
1820 __free_pages_core(page, order);
1821}
1822
/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with compaction's migration or free scanner.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that not all pages within a zone's range of pages belong
 * to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}
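
/*
 * Usage sketch (illustrative, not taken from this file): compaction-style
 * code validates one pageblock at a time before scanning it. The variable
 * names are hypothetical.
 *
 *	unsigned long block_start = pageblock_start_pfn(pfn);
 *	unsigned long block_end = pageblock_end_pfn(pfn);
 *	struct page *page;
 *
 *	page = __pageblock_pfn_to_page(block_start, block_end, zone);
 *	if (!page)
 *		return;		// hole or zone boundary, skip this block
 */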

void set_zone_contiguous(struct zone *zone)
{
	unsigned long block_start_pfn = zone->zone_start_pfn;
	unsigned long block_end_pfn;

	block_end_pfn = pageblock_end_pfn(block_start_pfn);
	for (; block_start_pfn < zone_end_pfn(zone);
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

		if (!__pageblock_pfn_to_page(block_start_pfn,
					     block_end_pfn, zone))
			return;
		cond_resched();
	}

	/* We confirm that there is no hole */
	zone->contiguous = true;
}

void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(unsigned long pfn,
				       unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	if (!nr_pages)
		return;

	page = pfn_to_page(pfn);

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == pageblock_nr_pages && pageblock_aligned(pfn)) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, pageblock_order);
		return;
	}

	for (i = 0; i < nr_pages; i++, page++, pfn++) {
		if (pageblock_aligned(pfn))
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, 0);
	}
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
	if (atomic_dec_and_test(&pgdat_init_n_undone))
		complete(&pgdat_init_all_done_comp);
}

/*
 * Returns true if page needs to be initialized or freed to buddy allocator.
 *
 * We check if a current large page is valid by only checking the validity
 * of the head pfn.
 */
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
	if (pageblock_aligned(pfn) && !pfn_valid(pfn))
		return false;
	return true;
}

/*
 * Free pages to buddy allocator. Try to free aligned pages in
 * pageblock_nr_pages sizes.
 */
static void __init deferred_free_pages(unsigned long pfn,
				       unsigned long end_pfn)
{
	unsigned long nr_free = 0;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(pfn)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 0;
		} else if (pageblock_aligned(pfn)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 1;
		} else {
			nr_free++;
		}
	}
	/* Free the last block of pages to allocator */
	deferred_free_range(pfn - nr_free, nr_free);
}
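
/*
 * Worked example (numbers are hypothetical): with pageblock_nr_pages == 512
 * and a fully valid range [0, 1536), each pageblock boundary flushes the
 * block behind it: pfn 512 triggers deferred_free_range(0, 512), pfn 1024
 * triggers deferred_free_range(512, 512), and the trailing call after the
 * loop frees [1024, 1536). Each full, aligned block therefore takes the
 * single __free_pages_core(page, pageblock_order) path above.
 */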

/*
 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
 * by performing it only once every pageblock_nr_pages.
 * Return number of pages initialized.
 */
static unsigned long __init deferred_init_pages(struct zone *zone,
						unsigned long pfn,
						unsigned long end_pfn)
{
	int nid = zone_to_nid(zone);
	unsigned long nr_pages = 0;
	int zid = zone_idx(zone);
	struct page *page = NULL;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(pfn)) {
			page = NULL;
			continue;
		} else if (!page || pageblock_aligned(pfn)) {
			page = pfn_to_page(pfn);
		} else {
			page++;
		}
		__init_single_page(page, pfn, zid, nid);
		nr_pages++;
	}
	return nr_pages;
}

/*
 * This function is meant to pre-load the iterator for the zone init.
 * Specifically it walks through the ranges until we are caught up to the
 * first_init_pfn value and exits there. If we never encounter the value we
 * return false indicating there are no valid ranges left.
 */
static bool __init
deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
				    unsigned long *spfn, unsigned long *epfn,
				    unsigned long first_init_pfn)
{
	u64 j;

	/*
	 * Start out by walking through the ranges in this zone that have
	 * already been initialized. We don't need to do anything with them
	 * so we just need to flush them out of the system.
	 */
	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
		if (*epfn <= first_init_pfn)
			continue;
		if (*spfn < first_init_pfn)
			*spfn = first_init_pfn;
		*i = j;
		return true;
	}

	return false;
}

/*
 * Initialize and free pages. We do it in two loops: first we initialize
 * struct page, then free to buddy allocator, because while we are
 * freeing pages we can access pages that are ahead (computing buddy
 * page in __free_one_page()).
 *
 * In order to try and keep some memory in the cache we have the loop
 * broken along max page order boundaries. This way we will not cause
 * any issues with the buddy page computation.
 */
static unsigned long __init
deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
		       unsigned long *end_pfn)
{
	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
	unsigned long spfn = *start_pfn, epfn = *end_pfn;
	unsigned long nr_pages = 0;
	u64 j = *i;

	/* First we loop through and initialize the page values */
	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
		unsigned long t;

		if (mo_pfn <= *start_pfn)
			break;

		t = min(mo_pfn, *end_pfn);
		nr_pages += deferred_init_pages(zone, *start_pfn, t);

		if (mo_pfn < *end_pfn) {
			*start_pfn = mo_pfn;
			break;
		}
	}

	/* Reset values and now loop through freeing pages as needed */
	swap(j, *i);

	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
		unsigned long t;

		if (mo_pfn <= spfn)
			break;

		t = min(mo_pfn, epfn);
		deferred_free_pages(spfn, t);

		if (mo_pfn <= epfn)
			break;
	}

	return nr_pages;
}

static void __init
deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
			   void *arg)
{
	unsigned long spfn, epfn;
	struct zone *zone = arg;
	u64 i;

	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);

	/*
	 * Initialize and free pages in MAX_ORDER sized increments so that we
	 * can avoid introducing any issues with the buddy allocator.
	 */
	while (spfn < end_pfn) {
		deferred_init_maxorder(&i, zone, &spfn, &epfn);
		cond_resched();
	}
}

/* An arch may override for more concurrency. */
__weak int __init
deferred_page_init_max_threads(const struct cpumask *node_cpumask)
{
	return 1;
}

/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
	pg_data_t *pgdat = data;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
	unsigned long spfn = 0, epfn = 0;
	unsigned long first_init_pfn, flags;
	unsigned long start = jiffies;
	struct zone *zone;
	int zid, max_threads;
	u64 i;

	/* Bind memory initialisation thread to a local node if possible */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);

	pgdat_resize_lock(pgdat, &flags);
	first_init_pfn = pgdat->first_deferred_pfn;
	if (first_init_pfn == ULONG_MAX) {
		pgdat_resize_unlock(pgdat, &flags);
		pgdat_init_report_one_done();
		return 0;
	}

	/* Sanity check boundaries */
	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
	pgdat->first_deferred_pfn = ULONG_MAX;

	/*
	 * Once we unlock here, the zone cannot be grown anymore, thus if an
	 * interrupt thread must allocate this early in boot, zone must be
	 * pre-grown prior to start of deferred page initialization.
	 */
	pgdat_resize_unlock(pgdat, &flags);

	/* Only the highest zone is deferred so find it */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		zone = pgdat->node_zones + zid;
		if (first_init_pfn < zone_end_pfn(zone))
			break;
	}

	/* If the zone is empty somebody else may have cleared out the zone */
	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						 first_init_pfn))
		goto zone_empty;

	max_threads = deferred_page_init_max_threads(cpumask);

	while (spfn < epfn) {
		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
		struct padata_mt_job job = {
			.thread_fn = deferred_init_memmap_chunk,
			.fn_arg = zone,
			.start = spfn,
			.size = epfn_align - spfn,
			.align = PAGES_PER_SECTION,
			.min_chunk = PAGES_PER_SECTION,
			.max_threads = max_threads,
		};

		padata_do_multithreaded(&job);
		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						    epfn_align);
	}
zone_empty:
	/* Sanity check that the next zone really is unpopulated */
	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

	pr_info("node %d deferred pages initialised in %ums\n",
		pgdat->node_id, jiffies_to_msecs(jiffies - start));

	pgdat_init_report_one_done();
	return 0;
}

/*
 * If this zone has deferred pages, try to grow it by initializing enough
 * deferred pages to satisfy the allocation specified by order, rounded up to
 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
 * of SECTION_SIZE bytes by initializing struct pages in increments of
 * PAGES_PER_SECTION * sizeof(struct page) bytes.
 *
 * Return true when zone was grown, otherwise return false. We return true even
 * when we grow less than requested, to let the caller decide if there are
 * enough pages to satisfy the allocation.
 *
 * Note: We use noinline because this function is needed only during boot, and
 * it is called from a __ref function _deferred_grow_zone. This way we are
 * making sure that it is not inlined into permanent text section.
 */
static noinline bool __init
deferred_grow_zone(struct zone *zone, unsigned int order)
{
	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
	pg_data_t *pgdat = zone->zone_pgdat;
	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
	unsigned long spfn, epfn, flags;
	unsigned long nr_pages = 0;
	u64 i;

	/* Only the last zone may have deferred pages */
	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
		return false;

	pgdat_resize_lock(pgdat, &flags);

	/*
	 * If someone grew this zone while we were waiting for spinlock, return
	 * true, as there might be enough pages already.
	 */
	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
		pgdat_resize_unlock(pgdat, &flags);
		return true;
	}

	/* If the zone is empty somebody else may have cleared out the zone */
	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						 first_deferred_pfn)) {
		pgdat->first_deferred_pfn = ULONG_MAX;
		pgdat_resize_unlock(pgdat, &flags);
		/* Retry only once. */
		return first_deferred_pfn != ULONG_MAX;
	}

	/*
	 * Initialize and free pages in MAX_ORDER sized increments so
	 * that we can avoid introducing any issues with the buddy
	 * allocator.
	 */
	while (spfn < epfn) {
		/* update our first deferred PFN for this section */
		first_deferred_pfn = spfn;

		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
		touch_nmi_watchdog();

		/* We should only stop along section boundaries */
		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
			continue;

		/* If our quota has been met we can stop here */
		if (nr_pages >= nr_pages_needed)
			break;
	}

	pgdat->first_deferred_pfn = spfn;
	pgdat_resize_unlock(pgdat, &flags);

	return nr_pages > 0;
}

/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have the __ref wrapper below: it avoids
 * the section-mismatch warning while still allowing the __init function body
 * to be unloaded after boot.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

void __init page_alloc_init_late(void)
{
	struct zone *zone;
	int nid;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT

	/* There will be num_node_state(N_MEMORY) threads */
	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
	for_each_node_state(nid, N_MEMORY) {
		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
	}

	/* Block until all are initialised */
	wait_for_completion(&pgdat_init_all_done_comp);

	/*
	 * We initialized the rest of the deferred pages. Permanently disable
	 * on-demand struct page initialization.
	 */
	static_branch_disable(&deferred_pages);

	/* Reinit limits that are based on free pages after the kernel is up */
	files_maxfiles_init();
#endif

	buffer_init();

	/* Discard memblock private memory */
	memblock_discard();

	for_each_node_state(nid, N_MEMORY)
		shuffle_free_memory(NODE_DATA(nid));

	for_each_populated_zone(zone)
		set_zone_contiguous(zone);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);
	set_page_refcounted(page);
	__free_pages(page, pageblock_order);

	adjust_managed_page_count(page, pageblock_nr_pages);
	page_zone(page)->cma_pages += pageblock_nr_pages;
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page); this allows merging back
		 * into the allocator when the buddy is freed. The
		 * corresponding page table entries are not touched, so the
		 * pages stay not present in the virtual address space.
		 */
		if (set_page_guard(zone, &page[size], high, migratetype))
			continue;

		add_to_free_list(&page[size], zone, high, migratetype);
		set_buddy_order(&page[size], high);
	}
}
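
/*
 * Worked example (illustrative): allocating order 2 from an order-5 free
 * block hands expand() low == 2, high == 5. The loop peels off and re-frees
 * the upper halves as buddies of order 4 (page[16]), order 3 (page[8]) and
 * order 2 (page[4]), leaving page[0..3] for the caller. Each peeled buddy
 * lands on the free list of its own order, with set_buddy_order() recording
 * its size.
 */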

static void check_new_page_bad(struct page *page)
{
	if (unlikely(page->flags & __PG_HWPOISON)) {
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

static bool check_new_pages(struct page *page, unsigned int order)
{
	int i;
	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;

		if (unlikely(check_new_page(p)))
			return true;
	}

	return false;
}

#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
 * being allocated from pcp lists. With debug_pagealloc also enabled, they are
 * also checked when pcp lists are refilled from the free lists.
 */
static inline bool check_pcp_refill(struct page *page, unsigned int order)
{
	if (debug_pagealloc_enabled_static())
		return check_new_pages(page, order);
	else
		return false;
}

static inline bool check_new_pcp(struct page *page, unsigned int order)
{
	return check_new_pages(page, order);
}
#else
/*
 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
 * when pcp lists are being refilled from the free lists. With debug_pagealloc
 * enabled, they are also checked when being allocated from the pcp lists.
 */
static inline bool check_pcp_refill(struct page *page, unsigned int order)
{
	return check_new_pages(page, order);
}
static inline bool check_new_pcp(struct page *page, unsigned int order)
{
	if (debug_pagealloc_enabled_static())
		return check_new_pages(page, order);
	else
		return false;
}
#endif /* CONFIG_DEBUG_VM */

static inline bool should_skip_kasan_unpoison(gfp_t flags)
{
	/* Don't skip if a software KASAN mode is enabled. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return false;

	/* Skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return true;

	/*
	 * With hardware tag-based KASAN enabled, skip if this has been
	 * requested via __GFP_SKIP_KASAN_UNPOISON.
	 */
	return flags & __GFP_SKIP_KASAN_UNPOISON;
}

static inline bool should_skip_init(gfp_t flags)
{
	/* Don't skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return false;

	/* For hardware tag-based KASAN, skip if requested. */
	return (flags & __GFP_SKIP_ZERO);
}

inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
			!should_skip_init(gfp_flags);
	bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
	int i;

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	debug_pagealloc_map_pages(page, 1 << order);

	/*
	 * Page unpoisoning must happen before memory initialization.
	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
	 * allocations and the page unpoisoning code will complain.
	 */
	kernel_unpoison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN unpoisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 */

	/*
	 * Memory tags are zeroed only in the case where the memory is
	 * initialized as well (see the init_tags computation above).
	 */
	if (init_tags) {
		/* Initialize both memory and tags. */
		for (i = 0; i != 1 << order; ++i)
			tag_clear_highpage(page + i);

		/* Note that memory is already initialized by the loop above. */
		init = false;
	}
	if (!should_skip_kasan_unpoison(gfp_flags)) {
		/* Unpoison shadow memory or set memory tags. */
		kasan_unpoison_pages(page, order, init);

		/* Note that memory is already initialized by KASAN. */
		if (kasan_has_integrated_init())
			init = false;
	} else {
		/* Ensure page_address() dereferencing does not fault. */
		for (i = 0; i != 1 << order; ++i)
			page_kasan_tag_reset(page + i);
	}
	/* If memory is still not initialized, do it now. */
	if (init)
		kernel_init_pages(page, 1 << order);
	/* Propagate __GFP_SKIP_KASAN_POISON to page flags. */
	if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON))
		SetPageSkipKASanPoison(page);

	set_page_owner(page, order, gfp_flags);
	page_table_check_alloc(page, order);
}
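
/*
 * Decision sketch (illustrative, not exhaustive): with init_on_alloc=1 and
 * hardware tag-based KASAN off, a plain GFP_KERNEL allocation ends up with
 * init == true and gets zeroed by kernel_init_pages(); with hardware tags
 * enabled and __GFP_ZEROTAGS set, tag_clear_highpage() zeroes memory and
 * tags together and the later zeroing is skipped (init forced to false).
 */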

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	post_alloc_hook(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = get_page_from_free_area(area, migratetype);
		if (!page)
			continue;
		del_page_from_free_list(page, zone, current_order);
		expand(zone, page, order, current_order, migratetype);
		set_pcppage_migratetype(page, migratetype);
		trace_mm_page_alloc_zone_locked(page, order, migratetype,
				pcp_allowed_order(order) &&
				migratetype < MIGRATE_PCPTYPES);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which free lists of other migratetypes
 * are fallen back to when the free lists for the desired migratetype are
 * depleted; MIGRATE_TYPES terminates each row.
 *
 * The other migratetypes do not have fallbacks.
 */
static int fallbacks[MIGRATE_TYPES][3] = {
	[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
	[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
};
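
/*
 * Illustrative walk (hypothetical loop, mirroring what
 * find_suitable_fallback() does below): a MIGRATE_UNMOVABLE request that
 * finds its own lists empty probes MIGRATE_RECLAIMABLE first, then
 * MIGRATE_MOVABLE, and gives up at the MIGRATE_TYPES sentinel:
 *
 *	int i, mt;
 *
 *	for (i = 0;; i++) {
 *		mt = fallbacks[MIGRATE_UNMOVABLE][i];
 *		if (mt == MIGRATE_TYPES)
 *			break;
 *		// try the free lists of mt ...
 *	}
 */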

#ifdef CONFIG_CMA
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif

/*
 * Move the free pages in a range to the freelist tail of the requested type.
 * Note that start_pfn and end_pfn are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  unsigned long start_pfn, unsigned long end_pfn,
			  int migratetype, int *num_movable)
{
	struct page *page;
	unsigned long pfn;
	unsigned int order;
	int pages_moved = 0;

	for (pfn = start_pfn; pfn <= end_pfn;) {
		page = pfn_to_page(pfn);
		if (!PageBuddy(page)) {
			/*
			 * We assume that pages that could be isolated for
			 * migration are movable. But we don't actually try
			 * isolating, as that would be expensive.
			 */
			if (num_movable &&
					(PageLRU(page) || __PageMovable(page)))
				(*num_movable)++;
			pfn++;
			continue;
		}

		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);

		order = buddy_order(page);
		move_to_free_list(page, zone, order, migratetype);
		pfn += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable)
{
	unsigned long start_pfn, end_pfn, pfn;

	if (num_movable)
		*num_movable = 0;

	pfn = page_to_pfn(page);
	start_pfn = pageblock_start_pfn(pfn);
	end_pfn = pageblock_end_pfn(pfn) - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_pfn = pfn;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_pfn, end_pfn, migratetype,
								num_movable);
}

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * This order check is kept intentionally, even though a more
	 * relaxed check follows below. The reason is that meeting this
	 * condition guarantees we can steal a whole pageblock, whereas
	 * the check below does not guarantee that and is merely a
	 * heuristic that could be changed at any time.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}

static inline bool boost_watermark(struct zone *zone)
{
	unsigned long max_boost;

	if (!watermark_boost_factor)
		return false;
	/*
	 * Don't bother in zones that are unlikely to produce results.
	 * On small machines, including kdump capture kernels running
	 * in a small area, boosting the watermark can cause an out of
	 * memory situation immediately.
	 */
	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
		return false;

	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
			watermark_boost_factor, 10000);

	/*
	 * high watermark may be uninitialised if fragmentation occurs
	 * very early in boot so do not boost. We do not fall
	 * through and boost by pageblock_nr_pages as failing
	 * allocations that early means that reclaim is not going
	 * to help and it may even be impossible to reclaim the
	 * boosted watermark resulting in a hang.
	 */
	if (!max_boost)
		return false;

	max_boost = max(pageblock_nr_pages, max_boost);

	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
		max_boost);

	return true;
}
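
/*
 * Worked example (made-up numbers): with watermark_boost_factor == 15000 and
 * a high watermark of 10000 pages, max_boost = mult_frac(10000, 15000, 10000)
 * = 15000 pages, i.e. the boost is capped at 150% of the high watermark.
 * Each call then raises watermark_boost by one pageblock
 * (pageblock_nr_pages) until that cap is reached.
 */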

/*
 * This function implements actual steal behaviour. If order is large enough,
 * we can steal whole pageblock. If not, we first move freepages in this
 * pageblock to our migratetype and determine how many already-allocated pages
 * are there in the pageblock with a compatible migratetype. If at least half
 * of pages are free or compatible, we can change migratetype of the pageblock
 * itself, so pages freed in the future will be put on the correct free list.
 */
static void steal_suitable_fallback(struct zone *zone, struct page *page,
		unsigned int alloc_flags, int start_type, bool whole_block)
{
	unsigned int current_order = buddy_order(page);
	int free_pages, movable_pages, alike_pages;
	int old_block_type;

	old_block_type = get_pageblock_migratetype(page);

	/*
	 * This can happen due to races and we want to prevent broken
	 * highatomic accounting.
	 */
	if (is_migrate_highatomic(old_block_type))
		goto single_page;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
		change_pageblock_range(page, current_order, start_type);
		goto single_page;
	}

	/*
	 * Boost watermarks to increase reclaim pressure to reduce the
	 * likelihood of future fallbacks. Wake kswapd now as the node
	 * may be balanced overall and kswapd will not wake naturally.
	 */
	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);

	/* We are not allowed to try stealing from the whole block */
	if (!whole_block)
		goto single_page;

	free_pages = move_freepages_block(zone, page, start_type,
						&movable_pages);
	/*
	 * Determine how many pages are compatible with our allocation.
	 * For movable allocation, it's the number of movable pages which
	 * we just obtained. For other types it's a bit more tricky.
	 */
	if (start_type == MIGRATE_MOVABLE) {
		alike_pages = movable_pages;
	} else {
		/*
		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
		 * to MOVABLE pageblock, consider all non-movable pages as
		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
		 * vice versa, be conservative since we can't distinguish the
		 * exact migratetype of non-movable pages.
		 */
		if (old_block_type == MIGRATE_MOVABLE)
			alike_pages = pageblock_nr_pages
						- (free_pages + movable_pages);
		else
			alike_pages = 0;
	}

	/* moving whole block can fail due to zone boundary conditions */
	if (!free_pages)
		goto single_page;

	/*
	 * If a sufficient number of pages in the block are either free or of
	 * comparable migratability as our allocation, claim the whole block.
	 */
	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
			page_group_by_mobility_disabled)
		set_pageblock_migratetype(page, start_type);

	return;

single_page:
	move_to_free_list(page, zone, current_order, start_type);
}

/*
 * Check whether there is a suitable fallback freepage with requested order.
 * If only_stealable is true, this function returns fallback_mt only if
 * we can steal other freepages all together. This would help to reduce
 * fragmentation due to mixed migratetype pages in one pageblock.
 */
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal)
{
	int i;
	int fallback_mt;

	if (area->nr_free == 0)
		return -1;

	*can_steal = false;
	for (i = 0;; i++) {
		fallback_mt = fallbacks[migratetype][i];
		if (fallback_mt == MIGRATE_TYPES)
			break;

		if (free_area_empty(area, fallback_mt))
			continue;

		if (can_steal_fallback(order, migratetype))
			*can_steal = true;

		if (!only_stealable)
			return fallback_mt;

		if (*can_steal)
			return fallback_mt;
	}

	return -1;
}

/*
 * Reserve a pageblock for exclusive use of high-order atomic allocations if
 * there are no empty page blocks that contain a page with a suitable order
 */
static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
				unsigned int alloc_order)
{
	int mt;
	unsigned long max_managed, flags;

	/*
	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
	 * Check is race-prone but harmless.
	 */
	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
	if (zone->nr_reserved_highatomic >= max_managed)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	/* Recheck the nr_reserved_highatomic limit under the lock */
	if (zone->nr_reserved_highatomic >= max_managed)
		goto out_unlock;

	/* Yoink! */
	mt = get_pageblock_migratetype(page);
	/* Only reserve normal pageblocks (i.e., they can merge with others) */
	if (migratetype_is_mergeable(mt)) {
		zone->nr_reserved_highatomic += pageblock_nr_pages;
		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
	}

out_unlock:
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Used when an allocation is about to fail under memory pressure. This
 * potentially hurts the reliability of high-order allocations when under
 * intense memory pressure but failed atomic allocations should be easier
 * to recover from than an OOM.
 *
 * If @force is true, try to unreserve a pageblock even though highatomic
 * pageblock is exhausted.
 */
static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
						bool force)
{
	struct zonelist *zonelist = ac->zonelist;
	unsigned long flags;
	struct zoneref *z;
	struct zone *zone;
	struct page *page;
	int order;
	bool ret;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
								ac->nodemask) {
		/*
		 * Preserve at least one pageblock unless memory pressure
		 * is really high.
		 */
		if (!force && zone->nr_reserved_highatomic <=
					pageblock_nr_pages)
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			struct free_area *area = &(zone->free_area[order]);

			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
			if (!page)
				continue;

			/*
			 * In page freeing path, migratetype change is racy so
			 * we can encounter several free pages in a pageblock
			 * in this loop although we changed the pageblock type
			 * from highatomic to ac->migratetype. So we should
			 * adjust the count only once.
			 */
			if (is_migrate_highatomic_page(page)) {
				/*
				 * It should never happen but changes to
				 * locking could inadvertently allow a per-cpu
				 * drain to add pages to MIGRATE_HIGHATOMIC
				 * while unreserving so be safe and watch for
				 * underflows.
				 */
				zone->nr_reserved_highatomic -= min(
						pageblock_nr_pages,
						zone->nr_reserved_highatomic);
			}

			/*
			 * Convert to ac->migratetype and avoid the normal
			 * pageblock stealing heuristics. Minimally, the caller
			 * is doing the work and needs the pages. More
			 * importantly, if the block was always converted to
			 * MIGRATE_UNMOVABLE or another type then the number
			 * of pageblocks that cannot be completely freed
			 * may increase.
			 */
			set_pageblock_migratetype(page, ac->migratetype);
			ret = move_freepages_block(zone, page, ac->migratetype,
									NULL);
			if (ret) {
				spin_unlock_irqrestore(&zone->lock, flags);
				return ret;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return false;
}

/*
 * Try finding a free buddy page on the fallback list and put it on the free
 * list of requested migratetype, possibly along with other pages from the same
 * block, depending on fragmentation avoidance heuristics. Returns true if
 * fallback was found so that __rmqueue_smallest() can grab it.
 *
 * The use of signed ints for order and current_order is a deliberate
 * deviation from the rest of this file, to make the for loop
 * condition simpler.
 */
static __always_inline bool
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
						unsigned int alloc_flags)
{
	struct free_area *area;
	int current_order;
	int min_order = order;
	struct page *page;
	int fallback_mt;
	bool can_steal;

	/*
	 * Do not steal pages from freelists belonging to other pageblocks
	 * i.e. orders < pageblock_order. If there are no local zones free,
	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
	 */
	if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
		min_order = pageblock_order;

	/*
	 * Find the largest available free page in the other list. This roughly
	 * approximates finding the pageblock with the most free pages, which
	 * would be too costly to do exactly.
	 */
	for (current_order = MAX_ORDER - 1; current_order >= min_order;
				--current_order) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);
		if (fallback_mt == -1)
			continue;

		/*
		 * If we cannot steal all the free pages from the pageblock
		 * and the requested migratetype is movable, it's better to
		 * steal and split the smallest available page instead of the
		 * largest available page, because even if the next movable
		 * allocation falls back into a different pageblock than this
		 * one, it won't cause permanent fragmentation.
		 */
		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
					&& current_order > order)
			goto find_smallest;

		goto do_steal;
	}

	return false;

find_smallest:
	for (current_order = order; current_order < MAX_ORDER;
							current_order++) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);
		if (fallback_mt != -1)
			break;
	}

	/*
	 * This should not happen - we already found a suitable fallback
	 * when looking for the largest page.
	 */
	VM_BUG_ON(current_order == MAX_ORDER);

do_steal:
	page = get_page_from_free_area(area, fallback_mt);

	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
								can_steal);

	trace_mm_page_alloc_extfrag(page, order, current_order,
		start_migratetype, fallback_mt);

	return true;

}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static __always_inline struct page *
__rmqueue(struct zone *zone, unsigned int order, int migratetype,
						unsigned int alloc_flags)
{
	struct page *page;

	if (IS_ENABLED(CONFIG_CMA)) {
		/*
		 * Balance movable allocations between regular and CMA areas by
		 * allocating from CMA when over half of the zone's free memory
		 * is in the CMA area.
		 */
		if (alloc_flags & ALLOC_CMA &&
		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
			page = __rmqueue_cma_fallback(zone, order);
			if (page)
				return page;
		}
	}
retry:
	page = __rmqueue_smallest(zone, order, migratetype);
	if (unlikely(!page)) {
		if (alloc_flags & ALLOC_CMA)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page && __rmqueue_fallback(zone, order, migratetype,
								alloc_flags))
			goto retry;
	}
	return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency. Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, unsigned int alloc_flags)
{
	int i, allocated = 0;

	/* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype,
								alloc_flags);
		if (unlikely(page == NULL))
			break;

		if (unlikely(check_pcp_refill(page, order)))
			continue;

		/*
		 * Split buddy pages returned by expand() are received here in
		 * physical page order. The page is added to the tail of the
		 * caller's list so, from the caller's perspective, the linked
		 * list is ordered by page number under some conditions. This
		 * is useful for IO devices that can forward from the head of
		 * the list in physical page order, and for IO devices that
		 * can merge IO requests if the physical pages are ordered
		 * properly.
		 */
		list_add_tail(&page->pcp_list, list);
		allocated++;
		if (is_migrate_cma(get_pcppage_migratetype(page)))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
					      -(1 << order));
	}

	/*
	 * i pages were removed from the buddy list even if some leak due
	 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
	 * on i. Do not confuse with 'allocated' which is the number of
	 * pages added to the pcp list.
	 */
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return allocated;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	int to_drain, batch;

	batch = READ_ONCE(pcp->batch);
	to_drain = min(pcp->count, batch);
	if (to_drain > 0) {
		unsigned long flags;

		/*
		 * free_pcppages_bulk expects IRQs disabled for zone->lock
		 * so even though pcp->lock is not intended to be IRQ-safe,
		 * it's needed in this context.
		 */
		spin_lock_irqsave(&pcp->lock, flags);
		free_pcppages_bulk(zone, to_drain, pcp, 0);
		spin_unlock_irqrestore(&pcp->lock, flags);
	}
}
#endif

/*
 * Drain pcplists of the indicated processor and zone.
 */
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
	struct per_cpu_pages *pcp;

	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
	if (pcp->count) {
		unsigned long flags;

		/* See drain_zone_pages on why this is disabling IRQs */
		spin_lock_irqsave(&pcp->lock, flags);
		free_pcppages_bulk(zone, pcp->count, pcp, 0);
		spin_unlock_irqrestore(&pcp->lock, flags);
	}
}

/*
 * Drain pcplists of all zones on the indicated processor.
 */
static void drain_pages(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		drain_pages_zone(cpu, zone);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(struct zone *zone)
{
	int cpu = smp_processor_id();

	if (zone)
		drain_pages_zone(cpu, zone);
	else
		drain_pages(cpu);
}

/*
 * The implementation of drain_all_pages(), exposing an extra parameter to
 * drain on all cpus.
 *
 * drain_all_pages() is optimized to only execute on cpus where pcplists are
 * not empty. The check for non-emptiness can however race with a free to
 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
 * that need the guarantee that every CPU has drained can disable the
 * optimizing racy check.
 */
static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
{
	int cpu;

	/*
	 * Allocate in the BSS so we won't require allocation in
	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
	 */
	static cpumask_t cpus_with_pcps;

	/*
	 * Do not drain if one is already in progress unless it's specific to
	 * a zone. Such callers are primarily CMA and memory hotplug and need
	 * the drain to be complete when the call returns.
	 */
	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
		if (!zone)
			return;
		mutex_lock(&pcpu_drain_mutex);
	}

	/*
	 * We don't care about racing with CPU hotplug event
	 * as offline notification will cause the notified
	 * cpu to drain that CPU pcps and on_each_cpu_mask
	 * disables preemption as part of its processing
	 */
	for_each_online_cpu(cpu) {
		struct per_cpu_pages *pcp;
		struct zone *z;
		bool has_pcps = false;

		if (force_all_cpus) {
			/*
			 * The pcp.count check is racy, some callers need a
			 * guarantee that no cpu is missed.
			 */
			has_pcps = true;
		} else if (zone) {
			pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
			if (pcp->count)
				has_pcps = true;
		} else {
			for_each_populated_zone(z) {
				pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
				if (pcp->count) {
					has_pcps = true;
					break;
				}
			}
		}

		if (has_pcps)
			cpumask_set_cpu(cpu, &cpus_with_pcps);
		else
			cpumask_clear_cpu(cpu, &cpus_with_pcps);
	}

	for_each_cpu(cpu, &cpus_with_pcps) {
		if (zone)
			drain_pages_zone(cpu, zone);
		else
			drain_pages(cpu);
	}

	mutex_unlock(&pcpu_drain_mutex);
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 *
 * When zone parameter is non-NULL, spill just the single zone's pages.
 */
void drain_all_pages(struct zone *zone)
{
	__drain_all_pages(zone, false);
}
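
/*
 * Usage note (illustrative): callers such as memory offlining or CMA-style
 * range allocators typically call drain_all_pages(zone) after isolating a
 * range, so that stray pages sitting on remote pcplists are returned to the
 * buddy lists where the isolation logic can see them.
 */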

#ifdef CONFIG_HIBERNATION

/*
 * Touch the watchdog for every WD_PAGE_COUNT pages.
 */
#define WD_PAGE_COUNT	(128*1024)

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
	unsigned long flags;
	unsigned int order, t;
	struct page *page;

	if (zone_is_empty(zone))
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);

			if (!--page_count) {
				touch_nmi_watchdog();
				page_count = WD_PAGE_COUNT;
			}

			if (page_zone(page) != zone)
				continue;

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each_entry(page,
				&zone->free_area[order].free_list[t], buddy_list) {
			unsigned long i;

			pfn = page_to_pfn(page);
			for (i = 0; i < (1UL << order); i++) {
				if (!--page_count) {
					touch_nmi_watchdog();
					page_count = WD_PAGE_COUNT;
				}
				swsusp_set_page_free(pfn_to_page(pfn + i));
			}
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
							unsigned int order)
{
	int migratetype;

	if (!free_pcp_prepare(page, order))
		return false;

	migratetype = get_pfnblock_migratetype(page, pfn);
	set_pcppage_migratetype(page, migratetype);
	return true;
}

static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch,
		       bool free_high)
{
	int min_nr_free, max_nr_free;

	/* Free everything if batch freeing high-order pages. */
	if (unlikely(free_high))
		return pcp->count;

	/* Check for PCP disabled or boot pageset */
	if (unlikely(high < batch))
		return 1;

	/* Leave at least pcp->batch pages on the list */
	min_nr_free = batch;
	max_nr_free = high - batch;

	/*
	 * Double the number of pages freed each time there is subsequent
	 * freeing of pages without any allocation.
	 */
	batch <<= pcp->free_factor;
	if (batch < max_nr_free)
		pcp->free_factor++;
	batch = clamp(batch, min_nr_free, max_nr_free);

	return batch;
}
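
/*
 * Worked example (made-up values): with batch == 64, high == 512 and
 * free_factor == 2, a non-free_high call computes min_nr_free = 64,
 * max_nr_free = 448 and a scaled batch of 64 << 2 = 256; since 256 < 448,
 * free_factor is bumped to 3 and 256 pages are freed. Sustained freeing with
 * no intervening allocation therefore drains the pcplist progressively
 * faster, up to the max_nr_free clamp.
 */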

static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
		       bool free_high)
{
	int high = READ_ONCE(pcp->high);

	if (unlikely(!high || free_high))
		return 0;

	if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
		return high;

	/*
	 * If reclaim is active, limit the number of pages that can be
	 * stored on pcp lists
	 */
	return min(READ_ONCE(pcp->batch) << 2, high);
}

static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
				   struct page *page, int migratetype,
				   unsigned int order)
{
	int high;
	int pindex;
	bool free_high;

	__count_vm_events(PGFREE, 1 << order);
	pindex = order_to_pindex(migratetype, order);
	list_add(&page->pcp_list, &pcp->lists[pindex]);
	pcp->count += 1 << order;

	/*
	 * As high-order pages other than THP's stored on PCP can contribute
	 * to fragmentation, limit the number stored when PCP is heavily
	 * freeing without allocation. The remainder after bulk freeing
	 * stops will be drained from vmstat refresh context.
	 */
	free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);

	high = nr_pcp_high(pcp, zone, free_high);
	if (pcp->count >= high) {
		int batch = READ_ONCE(pcp->batch);

		free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex);
	}
}

/*
 * Free a pcp page
 */
void free_unref_page(struct page *page, unsigned int order)
{
	unsigned long flags;
	unsigned long __maybe_unused UP_flags;
	struct per_cpu_pages *pcp;
	struct zone *zone;
	unsigned long pfn = page_to_pfn(page);
	int migratetype;

	if (!free_unref_page_prepare(page, pfn, order))
		return;

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Place ISOLATE pages on the isolated list because they are being
	 * offlined but treat HIGHATOMIC as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	migratetype = get_pcppage_migratetype(page);
	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
			return;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	zone = page_zone(page);
	pcp_trylock_prepare(UP_flags);
	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
	if (pcp) {
		free_unref_page_commit(zone, pcp, page, migratetype, order);
		pcp_spin_unlock_irqrestore(pcp, flags);
	} else {
		free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
	}
	pcp_trylock_finish(UP_flags);
}

/*
 * Free a list of 0-order pages
 */
void free_unref_page_list(struct list_head *list)
{
	struct page *page, *next;
	struct per_cpu_pages *pcp = NULL;
	struct zone *locked_zone = NULL;
	unsigned long flags;
	int batch_count = 0;
	int migratetype;

	/* Prepare pages for freeing */
	list_for_each_entry_safe(page, next, list, lru) {
		unsigned long pfn = page_to_pfn(page);
		if (!free_unref_page_prepare(page, pfn, 0)) {
			list_del(&page->lru);
			continue;
		}

		/*
		 * Free isolated pages directly to the allocator, see
		 * comment in free_unref_page.
		 */
		migratetype = get_pcppage_migratetype(page);
		if (unlikely(is_migrate_isolate(migratetype))) {
			list_del(&page->lru);
			free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
			continue;
		}
	}

	list_for_each_entry_safe(page, next, list, lru) {
		struct zone *zone = page_zone(page);

		/* Different zone, different pcp lock. */
		if (zone != locked_zone) {
			if (pcp)
				pcp_spin_unlock_irqrestore(pcp, flags);

			locked_zone = zone;
			pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
		}

		/*
		 * Non-isolated types over MIGRATE_PCPTYPES get added
		 * to the MIGRATE_MOVABLE pcp list.
		 */
		migratetype = get_pcppage_migratetype(page);
		if (unlikely(migratetype >= MIGRATE_PCPTYPES))
			migratetype = MIGRATE_MOVABLE;

		trace_mm_page_free_batched(page);
		free_unref_page_commit(zone, pcp, page, migratetype, 0);

		/*
		 * Guard against excessive IRQ disabled times when we get
		 * a large list of pages to free.
		 */
		if (++batch_count == SWAP_CLUSTER_MAX) {
			pcp_spin_unlock_irqrestore(pcp, flags);
			batch_count = 0;
			pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
		}
	}

	if (pcp)
		pcp_spin_unlock_irqrestore(pcp, flags);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) order-0 sub-pages: page[0..n-1].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	VM_BUG_ON_PAGE(!page_count(page), page);

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
	split_page_owner(page, 1 << order);
	split_page_memcg(page, 1 << order);
}
EXPORT_SYMBOL_GPL(split_page);
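
/*
 * Usage sketch (illustrative, driver-style; see the note above about
 * consulting lkml first): grab an order-2 block, split it, and from then on
 * free each of the four pages on its own.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		split_page(page, 2);
 *		// ... use page + 0 .. page + 3 independently ...
 *		__free_page(page + 3);	// each sub-page freed individually
 *	}
 */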
3604
3605int __isolate_free_page(struct page *page, unsigned int order)
3606{
3607 struct zone *zone = page_zone(page);
3608 int mt = get_pageblock_migratetype(page);
3609
3610 if (!is_migrate_isolate(mt)) {
3611 unsigned long watermark;
3612 /*
3613 * Obey watermarks as if the page was being allocated. We can
3614 * emulate a high-order watermark check with a raised order-0
3615 * watermark, because we already know our high-order page
3616 * exists.
3617 */
3618 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3619 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3620 return 0;
3621
3622 __mod_zone_freepage_state(zone, -(1UL << order), mt);
3623 }
3624
3625 del_page_from_free_list(page, zone, order);
3626
3627 /*
3628 * Set the pageblock if the isolated page is at least half of a
3629 * pageblock
3630 */
3631 if (order >= pageblock_order - 1) {
3632 struct page *endpage = page + (1 << order) - 1;
3633 for (; page < endpage; page += pageblock_nr_pages) {
3634 int mt = get_pageblock_migratetype(page);
3635 /*
3636 * Only change normal pageblocks (i.e., they can merge
3637 * with others)
3638 */
3639 if (migratetype_is_mergeable(mt))
3640 set_pageblock_migratetype(page,
3641 MIGRATE_MOVABLE);
3642 }
3643 }
3644
3645 return 1UL << order;
3646}
3647
3648/**
3649 * __putback_isolated_page - Return a now-isolated page back where we got it
3650 * @page: Page that was isolated
3651 * @order: Order of the isolated page
3652 * @mt: The page's pageblock's migratetype
3653 *
3654 * This function is meant to return a page pulled from the free lists via
3655 * __isolate_free_page back to the free list it was pulled from.
3656 */
3657void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3658{
3659 struct zone *zone = page_zone(page);
3660
3661 /* zone lock should be held when this function is called */
3662 lockdep_assert_held(&zone->lock);
3663
3664 /* Return isolated page to tail of freelist. */
3665 __free_one_page(page, page_to_pfn(page), zone, order, mt,
3666 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3667}
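
/*
 * Hypothetical usage sketch (not part of the kernel build): briefly
 * pull a free page out of the buddy lists and put it straight back,
 * as compaction and page isolation do. Both calls require zone->lock.
 */
static inline void example_isolate_putback(struct page *page, unsigned int order)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	int mt;

	spin_lock_irqsave(&zone->lock, flags);
	mt = get_pageblock_migratetype(page);
	if (__isolate_free_page(page, order))
		__putback_isolated_page(page, order, mt);
	spin_unlock_irqrestore(&zone->lock, flags);
}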
3668
3669/*
3670 * Update NUMA hit/miss statistics
3671 */
3672static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3673 long nr_account)
3674{
3675#ifdef CONFIG_NUMA
3676 enum numa_stat_item local_stat = NUMA_LOCAL;
3677
3678	/* Skip NUMA counter updates if NUMA stats are disabled */
3679 if (!static_branch_likely(&vm_numa_stat_key))
3680 return;
3681
3682 if (zone_to_nid(z) != numa_node_id())
3683 local_stat = NUMA_OTHER;
3684
3685 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3686 __count_numa_events(z, NUMA_HIT, nr_account);
3687 else {
3688 __count_numa_events(z, NUMA_MISS, nr_account);
3689 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3690 }
3691 __count_numa_events(z, local_stat, nr_account);
3692#endif
3693}
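
/*
 * Worked example (hypothetical topology): a task running on node 0
 * prefers a node 0 zone but the page comes from node 1. Node 1 then
 * counts NUMA_MISS plus NUMA_OTHER (remote to the allocating CPU),
 * while the preferred node 0 zone counts NUMA_FOREIGN. Had the page
 * come from node 0 itself, it would count NUMA_HIT and NUMA_LOCAL.
 */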
3694
3695static __always_inline
3696struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
3697 unsigned int order, unsigned int alloc_flags,
3698 int migratetype)
3699{
3700 struct page *page;
3701 unsigned long flags;
3702
3703 do {
3704 page = NULL;
3705 spin_lock_irqsave(&zone->lock, flags);
3706 /*
3707		 * An order-0 request can reach here when the pcplist is skipped
3708		 * due to a non-CMA allocation context. The HIGHATOMIC area is
3709		 * reserved for high-order atomic allocations, so an order-0
3710		 * request should skip it.
3711 */
3712 if (order > 0 && alloc_flags & ALLOC_HARDER)
3713 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3714 if (!page) {
3715 page = __rmqueue(zone, order, migratetype, alloc_flags);
3716 if (!page) {
3717 spin_unlock_irqrestore(&zone->lock, flags);
3718 return NULL;
3719 }
3720 }
3721 __mod_zone_freepage_state(zone, -(1 << order),
3722 get_pcppage_migratetype(page));
3723 spin_unlock_irqrestore(&zone->lock, flags);
3724 } while (check_new_pages(page, order));
3725
3726 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3727 zone_statistics(preferred_zone, zone, 1);
3728
3729 return page;
3730}
3731
3732/* Remove page from the per-cpu list, caller must protect the list */
3733static inline
3734struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3735 int migratetype,
3736 unsigned int alloc_flags,
3737 struct per_cpu_pages *pcp,
3738 struct list_head *list)
3739{
3740 struct page *page;
3741
3742 do {
3743 if (list_empty(list)) {
3744 int batch = READ_ONCE(pcp->batch);
3745 int alloced;
3746
3747 /*
3748 * Scale batch relative to order if batch implies
3749 * free pages can be stored on the PCP. Batch can
3750 * be 1 for small zones or for boot pagesets which
3751 * should never store free pages as the pages may
3752 * belong to arbitrary zones.
3753 */
3754 if (batch > 1)
3755 batch = max(batch >> order, 2);
3756 alloced = rmqueue_bulk(zone, order,
3757 batch, list,
3758 migratetype, alloc_flags);
3759
3760 pcp->count += alloced << order;
3761 if (unlikely(list_empty(list)))
3762 return NULL;
3763 }
3764
3765 page = list_first_entry(list, struct page, pcp_list);
3766 list_del(&page->pcp_list);
3767 pcp->count -= 1 << order;
3768 } while (check_new_pcp(page, order));
3769
3770 return page;
3771}
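
/*
 * Worked example (hypothetical values): with pcp->batch == 63 and an
 * order-3 request, the refill above becomes max(63 >> 3, 2) == 7, so
 * rmqueue_bulk() pulls 7 order-3 blocks (56 base pages) instead of 63.
 */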
3772
3773/* Lock and remove page from the per-cpu list */
3774static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3775 struct zone *zone, unsigned int order,
3776 int migratetype, unsigned int alloc_flags)
3777{
3778 struct per_cpu_pages *pcp;
3779 struct list_head *list;
3780 struct page *page;
3781 unsigned long flags;
3782 unsigned long __maybe_unused UP_flags;
3783
3784 /*
3785 * spin_trylock may fail due to a parallel drain. In the future, the
3786 * trylock will also protect against IRQ reentrancy.
3787 */
3788 pcp_trylock_prepare(UP_flags);
3789 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
3790 if (!pcp) {
3791 pcp_trylock_finish(UP_flags);
3792 return NULL;
3793 }
3794
3795 /*
3796 * On allocation, reduce the number of pages that are batch freed.
3797 * See nr_pcp_free() where free_factor is increased for subsequent
3798 * frees.
3799 */
3800 pcp->free_factor >>= 1;
3801 list = &pcp->lists[order_to_pindex(migratetype, order)];
3802 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3803 pcp_spin_unlock_irqrestore(pcp, flags);
3804 pcp_trylock_finish(UP_flags);
3805 if (page) {
3806 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3807 zone_statistics(preferred_zone, zone, 1);
3808 }
3809 return page;
3810}
3811
3812/*
3813 * Allocate a page from the given zone.
3814 * Use pcplists for THP or "cheap" high-order allocations.
3815 */
3816
3817/*
3818 * Do not instrument rmqueue() with KMSAN. This function may call
3819 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
3820 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
3821 * may call rmqueue() again, which will result in a deadlock.
3822 */
3823__no_sanitize_memory
3824static inline
3825struct page *rmqueue(struct zone *preferred_zone,
3826 struct zone *zone, unsigned int order,
3827 gfp_t gfp_flags, unsigned int alloc_flags,
3828 int migratetype)
3829{
3830 struct page *page;
3831
3832 /*
3833 * We most definitely don't want callers attempting to
3834 * allocate greater than order-1 page units with __GFP_NOFAIL.
3835 */
3836 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3837
3838 if (likely(pcp_allowed_order(order))) {
3839 /*
3840		 * The MIGRATE_MOVABLE pcplist could contain pages from the CMA
3841		 * area, so we need to skip it when CMA isn't allowed.
3842 */
3843 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3844 migratetype != MIGRATE_MOVABLE) {
3845 page = rmqueue_pcplist(preferred_zone, zone, order,
3846 migratetype, alloc_flags);
3847 if (likely(page))
3848 goto out;
3849 }
3850 }
3851
3852 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
3853 migratetype);
3854
3855out:
3856 /* Separate test+clear to avoid unnecessary atomics */
3857 if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
3858 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3859 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3860 }
3861
3862 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3863 return page;
3864}
3865
3866#ifdef CONFIG_FAIL_PAGE_ALLOC
3867
3868static struct {
3869 struct fault_attr attr;
3870
3871 bool ignore_gfp_highmem;
3872 bool ignore_gfp_reclaim;
3873 u32 min_order;
3874} fail_page_alloc = {
3875 .attr = FAULT_ATTR_INITIALIZER,
3876 .ignore_gfp_reclaim = true,
3877 .ignore_gfp_highmem = true,
3878 .min_order = 1,
3879};
3880
3881static int __init setup_fail_page_alloc(char *str)
3882{
3883 return setup_fault_attr(&fail_page_alloc.attr, str);
3884}
3885__setup("fail_page_alloc=", setup_fail_page_alloc);
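
/*
 * Example boot parameter (values hypothetical), using the standard
 * fault-injection syntax <interval>,<probability>,<space>,<times>:
 *
 *	fail_page_alloc=1,10,0,-1
 *
 * i.e. consider every call, fail 10% of eligible allocations, no
 * space decay, and no limit on the number of failures. The same knobs
 * appear under debugfs when CONFIG_FAULT_INJECTION_DEBUG_FS is set.
 */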
3886
3887static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3888{
3889 if (order < fail_page_alloc.min_order)
3890 return false;
3891 if (gfp_mask & __GFP_NOFAIL)
3892 return false;
3893 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3894 return false;
3895 if (fail_page_alloc.ignore_gfp_reclaim &&
3896 (gfp_mask & __GFP_DIRECT_RECLAIM))
3897 return false;
3898
3899 if (gfp_mask & __GFP_NOWARN)
3900 fail_page_alloc.attr.no_warn = true;
3901
3902 return should_fail(&fail_page_alloc.attr, 1 << order);
3903}
3904
3905#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3906
3907static int __init fail_page_alloc_debugfs(void)
3908{
3909 umode_t mode = S_IFREG | 0600;
3910 struct dentry *dir;
3911
3912 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3913 &fail_page_alloc.attr);
3914
3915 debugfs_create_bool("ignore-gfp-wait", mode, dir,
3916 &fail_page_alloc.ignore_gfp_reclaim);
3917 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3918 &fail_page_alloc.ignore_gfp_highmem);
3919 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3920
3921 return 0;
3922}
3923
3924late_initcall(fail_page_alloc_debugfs);
3925
3926#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3927
3928#else /* CONFIG_FAIL_PAGE_ALLOC */
3929
3930static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3931{
3932 return false;
3933}
3934
3935#endif /* CONFIG_FAIL_PAGE_ALLOC */
3936
3937noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3938{
3939 return __should_fail_alloc_page(gfp_mask, order);
3940}
3941ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3942
3943static inline long __zone_watermark_unusable_free(struct zone *z,
3944 unsigned int order, unsigned int alloc_flags)
3945{
3946 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3947 long unusable_free = (1 << order) - 1;
3948
3949 /*
3950 * If the caller does not have rights to ALLOC_HARDER then subtract
3951 * the high-atomic reserves. This will over-estimate the size of the
3952 * atomic reserve but it avoids a search.
3953 */
3954 if (likely(!alloc_harder))
3955 unusable_free += z->nr_reserved_highatomic;
3956
3957#ifdef CONFIG_CMA
3958 /* If allocation can't use CMA areas don't use free CMA pages */
3959 if (!(alloc_flags & ALLOC_CMA))
3960 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3961#endif
3962
3963 return unusable_free;
3964}
3965
3966/*
3967 * Return true if free base pages are above 'mark'. For high-order checks it
3968 * will return true if the order-0 watermark is reached and there is at least
3969 * one free page of a suitable size. Checking now avoids taking the zone lock
3970 * to check in the allocation paths if no pages are free.
3971 */
3972bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3973 int highest_zoneidx, unsigned int alloc_flags,
3974 long free_pages)
3975{
3976 long min = mark;
3977 int o;
3978 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3979
3980 /* free_pages may go negative - that's OK */
3981 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3982
3983 if (alloc_flags & ALLOC_HIGH)
3984 min -= min / 2;
3985
3986 if (unlikely(alloc_harder)) {
3987 /*
3988 * OOM victims can try even harder than normal ALLOC_HARDER
3989 * users on the grounds that it's definitely going to be in
3990 * the exit path shortly and free memory. Any allocation it
3991 * makes during the free path will be small and short-lived.
3992 */
3993 if (alloc_flags & ALLOC_OOM)
3994 min -= min / 2;
3995 else
3996 min -= min / 4;
3997 }
3998
3999 /*
4000 * Check watermarks for an order-0 allocation request. If these
4001 * are not met, then a high-order request also cannot go ahead
4002 * even if a suitable page happened to be free.
4003 */
4004 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
4005 return false;
4006
4007 /* If this is an order-0 request then the watermark is fine */
4008 if (!order)
4009 return true;
4010
4011	/* For a high-order request, check that at least one suitable page is free */
4012 for (o = order; o < MAX_ORDER; o++) {
4013 struct free_area *area = &z->free_area[o];
4014 int mt;
4015
4016 if (!area->nr_free)
4017 continue;
4018
4019 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
4020 if (!free_area_empty(area, mt))
4021 return true;
4022 }
4023
4024#ifdef CONFIG_CMA
4025 if ((alloc_flags & ALLOC_CMA) &&
4026 !free_area_empty(area, MIGRATE_CMA)) {
4027 return true;
4028 }
4029#endif
4030 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
4031 return true;
4032 }
4033 return false;
4034}
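
/*
 * Worked example (hypothetical numbers): an order-2 request against
 * mark == 1000 with no ALLOC_HIGH/ALLOC_HARDER/ALLOC_CMA. The
 * (1 << 2) - 1 == 3 not-fully-usable base pages count as unusable, so
 * with 1500 free pages and a lowmem_reserve of 400 the order-0 check
 * passes (1497 > 1400); the loop above must then still find a free
 * block of order >= 2 on an allowed migratetype list to return true.
 */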
4035
4036bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
4037 int highest_zoneidx, unsigned int alloc_flags)
4038{
4039 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
4040 zone_page_state(z, NR_FREE_PAGES));
4041}
4042
4043static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
4044 unsigned long mark, int highest_zoneidx,
4045 unsigned int alloc_flags, gfp_t gfp_mask)
4046{
4047 long free_pages;
4048
4049 free_pages = zone_page_state(z, NR_FREE_PAGES);
4050
4051 /*
4052 * Fast check for order-0 only. If this fails then the reserves
4053 * need to be calculated.
4054 */
4055 if (!order) {
4056 long usable_free;
4057 long reserved;
4058
4059 usable_free = free_pages;
4060 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
4061
4062		/* reserved may overestimate the high-atomic reserves. */
4063 usable_free -= min(usable_free, reserved);
4064 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
4065 return true;
4066 }
4067
4068 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
4069 free_pages))
4070 return true;
4071 /*
4072 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
4073 * when checking the min watermark. The min watermark is the
4074 * point where boosting is ignored so that kswapd is woken up
4075 * when below the low watermark.
4076 */
4077 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
4078 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
4079 mark = z->_watermark[WMARK_MIN];
4080 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
4081 alloc_flags, free_pages);
4082 }
4083
4084 return false;
4085}
4086
4087bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
4088 unsigned long mark, int highest_zoneidx)
4089{
4090 long free_pages = zone_page_state(z, NR_FREE_PAGES);
4091
4092 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
4093 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
4094
4095 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
4096 free_pages);
4097}
4098
4099#ifdef CONFIG_NUMA
4100int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
4101
4102static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
4103{
4104 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
4105 node_reclaim_distance;
4106}
4107#else /* CONFIG_NUMA */
4108static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
4109{
4110 return true;
4111}
4112#endif /* CONFIG_NUMA */
4113
4114/*
4115 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
4116 * fragmentation is subtle. If the preferred zone was HIGHMEM then
4117 * premature use of a lower zone may cause lowmem pressure problems that
4118 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
4119 * probably too small. It only makes sense to spread allocations to avoid
4120 * fragmentation between the Normal and DMA32 zones.
4121 */
4122static inline unsigned int
4123alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
4124{
4125 unsigned int alloc_flags;
4126
4127 /*
4128 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4129 * to save a branch.
4130 */
4131 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
4132
4133#ifdef CONFIG_ZONE_DMA32
4134 if (!zone)
4135 return alloc_flags;
4136
4137 if (zone_idx(zone) != ZONE_NORMAL)
4138 return alloc_flags;
4139
4140 /*
4141	 * If ZONE_DMA32 exists, assume it sits immediately below ZONE_NORMAL and
4142 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
4143 * on UMA that if Normal is populated then so is DMA32.
4144 */
4145 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
4146 if (nr_online_nodes > 1 && !populated_zone(--zone))
4147 return alloc_flags;
4148
4149 alloc_flags |= ALLOC_NOFRAGMENT;
4150#endif /* CONFIG_ZONE_DMA32 */
4151 return alloc_flags;
4152}
4153
4154/* Must be called after current_gfp_context() which can change gfp_mask */
4155static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
4156 unsigned int alloc_flags)
4157{
4158#ifdef CONFIG_CMA
4159 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
4160 alloc_flags |= ALLOC_CMA;
4161#endif
4162 return alloc_flags;
4163}
4164
4165/*
4166 * get_page_from_freelist goes through the zonelist trying to allocate
4167 * a page.
4168 */
4169static struct page *
4170get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
4171 const struct alloc_context *ac)
4172{
4173 struct zoneref *z;
4174 struct zone *zone;
4175 struct pglist_data *last_pgdat = NULL;
4176 bool last_pgdat_dirty_ok = false;
4177 bool no_fallback;
4178
4179retry:
4180 /*
4181	 * Scan the zonelist, looking for a zone with enough free pages.
4182 * See also __cpuset_node_allowed() comment in kernel/cgroup/cpuset.c.
4183 */
4184 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
4185 z = ac->preferred_zoneref;
4186 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
4187 ac->nodemask) {
4188 struct page *page;
4189 unsigned long mark;
4190
4191 if (cpusets_enabled() &&
4192 (alloc_flags & ALLOC_CPUSET) &&
4193 !__cpuset_zone_allowed(zone, gfp_mask))
4194 continue;
4195 /*
4196 * When allocating a page cache page for writing, we
4197 * want to get it from a node that is within its dirty
4198 * limit, such that no single node holds more than its
4199 * proportional share of globally allowed dirty pages.
4200 * The dirty limits take into account the node's
4201 * lowmem reserves and high watermark so that kswapd
4202 * should be able to balance it without having to
4203 * write pages from its LRU list.
4204 *
4205 * XXX: For now, allow allocations to potentially
4206 * exceed the per-node dirty limit in the slowpath
4207 * (spread_dirty_pages unset) before going into reclaim,
4208 * which is important when on a NUMA setup the allowed
4209 * nodes are together not big enough to reach the
4210 * global limit. The proper fix for these situations
4211 * will require awareness of nodes in the
4212 * dirty-throttling and the flusher threads.
4213 */
4214 if (ac->spread_dirty_pages) {
4215 if (last_pgdat != zone->zone_pgdat) {
4216 last_pgdat = zone->zone_pgdat;
4217 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
4218 }
4219
4220 if (!last_pgdat_dirty_ok)
4221 continue;
4222 }
4223
4224 if (no_fallback && nr_online_nodes > 1 &&
4225 zone != ac->preferred_zoneref->zone) {
4226 int local_nid;
4227
4228 /*
4229 * If moving to a remote node, retry but allow
4230 * fragmenting fallbacks. Locality is more important
4231 * than fragmentation avoidance.
4232 */
4233 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
4234 if (zone_to_nid(zone) != local_nid) {
4235 alloc_flags &= ~ALLOC_NOFRAGMENT;
4236 goto retry;
4237 }
4238 }
4239
4240 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
4241 if (!zone_watermark_fast(zone, order, mark,
4242 ac->highest_zoneidx, alloc_flags,
4243 gfp_mask)) {
4244 int ret;
4245
4246#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4247 /*
4248 * Watermark failed for this zone, but see if we can
4249 * grow this zone if it contains deferred pages.
4250 */
4251 if (static_branch_unlikely(&deferred_pages)) {
4252 if (_deferred_grow_zone(zone, order))
4253 goto try_this_zone;
4254 }
4255#endif
4256 /* Checked here to keep the fast path fast */
4257 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4258 if (alloc_flags & ALLOC_NO_WATERMARKS)
4259 goto try_this_zone;
4260
4261 if (!node_reclaim_enabled() ||
4262 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
4263 continue;
4264
4265 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
4266 switch (ret) {
4267 case NODE_RECLAIM_NOSCAN:
4268 /* did not scan */
4269 continue;
4270 case NODE_RECLAIM_FULL:
4271 /* scanned but unreclaimable */
4272 continue;
4273 default:
4274 /* did we reclaim enough */
4275 if (zone_watermark_ok(zone, order, mark,
4276 ac->highest_zoneidx, alloc_flags))
4277 goto try_this_zone;
4278
4279 continue;
4280 }
4281 }
4282
4283try_this_zone:
4284 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
4285 gfp_mask, alloc_flags, ac->migratetype);
4286 if (page) {
4287 prep_new_page(page, order, gfp_mask, alloc_flags);
4288
4289 /*
4290 * If this is a high-order atomic allocation then check
4291 * if the pageblock should be reserved for the future
4292 */
4293 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
4294 reserve_highatomic_pageblock(page, zone, order);
4295
4296 return page;
4297 } else {
4298#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4299 /* Try again if zone has deferred pages */
4300 if (static_branch_unlikely(&deferred_pages)) {
4301 if (_deferred_grow_zone(zone, order))
4302 goto try_this_zone;
4303 }
4304#endif
4305 }
4306 }
4307
4308 /*
4309 * It's possible on a UMA machine to get through all zones that are
4310 * fragmented. If avoiding fragmentation, reset and try again.
4311 */
4312 if (no_fallback) {
4313 alloc_flags &= ~ALLOC_NOFRAGMENT;
4314 goto retry;
4315 }
4316
4317 return NULL;
4318}
4319
4320static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
4321{
4322 unsigned int filter = SHOW_MEM_FILTER_NODES;
4323
4324 /*
4325 * This documents exceptions given to allocations in certain
4326 * contexts that are allowed to allocate outside current's set
4327 * of allowed nodes.
4328 */
4329 if (!(gfp_mask & __GFP_NOMEMALLOC))
4330 if (tsk_is_oom_victim(current) ||
4331 (current->flags & (PF_MEMALLOC | PF_EXITING)))
4332 filter &= ~SHOW_MEM_FILTER_NODES;
4333 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
4334 filter &= ~SHOW_MEM_FILTER_NODES;
4335
4336 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
4337}
4338
4339void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4340{
4341 struct va_format vaf;
4342 va_list args;
4343 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4344
4345 if ((gfp_mask & __GFP_NOWARN) ||
4346 !__ratelimit(&nopage_rs) ||
4347 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
4348 return;
4349
4350 va_start(args, fmt);
4351 vaf.fmt = fmt;
4352 vaf.va = &args;
4353 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4354 current->comm, &vaf, gfp_mask, &gfp_mask,
4355 nodemask_pr_args(nodemask));
4356 va_end(args);
4357
4358 cpuset_print_current_mems_allowed();
4359 pr_cont("\n");
4360 dump_stack();
4361 warn_alloc_show_mem(gfp_mask, nodemask);
4362}
4363
4364static inline struct page *
4365__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4366 unsigned int alloc_flags,
4367 const struct alloc_context *ac)
4368{
4369 struct page *page;
4370
4371 page = get_page_from_freelist(gfp_mask, order,
4372 alloc_flags|ALLOC_CPUSET, ac);
4373 /*
4374 * fallback to ignore cpuset restriction if our nodes
4375 * are depleted
4376 */
4377 if (!page)
4378 page = get_page_from_freelist(gfp_mask, order,
4379 alloc_flags, ac);
4380
4381 return page;
4382}
4383
4384static inline struct page *
4385__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4386 const struct alloc_context *ac, unsigned long *did_some_progress)
4387{
4388 struct oom_control oc = {
4389 .zonelist = ac->zonelist,
4390 .nodemask = ac->nodemask,
4391 .memcg = NULL,
4392 .gfp_mask = gfp_mask,
4393 .order = order,
4394 };
4395 struct page *page;
4396
4397 *did_some_progress = 0;
4398
4399 /*
4400 * Acquire the oom lock. If that fails, somebody else is
4401 * making progress for us.
4402 */
4403 if (!mutex_trylock(&oom_lock)) {
4404 *did_some_progress = 1;
4405 schedule_timeout_uninterruptible(1);
4406 return NULL;
4407 }
4408
4409 /*
4410	 * Go through the zonelist one more time with a very high watermark;
4411	 * this only catches a parallel OOM kill, and we must fail if we're
4412	 * still under heavy pressure. Also make sure this reclaim attempt
4413	 * does not depend on a __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
4414	 * allocation, which would never fail since oom_lock is already held.
4415 */
4416 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4417 ~__GFP_DIRECT_RECLAIM, order,
4418 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4419 if (page)
4420 goto out;
4421
4422 /* Coredumps can quickly deplete all memory reserves */
4423 if (current->flags & PF_DUMPCORE)
4424 goto out;
4425 /* The OOM killer will not help higher order allocs */
4426 if (order > PAGE_ALLOC_COSTLY_ORDER)
4427 goto out;
4428 /*
4429 * We have already exhausted all our reclaim opportunities without any
4430 * success so it is time to admit defeat. We will skip the OOM killer
4431 * because it is very likely that the caller has a more reasonable
4432 * fallback than shooting a random task.
4433 *
4434 * The OOM killer may not free memory on a specific node.
4435 */
4436 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4437 goto out;
4438 /* The OOM killer does not needlessly kill tasks for lowmem */
4439 if (ac->highest_zoneidx < ZONE_NORMAL)
4440 goto out;
4441 if (pm_suspended_storage())
4442 goto out;
4443 /*
4444 * XXX: GFP_NOFS allocations should rather fail than rely on
4445	 * other requests to make forward progress.
4446 * We are in an unfortunate situation where out_of_memory cannot
4447 * do much for this context but let's try it to at least get
4448 * access to memory reserved if the current task is killed (see
4449 * out_of_memory). Once filesystems are ready to handle allocation
4450 * failures more gracefully we should just bail out here.
4451 */
4452
4453 /* Exhausted what can be done so it's blame time */
4454 if (out_of_memory(&oc) ||
4455 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
4456 *did_some_progress = 1;
4457
4458 /*
4459 * Help non-failing allocations by giving them access to memory
4460 * reserves
4461 */
4462 if (gfp_mask & __GFP_NOFAIL)
4463 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4464 ALLOC_NO_WATERMARKS, ac);
4465 }
4466out:
4467 mutex_unlock(&oom_lock);
4468 return page;
4469}
4470
4471/*
4472 * Maximum number of compaction retries with progress before the OOM
4473 * killer is considered the only way to move forward.
4474 */
4475#define MAX_COMPACT_RETRIES 16
4476
4477#ifdef CONFIG_COMPACTION
4478/* Try memory compaction for high-order allocations before reclaim */
4479static struct page *
4480__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4481 unsigned int alloc_flags, const struct alloc_context *ac,
4482 enum compact_priority prio, enum compact_result *compact_result)
4483{
4484 struct page *page = NULL;
4485 unsigned long pflags;
4486 unsigned int noreclaim_flag;
4487
4488 if (!order)
4489 return NULL;
4490
4491 psi_memstall_enter(&pflags);
4492 delayacct_compact_start();
4493 noreclaim_flag = memalloc_noreclaim_save();
4494
4495 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4496 prio, &page);
4497
4498 memalloc_noreclaim_restore(noreclaim_flag);
4499 psi_memstall_leave(&pflags);
4500 delayacct_compact_end();
4501
4502 if (*compact_result == COMPACT_SKIPPED)
4503 return NULL;
4504 /*
4505	 * Compaction wasn't deferred or skipped in at least one zone, so
4506	 * count a compaction stall
4507 */
4508 count_vm_event(COMPACTSTALL);
4509
4510 /* Prep a captured page if available */
4511 if (page)
4512 prep_new_page(page, order, gfp_mask, alloc_flags);
4513
4514	/* Try to get a page from the freelist if available */
4515 if (!page)
4516 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4517
4518 if (page) {
4519 struct zone *zone = page_zone(page);
4520
4521 zone->compact_blockskip_flush = false;
4522 compaction_defer_reset(zone, order, true);
4523 count_vm_event(COMPACTSUCCESS);
4524 return page;
4525 }
4526
4527 /*
4528	 * It's bad if a compaction run occurs and fails. The most likely reason
4529 * is that pages exist, but not enough to satisfy watermarks.
4530 */
4531 count_vm_event(COMPACTFAIL);
4532
4533 cond_resched();
4534
4535 return NULL;
4536}
4537
4538static inline bool
4539should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4540 enum compact_result compact_result,
4541 enum compact_priority *compact_priority,
4542 int *compaction_retries)
4543{
4544 int max_retries = MAX_COMPACT_RETRIES;
4545 int min_priority;
4546 bool ret = false;
4547 int retries = *compaction_retries;
4548 enum compact_priority priority = *compact_priority;
4549
4550 if (!order)
4551 return false;
4552
4553 if (fatal_signal_pending(current))
4554 return false;
4555
4556 if (compaction_made_progress(compact_result))
4557 (*compaction_retries)++;
4558
4559 /*
4560	 * compaction considers all zones to be desperately out of memory,
4561	 * so it doesn't really make much sense to retry except when the
4562 * failure could be caused by insufficient priority
4563 */
4564 if (compaction_failed(compact_result))
4565 goto check_priority;
4566
4567 /*
4568 * compaction was skipped because there are not enough order-0 pages
4569 * to work with, so we retry only if it looks like reclaim can help.
4570 */
4571 if (compaction_needs_reclaim(compact_result)) {
4572 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4573 goto out;
4574 }
4575
4576 /*
4577	 * Make sure the compaction wasn't deferred and didn't bail out early
4578	 * due to lock contention before we declare that we should give up.
4579 * But the next retry should use a higher priority if allowed, so
4580 * we don't just keep bailing out endlessly.
4581 */
4582	if (compaction_withdrawn(compact_result))
4583		goto check_priority;
4585
4586 /*
4587	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4588	 * costly ones because they are de facto nofail and invoke the OOM
4589	 * killer to move on, while costly requests can fail and their users
4590	 * are ready to cope with that. Allowing 1/4 of the retries is rather
4591	 * arbitrary, but we would need much more detailed feedback from
4592	 * compaction to make a better decision.
4593 */
4594 if (order > PAGE_ALLOC_COSTLY_ORDER)
4595 max_retries /= 4;
4596 if (*compaction_retries <= max_retries) {
4597 ret = true;
4598 goto out;
4599 }
4600
4601 /*
4602 * Make sure there are attempts at the highest priority if we exhausted
4603 * all retries or failed at the lower priorities.
4604 */
4605check_priority:
4606 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4607 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4608
4609 if (*compact_priority > min_priority) {
4610 (*compact_priority)--;
4611 *compaction_retries = 0;
4612 ret = true;
4613 }
4614out:
4615 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4616 return ret;
4617}
4618#else
4619static inline struct page *
4620__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4621 unsigned int alloc_flags, const struct alloc_context *ac,
4622 enum compact_priority prio, enum compact_result *compact_result)
4623{
4624 *compact_result = COMPACT_SKIPPED;
4625 return NULL;
4626}
4627
4628static inline bool
4629should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4630 enum compact_result compact_result,
4631 enum compact_priority *compact_priority,
4632 int *compaction_retries)
4633{
4634 struct zone *zone;
4635 struct zoneref *z;
4636
4637 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4638 return false;
4639
4640 /*
4641 * There are setups with compaction disabled which would prefer to loop
4642 * inside the allocator rather than hit the oom killer prematurely.
4643	 * Let's give them some hope and keep retrying while the order-0
4644 * watermarks are OK.
4645 */
4646 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4647 ac->highest_zoneidx, ac->nodemask) {
4648 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4649 ac->highest_zoneidx, alloc_flags))
4650 return true;
4651 }
4652 return false;
4653}
4654#endif /* CONFIG_COMPACTION */
4655
4656#ifdef CONFIG_LOCKDEP
4657static struct lockdep_map __fs_reclaim_map =
4658 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4659
4660static bool __need_reclaim(gfp_t gfp_mask)
4661{
4662 /* no reclaim without waiting on it */
4663 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4664 return false;
4665
4666 /* this guy won't enter reclaim */
4667 if (current->flags & PF_MEMALLOC)
4668 return false;
4669
4670 if (gfp_mask & __GFP_NOLOCKDEP)
4671 return false;
4672
4673 return true;
4674}
4675
4676void __fs_reclaim_acquire(unsigned long ip)
4677{
4678 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
4679}
4680
4681void __fs_reclaim_release(unsigned long ip)
4682{
4683 lock_release(&__fs_reclaim_map, ip);
4684}
4685
4686void fs_reclaim_acquire(gfp_t gfp_mask)
4687{
4688 gfp_mask = current_gfp_context(gfp_mask);
4689
4690 if (__need_reclaim(gfp_mask)) {
4691 if (gfp_mask & __GFP_FS)
4692 __fs_reclaim_acquire(_RET_IP_);
4693
4694#ifdef CONFIG_MMU_NOTIFIER
4695 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4696 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4697#endif
4698
4699 }
4700}
4701EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4702
4703void fs_reclaim_release(gfp_t gfp_mask)
4704{
4705 gfp_mask = current_gfp_context(gfp_mask);
4706
4707 if (__need_reclaim(gfp_mask)) {
4708 if (gfp_mask & __GFP_FS)
4709 __fs_reclaim_release(_RET_IP_);
4710 }
4711}
4712EXPORT_SYMBOL_GPL(fs_reclaim_release);
4713#endif
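
/*
 * Hypothetical usage sketch (not part of the kernel build): prime
 * lockdep once, e.g. at driver init, so that taking "lock" under
 * direct reclaim is reported immediately rather than only on the
 * first real reclaim that happens to race with it.
 */
static inline void example_fs_reclaim_annotate(struct mutex *lock)
{
	fs_reclaim_acquire(GFP_KERNEL);
	mutex_lock(lock);	/* records lock as nested inside fs_reclaim */
	mutex_unlock(lock);
	fs_reclaim_release(GFP_KERNEL);
}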
4714
4715/*
4716 * Zonelists may change due to hotplug during allocation. Detect when zonelists
4717 * have been rebuilt so the allocation can be retried. The reader side does not
4718 * lock and retries the allocation if the zonelist changes. The writer side is
4719 * protected by the embedded spin_lock.
4720 */
4721static DEFINE_SEQLOCK(zonelist_update_seq);
4722
4723static unsigned int zonelist_iter_begin(void)
4724{
4725 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4726 return read_seqbegin(&zonelist_update_seq);
4727
4728 return 0;
4729}
4730
4731static unsigned int check_retry_zonelist(unsigned int seq)
4732{
4733 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4734 return read_seqretry(&zonelist_update_seq, seq);
4735
4736 return seq;
4737}
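
/*
 * Hypothetical sketch (not part of the kernel build) of the reader
 * pattern the slowpath uses: snapshot a cookie, attempt the
 * allocation, and restart if memory hot-remove rebuilt the zonelists
 * in the meantime.
 */
static inline void example_zonelist_retry(void)
{
	unsigned int cookie;

restart:
	cookie = zonelist_iter_begin();
	/* ... walk the zonelists; the attempt may fail spuriously ... */
	if (check_retry_zonelist(cookie))
		goto restart;
}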
4738
4739/* Perform direct synchronous page reclaim */
4740static unsigned long
4741__perform_reclaim(gfp_t gfp_mask, unsigned int order,
4742 const struct alloc_context *ac)
4743{
4744 unsigned int noreclaim_flag;
4745 unsigned long progress;
4746
4747 cond_resched();
4748
4749 /* We now go into synchronous reclaim */
4750 cpuset_memory_pressure_bump();
4751 fs_reclaim_acquire(gfp_mask);
4752 noreclaim_flag = memalloc_noreclaim_save();
4753
4754 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4755 ac->nodemask);
4756
4757 memalloc_noreclaim_restore(noreclaim_flag);
4758 fs_reclaim_release(gfp_mask);
4759
4760 cond_resched();
4761
4762 return progress;
4763}
4764
4765/* The really slow allocator path where we enter direct reclaim */
4766static inline struct page *
4767__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4768 unsigned int alloc_flags, const struct alloc_context *ac,
4769 unsigned long *did_some_progress)
4770{
4771 struct page *page = NULL;
4772 unsigned long pflags;
4773 bool drained = false;
4774
4775 psi_memstall_enter(&pflags);
4776 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4777 if (unlikely(!(*did_some_progress)))
4778 goto out;
4779
4780retry:
4781 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4782
4783 /*
4784 * If an allocation failed after direct reclaim, it could be because
4785 * pages are pinned on the per-cpu lists or in high alloc reserves.
4786 * Shrink them and try again
4787 */
4788 if (!page && !drained) {
4789 unreserve_highatomic_pageblock(ac, false);
4790 drain_all_pages(NULL);
4791 drained = true;
4792 goto retry;
4793 }
4794out:
4795 psi_memstall_leave(&pflags);
4796
4797 return page;
4798}
4799
4800static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4801 const struct alloc_context *ac)
4802{
4803 struct zoneref *z;
4804 struct zone *zone;
4805 pg_data_t *last_pgdat = NULL;
4806 enum zone_type highest_zoneidx = ac->highest_zoneidx;
4807
4808 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4809 ac->nodemask) {
4810 if (!managed_zone(zone))
4811 continue;
4812 if (last_pgdat != zone->zone_pgdat) {
4813 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4814 last_pgdat = zone->zone_pgdat;
4815 }
4816 }
4817}
4818
4819static inline unsigned int
4820gfp_to_alloc_flags(gfp_t gfp_mask)
4821{
4822 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4823
4824 /*
4825 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4826 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4827 * to save two branches.
4828 */
4829 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4830 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4831
4832 /*
4833 * The caller may dip into page reserves a bit more if the caller
4834 * cannot run direct reclaim, or if the caller has realtime scheduling
4835 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
4836 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
4837 */
4838 alloc_flags |= (__force int)
4839 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4840
4841 if (gfp_mask & __GFP_ATOMIC) {
4842 /*
4843 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4844 * if it can't schedule.
4845 */
4846 if (!(gfp_mask & __GFP_NOMEMALLOC))
4847 alloc_flags |= ALLOC_HARDER;
4848 /*
4849 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
4850 * comment for __cpuset_node_allowed().
4851 */
4852 alloc_flags &= ~ALLOC_CPUSET;
4853 } else if (unlikely(rt_task(current)) && in_task())
4854 alloc_flags |= ALLOC_HARDER;
4855
4856 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4857
4858 return alloc_flags;
4859}
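
/*
 * Worked example: GFP_ATOMIC is __GFP_HIGH | __GFP_ATOMIC |
 * __GFP_KSWAPD_RECLAIM, so the above yields ALLOC_WMARK_MIN |
 * ALLOC_HIGH | ALLOC_KSWAPD | ALLOC_HARDER with ALLOC_CPUSET cleared,
 * i.e. an atomic allocation may dip well below the min watermark and
 * ignore cpuset restrictions rather than fail.
 */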
4860
4861static bool oom_reserves_allowed(struct task_struct *tsk)
4862{
4863 if (!tsk_is_oom_victim(tsk))
4864 return false;
4865
4866 /*
4867 * !MMU doesn't have oom reaper so give access to memory reserves
4868 * only to the thread with TIF_MEMDIE set
4869 */
4870 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4871 return false;
4872
4873 return true;
4874}
4875
4876/*
4877 * Distinguish requests which really need access to full memory
4878 * reserves from oom victims which can live with a portion of it
4879 */
4880static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4881{
4882 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4883 return 0;
4884 if (gfp_mask & __GFP_MEMALLOC)
4885 return ALLOC_NO_WATERMARKS;
4886 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4887 return ALLOC_NO_WATERMARKS;
4888 if (!in_interrupt()) {
4889 if (current->flags & PF_MEMALLOC)
4890 return ALLOC_NO_WATERMARKS;
4891 else if (oom_reserves_allowed(current))
4892 return ALLOC_OOM;
4893 }
4894
4895 return 0;
4896}
4897
4898bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4899{
4900 return !!__gfp_pfmemalloc_flags(gfp_mask);
4901}
4902
4903/*
4904 * Checks whether it makes sense to retry the reclaim to make forward progress
4905 * for the given allocation request.
4906 *
4907 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4908 * without success, or when we couldn't even meet the watermark if we
4909 * reclaimed all remaining pages on the LRU lists.
4910 *
4911 * Returns true if a retry is viable or false to enter the oom path.
4912 */
4913static inline bool
4914should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4915 struct alloc_context *ac, int alloc_flags,
4916 bool did_some_progress, int *no_progress_loops)
4917{
4918 struct zone *zone;
4919 struct zoneref *z;
4920 bool ret = false;
4921
4922 /*
4923	 * Costly allocations might have made progress, but due to high
4924	 * fragmentation that doesn't mean a block of their order will become
4925	 * available, so always increment the no-progress counter for them
4926 */
4927 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4928 *no_progress_loops = 0;
4929 else
4930 (*no_progress_loops)++;
4931
4932 /*
4933 * Make sure we converge to OOM if we cannot make any progress
4934	 * several times in a row.
4935 */
4936 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4937 /* Before OOM, exhaust highatomic_reserve */
4938 return unreserve_highatomic_pageblock(ac, true);
4939 }
4940
4941 /*
4942 * Keep reclaiming pages while there is a chance this will lead
4943 * somewhere. If none of the target zones can satisfy our allocation
4944 * request even if all reclaimable pages are considered then we are
4945 * screwed and have to go OOM.
4946 */
4947 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4948 ac->highest_zoneidx, ac->nodemask) {
4949 unsigned long available;
4950 unsigned long reclaimable;
4951 unsigned long min_wmark = min_wmark_pages(zone);
4952 bool wmark;
4953
4954 available = reclaimable = zone_reclaimable_pages(zone);
4955 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4956
4957 /*
4958 * Would the allocation succeed if we reclaimed all
4959 * reclaimable pages?
4960 */
4961 wmark = __zone_watermark_ok(zone, order, min_wmark,
4962 ac->highest_zoneidx, alloc_flags, available);
4963 trace_reclaim_retry_zone(z, order, reclaimable,
4964 available, min_wmark, *no_progress_loops, wmark);
4965 if (wmark) {
4966 ret = true;
4967 break;
4968 }
4969 }
4970
4971 /*
4972 * Memory allocation/reclaim might be called from a WQ context and the
4973 * current implementation of the WQ concurrency control doesn't
4974 * recognize that a particular WQ is congested if the worker thread is
4975 * looping without ever sleeping. Therefore we have to do a short sleep
4976 * here rather than calling cond_resched().
4977 */
4978 if (current->flags & PF_WQ_WORKER)
4979 schedule_timeout_uninterruptible(1);
4980 else
4981 cond_resched();
4982 return ret;
4983}
4984
4985static inline bool
4986check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4987{
4988 /*
4989 * It's possible that cpuset's mems_allowed and the nodemask from
4990 * mempolicy don't intersect. This should be normally dealt with by
4991 * policy_nodemask(), but it's possible to race with cpuset update in
4992 * such a way the check therein was true, and then it became false
4993 * before we got our cpuset_mems_cookie here.
4994 * This assumes that for all allocations, ac->nodemask can come only
4995 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4996 * when it does not intersect with the cpuset restrictions) or the
4997 * caller can deal with a violated nodemask.
4998 */
4999 if (cpusets_enabled() && ac->nodemask &&
5000 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
5001 ac->nodemask = NULL;
5002 return true;
5003 }
5004
5005 /*
5006 * When updating a task's mems_allowed or mempolicy nodemask, it is
5007 * possible to race with parallel threads in such a way that our
5008 * allocation can fail while the mask is being updated. If we are about
5009 * to fail, check if the cpuset changed during allocation and if so,
5010 * retry.
5011 */
5012 if (read_mems_allowed_retry(cpuset_mems_cookie))
5013 return true;
5014
5015 return false;
5016}
5017
5018static inline struct page *
5019__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
5020 struct alloc_context *ac)
5021{
5022 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
5023 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
5024 struct page *page = NULL;
5025 unsigned int alloc_flags;
5026 unsigned long did_some_progress;
5027 enum compact_priority compact_priority;
5028 enum compact_result compact_result;
5029 int compaction_retries;
5030 int no_progress_loops;
5031 unsigned int cpuset_mems_cookie;
5032 unsigned int zonelist_iter_cookie;
5033 int reserve_flags;
5034
5035 /*
5036 * We also sanity check to catch abuse of atomic reserves being used by
5037 * callers that are not in atomic context.
5038 */
5039 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
5040 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
5041 gfp_mask &= ~__GFP_ATOMIC;
5042
5043restart:
5044 compaction_retries = 0;
5045 no_progress_loops = 0;
5046 compact_priority = DEF_COMPACT_PRIORITY;
5047 cpuset_mems_cookie = read_mems_allowed_begin();
5048 zonelist_iter_cookie = zonelist_iter_begin();
5049
5050 /*
5051 * The fast path uses conservative alloc_flags to succeed only until
5052 * kswapd needs to be woken up, and to avoid the cost of setting up
5053 * alloc_flags precisely. So we do that now.
5054 */
5055 alloc_flags = gfp_to_alloc_flags(gfp_mask);
5056
5057 /*
5058 * We need to recalculate the starting point for the zonelist iterator
5059 * because we might have used different nodemask in the fast path, or
5060 * there was a cpuset modification and we are retrying - otherwise we
5061 * could end up iterating over non-eligible zones endlessly.
5062 */
5063 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5064 ac->highest_zoneidx, ac->nodemask);
5065 if (!ac->preferred_zoneref->zone)
5066 goto nopage;
5067
5068 /*
5069 * Check for insane configurations where the cpuset doesn't contain
5070 * any suitable zone to satisfy the request - e.g. non-movable
5071 * GFP_HIGHUSER allocations from MOVABLE nodes only.
5072 */
5073 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
5074 struct zoneref *z = first_zones_zonelist(ac->zonelist,
5075 ac->highest_zoneidx,
5076 &cpuset_current_mems_allowed);
5077 if (!z->zone)
5078 goto nopage;
5079 }
5080
5081 if (alloc_flags & ALLOC_KSWAPD)
5082 wake_all_kswapds(order, gfp_mask, ac);
5083
5084 /*
5085 * The adjusted alloc_flags might result in immediate success, so try
5086 * that first
5087 */
5088 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
5089 if (page)
5090 goto got_pg;
5091
5092 /*
5093 * For costly allocations, try direct compaction first, as it's likely
5094 * that we have enough base pages and don't need to reclaim. For non-
5095 * movable high-order allocations, do that as well, as compaction will
5096	 * try to prevent permanent fragmentation by migrating from blocks of the
5097 * same migratetype.
5098 * Don't try this for allocations that are allowed to ignore
5099 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
5100 */
5101 if (can_direct_reclaim &&
5102 (costly_order ||
5103 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
5104 && !gfp_pfmemalloc_allowed(gfp_mask)) {
5105 page = __alloc_pages_direct_compact(gfp_mask, order,
5106 alloc_flags, ac,
5107 INIT_COMPACT_PRIORITY,
5108 &compact_result);
5109 if (page)
5110 goto got_pg;
5111
5112 /*
5113 * Checks for costly allocations with __GFP_NORETRY, which
5114 * includes some THP page fault allocations
5115 */
5116 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
5117 /*
5118 * If allocating entire pageblock(s) and compaction
5119 * failed because all zones are below low watermarks
5120 * or is prohibited because it recently failed at this
5121 * order, fail immediately unless the allocator has
5122 * requested compaction and reclaim retry.
5123 *
5124 * Reclaim is
5125 * - potentially very expensive because zones are far
5126 * below their low watermarks or this is part of very
5127 * bursty high order allocations,
5128 * - not guaranteed to help because isolate_freepages()
5129 * may not iterate over freed pages as part of its
5130 * linear scan, and
5131 * - unlikely to make entire pageblocks free on its
5132 * own.
5133 */
5134 if (compact_result == COMPACT_SKIPPED ||
5135 compact_result == COMPACT_DEFERRED)
5136 goto nopage;
5137
5138 /*
5139 * Looks like reclaim/compaction is worth trying, but
5140 * sync compaction could be very expensive, so keep
5141 * using async compaction.
5142 */
5143 compact_priority = INIT_COMPACT_PRIORITY;
5144 }
5145 }
5146
5147retry:
5148 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
5149 if (alloc_flags & ALLOC_KSWAPD)
5150 wake_all_kswapds(order, gfp_mask, ac);
5151
5152 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
5153 if (reserve_flags)
5154 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
5155 (alloc_flags & ALLOC_KSWAPD);
5156
5157 /*
5158 * Reset the nodemask and zonelist iterators if memory policies can be
5159 * ignored. These allocations are high priority and system rather than
5160 * user oriented.
5161 */
5162 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
5163 ac->nodemask = NULL;
5164 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5165 ac->highest_zoneidx, ac->nodemask);
5166 }
5167
5168 /* Attempt with potentially adjusted zonelist and alloc_flags */
5169 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
5170 if (page)
5171 goto got_pg;
5172
5173 /* Caller is not willing to reclaim, we can't balance anything */
5174 if (!can_direct_reclaim)
5175 goto nopage;
5176
5177 /* Avoid recursion of direct reclaim */
5178 if (current->flags & PF_MEMALLOC)
5179 goto nopage;
5180
5181 /* Try direct reclaim and then allocating */
5182 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
5183 &did_some_progress);
5184 if (page)
5185 goto got_pg;
5186
5187 /* Try direct compaction and then allocating */
5188 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
5189 compact_priority, &compact_result);
5190 if (page)
5191 goto got_pg;
5192
5193 /* Do not loop if specifically requested */
5194 if (gfp_mask & __GFP_NORETRY)
5195 goto nopage;
5196
5197 /*
5198 * Do not retry costly high order allocations unless they are
5199 * __GFP_RETRY_MAYFAIL
5200 */
5201 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
5202 goto nopage;
5203
5204 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
5205 did_some_progress > 0, &no_progress_loops))
5206 goto retry;
5207
5208 /*
5209	 * It doesn't make any sense to retry compaction if order-0 reclaim
5210	 * is not able to make any progress, because the current implementation
5211	 * of compaction depends on a sufficient amount of free memory
5212	 * (see __compaction_suitable)
5213 */
5214 if (did_some_progress > 0 &&
5215 should_compact_retry(ac, order, alloc_flags,
5216 compact_result, &compact_priority,
5217 &compaction_retries))
5218 goto retry;
5219
5220
5221 /*
5222 * Deal with possible cpuset update races or zonelist updates to avoid
5223	 * an unnecessary OOM kill.
5224 */
5225 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
5226 check_retry_zonelist(zonelist_iter_cookie))
5227 goto restart;
5228
5229 /* Reclaim has failed us, start killing things */
5230 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
5231 if (page)
5232 goto got_pg;
5233
5234 /* Avoid allocations with no watermarks from looping endlessly */
5235 if (tsk_is_oom_victim(current) &&
5236 (alloc_flags & ALLOC_OOM ||
5237 (gfp_mask & __GFP_NOMEMALLOC)))
5238 goto nopage;
5239
5240 /* Retry as long as the OOM killer is making progress */
5241 if (did_some_progress) {
5242 no_progress_loops = 0;
5243 goto retry;
5244 }
5245
5246nopage:
5247 /*
5248 * Deal with possible cpuset update races or zonelist updates to avoid
5249	 * an unnecessary OOM kill.
5250 */
5251 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
5252 check_retry_zonelist(zonelist_iter_cookie))
5253 goto restart;
5254
5255 /*
5256	 * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
5257 * we always retry
5258 */
5259 if (gfp_mask & __GFP_NOFAIL) {
5260 /*
5261		 * All existing users of __GFP_NOFAIL are blockable, so warn
5262 * of any new users that actually require GFP_NOWAIT
5263 */
5264 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
5265 goto fail;
5266
5267 /*
5268		 * A PF_MEMALLOC request from this context is rather bizarre
5269		 * because we cannot reclaim anything and can only loop waiting
5270		 * for somebody to do the work for us
5271 */
5272 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
5273
5274 /*
5275		 * Non-failing costly orders are a hard requirement which we
5276		 * are not well prepared for, so let's warn about these users
5277		 * so that we can identify them and convert them to something
5278		 * else.
5279 */
5280 WARN_ON_ONCE_GFP(costly_order, gfp_mask);
5281
5282 /*
5283 * Help non-failing allocations by giving them access to memory
5284 * reserves but do not use ALLOC_NO_WATERMARKS because this
5285 * could deplete whole memory reserves which would just make
5286 * the situation worse
5287 */
5288 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
5289 if (page)
5290 goto got_pg;
5291
5292 cond_resched();
5293 goto retry;
5294 }
5295fail:
5296 warn_alloc(gfp_mask, ac->nodemask,
5297 "page allocation failure: order:%u", order);
5298got_pg:
5299 return page;
5300}
5301
5302static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
5303 int preferred_nid, nodemask_t *nodemask,
5304 struct alloc_context *ac, gfp_t *alloc_gfp,
5305 unsigned int *alloc_flags)
5306{
5307 ac->highest_zoneidx = gfp_zone(gfp_mask);
5308 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
5309 ac->nodemask = nodemask;
5310 ac->migratetype = gfp_migratetype(gfp_mask);
5311
5312 if (cpusets_enabled()) {
5313 *alloc_gfp |= __GFP_HARDWALL;
5314 /*
5315		 * In interrupt context the allocation is unrelated to the
5316		 * current task's context, which means any node is OK.
5317 */
5318 if (in_task() && !ac->nodemask)
5319 ac->nodemask = &cpuset_current_mems_allowed;
5320 else
5321 *alloc_flags |= ALLOC_CPUSET;
5322 }
5323
5324 might_alloc(gfp_mask);
5325
5326 if (should_fail_alloc_page(gfp_mask, order))
5327 return false;
5328
5329 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5330
5331 /* Dirty zone balancing only done in the fast path */
5332 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5333
5334 /*
5335 * The preferred zone is used for statistics but crucially it is
5336 * also used as the starting point for the zonelist iterator. It
5337 * may get reset for allocations that ignore memory policies.
5338 */
5339 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5340 ac->highest_zoneidx, ac->nodemask);
5341
5342 return true;
5343}
5344
5345/*
5346 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
5347 * @gfp: GFP flags for the allocation
5348 * @preferred_nid: The preferred NUMA node ID to allocate from
5349 * @nodemask: Set of nodes to allocate from, may be NULL
5350 * @nr_pages: The number of pages desired on the list or array
5351 * @page_list: Optional list to store the allocated pages
5352 * @page_array: Optional array to store the pages
5353 *
5354 * This is a batched version of the page allocator that attempts to
5355 * allocate nr_pages quickly. Pages are added to page_list if page_list
5356 * is not NULL, otherwise it is assumed that the page_array is valid.
5357 *
5358 * For lists, nr_pages is the number of pages that should be allocated.
5359 *
5360 * For arrays, only NULL elements are populated with pages and nr_pages
5361 * is the maximum number of pages that will be stored in the array.
5362 *
5363 * Returns the number of pages on the list or array.
5364 */
5365unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5366 nodemask_t *nodemask, int nr_pages,
5367 struct list_head *page_list,
5368 struct page **page_array)
5369{
5370 struct page *page;
5371 unsigned long flags;
5372 unsigned long __maybe_unused UP_flags;
5373 struct zone *zone;
5374 struct zoneref *z;
5375 struct per_cpu_pages *pcp;
5376 struct list_head *pcp_list;
5377 struct alloc_context ac;
5378 gfp_t alloc_gfp;
5379 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5380 int nr_populated = 0, nr_account = 0;
5381
5382 /*
5383 * Skip populated array elements to determine if any pages need
5384 * to be allocated before disabling IRQs.
5385 */
5386 while (page_array && nr_populated < nr_pages && page_array[nr_populated])
5387 nr_populated++;
5388
5389 /* No pages requested? */
5390 if (unlikely(nr_pages <= 0))
5391 goto out;
5392
5393 /* Already populated array? */
5394 if (unlikely(page_array && nr_pages - nr_populated == 0))
5395 goto out;
5396
5397 /* Bulk allocator does not support memcg accounting. */
5398 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
5399 goto failed;
5400
5401 /* Use the single page allocator for one page. */
5402 if (nr_pages - nr_populated == 1)
5403 goto failed;
5404
5405#ifdef CONFIG_PAGE_OWNER
5406 /*
5407 * PAGE_OWNER may recurse into the allocator to allocate space to
5408 * save the stack with pagesets.lock held. Releasing/reacquiring
5409	 * removes much of the performance benefit of bulk allocation, so
5410	 * force the caller to allocate one page at a time; that performs
5411	 * similarly to adding this complexity to the bulk allocator.
5412 */
5413 if (static_branch_unlikely(&page_owner_inited))
5414 goto failed;
5415#endif
5416
5417	/* May set ALLOC_NOFRAGMENT; a fragmenting allocation falls back to one page. */
5418 gfp &= gfp_allowed_mask;
5419 alloc_gfp = gfp;
5420 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5421 goto out;
5422 gfp = alloc_gfp;
5423
5424 /* Find an allowed local zone that meets the low watermark. */
5425 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
5426 unsigned long mark;
5427
5428 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5429 !__cpuset_zone_allowed(zone, gfp)) {
5430 continue;
5431 }
5432
5433 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
5434 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
5435 goto failed;
5436 }
5437
5438 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5439 if (zone_watermark_fast(zone, 0, mark,
5440 zonelist_zone_idx(ac.preferred_zoneref),
5441 alloc_flags, gfp)) {
5442 break;
5443 }
5444 }
5445
5446 /*
5447	 * If there are no allowed local zones that meet the watermarks then
5448 * try to allocate a single page and reclaim if necessary.
5449 */
5450 if (unlikely(!zone))
5451 goto failed;
5452
5453 /* Is a parallel drain in progress? */
5454 pcp_trylock_prepare(UP_flags);
5455 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
5456 if (!pcp)
5457 goto failed_irq;
5458
5459 /* Attempt the batch allocation */
5460 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
5461 while (nr_populated < nr_pages) {
5462
5463 /* Skip existing pages */
5464 if (page_array && page_array[nr_populated]) {
5465 nr_populated++;
5466 continue;
5467 }
5468
5469 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5470 pcp, pcp_list);
5471 if (unlikely(!page)) {
5472 /* Try and allocate at least one page */
5473 if (!nr_account) {
5474 pcp_spin_unlock_irqrestore(pcp, flags);
5475 goto failed_irq;
5476 }
5477 break;
5478 }
5479 nr_account++;
5480
5481 prep_new_page(page, 0, gfp, 0);
5482 if (page_list)
5483 list_add(&page->lru, page_list);
5484 else
5485 page_array[nr_populated] = page;
5486 nr_populated++;
5487 }
5488
5489 pcp_spin_unlock_irqrestore(pcp, flags);
5490 pcp_trylock_finish(UP_flags);
5491
5492 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5493 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
5494
5495out:
5496 return nr_populated;
5497
5498failed_irq:
5499 pcp_trylock_finish(UP_flags);
5500
5501failed:
5502 page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
5503 if (page) {
5504 if (page_list)
5505 list_add(&page->lru, page_list);
5506 else
5507 page_array[nr_populated] = page;
5508 nr_populated++;
5509 }
5510
5511 goto out;
5512}
5513EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
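
/*
 * Illustrative sketch (editorial, not part of this file): a caller would
 * typically use the alloc_pages_bulk_array() wrapper and handle any slots
 * the bulk path could not fill, e.g.:
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk_array(GFP_KERNEL, ARRAY_SIZE(pages), pages);
 *	if (filled < ARRAY_SIZE(pages))
 *		pr_warn("bulk allocation short: got %lu pages\n", filled);
 */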
5514
5515/*
5516 * This is the 'heart' of the zoned buddy allocator.
5517 */
5518struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
5519 nodemask_t *nodemask)
5520{
5521 struct page *page;
5522 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5523 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5524 struct alloc_context ac = { };
5525
5526 /*
5527 * There are several places where we assume that the order value is sane
5528 * so bail out early if the request is out of bound.
5529	 * so bail out early if the request is out of bounds.
5530 if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
5531 return NULL;
5532
5533 gfp &= gfp_allowed_mask;
5534 /*
5535 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5536	 * and GFP_NOIO, which have to be inherited by all allocation requests
5537 * from a particular context which has been marked by
5538 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
5539 * movable zones are not used during allocation.
5540 */
5541 gfp = current_gfp_context(gfp);
5542 alloc_gfp = gfp;
5543 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5544 &alloc_gfp, &alloc_flags))
5545 return NULL;
5546
5547 /*
5548 * Forbid the first pass from falling back to types that fragment
5549 * memory until all local zones are considered.
5550 */
5551 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
5552
5553 /* First allocation attempt */
5554 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5555 if (likely(page))
5556 goto out;
5557
5558 alloc_gfp = gfp;
5559 ac.spread_dirty_pages = false;
5560
5561 /*
5562 * Restore the original nodemask if it was potentially replaced with
5563 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5564 */
5565 ac.nodemask = nodemask;
5566
5567 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5568
5569out:
5570 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
5571 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5572 __free_pages(page, order);
5573 page = NULL;
5574 }
5575
5576 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5577 kmsan_alloc_page(page, order, alloc_gfp);
5578
5579 return page;
5580}
5581EXPORT_SYMBOL(__alloc_pages);
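
/*
 * Illustrative sketch (editorial): most callers reach this function through
 * wrappers such as alloc_pages(), e.g. for a single zeroed page:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 *	if (page)
 *		__free_pages(page, 0);
 */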
5582
5583struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
5584 nodemask_t *nodemask)
5585{
5586 struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
5587 preferred_nid, nodemask);
5588
5589 if (page && order > 1)
5590 prep_transhuge_page(page);
5591 return (struct folio *)page;
5592}
5593EXPORT_SYMBOL(__folio_alloc);
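
/*
 * Illustrative sketch (editorial): the folio_alloc() wrapper calls this with
 * the local node as @preferred_nid, e.g.:
 *
 *	struct folio *folio = folio_alloc(GFP_KERNEL, 2);
 *	if (folio)
 *		folio_put(folio);
 */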
5594
5595/*
5596 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5597 * address cannot represent highmem pages. Use alloc_pages and then kmap if
5598 * you need to access high mem.
5599 */
5600unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5601{
5602 struct page *page;
5603
5604 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5605 if (!page)
5606 return 0;
5607 return (unsigned long) page_address(page);
5608}
5609EXPORT_SYMBOL(__get_free_pages);
5610
5611unsigned long get_zeroed_page(gfp_t gfp_mask)
5612{
5613 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
5614}
5615EXPORT_SYMBOL(get_zeroed_page);
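
/*
 * Illustrative sketch (editorial): these helpers return a kernel virtual
 * address rather than a struct page, and pair with free_pages() below:
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (addr)
 *		free_pages(addr, 0);
 */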
5616
5617/**
5618 * __free_pages - Free pages allocated with alloc_pages().
5619 * @page: The page pointer returned from alloc_pages().
5620 * @order: The order of the allocation.
5621 *
5622 * This function can free multi-page allocations that are not compound
5623 * pages. It does not check that the @order passed in matches that of
5624 * the allocation, so it is easy to leak memory. Freeing more memory
5625 * than was allocated will probably emit a warning.
5626 *
5627 * If the last reference to this page is speculative, it will be released
5628 * by put_page() which only frees the first page of a non-compound
5629 * allocation. To prevent the remaining pages from being leaked, we free
5630 * the subsequent pages here. If you want to use the page's reference
5631 * count to decide when to free the allocation, you should allocate a
5632 * compound page, and use put_page() instead of __free_pages().
5633 *
5634 * Context: May be called in interrupt context or while holding a normal
5635 * spinlock, but not in NMI context or while holding a raw spinlock.
5636 */
5637void __free_pages(struct page *page, unsigned int order)
5638{
5639 if (put_page_testzero(page))
5640 free_the_page(page, order);
5641 else if (!PageHead(page))
5642 while (order-- > 0)
5643 free_the_page(page + (1 << order), order);
5644}
5645EXPORT_SYMBOL(__free_pages);
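
/*
 * Illustrative sketch (editorial): the @order passed here must match the one
 * used at allocation time, since non-compound pages do not record it:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 3);
 *	...
 *	__free_pages(page, 3);
 */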
5646
5647void free_pages(unsigned long addr, unsigned int order)
5648{
5649 if (addr != 0) {
5650 VM_BUG_ON(!virt_addr_valid((void *)addr));
5651 __free_pages(virt_to_page((void *)addr), order);
5652 }
5653}
5654
5655EXPORT_SYMBOL(free_pages);
5656
5657/*
5658 * Page Fragment:
5659 * An arbitrary-length arbitrary-offset area of memory which resides
5660 * within a 0 or higher order page. Multiple fragments within that page
5661 * are individually refcounted, in the page's reference counter.
5662 *
5663 * The page_frag functions below provide a simple allocation framework for
5664 * page fragments. This is used by the network stack and network device
5665 * drivers to provide a backing region of memory for use as either an
5666 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5667 */
5668static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5669 gfp_t gfp_mask)
5670{
5671 struct page *page = NULL;
5672 gfp_t gfp = gfp_mask;
5673
5674#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5675 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5676 __GFP_NOMEMALLOC;
5677 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5678 PAGE_FRAG_CACHE_MAX_ORDER);
5679 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5680#endif
5681 if (unlikely(!page))
5682 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5683
5684 nc->va = page ? page_address(page) : NULL;
5685
5686 return page;
5687}
5688
5689void __page_frag_cache_drain(struct page *page, unsigned int count)
5690{
5691 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5692
5693 if (page_ref_sub_and_test(page, count))
5694 free_the_page(page, compound_order(page));
5695}
5696EXPORT_SYMBOL(__page_frag_cache_drain);
5697
5698void *page_frag_alloc_align(struct page_frag_cache *nc,
5699 unsigned int fragsz, gfp_t gfp_mask,
5700 unsigned int align_mask)
5701{
5702 unsigned int size = PAGE_SIZE;
5703 struct page *page;
5704 int offset;
5705
5706 if (unlikely(!nc->va)) {
5707refill:
5708 page = __page_frag_cache_refill(nc, gfp_mask);
5709 if (!page)
5710 return NULL;
5711
5712#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5713 /* if size can vary use size else just use PAGE_SIZE */
5714 size = nc->size;
5715#endif
5716 /* Even if we own the page, we do not use atomic_set().
5717 * This would break get_page_unless_zero() users.
5718 */
5719 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
5720
5721 /* reset page count bias and offset to start of new frag */
5722 nc->pfmemalloc = page_is_pfmemalloc(page);
5723 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5724 nc->offset = size;
5725 }
5726
5727 offset = nc->offset - fragsz;
5728 if (unlikely(offset < 0)) {
5729 page = virt_to_page(nc->va);
5730
5731 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
5732 goto refill;
5733
5734 if (unlikely(nc->pfmemalloc)) {
5735 free_the_page(page, compound_order(page));
5736 goto refill;
5737 }
5738
5739#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5740 /* if size can vary use size else just use PAGE_SIZE */
5741 size = nc->size;
5742#endif
5743 /* OK, page count is 0, we can safely set it */
5744 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5745
5746 /* reset page count bias and offset to start of new frag */
5747 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5748 offset = size - fragsz;
5749 if (unlikely(offset < 0)) {
5750 /*
5751 * The caller is trying to allocate a fragment
5752 * with fragsz > PAGE_SIZE but the cache isn't big
5753 * enough to satisfy the request, this may
5754 * happen in low memory conditions.
5755 * We don't release the cache page because
5756 * it could make memory pressure worse
5757 * so we simply return NULL here.
5758 */
5759 return NULL;
5760 }
5761 }
5762
5763 nc->pagecnt_bias--;
5764 offset &= align_mask;
5765 nc->offset = offset;
5766
5767 return nc->va + offset;
5768}
5769EXPORT_SYMBOL(page_frag_alloc_align);
5770
5771/*
5772 * Frees a page fragment allocated out of either a compound or order 0 page.
5773 */
5774void page_frag_free(void *addr)
5775{
5776 struct page *page = virt_to_head_page(addr);
5777
5778 if (unlikely(put_page_testzero(page)))
5779 free_the_page(page, compound_order(page));
5780}
5781EXPORT_SYMBOL(page_frag_free);
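
/*
 * Illustrative sketch (editorial): a typical fragment-cache consumer, such
 * as a network driver, allocates small buffers and frees them individually:
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *	if (buf)
 *		page_frag_free(buf);
 */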
5782
5783static void *make_alloc_exact(unsigned long addr, unsigned int order,
5784 size_t size)
5785{
5786 if (addr) {
5787 unsigned long alloc_end = addr + (PAGE_SIZE << order);
5788 unsigned long used = addr + PAGE_ALIGN(size);
5789
5790 split_page(virt_to_page((void *)addr), order);
5791 while (used < alloc_end) {
5792 free_page(used);
5793 used += PAGE_SIZE;
5794 }
5795 }
5796 return (void *)addr;
5797}
5798
5799/**
5800 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5801 * @size: the number of bytes to allocate
5802 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5803 *
5804 * This function is similar to alloc_pages(), except that it allocates the
5805 * minimum number of pages to satisfy the request. alloc_pages() can only
5806 * allocate memory in power-of-two pages.
5807 *
5808 * This function is also limited by MAX_ORDER.
5809 *
5810 * Memory allocated by this function must be released by free_pages_exact().
5811 *
5812 * Return: pointer to the allocated area or %NULL in case of error.
5813 */
5814void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5815{
5816 unsigned int order = get_order(size);
5817 unsigned long addr;
5818
5819 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5820 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5821
5822 addr = __get_free_pages(gfp_mask, order);
5823 return make_alloc_exact(addr, order, size);
5824}
5825EXPORT_SYMBOL(alloc_pages_exact);
5826
5827/**
5828 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5829 * pages on a node.
5830 * @nid: the preferred node ID where memory should be allocated
5831 * @size: the number of bytes to allocate
5832 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5833 *
5834 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5835 * back.
5836 *
5837 * Return: pointer to the allocated area or %NULL in case of error.
5838 */
5839void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5840{
5841 unsigned int order = get_order(size);
5842 struct page *p;
5843
5844 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5845 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5846
5847 p = alloc_pages_node(nid, gfp_mask, order);
5848 if (!p)
5849 return NULL;
5850 return make_alloc_exact((unsigned long)page_address(p), order, size);
5851}
5852
5853/**
5854 * free_pages_exact - release memory allocated via alloc_pages_exact()
5855 * @virt: the value returned by alloc_pages_exact.
5856 * @size: size of allocation, same value as passed to alloc_pages_exact().
5857 *
5858 * Release the memory allocated by a previous call to alloc_pages_exact.
5859 */
5860void free_pages_exact(void *virt, size_t size)
5861{
5862 unsigned long addr = (unsigned long)virt;
5863 unsigned long end = addr + PAGE_ALIGN(size);
5864
5865 while (addr < end) {
5866 free_page(addr);
5867 addr += PAGE_SIZE;
5868 }
5869}
5870EXPORT_SYMBOL(free_pages_exact);
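
/*
 * Illustrative sketch (editorial): a three-page request allocates an order-2
 * block internally but frees the unused fourth page, so only 3 pages are held:
 *
 *	void *buf = alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL);
 *	if (buf)
 *		free_pages_exact(buf, 3 * PAGE_SIZE);
 */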
5871
5872/**
5873 * nr_free_zone_pages - count number of pages beyond high watermark
5874 * @offset: The zone index of the highest zone
5875 *
5876 * nr_free_zone_pages() counts the number of pages which are beyond the
5877 * high watermark within all zones at or below a given zone index. For each
5878 * zone, the number of pages is calculated as:
5879 *
5880 * nr_free_zone_pages = managed_pages - high_pages
5881 *
5882 * Return: number of pages beyond high watermark.
5883 */
5884static unsigned long nr_free_zone_pages(int offset)
5885{
5886 struct zoneref *z;
5887 struct zone *zone;
5888
5889 /* Just pick one node, since fallback list is circular */
5890 unsigned long sum = 0;
5891
5892 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5893
5894 for_each_zone_zonelist(zone, z, zonelist, offset) {
5895 unsigned long size = zone_managed_pages(zone);
5896 unsigned long high = high_wmark_pages(zone);
5897 if (size > high)
5898 sum += size - high;
5899 }
5900
5901 return sum;
5902}
5903
5904/**
5905 * nr_free_buffer_pages - count number of pages beyond high watermark
5906 *
5907 * nr_free_buffer_pages() counts the number of pages which are beyond the high
5908 * watermark within ZONE_DMA and ZONE_NORMAL.
5909 *
5910 * Return: number of pages beyond high watermark within ZONE_DMA and
5911 * ZONE_NORMAL.
5912 */
5913unsigned long nr_free_buffer_pages(void)
5914{
5915 return nr_free_zone_pages(gfp_zone(GFP_USER));
5916}
5917EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5918
5919static inline void show_node(struct zone *zone)
5920{
5921 if (IS_ENABLED(CONFIG_NUMA))
5922 printk("Node %d ", zone_to_nid(zone));
5923}
5924
5925long si_mem_available(void)
5926{
5927 long available;
5928 unsigned long pagecache;
5929 unsigned long wmark_low = 0;
5930 unsigned long pages[NR_LRU_LISTS];
5931 unsigned long reclaimable;
5932 struct zone *zone;
5933 int lru;
5934
5935 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5936 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5937
5938 for_each_zone(zone)
5939 wmark_low += low_wmark_pages(zone);
5940
5941 /*
5942 * Estimate the amount of memory available for userspace allocations,
5943 * without causing swapping or OOM.
5944 */
5945 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5946
5947 /*
5948 * Not all the page cache can be freed, otherwise the system will
5949 * start swapping or thrashing. Assume at least half of the page
5950 * cache, or the low watermark worth of cache, needs to stay.
5951 */
5952 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5953 pagecache -= min(pagecache / 2, wmark_low);
5954 available += pagecache;
5955
5956 /*
5957 * Part of the reclaimable slab and other kernel memory consists of
5958 * items that are in use, and cannot be freed. Cap this estimate at the
5959 * low watermark.
5960 */
5961 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5962 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5963 available += reclaimable - min(reclaimable / 2, wmark_low);
5964
5965 if (available < 0)
5966 available = 0;
5967 return available;
5968}
5969EXPORT_SYMBOL_GPL(si_mem_available);
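
/*
 * Summary of the estimate above (editorial, in pages):
 *
 *	available = free - totalreserve
 *		  + pagecache   - min(pagecache / 2,   wmark_low)
 *		  + reclaimable - min(reclaimable / 2, wmark_low)
 */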
5970
5971void si_meminfo(struct sysinfo *val)
5972{
5973 val->totalram = totalram_pages();
5974 val->sharedram = global_node_page_state(NR_SHMEM);
5975 val->freeram = global_zone_page_state(NR_FREE_PAGES);
5976 val->bufferram = nr_blockdev_pages();
5977 val->totalhigh = totalhigh_pages();
5978 val->freehigh = nr_free_highpages();
5979 val->mem_unit = PAGE_SIZE;
5980}
5981
5982EXPORT_SYMBOL(si_meminfo);
5983
5984#ifdef CONFIG_NUMA
5985void si_meminfo_node(struct sysinfo *val, int nid)
5986{
5987 int zone_type; /* needs to be signed */
5988 unsigned long managed_pages = 0;
5989 unsigned long managed_highpages = 0;
5990 unsigned long free_highpages = 0;
5991 pg_data_t *pgdat = NODE_DATA(nid);
5992
5993 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5994 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5995 val->totalram = managed_pages;
5996 val->sharedram = node_page_state(pgdat, NR_SHMEM);
5997 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5998#ifdef CONFIG_HIGHMEM
5999 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
6000 struct zone *zone = &pgdat->node_zones[zone_type];
6001
6002 if (is_highmem(zone)) {
6003 managed_highpages += zone_managed_pages(zone);
6004 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
6005 }
6006 }
6007 val->totalhigh = managed_highpages;
6008 val->freehigh = free_highpages;
6009#else
6010 val->totalhigh = managed_highpages;
6011 val->freehigh = free_highpages;
6012#endif
6013 val->mem_unit = PAGE_SIZE;
6014}
6015#endif
6016
6017/*
6018 * Determine whether the node should be displayed or not, depending on whether
6019 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
6020 */
6021static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
6022{
6023 if (!(flags & SHOW_MEM_FILTER_NODES))
6024 return false;
6025
6026 /*
6027	 * No nodemask supplied - i.e. the implicit NUMA memory policy. Do not
6028	 * bother with the synchronization - read_mems_allowed_begin - because
6029	 * we do not have to be precise here.
6030 */
6031 if (!nodemask)
6032 nodemask = &cpuset_current_mems_allowed;
6033
6034 return !node_isset(nid, *nodemask);
6035}
6036
6037#define K(x) ((x) << (PAGE_SHIFT-10))
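/* e.g. with 4KiB pages (PAGE_SHIFT == 12), K(256) == 1024 kB */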
6038
6039static void show_migration_types(unsigned char type)
6040{
6041 static const char types[MIGRATE_TYPES] = {
6042 [MIGRATE_UNMOVABLE] = 'U',
6043 [MIGRATE_MOVABLE] = 'M',
6044 [MIGRATE_RECLAIMABLE] = 'E',
6045 [MIGRATE_HIGHATOMIC] = 'H',
6046#ifdef CONFIG_CMA
6047 [MIGRATE_CMA] = 'C',
6048#endif
6049#ifdef CONFIG_MEMORY_ISOLATION
6050 [MIGRATE_ISOLATE] = 'I',
6051#endif
6052 };
6053 char tmp[MIGRATE_TYPES + 1];
6054 char *p = tmp;
6055 int i;
6056
6057 for (i = 0; i < MIGRATE_TYPES; i++) {
6058 if (type & (1 << i))
6059 *p++ = types[i];
6060 }
6061
6062 *p = '\0';
6063 printk(KERN_CONT "(%s) ", tmp);
6064}
6065
6066static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
6067{
6068 int zone_idx;
6069 for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
6070 if (zone_managed_pages(pgdat->node_zones + zone_idx))
6071 return true;
6072 return false;
6073}
6074
6075/*
6076 * Show free area list (used inside shift_scroll-lock stuff)
6077 * We also calculate the percentage fragmentation. We do this by counting the
6078 * memory on each free list with the exception of the first item on the list.
6079 *
6080 * Bits in @filter:
6081 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
6082 * cpuset.
6083 */
6084void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
6085{
6086 unsigned long free_pcp = 0;
6087 int cpu, nid;
6088 struct zone *zone;
6089 pg_data_t *pgdat;
6090
6091 for_each_populated_zone(zone) {
6092 if (zone_idx(zone) > max_zone_idx)
6093 continue;
6094 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6095 continue;
6096
6097 for_each_online_cpu(cpu)
6098 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
6099 }
6100
6101 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
6102 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
6103 " unevictable:%lu dirty:%lu writeback:%lu\n"
6104 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
6105 " mapped:%lu shmem:%lu pagetables:%lu\n"
6106 " sec_pagetables:%lu bounce:%lu\n"
6107 " kernel_misc_reclaimable:%lu\n"
6108 " free:%lu free_pcp:%lu free_cma:%lu\n",
6109 global_node_page_state(NR_ACTIVE_ANON),
6110 global_node_page_state(NR_INACTIVE_ANON),
6111 global_node_page_state(NR_ISOLATED_ANON),
6112 global_node_page_state(NR_ACTIVE_FILE),
6113 global_node_page_state(NR_INACTIVE_FILE),
6114 global_node_page_state(NR_ISOLATED_FILE),
6115 global_node_page_state(NR_UNEVICTABLE),
6116 global_node_page_state(NR_FILE_DIRTY),
6117 global_node_page_state(NR_WRITEBACK),
6118 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
6119 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
6120 global_node_page_state(NR_FILE_MAPPED),
6121 global_node_page_state(NR_SHMEM),
6122 global_node_page_state(NR_PAGETABLE),
6123 global_node_page_state(NR_SECONDARY_PAGETABLE),
6124 global_zone_page_state(NR_BOUNCE),
6125 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
6126 global_zone_page_state(NR_FREE_PAGES),
6127 free_pcp,
6128 global_zone_page_state(NR_FREE_CMA_PAGES));
6129
6130 for_each_online_pgdat(pgdat) {
6131 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
6132 continue;
6133 if (!node_has_managed_zones(pgdat, max_zone_idx))
6134 continue;
6135
6136 printk("Node %d"
6137 " active_anon:%lukB"
6138 " inactive_anon:%lukB"
6139 " active_file:%lukB"
6140 " inactive_file:%lukB"
6141 " unevictable:%lukB"
6142 " isolated(anon):%lukB"
6143 " isolated(file):%lukB"
6144 " mapped:%lukB"
6145 " dirty:%lukB"
6146 " writeback:%lukB"
6147 " shmem:%lukB"
6148#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6149 " shmem_thp: %lukB"
6150 " shmem_pmdmapped: %lukB"
6151 " anon_thp: %lukB"
6152#endif
6153 " writeback_tmp:%lukB"
6154 " kernel_stack:%lukB"
6155#ifdef CONFIG_SHADOW_CALL_STACK
6156 " shadow_call_stack:%lukB"
6157#endif
6158 " pagetables:%lukB"
6159 " sec_pagetables:%lukB"
6160 " all_unreclaimable? %s"
6161 "\n",
6162 pgdat->node_id,
6163 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
6164 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
6165 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
6166 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
6167 K(node_page_state(pgdat, NR_UNEVICTABLE)),
6168 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
6169 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
6170 K(node_page_state(pgdat, NR_FILE_MAPPED)),
6171 K(node_page_state(pgdat, NR_FILE_DIRTY)),
6172 K(node_page_state(pgdat, NR_WRITEBACK)),
6173 K(node_page_state(pgdat, NR_SHMEM)),
6174#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6175 K(node_page_state(pgdat, NR_SHMEM_THPS)),
6176 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
6177 K(node_page_state(pgdat, NR_ANON_THPS)),
6178#endif
6179 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
6180 node_page_state(pgdat, NR_KERNEL_STACK_KB),
6181#ifdef CONFIG_SHADOW_CALL_STACK
6182 node_page_state(pgdat, NR_KERNEL_SCS_KB),
6183#endif
6184 K(node_page_state(pgdat, NR_PAGETABLE)),
6185 K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
6186 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
6187 "yes" : "no");
6188 }
6189
6190 for_each_populated_zone(zone) {
6191 int i;
6192
6193 if (zone_idx(zone) > max_zone_idx)
6194 continue;
6195 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6196 continue;
6197
6198 free_pcp = 0;
6199 for_each_online_cpu(cpu)
6200 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
6201
6202 show_node(zone);
6203 printk(KERN_CONT
6204 "%s"
6205 " free:%lukB"
6206 " boost:%lukB"
6207 " min:%lukB"
6208 " low:%lukB"
6209 " high:%lukB"
6210			" reserved_highatomic:%lukB"
6211 " active_anon:%lukB"
6212 " inactive_anon:%lukB"
6213 " active_file:%lukB"
6214 " inactive_file:%lukB"
6215 " unevictable:%lukB"
6216 " writepending:%lukB"
6217 " present:%lukB"
6218 " managed:%lukB"
6219 " mlocked:%lukB"
6220 " bounce:%lukB"
6221 " free_pcp:%lukB"
6222 " local_pcp:%ukB"
6223 " free_cma:%lukB"
6224 "\n",
6225 zone->name,
6226 K(zone_page_state(zone, NR_FREE_PAGES)),
6227 K(zone->watermark_boost),
6228 K(min_wmark_pages(zone)),
6229 K(low_wmark_pages(zone)),
6230 K(high_wmark_pages(zone)),
6231 K(zone->nr_reserved_highatomic),
6232 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
6233 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
6234 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
6235 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
6236 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
6237 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
6238 K(zone->present_pages),
6239 K(zone_managed_pages(zone)),
6240 K(zone_page_state(zone, NR_MLOCK)),
6241 K(zone_page_state(zone, NR_BOUNCE)),
6242 K(free_pcp),
6243 K(this_cpu_read(zone->per_cpu_pageset->count)),
6244 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
6245 printk("lowmem_reserve[]:");
6246 for (i = 0; i < MAX_NR_ZONES; i++)
6247 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
6248 printk(KERN_CONT "\n");
6249 }
6250
6251 for_each_populated_zone(zone) {
6252 unsigned int order;
6253 unsigned long nr[MAX_ORDER], flags, total = 0;
6254 unsigned char types[MAX_ORDER];
6255
6256 if (zone_idx(zone) > max_zone_idx)
6257 continue;
6258 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6259 continue;
6260 show_node(zone);
6261 printk(KERN_CONT "%s: ", zone->name);
6262
6263 spin_lock_irqsave(&zone->lock, flags);
6264 for (order = 0; order < MAX_ORDER; order++) {
6265 struct free_area *area = &zone->free_area[order];
6266 int type;
6267
6268 nr[order] = area->nr_free;
6269 total += nr[order] << order;
6270
6271 types[order] = 0;
6272 for (type = 0; type < MIGRATE_TYPES; type++) {
6273 if (!free_area_empty(area, type))
6274 types[order] |= 1 << type;
6275 }
6276 }
6277 spin_unlock_irqrestore(&zone->lock, flags);
6278 for (order = 0; order < MAX_ORDER; order++) {
6279 printk(KERN_CONT "%lu*%lukB ",
6280 nr[order], K(1UL) << order);
6281 if (nr[order])
6282 show_migration_types(types[order]);
6283 }
6284 printk(KERN_CONT "= %lukB\n", K(total));
6285 }
6286
6287 for_each_online_node(nid) {
6288 if (show_mem_node_skip(filter, nid, nodemask))
6289 continue;
6290 hugetlb_show_meminfo_node(nid);
6291 }
6292
6293 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
6294
6295 show_swap_cache_info();
6296}
6297
6298static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
6299{
6300 zoneref->zone = zone;
6301 zoneref->zone_idx = zone_idx(zone);
6302}
6303
6304/*
6305 * Builds allocation fallback zone lists.
6306 *
6307 * Add all populated zones of a node to the zonelist.
6308 */
6309static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
6310{
6311 struct zone *zone;
6312 enum zone_type zone_type = MAX_NR_ZONES;
6313 int nr_zones = 0;
6314
6315 do {
6316 zone_type--;
6317 zone = pgdat->node_zones + zone_type;
6318 if (populated_zone(zone)) {
6319 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
6320 check_highest_zone(zone_type);
6321 }
6322 } while (zone_type);
6323
6324 return nr_zones;
6325}
6326
6327#ifdef CONFIG_NUMA
6328
6329static int __parse_numa_zonelist_order(char *s)
6330{
6331 /*
6332	 * We used to support different zonelist modes, but they turned
6333	 * out not to be useful. Keep the warning in place in case somebody
6334	 * still uses the command line parameter, so that we do not fail
6335	 * silently.
6336 */
6337 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
6338 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
6339 return -EINVAL;
6340 }
6341 return 0;
6342}
6343
6344char numa_zonelist_order[] = "Node";
6345
6346/*
6347 * sysctl handler for numa_zonelist_order
6348 */
6349int numa_zonelist_order_handler(struct ctl_table *table, int write,
6350 void *buffer, size_t *length, loff_t *ppos)
6351{
6352 if (write)
6353 return __parse_numa_zonelist_order(buffer);
6354 return proc_dostring(table, write, buffer, length, ppos);
6355}
6356
6357
6358static int node_load[MAX_NUMNODES];
6359
6360/**
6361 * find_next_best_node - find the next node that should appear in a given node's fallback list
6362 * @node: node whose fallback list we're appending
6363 * @used_node_mask: nodemask_t of already used nodes
6364 *
6365 * We use a number of factors to determine which is the next node that should
6366 * appear on a given node's fallback list. The node should not have appeared
6367 * already in @node's fallback list, and it should be the next closest node
6368 * according to the distance array (which contains arbitrary distance values
6369 * from each node to each node in the system), and should also prefer nodes
6370 * with no CPUs, since presumably they'll have very little allocation pressure
6371 * on them otherwise.
6372 *
6373 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
6374 */
6375int find_next_best_node(int node, nodemask_t *used_node_mask)
6376{
6377 int n, val;
6378 int min_val = INT_MAX;
6379 int best_node = NUMA_NO_NODE;
6380
6381 /* Use the local node if we haven't already */
6382 if (!node_isset(node, *used_node_mask)) {
6383 node_set(node, *used_node_mask);
6384 return node;
6385 }
6386
6387 for_each_node_state(n, N_MEMORY) {
6388
6389 /* Don't want a node to appear more than once */
6390 if (node_isset(n, *used_node_mask))
6391 continue;
6392
6393 /* Use the distance array to find the distance */
6394 val = node_distance(node, n);
6395
6396 /* Penalize nodes under us ("prefer the next node") */
6397 val += (n < node);
6398
6399 /* Give preference to headless and unused nodes */
6400 if (!cpumask_empty(cpumask_of_node(n)))
6401 val += PENALTY_FOR_NODE_WITH_CPUS;
6402
6403 /* Slight preference for less loaded node */
6404 val *= MAX_NUMNODES;
6405 val += node_load[n];
6406
6407 if (val < min_val) {
6408 min_val = val;
6409 best_node = n;
6410 }
6411 }
6412
6413 if (best_node >= 0)
6414 node_set(best_node, *used_node_mask);
6415
6416 return best_node;
6417}
6418
6419
6420/*
6421 * Build zonelists ordered by node and zones within node.
6422 * This results in maximum locality--normal zone overflows into local
6423 * DMA zone, if any--but risks exhausting DMA zone.
6424 */
6425static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
6426 unsigned nr_nodes)
6427{
6428 struct zoneref *zonerefs;
6429 int i;
6430
6431 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6432
6433 for (i = 0; i < nr_nodes; i++) {
6434 int nr_zones;
6435
6436 pg_data_t *node = NODE_DATA(node_order[i]);
6437
6438 nr_zones = build_zonerefs_node(node, zonerefs);
6439 zonerefs += nr_zones;
6440 }
6441 zonerefs->zone = NULL;
6442 zonerefs->zone_idx = 0;
6443}
6444
6445/*
6446 * Build gfp_thisnode zonelists
6447 */
6448static void build_thisnode_zonelists(pg_data_t *pgdat)
6449{
6450 struct zoneref *zonerefs;
6451 int nr_zones;
6452
6453 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
6454 nr_zones = build_zonerefs_node(pgdat, zonerefs);
6455 zonerefs += nr_zones;
6456 zonerefs->zone = NULL;
6457 zonerefs->zone_idx = 0;
6458}
6459
6460/*
6461 * Build zonelists ordered by zone and nodes within zones.
6462 * This results in conserving DMA zone[s] until all Normal memory is
6463 * exhausted, but results in overflowing to remote node while memory
6464 * may still exist in local DMA zone.
6465 */
6466
6467static void build_zonelists(pg_data_t *pgdat)
6468{
6469 static int node_order[MAX_NUMNODES];
6470 int node, nr_nodes = 0;
6471 nodemask_t used_mask = NODE_MASK_NONE;
6472 int local_node, prev_node;
6473
6474 /* NUMA-aware ordering of nodes */
6475 local_node = pgdat->node_id;
6476 prev_node = local_node;
6477
6478 memset(node_order, 0, sizeof(node_order));
6479 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
6480 /*
6481 * We don't want to pressure a particular node.
6482		 * So add a penalty to the first node in the same
6483		 * distance group to make the selection round-robin.
6484 */
6485 if (node_distance(local_node, node) !=
6486 node_distance(local_node, prev_node))
6487 node_load[node] += 1;
6488
6489 node_order[nr_nodes++] = node;
6490 prev_node = node;
6491 }
6492
6493 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
6494 build_thisnode_zonelists(pgdat);
6495 pr_info("Fallback order for Node %d: ", local_node);
6496 for (node = 0; node < nr_nodes; node++)
6497 pr_cont("%d ", node_order[node]);
6498 pr_cont("\n");
6499}
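
/*
 * On a hypothetical two-node machine the pr_info() above would log
 * something like:
 *
 *	Fallback order for Node 0: 0 1
 *	Fallback order for Node 1: 1 0
 */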
6500
6501#ifdef CONFIG_HAVE_MEMORYLESS_NODES
6502/*
6503 * Return node id of node used for "local" allocations.
6504 * I.e., first node id of first zone in arg node's generic zonelist.
6505 * Used for initializing percpu 'numa_mem', which is used primarily
6506 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
6507 */
6508int local_memory_node(int node)
6509{
6510 struct zoneref *z;
6511
6512 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
6513 gfp_zone(GFP_KERNEL),
6514 NULL);
6515 return zone_to_nid(z->zone);
6516}
6517#endif
6518
6519static void setup_min_unmapped_ratio(void);
6520static void setup_min_slab_ratio(void);
6521#else /* CONFIG_NUMA */
6522
6523static void build_zonelists(pg_data_t *pgdat)
6524{
6525 int node, local_node;
6526 struct zoneref *zonerefs;
6527 int nr_zones;
6528
6529 local_node = pgdat->node_id;
6530
6531 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6532 nr_zones = build_zonerefs_node(pgdat, zonerefs);
6533 zonerefs += nr_zones;
6534
6535 /*
6536 * Now we build the zonelist so that it contains the zones
6537 * of all the other nodes.
6538 * We don't want to pressure a particular node, so when
6539 * building the zones for node N, we make sure that the
6540 * zones coming right after the local ones are those from
6541 * node N+1 (modulo N)
6542	 * node N+1 (modulo the number of nodes).
6543 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
6544 if (!node_online(node))
6545 continue;
6546 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6547 zonerefs += nr_zones;
6548 }
6549 for (node = 0; node < local_node; node++) {
6550 if (!node_online(node))
6551 continue;
6552 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6553 zonerefs += nr_zones;
6554 }
6555
6556 zonerefs->zone = NULL;
6557 zonerefs->zone_idx = 0;
6558}
6559
6560#endif /* CONFIG_NUMA */
6561
6562/*
6563 * Boot pageset table. One per cpu which is going to be used for all
6564 * zones and all nodes. The parameters will be set in such a way
6565 * that an item put on a list will immediately be handed over to
6566 * the buddy list. This is safe since pageset manipulation is done
6567 * with interrupts disabled.
6568 *
6569 * The boot_pagesets must be kept even after bootup is complete for
6570 * unused processors and/or zones. They do play a role for bootstrapping
6571 * hotplugged processors.
6572 *
6573 * zoneinfo_show() and maybe other functions do
6574 * not check if the processor is online before following the pageset pointer.
6575 * Other parts of the kernel may not check if the zone is available.
6576 */
6577static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
6578/* These effectively disable the pcplists in the boot pageset completely */
6579#define BOOT_PAGESET_HIGH 0
6580#define BOOT_PAGESET_BATCH 1
6581static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
6582static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
6583static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
6584
6585static void __build_all_zonelists(void *data)
6586{
6587 int nid;
6588 int __maybe_unused cpu;
6589 pg_data_t *self = data;
6590
6591 write_seqlock(&zonelist_update_seq);
6592
6593#ifdef CONFIG_NUMA
6594 memset(node_load, 0, sizeof(node_load));
6595#endif
6596
6597 /*
6598 * This node is hotadded and no memory is yet present. So just
6599 * building zonelists is fine - no need to touch other nodes.
6600 */
6601 if (self && !node_online(self->node_id)) {
6602 build_zonelists(self);
6603 } else {
6604 /*
6605 * All possible nodes have pgdat preallocated
6606 * in free_area_init
6607 */
6608 for_each_node(nid) {
6609 pg_data_t *pgdat = NODE_DATA(nid);
6610
6611 build_zonelists(pgdat);
6612 }
6613
6614#ifdef CONFIG_HAVE_MEMORYLESS_NODES
6615 /*
6616 * We now know the "local memory node" for each node--
6617 * i.e., the node of the first zone in the generic zonelist.
6618 * Set up numa_mem percpu variable for on-line cpus. During
6619 * boot, only the boot cpu should be on-line; we'll init the
6620 * secondary cpus' numa_mem as they come on-line. During
6621 * node/memory hotplug, we'll fixup all on-line cpus.
6622 */
6623 for_each_online_cpu(cpu)
6624 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
6625#endif
6626 }
6627
6628 write_sequnlock(&zonelist_update_seq);
6629}
6630
6631static noinline void __init
6632build_all_zonelists_init(void)
6633{
6634 int cpu;
6635
6636 __build_all_zonelists(NULL);
6637
6638 /*
6639 * Initialize the boot_pagesets that are going to be used
6640 * for bootstrapping processors. The real pagesets for
6641 * each zone will be allocated later when the per cpu
6642 * allocator is available.
6643 *
6644 * boot_pagesets are used also for bootstrapping offline
6645 * cpus if the system is already booted because the pagesets
6646 * are needed to initialize allocators on a specific cpu too.
6647	 * E.g. the percpu allocator needs the page allocator, which
6648 * needs the percpu allocator in order to allocate its pagesets
6649 * (a chicken-egg dilemma).
6650 */
6651 for_each_possible_cpu(cpu)
6652 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
6653
6654 mminit_verify_zonelist();
6655 cpuset_init_current_mems_allowed();
6656}
6657
6658/*
6659 * Rebuilds zonelists at runtime; at boot (system_state == SYSTEM_BOOTING)
 * the __init helper above is used instead.
6660 *
6661 * __ref due to call of __init annotated helper build_all_zonelists_init
6662 * [protected by SYSTEM_BOOTING].
6663 */
6664void __ref build_all_zonelists(pg_data_t *pgdat)
6665{
6666 unsigned long vm_total_pages;
6667
6668 if (system_state == SYSTEM_BOOTING) {
6669 build_all_zonelists_init();
6670 } else {
6671 __build_all_zonelists(pgdat);
6672 /* cpuset refresh routine should be here */
6673 }
6674 /* Get the number of free pages beyond high watermark in all zones. */
6675 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
6676 /*
6677 * Disable grouping by mobility if the number of pages in the
6678 * system is too low to allow the mechanism to work. It would be
6679 * more accurate, but expensive to check per-zone. This check is
6680 * made on memory-hotadd so a system can start with mobility
6681 * disabled and enable it later
6682 */
6683 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
6684 page_group_by_mobility_disabled = 1;
6685 else
6686 page_group_by_mobility_disabled = 0;
6687
6688 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
6689 nr_online_nodes,
6690 page_group_by_mobility_disabled ? "off" : "on",
6691 vm_total_pages);
6692#ifdef CONFIG_NUMA
6693 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
6694#endif
6695}
6696
6697/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6698static bool __meminit
6699overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6700{
6701 static struct memblock_region *r;
6702
6703 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6704 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
6705 for_each_mem_region(r) {
6706 if (*pfn < memblock_region_memory_end_pfn(r))
6707 break;
6708 }
6709 }
6710 if (*pfn >= memblock_region_memory_base_pfn(r) &&
6711 memblock_is_mirror(r)) {
6712 *pfn = memblock_region_memory_end_pfn(r);
6713 return true;
6714 }
6715 }
6716 return false;
6717}
6718
6719/*
6720 * Initially all pages are reserved - free ones are freed
6721 * up by memblock_free_all() once the early boot process is
6722 * done. Non-atomic initialization, single-pass.
6723 *
6724 * All aligned pageblocks are initialized to the specified migratetype
6725 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
6726 * zone stats (e.g., nr_isolate_pageblock) are touched.
6727 */
6728void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
6729 unsigned long start_pfn, unsigned long zone_end_pfn,
6730 enum meminit_context context,
6731 struct vmem_altmap *altmap, int migratetype)
6732{
6733 unsigned long pfn, end_pfn = start_pfn + size;
6734 struct page *page;
6735
6736 if (highest_memmap_pfn < end_pfn - 1)
6737 highest_memmap_pfn = end_pfn - 1;
6738
6739#ifdef CONFIG_ZONE_DEVICE
6740 /*
6741 * Honor reservation requested by the driver for this ZONE_DEVICE
6742 * memory. We limit the total number of pages to initialize to just
6743 * those that might contain the memory mapping. We will defer the
6744 * ZONE_DEVICE page initialization until after we have released
6745 * the hotplug lock.
6746 */
6747 if (zone == ZONE_DEVICE) {
6748 if (!altmap)
6749 return;
6750
6751 if (start_pfn == altmap->base_pfn)
6752 start_pfn += altmap->reserve;
6753 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6754 }
6755#endif
6756
6757 for (pfn = start_pfn; pfn < end_pfn; ) {
6758 /*
6759 * There can be holes in boot-time mem_map[]s handed to this
6760 * function. They do not exist on hotplugged memory.
6761 */
6762 if (context == MEMINIT_EARLY) {
6763 if (overlap_memmap_init(zone, &pfn))
6764 continue;
6765 if (defer_init(nid, pfn, zone_end_pfn))
6766 break;
6767 }
6768
6769 page = pfn_to_page(pfn);
6770 __init_single_page(page, pfn, zone, nid);
6771 if (context == MEMINIT_HOTPLUG)
6772 __SetPageReserved(page);
6773
6774 /*
6775 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6776 * such that unmovable allocations won't be scattered all
6777 * over the place during system boot.
6778 */
6779 if (pageblock_aligned(pfn)) {
6780 set_pageblock_migratetype(page, migratetype);
6781 cond_resched();
6782 }
6783 pfn++;
6784 }
6785}
6786
6787#ifdef CONFIG_ZONE_DEVICE
6788static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
6789 unsigned long zone_idx, int nid,
6790 struct dev_pagemap *pgmap)
6791{
6792
6793 __init_single_page(page, pfn, zone_idx, nid);
6794
6795 /*
6796 * Mark page reserved as it will need to wait for onlining
6797 * phase for it to be fully associated with a zone.
6798 *
6799 * We can use the non-atomic __set_bit operation for setting
6800 * the flag as we are still initializing the pages.
6801 */
6802 __SetPageReserved(page);
6803
6804 /*
6805 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6806 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
6807 * ever freed or placed on a driver-private list.
6808 */
6809 page->pgmap = pgmap;
6810 page->zone_device_data = NULL;
6811
6812 /*
6813 * Mark the block movable so that blocks are reserved for
6814 * movable at startup. This will force kernel allocations
6815 * to reserve their blocks rather than leaking throughout
6816 * the address space during boot when many long-lived
6817 * kernel allocations are made.
6818 *
6819 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
6820 * because this is done early in section_activate()
6821 */
6822 if (pageblock_aligned(pfn)) {
6823 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6824 cond_resched();
6825 }
6826
6827 /*
6828 * ZONE_DEVICE pages are released directly to the driver page allocator
6829 * which will set the page count to 1 when allocating the page.
6830 */
6831 if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
6832 pgmap->type == MEMORY_DEVICE_COHERENT)
6833 set_page_count(page, 0);
6834}
6835
6836/*
6837 * With compound page geometry and when struct pages are stored in ram most
6838 * tail pages are reused. Consequently, the number of unique struct pages to
6839 * initialize is a lot smaller than the total number of struct pages being
6840 * mapped. This is a paired / mild layering violation with explicit knowledge
6841 * of how the sparse_vmemmap internals handle compound pages in the absence
6842 * of an altmap. See vmemmap_populate_compound_pages().
6843 */
6844static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
6845 unsigned long nr_pages)
6846{
6847 return is_power_of_2(sizeof(struct page)) &&
6848 !altmap ? 2 * (PAGE_SIZE / sizeof(struct page)) : nr_pages;
6849}
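
/*
 * E.g. with a 64-byte struct page and 4KiB pages, at most
 * 2 * (4096 / 64) = 128 struct pages per compound mapping are unique and
 * need initialization; the remaining tail pages alias them.
 */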
6850
6851static void __ref memmap_init_compound(struct page *head,
6852 unsigned long head_pfn,
6853 unsigned long zone_idx, int nid,
6854 struct dev_pagemap *pgmap,
6855 unsigned long nr_pages)
6856{
6857 unsigned long pfn, end_pfn = head_pfn + nr_pages;
6858 unsigned int order = pgmap->vmemmap_shift;
6859
6860 __SetPageHead(head);
6861 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
6862 struct page *page = pfn_to_page(pfn);
6863
6864 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
6865 prep_compound_tail(head, pfn - head_pfn);
6866 set_page_count(page, 0);
6867
6868 /*
6869 * The first tail page stores compound_mapcount_ptr() and
6870 * compound_order() and the second tail page stores
6871 * compound_pincount_ptr(). Call prep_compound_head() after
6872		 * the first and second tail pages have been initialized so
6873		 * that the data is not overwritten.
6874 */
6875 if (pfn == head_pfn + 2)
6876 prep_compound_head(head, order);
6877 }
6878}
6879
6880void __ref memmap_init_zone_device(struct zone *zone,
6881 unsigned long start_pfn,
6882 unsigned long nr_pages,
6883 struct dev_pagemap *pgmap)
6884{
6885 unsigned long pfn, end_pfn = start_pfn + nr_pages;
6886 struct pglist_data *pgdat = zone->zone_pgdat;
6887 struct vmem_altmap *altmap = pgmap_altmap(pgmap);
6888 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
6889 unsigned long zone_idx = zone_idx(zone);
6890 unsigned long start = jiffies;
6891 int nid = pgdat->node_id;
6892
6893 if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
6894 return;
6895
6896 /*
6897 * The call to memmap_init should have already taken care
6898 * of the pages reserved for the memmap, so we can just jump to
6899 * the end of that region and start processing the device pages.
6900 */
6901 if (altmap) {
6902 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6903 nr_pages = end_pfn - start_pfn;
6904 }
6905
6906 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
6907 struct page *page = pfn_to_page(pfn);
6908
6909 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
6910
6911 if (pfns_per_compound == 1)
6912 continue;
6913
6914 memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
6915 compound_nr_pages(altmap, pfns_per_compound));
6916 }
6917
6918 pr_info("%s initialised %lu pages in %ums\n", __func__,
6919 nr_pages, jiffies_to_msecs(jiffies - start));
6920}
6921
6922#endif
6923static void __meminit zone_init_free_lists(struct zone *zone)
6924{
6925 unsigned int order, t;
6926 for_each_migratetype_order(order, t) {
6927 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6928 zone->free_area[order].nr_free = 0;
6929 }
6930}
6931
6932/*
6933 * Only struct pages that correspond to ranges defined by memblock.memory
6934 * are zeroed and initialized by going through __init_single_page() during
6935 * memmap_init_zone_range().
6936 *
6937 * But, there could be struct pages that correspond to holes in
6938 * memblock.memory. This can happen because of the following reasons:
6939 * - physical memory bank size is not necessarily an exact multiple of the
6940 * arbitrary section size
6941 * - early reserved memory may not be listed in memblock.memory
6942 * - memory layouts defined with memmap= kernel parameter may not align
6943 * nicely with memmap sections
6944 *
6945 * Explicitly initialize those struct pages so that:
6946 * - PG_Reserved is set
6947 * - zone and node links point to zone and node that span the page if the
6948 * hole is in the middle of a zone
6949 * - zone and node links point to adjacent zone/node if the hole falls on
6950 * the zone boundary; the pages in such holes will be prepended to the
6951 * zone/node above the hole except for the trailing pages in the last
6952 * section that will be appended to the zone/node below.
6953 */
6954static void __init init_unavailable_range(unsigned long spfn,
6955 unsigned long epfn,
6956 int zone, int node)
6957{
6958 unsigned long pfn;
6959 u64 pgcnt = 0;
6960
6961 for (pfn = spfn; pfn < epfn; pfn++) {
6962 if (!pfn_valid(pageblock_start_pfn(pfn))) {
6963 pfn = pageblock_end_pfn(pfn) - 1;
6964 continue;
6965 }
6966 __init_single_page(pfn_to_page(pfn), pfn, zone, node);
6967 __SetPageReserved(pfn_to_page(pfn));
6968 pgcnt++;
6969 }
6970
6971 if (pgcnt)
6972 pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
6973 node, zone_names[zone], pgcnt);
6974}
6975
6976static void __init memmap_init_zone_range(struct zone *zone,
6977 unsigned long start_pfn,
6978 unsigned long end_pfn,
6979 unsigned long *hole_pfn)
6980{
6981 unsigned long zone_start_pfn = zone->zone_start_pfn;
6982 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
6983 int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
6984
6985 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
6986 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
6987
6988 if (start_pfn >= end_pfn)
6989 return;
6990
6991 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
6992 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
6993
6994 if (*hole_pfn < start_pfn)
6995 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
6996
6997 *hole_pfn = end_pfn;
6998}
6999
7000static void __init memmap_init(void)
7001{
7002 unsigned long start_pfn, end_pfn;
7003 unsigned long hole_pfn = 0;
7004 int i, j, zone_id = 0, nid;
7005
7006 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7007 struct pglist_data *node = NODE_DATA(nid);
7008
7009 for (j = 0; j < MAX_NR_ZONES; j++) {
7010 struct zone *zone = node->node_zones + j;
7011
7012 if (!populated_zone(zone))
7013 continue;
7014
7015 memmap_init_zone_range(zone, start_pfn, end_pfn,
7016 &hole_pfn);
7017 zone_id = j;
7018 }
7019 }
7020
7021#ifdef CONFIG_SPARSEMEM
7022 /*
7023 * Initialize the memory map for hole in the range [memory_end,
7024 * section_end].
7025 * Append the pages in this hole to the highest zone in the last
7026 * node.
7027 * The call to init_unavailable_range() is outside the ifdef to
7028	 * silence the compiler warning about zone_id set but not used;
7029 * for FLATMEM it is a nop anyway
7030 */
7031 end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
7032 if (hole_pfn < end_pfn)
7033#endif
7034 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
7035}
7036
7037void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
7038 phys_addr_t min_addr, int nid, bool exact_nid)
7039{
7040 void *ptr;
7041
7042 if (exact_nid)
7043 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
7044 MEMBLOCK_ALLOC_ACCESSIBLE,
7045 nid);
7046 else
7047 ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
7048 MEMBLOCK_ALLOC_ACCESSIBLE,
7049 nid);
7050
7051 if (ptr && size > 0)
7052 page_init_poison(ptr, size);
7053
7054 return ptr;
7055}
7056
7057static int zone_batchsize(struct zone *zone)
7058{
7059#ifdef CONFIG_MMU
7060 int batch;
7061
7062 /*
7063 * The number of pages to batch allocate is either ~0.1%
7064 * of the zone or 1MB, whichever is smaller. The batch
7065 * size is striking a balance between allocation latency
7066 * and zone lock contention.
7067 */
7068 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
7069 batch /= 4; /* We effectively *= 4 below */
7070 if (batch < 1)
7071 batch = 1;
7072
7073 /*
7074 * Clamp the batch to a 2^n - 1 value. Having a power
7075 * of 2 value was found to be more likely to have
7076 * suboptimal cache aliasing properties in some cases.
7077 *
7078 * For example if 2 tasks are alternately allocating
7079 * batches of pages, one task can end up with a lot
7080 * of pages of one half of the possible page colors
7081 * and the other with pages of the other colors.
7082 */
7083 batch = rounddown_pow_of_two(batch + batch/2) - 1;
7084
7085 return batch;
7086
7087#else
7088 /* The deferral and batching of frees should be suppressed under NOMMU
7089 * conditions.
7090 *
7091 * The problem is that NOMMU needs to be able to allocate large chunks
7092 * of contiguous memory as there's no hardware page translation to
7093 * assemble apparent contiguous memory from discontiguous pages.
7094 *
7095 * Queueing large contiguous runs of pages for batching, however,
7096 * causes the pages to actually be freed in smaller chunks. As there
7097 * can be a significant delay between the individual batches being
7098 * recycled, this leads to the once large chunks of space being
7099 * fragmented and becoming unavailable for high-order allocations.
7100 */
7101 return 0;
7102#endif
7103}
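
/*
 * Worked example (editorial): a zone with 4GiB of 4KiB pages manages
 * 1048576 pages; min(1048576 >> 10, 256) = 256, then 256 / 4 = 64, and
 * rounddown_pow_of_two(64 + 32) - 1 gives a batch of 63.
 */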
7104
7105static int zone_highsize(struct zone *zone, int batch, int cpu_online)
7106{
7107#ifdef CONFIG_MMU
7108 int high;
7109 int nr_split_cpus;
7110 unsigned long total_pages;
7111
7112 if (!percpu_pagelist_high_fraction) {
7113 /*
7114 * By default, the high value of the pcp is based on the zone
7115 * low watermark so that if they are full then background
7116 * reclaim will not be started prematurely.
7117 */
7118 total_pages = low_wmark_pages(zone);
7119 } else {
7120 /*
7121 * If percpu_pagelist_high_fraction is configured, the high
7122 * value is based on a fraction of the managed pages in the
7123 * zone.
7124 */
7125 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
7126 }
7127
7128 /*
7129 * Split the high value across all online CPUs local to the zone. Note
7130 * that early in boot that CPUs may not be online yet and that during
7131 * CPU hotplug that the cpumask is not yet updated when a CPU is being
7132 * onlined. For memory nodes that have no CPUs, split pcp->high across
7133 * all online CPUs to mitigate the risk that reclaim is triggered
7134 * prematurely due to pages stored on pcp lists.
7135 */
7136 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
7137 if (!nr_split_cpus)
7138 nr_split_cpus = num_online_cpus();
7139 high = total_pages / nr_split_cpus;
7140
7141 /*
7142 * Ensure high is at least batch*4. The multiple is based on the
7143 * historical relationship between high and batch.
7144 */
7145 high = max(high, batch << 2);
7146
7147 return high;
7148#else
7149 return 0;
7150#endif
7151}
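
/*
 * Worked example (editorial, with an assumed watermark): if
 * percpu_pagelist_high_fraction is unset and a zone's low watermark is
 * 12800 pages split across 16 local CPUs, each pcp gets
 * high = max(12800 / 16, batch * 4) = 800 pages.
 */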
7152
7153/*
7154 * pcp->high and pcp->batch values are related and generally batch is lower
7155 * than high. They are also related to pcp->count such that count is lower
7156 * than high, and as soon as it reaches high, the pcplist is flushed.
7157 *
7158 * However, guaranteeing these relations at all times would require e.g. write
7159 * barriers here but also careful usage of read barriers at the read side, and
7160 * thus be error-prone and bad for performance. So the update only prevents
7161 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
7162 * can cope with those fields changing asynchronously, and fully trust only the
7163 * pcp->count field on the local CPU with interrupts disabled.
7164 *
7165 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
7166 * outside of boot time (or some other assurance that no concurrent updaters
7167 * exist).
7168 */
7169static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
7170 unsigned long batch)
7171{
7172 WRITE_ONCE(pcp->batch, batch);
7173 WRITE_ONCE(pcp->high, high);
7174}
7175
7176static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
7177{
7178 int pindex;
7179
7180 memset(pcp, 0, sizeof(*pcp));
7181 memset(pzstats, 0, sizeof(*pzstats));
7182
7183 spin_lock_init(&pcp->lock);
7184 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
7185 INIT_LIST_HEAD(&pcp->lists[pindex]);
7186
7187 /*
7188 * Set batch and high values safe for a boot pageset. A true percpu
7189 * pageset's initialization will update them subsequently. Here we don't
7190	 * need to be as careful as pageset_update(), since nobody can access
7191	 * the pageset yet.
7192 */
7193 pcp->high = BOOT_PAGESET_HIGH;
7194 pcp->batch = BOOT_PAGESET_BATCH;
7195 pcp->free_factor = 0;
7196}
7197
7198static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
7199 unsigned long batch)
7200{
7201 struct per_cpu_pages *pcp;
7202 int cpu;
7203
7204 for_each_possible_cpu(cpu) {
7205 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
7206 pageset_update(pcp, high, batch);
7207 }
7208}
7209
7210/*
7211 * Calculate and set new high and batch values for all per-cpu pagesets of a
7212 * zone based on the zone's size.
7213 */
7214static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
7215{
7216 int new_high, new_batch;
7217
7218 new_batch = max(1, zone_batchsize(zone));
7219 new_high = zone_highsize(zone, new_batch, cpu_online);
7220
7221 if (zone->pageset_high == new_high &&
7222 zone->pageset_batch == new_batch)
7223 return;
7224
7225 zone->pageset_high = new_high;
7226 zone->pageset_batch = new_batch;
7227
7228 __zone_set_pageset_high_and_batch(zone, new_high, new_batch);
7229}
7230
7231void __meminit setup_zone_pageset(struct zone *zone)
7232{
7233 int cpu;
7234
7235 /* Size may be 0 on !SMP && !NUMA */
7236 if (sizeof(struct per_cpu_zonestat) > 0)
7237 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
7238
7239 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
7240 for_each_possible_cpu(cpu) {
7241 struct per_cpu_pages *pcp;
7242 struct per_cpu_zonestat *pzstats;
7243
7244 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
7245 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
7246 per_cpu_pages_init(pcp, pzstats);
7247 }
7248
7249 zone_set_pageset_high_and_batch(zone, 0);
7250}
7251
7252/*
7253 * The zone indicated has a new number of managed_pages; batch sizes and percpu
7254 * page high values need to be recalculated.
7255 */
7256static void zone_pcp_update(struct zone *zone, int cpu_online)
7257{
7258 mutex_lock(&pcp_batch_high_lock);
7259 zone_set_pageset_high_and_batch(zone, cpu_online);
7260 mutex_unlock(&pcp_batch_high_lock);
7261}
7262
7263/*
7264 * Allocate per cpu pagesets and initialize them.
7265 * Before this call only boot pagesets were available.
7266 */
7267void __init setup_per_cpu_pageset(void)
7268{
7269 struct pglist_data *pgdat;
7270 struct zone *zone;
7271 int __maybe_unused cpu;
7272
7273 for_each_populated_zone(zone)
7274 setup_zone_pageset(zone);
7275
7276#ifdef CONFIG_NUMA
7277 /*
7278 * Unpopulated zones continue using the boot pagesets.
7279 * The numa stats for these pagesets need to be reset.
7280 * Otherwise, they will end up skewing the stats of
7281 * the nodes these zones are associated with.
7282 */
7283 for_each_possible_cpu(cpu) {
7284 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
7285 memset(pzstats->vm_numa_event, 0,
7286 sizeof(pzstats->vm_numa_event));
7287 }
7288#endif
7289
7290 for_each_online_pgdat(pgdat)
7291 pgdat->per_cpu_nodestats =
7292 alloc_percpu(struct per_cpu_nodestat);
7293}
7294
7295static __meminit void zone_pcp_init(struct zone *zone)
7296{
7297 /*
7298 * per cpu subsystem is not up at this point. The following code
7299 * relies on the ability of the linker to provide the
7300 * offset of a (static) per cpu variable into the per cpu area.
7301 */
7302 zone->per_cpu_pageset = &boot_pageset;
7303 zone->per_cpu_zonestats = &boot_zonestats;
7304 zone->pageset_high = BOOT_PAGESET_HIGH;
7305 zone->pageset_batch = BOOT_PAGESET_BATCH;
7306
7307 if (populated_zone(zone))
7308 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
7309 zone->present_pages, zone_batchsize(zone));
7310}
7311
7312void __meminit init_currently_empty_zone(struct zone *zone,
7313 unsigned long zone_start_pfn,
7314 unsigned long size)
7315{
7316 struct pglist_data *pgdat = zone->zone_pgdat;
7317 int zone_idx = zone_idx(zone) + 1;
7318
7319 if (zone_idx > pgdat->nr_zones)
7320 pgdat->nr_zones = zone_idx;
7321
7322 zone->zone_start_pfn = zone_start_pfn;
7323
7324 mminit_dprintk(MMINIT_TRACE, "memmap_init",
7325 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
7326 pgdat->node_id,
7327 (unsigned long)zone_idx(zone),
7328 zone_start_pfn, (zone_start_pfn + size));
7329
7330 zone_init_free_lists(zone);
7331 zone->initialized = 1;
7332}
7333
7334/**
7335 * get_pfn_range_for_nid - Return the start and end page frames for a node
7336 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
7337 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
7338 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
7339 *
7340 * It returns the start and end page frame of a node based on information
7341 * provided by memblock_set_node(). If called for a node
7342 * with no available memory, a warning is printed and the start and end
7343 * PFNs will be 0.
7344 */
7345void __init get_pfn_range_for_nid(unsigned int nid,
7346 unsigned long *start_pfn, unsigned long *end_pfn)
7347{
7348 unsigned long this_start_pfn, this_end_pfn;
7349 int i;
7350
7351 *start_pfn = -1UL;
7352 *end_pfn = 0;
7353
7354 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
7355 *start_pfn = min(*start_pfn, this_start_pfn);
7356 *end_pfn = max(*end_pfn, this_end_pfn);
7357 }
7358
7359 if (*start_pfn == -1UL)
7360 *start_pfn = 0;
7361}
7362
7363/*
7364 * This finds a zone that can be used for ZONE_MOVABLE pages. The
7365 * assumption is made that zones within a node are ordered by monotonically
7366 * increasing memory addresses, so that the "highest" populated zone is used.
7367 */
7368static void __init find_usable_zone_for_movable(void)
7369{
7370 int zone_index;
7371 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
7372 if (zone_index == ZONE_MOVABLE)
7373 continue;
7374
7375 if (arch_zone_highest_possible_pfn[zone_index] >
7376 arch_zone_lowest_possible_pfn[zone_index])
7377 break;
7378 }
7379
7380 VM_BUG_ON(zone_index == -1);
7381 movable_zone = zone_index;
7382}
7383
7384/*
7385 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
7386 * because it is sized independently of architecture. Unlike the other zones,
7387 * the starting point for ZONE_MOVABLE is not fixed. It may be different
7388 * in each node depending on the size of each node and how evenly kernelcore
7389 * is distributed. This helper function adjusts the zone ranges
7390 * provided by the architecture for a given node by using the end of the
7391 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
7392 * zones within a node are ordered by monotonically increasing memory addresses.
7393 */
7394static void __init adjust_zone_range_for_zone_movable(int nid,
7395 unsigned long zone_type,
7396 unsigned long node_start_pfn,
7397 unsigned long node_end_pfn,
7398 unsigned long *zone_start_pfn,
7399 unsigned long *zone_end_pfn)
7400{
7401 /* Only adjust if ZONE_MOVABLE is on this node */
7402 if (zone_movable_pfn[nid]) {
7403 /* Size ZONE_MOVABLE */
7404 if (zone_type == ZONE_MOVABLE) {
7405 *zone_start_pfn = zone_movable_pfn[nid];
7406 *zone_end_pfn = min(node_end_pfn,
7407 arch_zone_highest_possible_pfn[movable_zone]);
7408
7409 /* Adjust for ZONE_MOVABLE starting within this range */
7410 } else if (!mirrored_kernelcore &&
7411 *zone_start_pfn < zone_movable_pfn[nid] &&
7412 *zone_end_pfn > zone_movable_pfn[nid]) {
7413 *zone_end_pfn = zone_movable_pfn[nid];
7414
7415 /* Check if this whole range is within ZONE_MOVABLE */
7416 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
7417 *zone_start_pfn = *zone_end_pfn;
7418 }
7419}
7420
7421/*
7422 * Return the number of pages a zone spans in a node, including holes:
7423 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
7424 */
7425static unsigned long __init zone_spanned_pages_in_node(int nid,
7426 unsigned long zone_type,
7427 unsigned long node_start_pfn,
7428 unsigned long node_end_pfn,
7429 unsigned long *zone_start_pfn,
7430 unsigned long *zone_end_pfn)
7431{
7432 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7433 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7434	/* When hot-adding a new node from cpu_up(), the node should be empty */
7435 if (!node_start_pfn && !node_end_pfn)
7436 return 0;
7437
7438 /* Get the start and end of the zone */
7439 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7440 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7441 adjust_zone_range_for_zone_movable(nid, zone_type,
7442 node_start_pfn, node_end_pfn,
7443 zone_start_pfn, zone_end_pfn);
7444
7445 /* Check that this node has pages within the zone's required range */
7446 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
7447 return 0;
7448
7449 /* Move the zone boundaries inside the node if necessary */
7450 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
7451 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
7452
7453 /* Return the spanned pages */
7454 return *zone_end_pfn - *zone_start_pfn;
7455}
7456
7457/*
7458 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
7459 * then all holes in the requested range will be accounted for.
7460 */
7461unsigned long __init __absent_pages_in_range(int nid,
7462 unsigned long range_start_pfn,
7463 unsigned long range_end_pfn)
7464{
7465 unsigned long nr_absent = range_end_pfn - range_start_pfn;
7466 unsigned long start_pfn, end_pfn;
7467 int i;
7468
7469 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7470 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
7471 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
7472 nr_absent -= end_pfn - start_pfn;
7473 }
7474 return nr_absent;
7475}
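/*
 * Illustration with made-up ranges: for pfns [0, 1000) over memblock ranges
 * [0, 400) and [600, 1000), the loop subtracts 400 + 400 present pages from
 * the initial 1000, leaving nr_absent = 200 for the hole [400, 600).
 */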
7476
7477/**
7478 * absent_pages_in_range - Return number of page frames in holes within a range
7479 * @start_pfn: The start PFN to start searching for holes
7480 * @end_pfn: The end PFN to stop searching for holes
7481 *
7482 * Return: the number of page frames in memory holes within a range.
7483 */
7484unsigned long __init absent_pages_in_range(unsigned long start_pfn,
7485 unsigned long end_pfn)
7486{
7487 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
7488}
7489
7490/* Return the number of page frames in holes in a zone on a node */
7491static unsigned long __init zone_absent_pages_in_node(int nid,
7492 unsigned long zone_type,
7493 unsigned long node_start_pfn,
7494 unsigned long node_end_pfn)
7495{
7496 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7497 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7498 unsigned long zone_start_pfn, zone_end_pfn;
7499 unsigned long nr_absent;
7500
7501	/* When hot-adding a new node from cpu_up(), the node should be empty */
7502 if (!node_start_pfn && !node_end_pfn)
7503 return 0;
7504
7505 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7506 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7507
7508 adjust_zone_range_for_zone_movable(nid, zone_type,
7509 node_start_pfn, node_end_pfn,
7510 &zone_start_pfn, &zone_end_pfn);
7511 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
7512
7513 /*
7514 * ZONE_MOVABLE handling.
7515 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
7516 * and vice versa.
7517 */
7518 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
7519 unsigned long start_pfn, end_pfn;
7520 struct memblock_region *r;
7521
7522 for_each_mem_region(r) {
7523 start_pfn = clamp(memblock_region_memory_base_pfn(r),
7524 zone_start_pfn, zone_end_pfn);
7525 end_pfn = clamp(memblock_region_memory_end_pfn(r),
7526 zone_start_pfn, zone_end_pfn);
7527
7528 if (zone_type == ZONE_MOVABLE &&
7529 memblock_is_mirror(r))
7530 nr_absent += end_pfn - start_pfn;
7531
7532 if (zone_type == ZONE_NORMAL &&
7533 !memblock_is_mirror(r))
7534 nr_absent += end_pfn - start_pfn;
7535 }
7536 }
7537
7538 return nr_absent;
7539}
7540
7541static void __init calculate_node_totalpages(struct pglist_data *pgdat,
7542 unsigned long node_start_pfn,
7543 unsigned long node_end_pfn)
7544{
7545 unsigned long realtotalpages = 0, totalpages = 0;
7546 enum zone_type i;
7547
7548 for (i = 0; i < MAX_NR_ZONES; i++) {
7549 struct zone *zone = pgdat->node_zones + i;
7550 unsigned long zone_start_pfn, zone_end_pfn;
7551 unsigned long spanned, absent;
7552 unsigned long size, real_size;
7553
7554 spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
7555 node_start_pfn,
7556 node_end_pfn,
7557 &zone_start_pfn,
7558 &zone_end_pfn);
7559 absent = zone_absent_pages_in_node(pgdat->node_id, i,
7560 node_start_pfn,
7561 node_end_pfn);
7562
7563 size = spanned;
7564 real_size = size - absent;
7565
7566 if (size)
7567 zone->zone_start_pfn = zone_start_pfn;
7568 else
7569 zone->zone_start_pfn = 0;
7570 zone->spanned_pages = size;
7571 zone->present_pages = real_size;
7572#if defined(CONFIG_MEMORY_HOTPLUG)
7573 zone->present_early_pages = real_size;
7574#endif
7575
7576 totalpages += size;
7577 realtotalpages += real_size;
7578 }
7579
7580 pgdat->node_spanned_pages = totalpages;
7581 pgdat->node_present_pages = realtotalpages;
7582 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
7583}
7584
7585#ifndef CONFIG_SPARSEMEM
7586/*
7587 * Calculate the size of the zone->blockflags rounded to an unsigned long.
7588 * Start by making sure zonesize is a multiple of pageblock_order by rounding
7589 * up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock, round the
7590 * result up to the nearest unsigned long in bits, and return it in
7591 * bytes.
7592 */
7593static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
7594{
7595 unsigned long usemapsize;
7596
7597 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
7598 usemapsize = roundup(zonesize, pageblock_nr_pages);
7599 usemapsize = usemapsize >> pageblock_order;
7600 usemapsize *= NR_PAGEBLOCK_BITS;
7601 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
7602
7603 return usemapsize / 8;
7604}
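/*
 * Worked example with assumed parameters (pageblock_order = 9, i.e. 512
 * pages per pageblock, and NR_PAGEBLOCK_BITS = 4): a zone of 1048576 pages
 * has 2048 pageblocks, needing 8192 bits, i.e. 1024 bytes of blockflags.
 */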
7605
7606static void __ref setup_usemap(struct zone *zone)
7607{
7608 unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
7609 zone->spanned_pages);
7610 zone->pageblock_flags = NULL;
7611 if (usemapsize) {
7612 zone->pageblock_flags =
7613 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
7614 zone_to_nid(zone));
7615 if (!zone->pageblock_flags)
7616 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
7617 usemapsize, zone->name, zone_to_nid(zone));
7618 }
7619}
7620#else
7621static inline void setup_usemap(struct zone *zone) {}
7622#endif /* CONFIG_SPARSEMEM */
7623
7624#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
7625
7626/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
7627void __init set_pageblock_order(void)
7628{
7629 unsigned int order = MAX_ORDER - 1;
7630
7631 /* Check that pageblock_nr_pages has not already been setup */
7632 if (pageblock_order)
7633 return;
7634
7635 /* Don't let pageblocks exceed the maximum allocation granularity. */
7636 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
7637 order = HUGETLB_PAGE_ORDER;
7638
7639 /*
7640 * Assume the largest contiguous order of interest is a huge page.
7641 * This value may be variable depending on boot parameters on IA64 and
7642 * powerpc.
7643 */
7644 pageblock_order = order;
7645}
7646#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7647
7648/*
7649 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
7650 * is unused as pageblock_order is set at compile-time. See
7651 * include/linux/pageblock-flags.h for the values of pageblock_order based on
7652 * the kernel config
7653 */
7654void __init set_pageblock_order(void)
7655{
7656}
7657
7658#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7659
7660static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
7661 unsigned long present_pages)
7662{
7663 unsigned long pages = spanned_pages;
7664
7665 /*
7666 * Provide a more accurate estimation if there are holes within
7667 * the zone and SPARSEMEM is in use. If there are holes within the
7668 * zone, each populated memory region may cost us one or two extra
7669 * memmap pages due to alignment because memmap pages for each
7670	 * populated region may not be naturally aligned on a page boundary.
7671 * So the (present_pages >> 4) heuristic is a tradeoff for that.
7672 */
7673 if (spanned_pages > present_pages + (present_pages >> 4) &&
7674 IS_ENABLED(CONFIG_SPARSEMEM))
7675 pages = present_pages;
7676
7677 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
7678}
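/*
 * Rough illustration (assuming 4KiB pages and a 64-byte struct page): a zone
 * spanning 262144 pages needs 262144 * 64 bytes = 16MiB of memmap, so this
 * returns 4096 pages.
 */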
7679
7680#ifdef CONFIG_TRANSPARENT_HUGEPAGE
7681static void pgdat_init_split_queue(struct pglist_data *pgdat)
7682{
7683 struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
7684
7685 spin_lock_init(&ds_queue->split_queue_lock);
7686 INIT_LIST_HEAD(&ds_queue->split_queue);
7687 ds_queue->split_queue_len = 0;
7688}
7689#else
7690static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
7691#endif
7692
7693#ifdef CONFIG_COMPACTION
7694static void pgdat_init_kcompactd(struct pglist_data *pgdat)
7695{
7696 init_waitqueue_head(&pgdat->kcompactd_wait);
7697}
7698#else
7699static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
7700#endif
7701
7702static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
7703{
7704 int i;
7705
7706 pgdat_resize_init(pgdat);
7707 pgdat_kswapd_lock_init(pgdat);
7708
7709 pgdat_init_split_queue(pgdat);
7710 pgdat_init_kcompactd(pgdat);
7711
7712 init_waitqueue_head(&pgdat->kswapd_wait);
7713 init_waitqueue_head(&pgdat->pfmemalloc_wait);
7714
7715 for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
7716 init_waitqueue_head(&pgdat->reclaim_wait[i]);
7717
7718 pgdat_page_ext_init(pgdat);
7719 lruvec_init(&pgdat->__lruvec);
7720}
7721
7722static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
7723 unsigned long remaining_pages)
7724{
7725 atomic_long_set(&zone->managed_pages, remaining_pages);
7726 zone_set_nid(zone, nid);
7727 zone->name = zone_names[idx];
7728 zone->zone_pgdat = NODE_DATA(nid);
7729 spin_lock_init(&zone->lock);
7730 zone_seqlock_init(zone);
7731 zone_pcp_init(zone);
7732}
7733
7734/*
7735 * Set up the zone data structures
7736 * - init pgdat internals
7737 * - init all zones belonging to this node
7738 *
7739 * NOTE: this function is only called during memory hotplug
7740 */
7741#ifdef CONFIG_MEMORY_HOTPLUG
7742void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
7743{
7744 int nid = pgdat->node_id;
7745 enum zone_type z;
7746 int cpu;
7747
7748 pgdat_init_internals(pgdat);
7749
7750 if (pgdat->per_cpu_nodestats == &boot_nodestats)
7751 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
7752
7753 /*
7754 * Reset the nr_zones, order and highest_zoneidx before reuse.
7755 * Note that kswapd will init kswapd_highest_zoneidx properly
7756 * when it starts in the near future.
7757 */
7758 pgdat->nr_zones = 0;
7759 pgdat->kswapd_order = 0;
7760 pgdat->kswapd_highest_zoneidx = 0;
7761 pgdat->node_start_pfn = 0;
7762 for_each_online_cpu(cpu) {
7763 struct per_cpu_nodestat *p;
7764
7765 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
7766 memset(p, 0, sizeof(*p));
7767 }
7768
7769 for (z = 0; z < MAX_NR_ZONES; z++)
7770 zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
7771}
7772#endif
7773
7774/*
7775 * Set up the zone data structures:
7776 * - mark all pages reserved
7777 * - mark all memory queues empty
7778 * - clear the memory bitmaps
7779 *
7780 * NOTE: pgdat should get zeroed by caller.
7781 * NOTE: this function is only called during early init.
7782 */
7783static void __init free_area_init_core(struct pglist_data *pgdat)
7784{
7785 enum zone_type j;
7786 int nid = pgdat->node_id;
7787
7788 pgdat_init_internals(pgdat);
7789 pgdat->per_cpu_nodestats = &boot_nodestats;
7790
7791 for (j = 0; j < MAX_NR_ZONES; j++) {
7792 struct zone *zone = pgdat->node_zones + j;
7793 unsigned long size, freesize, memmap_pages;
7794
7795 size = zone->spanned_pages;
7796 freesize = zone->present_pages;
7797
7798 /*
7799 * Adjust freesize so that it accounts for how much memory
7800 * is used by this zone for memmap. This affects the watermark
7801 * and per-cpu initialisations
7802 */
7803 memmap_pages = calc_memmap_size(size, freesize);
7804 if (!is_highmem_idx(j)) {
7805 if (freesize >= memmap_pages) {
7806 freesize -= memmap_pages;
7807 if (memmap_pages)
7808 pr_debug(" %s zone: %lu pages used for memmap\n",
7809 zone_names[j], memmap_pages);
7810 } else
7811 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n",
7812 zone_names[j], memmap_pages, freesize);
7813 }
7814
7815 /* Account for reserved pages */
7816 if (j == 0 && freesize > dma_reserve) {
7817 freesize -= dma_reserve;
7818 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
7819 }
7820
7821 if (!is_highmem_idx(j))
7822 nr_kernel_pages += freesize;
7823 /* Charge for highmem memmap if there are enough kernel pages */
7824 else if (nr_kernel_pages > memmap_pages * 2)
7825 nr_kernel_pages -= memmap_pages;
7826 nr_all_pages += freesize;
7827
7828 /*
7829 * Set an approximate value for lowmem here, it will be adjusted
7830 * when the bootmem allocator frees pages into the buddy system.
7831 * And all highmem pages will be managed by the buddy system.
7832 */
7833 zone_init_internals(zone, j, nid, freesize);
7834
7835 if (!size)
7836 continue;
7837
7838 set_pageblock_order();
7839 setup_usemap(zone);
7840 init_currently_empty_zone(zone, zone->zone_start_pfn, size);
7841 }
7842}
7843
7844#ifdef CONFIG_FLATMEM
7845static void __init alloc_node_mem_map(struct pglist_data *pgdat)
7846{
7847 unsigned long __maybe_unused start = 0;
7848 unsigned long __maybe_unused offset = 0;
7849
7850 /* Skip empty nodes */
7851 if (!pgdat->node_spanned_pages)
7852 return;
7853
7854 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
7855 offset = pgdat->node_start_pfn - start;
7856 /* ia64 gets its own node_mem_map, before this, without bootmem */
7857 if (!pgdat->node_mem_map) {
7858 unsigned long size, end;
7859 struct page *map;
7860
7861 /*
7862 * The zone's endpoints aren't required to be MAX_ORDER
7863		 * aligned, but the node_mem_map endpoints must be, in order
7864 * for the buddy allocator to function correctly.
7865 */
7866 end = pgdat_end_pfn(pgdat);
7867 end = ALIGN(end, MAX_ORDER_NR_PAGES);
7868 size = (end - start) * sizeof(struct page);
7869 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
7870 pgdat->node_id, false);
7871 if (!map)
7872 panic("Failed to allocate %ld bytes for node %d memory map\n",
7873 size, pgdat->node_id);
7874 pgdat->node_mem_map = map + offset;
7875 }
7876 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7877 __func__, pgdat->node_id, (unsigned long)pgdat,
7878 (unsigned long)pgdat->node_mem_map);
7879#ifndef CONFIG_NUMA
7880 /*
7881 * With no DISCONTIG, the global mem_map is just set as node 0's
7882 */
7883 if (pgdat == NODE_DATA(0)) {
7884 mem_map = NODE_DATA(0)->node_mem_map;
7885 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
7886 mem_map -= offset;
7887 }
7888#endif
7889}
7890#else
7891static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
7892#endif /* CONFIG_FLATMEM */
7893
7894#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7895static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7896{
7897 pgdat->first_deferred_pfn = ULONG_MAX;
7898}
7899#else
7900static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7901#endif
7902
7903static void __init free_area_init_node(int nid)
7904{
7905 pg_data_t *pgdat = NODE_DATA(nid);
7906 unsigned long start_pfn = 0;
7907 unsigned long end_pfn = 0;
7908
7909 /* pg_data_t should be reset to zero when it's allocated */
7910 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
7911
7912 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7913
7914 pgdat->node_id = nid;
7915 pgdat->node_start_pfn = start_pfn;
7916 pgdat->per_cpu_nodestats = NULL;
7917
7918 if (start_pfn != end_pfn) {
7919 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
7920 (u64)start_pfn << PAGE_SHIFT,
7921 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
7922 } else {
7923 pr_info("Initmem setup node %d as memoryless\n", nid);
7924 }
7925
7926 calculate_node_totalpages(pgdat, start_pfn, end_pfn);
7927
7928 alloc_node_mem_map(pgdat);
7929 pgdat_set_deferred_range(pgdat);
7930
7931 free_area_init_core(pgdat);
7932}
7933
7934static void __init free_area_init_memoryless_node(int nid)
7935{
7936 free_area_init_node(nid);
7937}
7938
7939#if MAX_NUMNODES > 1
7940/*
7941 * Figure out the number of possible node ids.
7942 */
7943void __init setup_nr_node_ids(void)
7944{
7945 unsigned int highest;
7946
7947 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
7948 nr_node_ids = highest + 1;
7949}
7950#endif
7951
7952/**
7953 * node_map_pfn_alignment - determine the maximum internode alignment
7954 *
7955 * This function should be called after node map is populated and sorted.
7956 * It calculates the maximum power of two alignment which can distinguish
7957 * all the nodes.
7958 *
7959 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7960 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
7961 * nodes are shifted by 256MiB, the return value indicates 256MiB. Note that
7962 * if only the last node is shifted, 1GiB is enough and this function will indicate so.
7963 *
7964 * This is used to test whether pfn -> nid mapping of the chosen memory
7965 * model has fine enough granularity to avoid incorrect mapping for the
7966 * populated node map.
7967 *
7968 * Return: the determined alignment in pfn's. 0 if there is no alignment
7969 * requirement (single node).
7970 */
7971unsigned long __init node_map_pfn_alignment(void)
7972{
7973 unsigned long accl_mask = 0, last_end = 0;
7974 unsigned long start, end, mask;
7975 int last_nid = NUMA_NO_NODE;
7976 int i, nid;
7977
7978 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7979 if (!start || last_nid < 0 || last_nid == nid) {
7980 last_nid = nid;
7981 last_end = end;
7982 continue;
7983 }
7984
7985 /*
7986 * Start with a mask granular enough to pin-point to the
7987 * start pfn and tick off bits one-by-one until it becomes
7988 * too coarse to separate the current node from the last.
7989 */
7990 mask = ~((1 << __ffs(start)) - 1);
7991 while (mask && last_end <= (start & (mask << 1)))
7992 mask <<= 1;
7993
7994 /* accumulate all internode masks */
7995 accl_mask |= mask;
7996 }
7997
7998 /* convert mask to number of pages */
7999 return ~accl_mask + 1;
8000}
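/*
 * Illustration with assumed ranges: if node 0 covers pfns [0, 0x10000) and
 * node 1 starts at pfn 0x10000, then __ffs(0x10000) = 16, the mask keeps the
 * bits from bit 16 upwards, the widening loop stops immediately, and the
 * function returns an alignment of 0x10000 pfns.
 */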
8001
8002/*
8003 * early_calculate_totalpages()
8004 * Sum pages in active regions for movable zone.
8005 * Populate N_MEMORY for calculating usable_nodes.
8006 */
8007static unsigned long __init early_calculate_totalpages(void)
8008{
8009 unsigned long totalpages = 0;
8010 unsigned long start_pfn, end_pfn;
8011 int i, nid;
8012
8013 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
8014 unsigned long pages = end_pfn - start_pfn;
8015
8016 totalpages += pages;
8017 if (pages)
8018 node_set_state(nid, N_MEMORY);
8019 }
8020 return totalpages;
8021}
8022
8023/*
8024 * Find the PFN at which the Movable zone begins in each node. Kernel memory
8025 * is spread evenly between nodes as long as the nodes have enough
8026 * memory. When they don't, some nodes will have more kernelcore than
8027 * others.
8028 */
8029static void __init find_zone_movable_pfns_for_nodes(void)
8030{
8031 int i, nid;
8032 unsigned long usable_startpfn;
8033 unsigned long kernelcore_node, kernelcore_remaining;
8034	/* save the state before borrowing the nodemask */
8035 nodemask_t saved_node_state = node_states[N_MEMORY];
8036 unsigned long totalpages = early_calculate_totalpages();
8037 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
8038 struct memblock_region *r;
8039
8040 /* Need to find movable_zone earlier when movable_node is specified. */
8041 find_usable_zone_for_movable();
8042
8043 /*
8044 * If movable_node is specified, ignore kernelcore and movablecore
8045 * options.
8046 */
8047 if (movable_node_is_enabled()) {
8048 for_each_mem_region(r) {
8049 if (!memblock_is_hotpluggable(r))
8050 continue;
8051
8052 nid = memblock_get_region_node(r);
8053
8054 usable_startpfn = PFN_DOWN(r->base);
8055 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
8056 min(usable_startpfn, zone_movable_pfn[nid]) :
8057 usable_startpfn;
8058 }
8059
8060 goto out2;
8061 }
8062
8063 /*
8064 * If kernelcore=mirror is specified, ignore movablecore option
8065 */
8066 if (mirrored_kernelcore) {
8067 bool mem_below_4gb_not_mirrored = false;
8068
8069 for_each_mem_region(r) {
8070 if (memblock_is_mirror(r))
8071 continue;
8072
8073 nid = memblock_get_region_node(r);
8074
8075 usable_startpfn = memblock_region_memory_base_pfn(r);
8076
8077 if (usable_startpfn < PHYS_PFN(SZ_4G)) {
8078 mem_below_4gb_not_mirrored = true;
8079 continue;
8080 }
8081
8082 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
8083 min(usable_startpfn, zone_movable_pfn[nid]) :
8084 usable_startpfn;
8085 }
8086
8087 if (mem_below_4gb_not_mirrored)
8088 pr_warn("This configuration results in unmirrored kernel memory.\n");
8089
8090 goto out2;
8091 }
8092
8093 /*
8094 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
8095 * amount of necessary memory.
8096 */
8097 if (required_kernelcore_percent)
8098 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
8099 10000UL;
8100 if (required_movablecore_percent)
8101 required_movablecore = (totalpages * 100 * required_movablecore_percent) /
8102 10000UL;
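	/*
	 * e.g. (assumed numbers): with totalpages = 1048576 and
	 * kernelcore=25%, required_kernelcore becomes
	 * (1048576 * 100 * 25) / 10000 = 262144 pages.
	 */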
8103
8104 /*
8105 * If movablecore= was specified, calculate what size of
8106 * kernelcore that corresponds so that memory usable for
8107 * any allocation type is evenly spread. If both kernelcore
8108 * and movablecore are specified, then the value of kernelcore
8109 * will be used for required_kernelcore if it's greater than
8110 * what movablecore would have allowed.
8111 */
8112 if (required_movablecore) {
8113 unsigned long corepages;
8114
8115 /*
8116 * Round-up so that ZONE_MOVABLE is at least as large as what
8117 * was requested by the user
8118 */
8119 required_movablecore =
8120 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
8121 required_movablecore = min(totalpages, required_movablecore);
8122 corepages = totalpages - required_movablecore;
8123
8124 required_kernelcore = max(required_kernelcore, corepages);
8125 }
8126
8127 /*
8128 * If kernelcore was not specified or kernelcore size is larger
8129 * than totalpages, there is no ZONE_MOVABLE.
8130 */
8131 if (!required_kernelcore || required_kernelcore >= totalpages)
8132 goto out;
8133
8134 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
8135 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
8136
8137restart:
8138 /* Spread kernelcore memory as evenly as possible throughout nodes */
8139 kernelcore_node = required_kernelcore / usable_nodes;
8140 for_each_node_state(nid, N_MEMORY) {
8141 unsigned long start_pfn, end_pfn;
8142
8143 /*
8144 * Recalculate kernelcore_node if the division per node
8145 * now exceeds what is necessary to satisfy the requested
8146 * amount of memory for the kernel
8147 */
8148 if (required_kernelcore < kernelcore_node)
8149 kernelcore_node = required_kernelcore / usable_nodes;
8150
8151 /*
8152 * As the map is walked, we track how much memory is usable
8153 * by the kernel using kernelcore_remaining. When it is
8154 * 0, the rest of the node is usable by ZONE_MOVABLE
8155 */
8156 kernelcore_remaining = kernelcore_node;
8157
8158 /* Go through each range of PFNs within this node */
8159 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
8160 unsigned long size_pages;
8161
8162 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
8163 if (start_pfn >= end_pfn)
8164 continue;
8165
8166 /* Account for what is only usable for kernelcore */
8167 if (start_pfn < usable_startpfn) {
8168 unsigned long kernel_pages;
8169 kernel_pages = min(end_pfn, usable_startpfn)
8170 - start_pfn;
8171
8172 kernelcore_remaining -= min(kernel_pages,
8173 kernelcore_remaining);
8174 required_kernelcore -= min(kernel_pages,
8175 required_kernelcore);
8176
8177 /* Continue if range is now fully accounted */
8178 if (end_pfn <= usable_startpfn) {
8179
8180 /*
8181 * Push zone_movable_pfn to the end so
8182 * that if we have to rebalance
8183 * kernelcore across nodes, we will
8184 * not double account here
8185 */
8186 zone_movable_pfn[nid] = end_pfn;
8187 continue;
8188 }
8189 start_pfn = usable_startpfn;
8190 }
8191
8192 /*
8193 * The usable PFN range for ZONE_MOVABLE is from
8194 * start_pfn->end_pfn. Calculate size_pages as the
8195 * number of pages used as kernelcore
8196 */
8197 size_pages = end_pfn - start_pfn;
8198 if (size_pages > kernelcore_remaining)
8199 size_pages = kernelcore_remaining;
8200 zone_movable_pfn[nid] = start_pfn + size_pages;
8201
8202 /*
8203 * Some kernelcore has been met, update counts and
8204 * break if the kernelcore for this node has been
8205 * satisfied
8206 */
8207 required_kernelcore -= min(required_kernelcore,
8208 size_pages);
8209 kernelcore_remaining -= size_pages;
8210 if (!kernelcore_remaining)
8211 break;
8212 }
8213 }
8214
8215 /*
8216 * If there is still required_kernelcore, we do another pass with one
8217 * less node in the count. This will push zone_movable_pfn[nid] further
8218 * along on the nodes that still have memory until kernelcore is
8219 * satisfied
8220 */
8221 usable_nodes--;
8222 if (usable_nodes && required_kernelcore > usable_nodes)
8223 goto restart;
8224
8225out2:
8226 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
8227 for (nid = 0; nid < MAX_NUMNODES; nid++) {
8228 unsigned long start_pfn, end_pfn;
8229
8230 zone_movable_pfn[nid] =
8231 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
8232
8233 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
8234 if (zone_movable_pfn[nid] >= end_pfn)
8235 zone_movable_pfn[nid] = 0;
8236 }
8237
8238out:
8239 /* restore the node_state */
8240 node_states[N_MEMORY] = saved_node_state;
8241}
8242
8243/* Any regular or high memory on that node ? */
8244static void check_for_memory(pg_data_t *pgdat, int nid)
8245{
8246 enum zone_type zone_type;
8247
8248 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
8249 struct zone *zone = &pgdat->node_zones[zone_type];
8250 if (populated_zone(zone)) {
8251 if (IS_ENABLED(CONFIG_HIGHMEM))
8252 node_set_state(nid, N_HIGH_MEMORY);
8253 if (zone_type <= ZONE_NORMAL)
8254 node_set_state(nid, N_NORMAL_MEMORY);
8255 break;
8256 }
8257 }
8258}
8259
8260/*
8261 * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
8262 * such cases we allow max_zone_pfn to be sorted in descending order.
8263 */
8264bool __weak arch_has_descending_max_zone_pfns(void)
8265{
8266 return false;
8267}
8268
8269/**
8270 * free_area_init - Initialise all pg_data_t and zone data
8271 * @max_zone_pfn: an array of max PFNs for each zone
8272 *
8273 * This will call free_area_init_node() for each active node in the system.
8274 * Using the page ranges provided by memblock_set_node(), the size of each
8275 * zone in each node and their holes is calculated. If the maximum PFN
8276 * between two adjacent zones match, it is assumed that the zone is empty.
8277 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
8278 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
8279 * starts where the previous one ended. For example, ZONE_DMA32 starts
8280 * at arch_max_dma_pfn.
8281 */
8282void __init free_area_init(unsigned long *max_zone_pfn)
8283{
8284 unsigned long start_pfn, end_pfn;
8285 int i, nid, zone;
8286 bool descending;
8287
8288 /* Record where the zone boundaries are */
8289 memset(arch_zone_lowest_possible_pfn, 0,
8290 sizeof(arch_zone_lowest_possible_pfn));
8291 memset(arch_zone_highest_possible_pfn, 0,
8292 sizeof(arch_zone_highest_possible_pfn));
8293
8294 start_pfn = PHYS_PFN(memblock_start_of_DRAM());
8295 descending = arch_has_descending_max_zone_pfns();
8296
8297 for (i = 0; i < MAX_NR_ZONES; i++) {
8298 if (descending)
8299 zone = MAX_NR_ZONES - i - 1;
8300 else
8301 zone = i;
8302
8303 if (zone == ZONE_MOVABLE)
8304 continue;
8305
8306 end_pfn = max(max_zone_pfn[zone], start_pfn);
8307 arch_zone_lowest_possible_pfn[zone] = start_pfn;
8308 arch_zone_highest_possible_pfn[zone] = end_pfn;
8309
8310 start_pfn = end_pfn;
8311 }
8312
8313 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
8314 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
8315 find_zone_movable_pfns_for_nodes();
8316
8317 /* Print out the zone ranges */
8318 pr_info("Zone ranges:\n");
8319 for (i = 0; i < MAX_NR_ZONES; i++) {
8320 if (i == ZONE_MOVABLE)
8321 continue;
8322 pr_info(" %-8s ", zone_names[i]);
8323 if (arch_zone_lowest_possible_pfn[i] ==
8324 arch_zone_highest_possible_pfn[i])
8325 pr_cont("empty\n");
8326 else
8327 pr_cont("[mem %#018Lx-%#018Lx]\n",
8328 (u64)arch_zone_lowest_possible_pfn[i]
8329 << PAGE_SHIFT,
8330 ((u64)arch_zone_highest_possible_pfn[i]
8331 << PAGE_SHIFT) - 1);
8332 }
8333
8334 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
8335 pr_info("Movable zone start for each node\n");
8336 for (i = 0; i < MAX_NUMNODES; i++) {
8337 if (zone_movable_pfn[i])
8338 pr_info(" Node %d: %#018Lx\n", i,
8339 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
8340 }
8341
8342 /*
8343 * Print out the early node map, and initialize the
8344 * subsection-map relative to active online memory ranges to
8345 * enable future "sub-section" extensions of the memory map.
8346 */
8347 pr_info("Early memory node ranges\n");
8348 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
8349 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
8350 (u64)start_pfn << PAGE_SHIFT,
8351 ((u64)end_pfn << PAGE_SHIFT) - 1);
8352 subsection_map_init(start_pfn, end_pfn - start_pfn);
8353 }
8354
8355 /* Initialise every node */
8356 mminit_verify_pageflags_layout();
8357 setup_nr_node_ids();
8358 for_each_node(nid) {
8359 pg_data_t *pgdat;
8360
8361 if (!node_online(nid)) {
8362 pr_info("Initializing node %d as memoryless\n", nid);
8363
8364 /* Allocator not initialized yet */
8365 pgdat = arch_alloc_nodedata(nid);
8366 if (!pgdat) {
8367 pr_err("Cannot allocate %zuB for node %d.\n",
8368 sizeof(*pgdat), nid);
8369 continue;
8370 }
8371 arch_refresh_nodedata(nid, pgdat);
8372 free_area_init_memoryless_node(nid);
8373
8374 /*
8375			 * We do not want to confuse userspace with sysfs
8376			 * files/directories for a node without any memory
8377 * attached to it, so this node is not marked as
8378 * N_MEMORY and not marked online so that no sysfs
8379 * hierarchy will be created via register_one_node for
8380 * it. The pgdat will get fully initialized by
8381 * hotadd_init_pgdat() when memory is hotplugged into
8382 * this node.
8383 */
8384 continue;
8385 }
8386
8387 pgdat = NODE_DATA(nid);
8388 free_area_init_node(nid);
8389
8390 /* Any memory on that node */
8391 if (pgdat->node_present_pages)
8392 node_set_state(nid, N_MEMORY);
8393 check_for_memory(pgdat, nid);
8394 }
8395
8396 memmap_init();
8397}
8398
8399static int __init cmdline_parse_core(char *p, unsigned long *core,
8400 unsigned long *percent)
8401{
8402 unsigned long long coremem;
8403 char *endptr;
8404
8405 if (!p)
8406 return -EINVAL;
8407
8408 /* Value may be a percentage of total memory, otherwise bytes */
8409 coremem = simple_strtoull(p, &endptr, 0);
8410 if (*endptr == '%') {
8411 /* Paranoid check for percent values greater than 100 */
8412 WARN_ON(coremem > 100);
8413
8414 *percent = coremem;
8415 } else {
8416 coremem = memparse(p, &p);
8417 /* Paranoid check that UL is enough for the coremem value */
8418 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
8419
8420 *core = coremem >> PAGE_SHIFT;
8421 *percent = 0UL;
8422 }
8423 return 0;
8424}
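/*
 * For example (hypothetical command lines): "kernelcore=512M" stores
 * 512M >> PAGE_SHIFT in *core and zeroes *percent, while "kernelcore=50%"
 * stores 50 in *percent and leaves *core untouched.
 */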
8425
8426/*
8427 * kernelcore=size sets the amount of memory for use for allocations that
8428 * cannot be reclaimed or migrated.
8429 */
8430static int __init cmdline_parse_kernelcore(char *p)
8431{
8432 /* parse kernelcore=mirror */
8433 if (parse_option_str(p, "mirror")) {
8434 mirrored_kernelcore = true;
8435 return 0;
8436 }
8437
8438 return cmdline_parse_core(p, &required_kernelcore,
8439 &required_kernelcore_percent);
8440}
8441
8442/*
8443 * movablecore=size sets the amount of memory for use for allocations that
8444 * can be reclaimed or migrated.
8445 */
8446static int __init cmdline_parse_movablecore(char *p)
8447{
8448 return cmdline_parse_core(p, &required_movablecore,
8449 &required_movablecore_percent);
8450}
8451
8452early_param("kernelcore", cmdline_parse_kernelcore);
8453early_param("movablecore", cmdline_parse_movablecore);
8454
8455void adjust_managed_page_count(struct page *page, long count)
8456{
8457 atomic_long_add(count, &page_zone(page)->managed_pages);
8458 totalram_pages_add(count);
8459#ifdef CONFIG_HIGHMEM
8460 if (PageHighMem(page))
8461 totalhigh_pages_add(count);
8462#endif
8463}
8464EXPORT_SYMBOL(adjust_managed_page_count);
8465
8466unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
8467{
8468 void *pos;
8469 unsigned long pages = 0;
8470
8471 start = (void *)PAGE_ALIGN((unsigned long)start);
8472 end = (void *)((unsigned long)end & PAGE_MASK);
8473 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
8474 struct page *page = virt_to_page(pos);
8475 void *direct_map_addr;
8476
8477 /*
8478 * 'direct_map_addr' might be different from 'pos'
8479		 * because virt_to_page() on some architectures
8480		 * works with aliases.  Getting the direct map
8481 * address ensures that we get a _writeable_
8482 * alias for the memset().
8483 */
8484 direct_map_addr = page_address(page);
8485 /*
8486 * Perform a kasan-unchecked memset() since this memory
8487 * has not been initialized.
8488 */
8489 direct_map_addr = kasan_reset_tag(direct_map_addr);
8490 if ((unsigned int)poison <= 0xFF)
8491 memset(direct_map_addr, poison, PAGE_SIZE);
8492
8493 free_reserved_page(page);
8494 }
8495
8496 if (pages && s)
8497 pr_info("Freeing %s memory: %ldK\n", s, K(pages));
8498
8499 return pages;
8500}
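/*
 * A typical caller frees the kernel's .init sections once boot has finished,
 * roughly: free_reserved_area(&__init_begin, &__init_end, poison,
 * "unused kernel") - see free_initmem_default() in include/linux/mm.h.
 */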
8501
8502void __init mem_init_print_info(void)
8503{
8504 unsigned long physpages, codesize, datasize, rosize, bss_size;
8505 unsigned long init_code_size, init_data_size;
8506
8507 physpages = get_num_physpages();
8508 codesize = _etext - _stext;
8509 datasize = _edata - _sdata;
8510 rosize = __end_rodata - __start_rodata;
8511 bss_size = __bss_stop - __bss_start;
8512 init_data_size = __init_end - __init_begin;
8513 init_code_size = _einittext - _sinittext;
8514
8515 /*
8516 * Detect special cases and adjust section sizes accordingly:
8517 * 1) .init.* may be embedded into .data sections
8518 * 2) .init.text.* may be out of [__init_begin, __init_end],
8519 * please refer to arch/tile/kernel/vmlinux.lds.S.
8520 * 3) .rodata.* may be embedded into .text or .data sections.
8521 */
8522#define adj_init_size(start, end, size, pos, adj) \
8523 do { \
8524 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
8525 size -= adj; \
8526 } while (0)
8527
8528 adj_init_size(__init_begin, __init_end, init_data_size,
8529 _sinittext, init_code_size);
8530 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
8531 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
8532 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
8533 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
8534
8535#undef adj_init_size
8536
8537 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
8538#ifdef CONFIG_HIGHMEM
8539 ", %luK highmem"
8540#endif
8541 ")\n",
8542 K(nr_free_pages()), K(physpages),
8543 codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
8544 (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
8545 K(physpages - totalram_pages() - totalcma_pages),
8546 K(totalcma_pages)
8547#ifdef CONFIG_HIGHMEM
8548 , K(totalhigh_pages())
8549#endif
8550 );
8551}
8552
8553/**
8554 * set_dma_reserve - set the specified number of pages reserved in the first zone
8555 * @new_dma_reserve: The number of pages to mark reserved
8556 *
8557 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
8558 * In the DMA zone, a significant percentage may be consumed by kernel image
8559 * and other unfreeable allocations which can skew the watermarks badly. This
8560 * function may optionally be used to account for unfreeable pages in the
8561 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
8562 * smaller per-cpu batchsize.
8563 */
8564void __init set_dma_reserve(unsigned long new_dma_reserve)
8565{
8566 dma_reserve = new_dma_reserve;
8567}
8568
8569static int page_alloc_cpu_dead(unsigned int cpu)
8570{
8571 struct zone *zone;
8572
8573 lru_add_drain_cpu(cpu);
8574 mlock_page_drain_remote(cpu);
8575 drain_pages(cpu);
8576
8577 /*
8578 * Spill the event counters of the dead processor
8579	 * into the current processor's event counters.
8580 * This artificially elevates the count of the current
8581 * processor.
8582 */
8583 vm_events_fold_cpu(cpu);
8584
8585 /*
8586 * Zero the differential counters of the dead processor
8587 * so that the vm statistics are consistent.
8588 *
8589 * This is only okay since the processor is dead and cannot
8590 * race with what we are doing.
8591 */
8592 cpu_vm_stats_fold(cpu);
8593
8594 for_each_populated_zone(zone)
8595 zone_pcp_update(zone, 0);
8596
8597 return 0;
8598}
8599
8600static int page_alloc_cpu_online(unsigned int cpu)
8601{
8602 struct zone *zone;
8603
8604 for_each_populated_zone(zone)
8605 zone_pcp_update(zone, 1);
8606 return 0;
8607}
8608
8609#ifdef CONFIG_NUMA
8610int hashdist = HASHDIST_DEFAULT;
8611
8612static int __init set_hashdist(char *str)
8613{
8614 if (!str)
8615 return 0;
8616 hashdist = simple_strtoul(str, &str, 0);
8617 return 1;
8618}
8619__setup("hashdist=", set_hashdist);
8620#endif
8621
8622void __init page_alloc_init(void)
8623{
8624 int ret;
8625
8626#ifdef CONFIG_NUMA
8627 if (num_node_state(N_MEMORY) == 1)
8628 hashdist = 0;
8629#endif
8630
8631 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
8632 "mm/page_alloc:pcp",
8633 page_alloc_cpu_online,
8634 page_alloc_cpu_dead);
8635 WARN_ON(ret < 0);
8636}
8637
8638/*
8639 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
8640 * or min_free_kbytes changes.
8641 */
8642static void calculate_totalreserve_pages(void)
8643{
8644 struct pglist_data *pgdat;
8645 unsigned long reserve_pages = 0;
8646 enum zone_type i, j;
8647
8648 for_each_online_pgdat(pgdat) {
8649
8650 pgdat->totalreserve_pages = 0;
8651
8652 for (i = 0; i < MAX_NR_ZONES; i++) {
8653 struct zone *zone = pgdat->node_zones + i;
8654 long max = 0;
8655 unsigned long managed_pages = zone_managed_pages(zone);
8656
8657 /* Find valid and maximum lowmem_reserve in the zone */
8658 for (j = i; j < MAX_NR_ZONES; j++) {
8659 if (zone->lowmem_reserve[j] > max)
8660 max = zone->lowmem_reserve[j];
8661 }
8662
8663 /* we treat the high watermark as reserved pages. */
8664 max += high_wmark_pages(zone);
8665
8666 if (max > managed_pages)
8667 max = managed_pages;
8668
8669 pgdat->totalreserve_pages += max;
8670
8671 reserve_pages += max;
8672 }
8673 }
8674 totalreserve_pages = reserve_pages;
8675}
8676
8677/*
8678 * setup_per_zone_lowmem_reserve - called whenever
8679 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
8680 * has a correct pages reserved value, so an adequate number of
8681 * pages are left in the zone after a successful __alloc_pages().
8682 */
8683static void setup_per_zone_lowmem_reserve(void)
8684{
8685 struct pglist_data *pgdat;
8686 enum zone_type i, j;
8687
8688 for_each_online_pgdat(pgdat) {
8689 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
8690 struct zone *zone = &pgdat->node_zones[i];
8691 int ratio = sysctl_lowmem_reserve_ratio[i];
8692 bool clear = !ratio || !zone_managed_pages(zone);
8693 unsigned long managed_pages = 0;
8694
8695 for (j = i + 1; j < MAX_NR_ZONES; j++) {
8696 struct zone *upper_zone = &pgdat->node_zones[j];
8697
8698 managed_pages += zone_managed_pages(upper_zone);
8699
8700 if (clear)
8701 zone->lowmem_reserve[j] = 0;
8702 else
8703 zone->lowmem_reserve[j] = managed_pages / ratio;
8704 }
8705 }
8706 }
8707
8708 /* update totalreserve_pages */
8709 calculate_totalreserve_pages();
8710}
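/*
 * Illustration with assumed sizes: if sysctl_lowmem_reserve_ratio[ZONE_DMA]
 * is 256 and the zones above ZONE_DMA total 1048576 managed pages, ZONE_DMA
 * keeps 1048576 / 256 = 4096 pages off-limits to allocations that could
 * have been satisfied from the higher zones.
 */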
8711
8712static void __setup_per_zone_wmarks(void)
8713{
8714 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
8715 unsigned long lowmem_pages = 0;
8716 struct zone *zone;
8717 unsigned long flags;
8718
8719 /* Calculate total number of !ZONE_HIGHMEM pages */
8720 for_each_zone(zone) {
8721 if (!is_highmem(zone))
8722 lowmem_pages += zone_managed_pages(zone);
8723 }
8724
8725 for_each_zone(zone) {
8726 u64 tmp;
8727
8728 spin_lock_irqsave(&zone->lock, flags);
8729 tmp = (u64)pages_min * zone_managed_pages(zone);
8730 do_div(tmp, lowmem_pages);
8731 if (is_highmem(zone)) {
8732 /*
8733 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
8734 * need highmem pages, so cap pages_min to a small
8735 * value here.
8736 *
8737 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
8738 * deltas control async page reclaim, and so should
8739 * not be capped for highmem.
8740 */
8741 unsigned long min_pages;
8742
8743 min_pages = zone_managed_pages(zone) / 1024;
8744 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
8745 zone->_watermark[WMARK_MIN] = min_pages;
8746 } else {
8747 /*
8748 * If it's a lowmem zone, reserve a number of pages
8749 * proportionate to the zone's size.
8750 */
8751 zone->_watermark[WMARK_MIN] = tmp;
8752 }
8753
8754 /*
8755 * Set the kswapd watermarks distance according to the
8756 * scale factor in proportion to available memory, but
8757 * ensure a minimum size on small systems.
8758 */
8759 tmp = max_t(u64, tmp >> 2,
8760 mult_frac(zone_managed_pages(zone),
8761 watermark_scale_factor, 10000));
8762
8763 zone->watermark_boost = 0;
8764 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
8765 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
8766 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
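		/*
		 * Illustrative numbers: with WMARK_MIN = 4096 pages and
		 * tmp = 1024, the low/high/promo watermarks land at 5120,
		 * 6144 and 7168 pages respectively.
		 */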
8767
8768 spin_unlock_irqrestore(&zone->lock, flags);
8769 }
8770
8771 /* update totalreserve_pages */
8772 calculate_totalreserve_pages();
8773}
8774
8775/**
8776 * setup_per_zone_wmarks - called when min_free_kbytes changes
8777 * or when memory is hot-{added|removed}
8778 *
8779 * Ensures that the watermark[min,low,high] values for each zone are set
8780 * correctly with respect to min_free_kbytes.
8781 */
8782void setup_per_zone_wmarks(void)
8783{
8784 struct zone *zone;
8785 static DEFINE_SPINLOCK(lock);
8786
8787 spin_lock(&lock);
8788 __setup_per_zone_wmarks();
8789 spin_unlock(&lock);
8790
8791 /*
8792	 * The watermark sizes have changed, so update the pcpu batch
8793 * and high limits or the limits may be inappropriate.
8794 */
8795 for_each_zone(zone)
8796 zone_pcp_update(zone, 0);
8797}
8798
8799/*
8800 * Initialise min_free_kbytes.
8801 *
8802 * For small machines we want it small (128k min). For large machines
8803 * we want it large (256MB max). But it is not linear, because network
8804 * bandwidth does not increase linearly with machine size. We use
8805 *
8806 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
8807 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
8808 *
8809 * which yields
8810 *
8811 * 16MB: 512k
8812 * 32MB: 724k
8813 * 64MB: 1024k
8814 * 128MB: 1448k
8815 * 256MB: 2048k
8816 * 512MB: 2896k
8817 * 1024MB: 4096k
8818 * 2048MB: 5792k
8819 * 4096MB: 8192k
8820 * 8192MB: 11584k
8821 * 16384MB: 16384k
8822 */
8823void calculate_min_free_kbytes(void)
8824{
8825 unsigned long lowmem_kbytes;
8826 int new_min_free_kbytes;
8827
8828 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
8829 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
8830
8831 if (new_min_free_kbytes > user_min_free_kbytes)
8832 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
8833 else
8834 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
8835 new_min_free_kbytes, user_min_free_kbytes);
8836
8837}
8838
8839int __meminit init_per_zone_wmark_min(void)
8840{
8841 calculate_min_free_kbytes();
8842 setup_per_zone_wmarks();
8843 refresh_zone_stat_thresholds();
8844 setup_per_zone_lowmem_reserve();
8845
8846#ifdef CONFIG_NUMA
8847 setup_min_unmapped_ratio();
8848 setup_min_slab_ratio();
8849#endif
8850
8851 khugepaged_min_free_kbytes_update();
8852
8853 return 0;
8854}
8855postcore_initcall(init_per_zone_wmark_min)
8856
8857/*
8858 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax() so
8859 * that we can call two helper functions whenever min_free_kbytes
8860 * changes.
8861 */
8862int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8863 void *buffer, size_t *length, loff_t *ppos)
8864{
8865 int rc;
8866
8867 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8868 if (rc)
8869 return rc;
8870
8871 if (write) {
8872 user_min_free_kbytes = min_free_kbytes;
8873 setup_per_zone_wmarks();
8874 }
8875 return 0;
8876}
8877
8878int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
8879 void *buffer, size_t *length, loff_t *ppos)
8880{
8881 int rc;
8882
8883 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8884 if (rc)
8885 return rc;
8886
8887 if (write)
8888 setup_per_zone_wmarks();
8889
8890 return 0;
8891}
8892
8893#ifdef CONFIG_NUMA
8894static void setup_min_unmapped_ratio(void)
8895{
8896 pg_data_t *pgdat;
8897 struct zone *zone;
8898
8899 for_each_online_pgdat(pgdat)
8900 pgdat->min_unmapped_pages = 0;
8901
8902 for_each_zone(zone)
8903 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
8904 sysctl_min_unmapped_ratio) / 100;
8905}
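/*
 * e.g. (assumed sizes): a zone with 1048576 managed pages under the default
 * sysctl_min_unmapped_ratio of 1 adds (1048576 * 1) / 100 = 10485 pages to
 * its node's min_unmapped_pages (the division truncates per zone).
 */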
8906
8907
8908int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8909 void *buffer, size_t *length, loff_t *ppos)
8910{
8911 int rc;
8912
8913 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8914 if (rc)
8915 return rc;
8916
8917 setup_min_unmapped_ratio();
8918
8919 return 0;
8920}
8921
8922static void setup_min_slab_ratio(void)
8923{
8924 pg_data_t *pgdat;
8925 struct zone *zone;
8926
8927 for_each_online_pgdat(pgdat)
8928 pgdat->min_slab_pages = 0;
8929
8930 for_each_zone(zone)
8931 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
8932 sysctl_min_slab_ratio) / 100;
8933}
8934
8935int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
8936 void *buffer, size_t *length, loff_t *ppos)
8937{
8938 int rc;
8939
8940 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8941 if (rc)
8942 return rc;
8943
8944 setup_min_slab_ratio();
8945
8946 return 0;
8947}
8948#endif
8949
8950/*
8951 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8952 * proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
8953 * whenever sysctl_lowmem_reserve_ratio changes.
8954 *
8955 * The reserve ratio obviously has absolutely no relation with the
8956 * minimum watermarks. The lowmem reserve ratio is only meaningful
8957 * as a function of the boot-time zone sizes.
8958 */
8959int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8960 void *buffer, size_t *length, loff_t *ppos)
8961{
8962 int i;
8963
8964 proc_dointvec_minmax(table, write, buffer, length, ppos);
8965
8966 for (i = 0; i < MAX_NR_ZONES; i++) {
8967 if (sysctl_lowmem_reserve_ratio[i] < 1)
8968 sysctl_lowmem_reserve_ratio[i] = 0;
8969 }
8970
8971 setup_per_zone_lowmem_reserve();
8972 return 0;
8973}
8974
8975/*
8976 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
8977 * cpu. It is the fraction of total pages in each zone that a hot per-cpu
8978 * pagelist can have before it gets flushed back to the buddy allocator.
8979 */
8980int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
8981 int write, void *buffer, size_t *length, loff_t *ppos)
8982{
8983 struct zone *zone;
8984 int old_percpu_pagelist_high_fraction;
8985 int ret;
8986
8987 mutex_lock(&pcp_batch_high_lock);
8988 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
8989
8990 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8991 if (!write || ret < 0)
8992 goto out;
8993
8994 /* Sanity checking to avoid pcp imbalance */
8995 if (percpu_pagelist_high_fraction &&
8996 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
8997 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
8998 ret = -EINVAL;
8999 goto out;
9000 }
9001
9002 /* No change? */
9003 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
9004 goto out;
9005
9006 for_each_populated_zone(zone)
9007 zone_set_pageset_high_and_batch(zone, 0);
9008out:
9009 mutex_unlock(&pcp_batch_high_lock);
9010 return ret;
9011}
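
/*
 * Worked example (illustrative, following how zone_highsize() splits the
 * budget): with percpu_pagelist_high_fraction = 8, a zone managing
 * 1,048,576 pages and 16 local CPUs, each CPU's pcp->high comes out around
 * 1,048,576 / 8 / 16 = 8,192 pages before frees spill back to the buddy
 * lists.
 */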
9012
9013#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
9014/*
9015 * Returns the number of pages that the architecture has reserved but
9016 * that are not known to alloc_large_system_hash().
9017 */
9018static unsigned long __init arch_reserved_kernel_pages(void)
9019{
9020 return 0;
9021}
9022#endif
9023
9024/*
9025 * The adaptive scale is meant to reduce the sizes of hash tables on large
9026 * memory machines. As the memory size increases, the scale increases too,
9027 * but at a slower pace. Starting from ADAPT_SCALE_BASE (64G), every time
9028 * memory quadruples the scale is increased by one, which means the hash
9029 * table size only doubles, instead of quadrupling as well.
9030 * Because 32-bit systems cannot have large physical memory, where this scaling
9031 * makes sense, it is disabled on such platforms.
9032 */
9033#if __BITS_PER_LONG > 32
9034#define ADAPT_SCALE_BASE (64ul << 30)
9035#define ADAPT_SCALE_SHIFT 2
9036#define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
9037#endif
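
/*
 * Worked example (illustrative): if a 64 GiB machine gets a table of N
 * entries, then 256 GiB (4x the memory) bumps the scale by one and yields
 * 2N entries, and 1 TiB yields 4N - above ADAPT_SCALE_BASE the table grows
 * with the square root of memory size.
 */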
9038
9039/*
9040 * allocate a large system hash table from memblock, the page allocator or vmalloc
9041 * - it is assumed that the hash table must contain an exact power-of-2
9042 * quantity of entries
9043 * - limit is the number of hash buckets, not the total allocation size
9044 */
9045void *__init alloc_large_system_hash(const char *tablename,
9046 unsigned long bucketsize,
9047 unsigned long numentries,
9048 int scale,
9049 int flags,
9050 unsigned int *_hash_shift,
9051 unsigned int *_hash_mask,
9052 unsigned long low_limit,
9053 unsigned long high_limit)
9054{
9055 unsigned long long max = high_limit;
9056 unsigned long log2qty, size;
9057 void *table;
9058 gfp_t gfp_flags;
9059 bool virt;
9060 bool huge;
9061
9062 /* allow the kernel cmdline to have a say */
9063 if (!numentries) {
9064 /* round applicable memory size up to nearest megabyte */
9065 numentries = nr_kernel_pages;
9066 numentries -= arch_reserved_kernel_pages();
9067
9068		/* Rounding up isn't necessary when PAGE_SIZE >= 1MB */
9069 if (PAGE_SIZE < SZ_1M)
9070 numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
9071
9072#if __BITS_PER_LONG > 32
9073 if (!high_limit) {
9074 unsigned long adapt;
9075
9076 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
9077 adapt <<= ADAPT_SCALE_SHIFT)
9078 scale++;
9079 }
9080#endif
9081
9082 /* limit to 1 bucket per 2^scale bytes of low memory */
9083 if (scale > PAGE_SHIFT)
9084 numentries >>= (scale - PAGE_SHIFT);
9085 else
9086 numentries <<= (PAGE_SHIFT - scale);
9087
9088		/* Make sure we've got at least a 0-order allocation. */
9089 if (unlikely(flags & HASH_SMALL)) {
9090 /* Makes no sense without HASH_EARLY */
9091 WARN_ON(!(flags & HASH_EARLY));
9092 if (!(numentries >> *_hash_shift)) {
9093 numentries = 1UL << *_hash_shift;
9094 BUG_ON(!numentries);
9095 }
9096 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9097 numentries = PAGE_SIZE / bucketsize;
9098 }
9099 numentries = roundup_pow_of_two(numentries);
9100
9101 /* limit allocation size to 1/16 total memory by default */
9102 if (max == 0) {
9103 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
9104 do_div(max, bucketsize);
9105 }
9106 max = min(max, 0x80000000ULL);
9107
9108 if (numentries < low_limit)
9109 numentries = low_limit;
9110 if (numentries > max)
9111 numentries = max;
9112
9113 log2qty = ilog2(numentries);
9114
9115 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
9116 do {
9117 virt = false;
9118 size = bucketsize << log2qty;
9119 if (flags & HASH_EARLY) {
9120 if (flags & HASH_ZERO)
9121 table = memblock_alloc(size, SMP_CACHE_BYTES);
9122 else
9123 table = memblock_alloc_raw(size,
9124 SMP_CACHE_BYTES);
9125 } else if (get_order(size) >= MAX_ORDER || hashdist) {
9126 table = vmalloc_huge(size, gfp_flags);
9127 virt = true;
9128 if (table)
9129 huge = is_vm_area_hugepages(table);
9130 } else {
9131			/*
9132			 * If bucketsize is not a power of two, we may free
9133			 * some pages at the end of the hash table, which
9134			 * alloc_pages_exact() does automatically.
9135			 */
9136 table = alloc_pages_exact(size, gfp_flags);
9137 kmemleak_alloc(table, size, 1, gfp_flags);
9138 }
9139 } while (!table && size > PAGE_SIZE && --log2qty);
9140
9141 if (!table)
9142 panic("Failed to allocate %s hash table\n", tablename);
9143
9144 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
9145 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
9146 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
9147
9148 if (_hash_shift)
9149 *_hash_shift = log2qty;
9150 if (_hash_mask)
9151 *_hash_mask = (1 << log2qty) - 1;
9152
9153 return table;
9154}
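
/*
 * Illustrative sketch (hypothetical, modeled on callers such as the inode
 * and dentry caches): sizing a boot-time hash table. example_hashtable,
 * example_hash_shift, example_hash_mask and example_hash_init() are made up
 * for this example.
 */
static struct hlist_head *example_hashtable;
static unsigned int example_hash_shift;
static unsigned int example_hash_mask;

static void __init __maybe_unused example_hash_init(void)
{
	example_hashtable = alloc_large_system_hash("example-cache",
					sizeof(struct hlist_head),
					0,		/* size from memory */
					14,		/* 1 bucket per 16 KiB */
					HASH_ZERO,	/* zero the buckets */
					&example_hash_shift,
					&example_hash_mask,
					0,		/* no lower limit */
					0);		/* no upper limit */
}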
9155
9156#ifdef CONFIG_CONTIG_ALLOC
9157#if defined(CONFIG_DYNAMIC_DEBUG) || \
9158 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
9159/* Usage: See admin-guide/dynamic-debug-howto.rst */
9160static void alloc_contig_dump_pages(struct list_head *page_list)
9161{
9162 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
9163
9164 if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
9165 struct page *page;
9166
9167 dump_stack();
9168 list_for_each_entry(page, page_list, lru)
9169 dump_page(page, "migration failure");
9170 }
9171}
9172#else
9173static inline void alloc_contig_dump_pages(struct list_head *page_list)
9174{
9175}
9176#endif
9177
9178/* [start, end) must belong to a single zone. */
9179int __alloc_contig_migrate_range(struct compact_control *cc,
9180 unsigned long start, unsigned long end)
9181{
9182 /* This function is based on compact_zone() from compaction.c. */
9183 unsigned int nr_reclaimed;
9184 unsigned long pfn = start;
9185 unsigned int tries = 0;
9186 int ret = 0;
9187 struct migration_target_control mtc = {
9188 .nid = zone_to_nid(cc->zone),
9189 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
9190 };
9191
9192 lru_cache_disable();
9193
9194 while (pfn < end || !list_empty(&cc->migratepages)) {
9195 if (fatal_signal_pending(current)) {
9196 ret = -EINTR;
9197 break;
9198 }
9199
9200 if (list_empty(&cc->migratepages)) {
9201 cc->nr_migratepages = 0;
9202 ret = isolate_migratepages_range(cc, pfn, end);
9203 if (ret && ret != -EAGAIN)
9204 break;
9205 pfn = cc->migrate_pfn;
9206 tries = 0;
9207 } else if (++tries == 5) {
9208 ret = -EBUSY;
9209 break;
9210 }
9211
9212 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
9213 &cc->migratepages);
9214 cc->nr_migratepages -= nr_reclaimed;
9215
9216 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
9217 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
9218
9219		/*
9220		 * On -ENOMEM, migrate_pages() bails out right away. It is
9221		 * pointless to retry on this error, so do the same here.
9222		 */
9223 if (ret == -ENOMEM)
9224 break;
9225 }
9226
9227 lru_cache_enable();
9228 if (ret < 0) {
9229 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
9230 alloc_contig_dump_pages(&cc->migratepages);
9231 putback_movable_pages(&cc->migratepages);
9232 return ret;
9233 }
9234 return 0;
9235}
9236
9237/**
9238 * alloc_contig_range() -- tries to allocate given range of pages
9239 * @start: start PFN to allocate
9240 * @end: one-past-the-last PFN to allocate
9241 * @migratetype: migratetype of the underlying pageblocks (either
9242 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
9243 * in range must have the same migratetype and it must
9244 * be either of the two.
9245 * @gfp_mask: GFP mask to use during compaction
9246 *
9247 * The PFN range does not have to be pageblock aligned. The PFN range must
9248 * belong to a single zone.
9249 *
9250 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
9251 * pageblocks in the range. Once isolated, the pageblocks should not
9252 * be modified by others.
9253 *
9254 * Return: zero on success or negative error code. On success all
9255 * pages whose PFN is in [start, end) are allocated for the caller and
9256 * need to be freed with free_contig_range().
9257 */
9258int alloc_contig_range(unsigned long start, unsigned long end,
9259 unsigned migratetype, gfp_t gfp_mask)
9260{
9261 unsigned long outer_start, outer_end;
9262 int order;
9263 int ret = 0;
9264
9265 struct compact_control cc = {
9266 .nr_migratepages = 0,
9267 .order = -1,
9268 .zone = page_zone(pfn_to_page(start)),
9269 .mode = MIGRATE_SYNC,
9270 .ignore_skip_hint = true,
9271 .no_set_skip_hint = true,
9272 .gfp_mask = current_gfp_context(gfp_mask),
9273 .alloc_contig = true,
9274 };
9275 INIT_LIST_HEAD(&cc.migratepages);
9276
9277	/*
9278	 * What we do here is mark all pageblocks in the range as
9279	 * MIGRATE_ISOLATE. Because pageblock and max-order pages may
9280	 * have different sizes, and due to the way the page allocator
9281	 * works, start_isolate_page_range() has special handling for this.
9282	 *
9283	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
9284	 * migrate the pages from an unaligned range (i.e. pages that
9285	 * we are interested in). This will put all the pages in the
9286	 * range back to the page allocator as MIGRATE_ISOLATE.
9287	 *
9288	 * When this is done, we take the pages in the range from the
9289	 * page allocator, removing them from the buddy system. This
9290	 * way the page allocator will never consider using them.
9291	 *
9292	 * This lets us mark the pageblocks back as
9293	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
9294	 * aligned range but not in the unaligned, original range are
9295	 * put back to the page allocator so that buddy can use them.
9296	 */
9297
9298 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
9299 if (ret)
9300 goto done;
9301
9302 drain_all_pages(cc.zone);
9303
9304	/*
9305	 * In case of -EBUSY, we'd like to know which page causes the
9306	 * problem. So, just fall through. test_pages_isolated() has a
9307	 * tracepoint which will report the busy page.
9308	 *
9309	 * It is possible that busy pages could become available before
9310	 * the call to test_pages_isolated(), and the range will actually
9311	 * be allocated. So, if we fall through, be sure to clear ret so
9312	 * that -EBUSY is not accidentally used or returned to the caller.
9313	 */
9314 ret = __alloc_contig_migrate_range(&cc, start, end);
9315 if (ret && ret != -EBUSY)
9316 goto done;
9317 ret = 0;
9318
9319	/*
9320	 * Pages from [start, end) are within pageblock_nr_pages
9321	 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
9322	 * more, all pages in [start, end) are free in the page allocator.
9323	 * What we are going to do is allocate all pages from
9324	 * [start, end) (that is, remove them from the page allocator).
9325	 *
9326	 * The only problem is that pages at the beginning and at the
9327	 * end of the interesting range may not be aligned with pages that
9328	 * the page allocator holds, i.e. they can be part of higher-order
9329	 * pages. Because of this, we reserve the bigger range and,
9330	 * once this is done, free the pages we are not interested in.
9331	 *
9332	 * We don't have to hold zone->lock here because the pages are
9333	 * isolated and thus won't get removed from the buddy system.
9334	 */
9335
9336 order = 0;
9337 outer_start = start;
9338 while (!PageBuddy(pfn_to_page(outer_start))) {
9339 if (++order >= MAX_ORDER) {
9340 outer_start = start;
9341 break;
9342 }
9343 outer_start &= ~0UL << order;
9344 }
9345
9346 if (outer_start != start) {
9347 order = buddy_order(pfn_to_page(outer_start));
9348
9349		/*
9350		 * The outer_start page could be a small-order buddy page
9351		 * that doesn't include the start page. Adjust outer_start
9352		 * in this case so that the failed page is reported properly
9353		 * by the tracepoint in test_pages_isolated().
9354		 */
9355 if (outer_start + (1UL << order) <= start)
9356 outer_start = start;
9357 }
9358
9359 /* Make sure the range is really isolated. */
9360 if (test_pages_isolated(outer_start, end, 0)) {
9361 ret = -EBUSY;
9362 goto done;
9363 }
9364
9365 /* Grab isolated pages from freelists. */
9366 outer_end = isolate_freepages_range(&cc, outer_start, end);
9367 if (!outer_end) {
9368 ret = -EBUSY;
9369 goto done;
9370 }
9371
9372 /* Free head and tail (if any) */
9373 if (start != outer_start)
9374 free_contig_range(outer_start, start - outer_start);
9375 if (end != outer_end)
9376 free_contig_range(end, outer_end - end);
9377
9378done:
9379 undo_isolate_page_range(start, end, migratetype);
9380 return ret;
9381}
9382EXPORT_SYMBOL(alloc_contig_range);
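
/*
 * Illustrative sketch (hypothetical): claiming a specific MOVABLE PFN range
 * the way the kernel-doc above describes, then releasing it.
 * example_claim_range() is made up for this example; real callers include
 * CMA (with MIGRATE_CMA) and virtio-mem.
 */
static int __maybe_unused example_claim_range(unsigned long start_pfn,
					      unsigned long nr_pages)
{
	int ret;

	/* Every pageblock in [start_pfn, start_pfn + nr_pages) must be MOVABLE. */
	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				 MIGRATE_MOVABLE, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... use the physically contiguous pages ... */

	free_contig_range(start_pfn, nr_pages);
	return 0;
}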
9383
9384static int __alloc_contig_pages(unsigned long start_pfn,
9385 unsigned long nr_pages, gfp_t gfp_mask)
9386{
9387 unsigned long end_pfn = start_pfn + nr_pages;
9388
9389 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
9390 gfp_mask);
9391}
9392
9393static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
9394 unsigned long nr_pages)
9395{
9396 unsigned long i, end_pfn = start_pfn + nr_pages;
9397 struct page *page;
9398
9399 for (i = start_pfn; i < end_pfn; i++) {
9400 page = pfn_to_online_page(i);
9401 if (!page)
9402 return false;
9403
9404 if (page_zone(page) != z)
9405 return false;
9406
9407 if (PageReserved(page))
9408 return false;
9409 }
9410 return true;
9411}
9412
9413static bool zone_spans_last_pfn(const struct zone *zone,
9414 unsigned long start_pfn, unsigned long nr_pages)
9415{
9416 unsigned long last_pfn = start_pfn + nr_pages - 1;
9417
9418 return zone_spans_pfn(zone, last_pfn);
9419}
9420
9421/**
9422 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
9423 * @nr_pages: Number of contiguous pages to allocate
9424 * @gfp_mask: GFP mask to limit search and used during compaction
9425 * @nid: Target node
9426 * @nodemask: Mask for other possible nodes
9427 *
9428 * This routine is a wrapper around alloc_contig_range(). It scans over zones
9429 * on an applicable zonelist to find a contiguous pfn range which can then be
9430 * tried for allocation with alloc_contig_range(). This routine is intended
9431 * for allocation requests which can not be fulfilled with the buddy allocator.
9432 *
9433 * The allocated memory is always aligned to a page boundary. If nr_pages is a
9434 * power of two, then allocated range is also guaranteed to be aligned to same
9435 * nr_pages (e.g. 1GB request would be aligned to 1GB).
9436 *
9437 * Allocated pages can be freed with free_contig_range() or by manually calling
9438 * __free_page() on each allocated page.
9439 *
9440 * Return: pointer to contiguous pages on success, or NULL if not successful.
9441 */
9442struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
9443 int nid, nodemask_t *nodemask)
9444{
9445 unsigned long ret, pfn, flags;
9446 struct zonelist *zonelist;
9447 struct zone *zone;
9448 struct zoneref *z;
9449
9450 zonelist = node_zonelist(nid, gfp_mask);
9451 for_each_zone_zonelist_nodemask(zone, z, zonelist,
9452 gfp_zone(gfp_mask), nodemask) {
9453 spin_lock_irqsave(&zone->lock, flags);
9454
9455 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
9456 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
9457 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
9458 /*
9459 * We release the zone lock here because
9460 * alloc_contig_range() will also lock the zone
9461 * at some point. If there's an allocation
9462 * spinning on this lock, it may win the race
9463 * and cause alloc_contig_range() to fail...
9464 */
9465 spin_unlock_irqrestore(&zone->lock, flags);
9466 ret = __alloc_contig_pages(pfn, nr_pages,
9467 gfp_mask);
9468 if (!ret)
9469 return pfn_to_page(pfn);
9470 spin_lock_irqsave(&zone->lock, flags);
9471 }
9472 pfn += nr_pages;
9473 }
9474 spin_unlock_irqrestore(&zone->lock, flags);
9475 }
9476 return NULL;
9477}
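
/*
 * Illustrative sketch (hypothetical): grabbing a 2 MiB physically contiguous
 * buffer without knowing a PFN range up front. example_grab_buffer() is made
 * up for this example.
 */
static __maybe_unused struct page *example_grab_buffer(void)
{
	/* 512 pages == 2 MiB with 4 KiB pages; prefer the local node. */
	struct page *page = alloc_contig_pages(512, GFP_KERNEL,
					       numa_mem_id(), NULL);

	/* The caller frees it with free_contig_range(page_to_pfn(page), 512). */
	return page;
}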
9478#endif /* CONFIG_CONTIG_ALLOC */
9479
9480void free_contig_range(unsigned long pfn, unsigned long nr_pages)
9481{
9482 unsigned long count = 0;
9483
9484 for (; nr_pages--; pfn++) {
9485 struct page *page = pfn_to_page(pfn);
9486
9487 count += page_count(page) != 1;
9488 __free_page(page);
9489 }
9490 WARN(count != 0, "%lu pages are still in use!\n", count);
9491}
9492EXPORT_SYMBOL(free_contig_range);
9493
9494/*
9495 * Effectively disable pcplists for the zone by setting the high limit to 0
9496 * and draining all cpus. A concurrent page freeing on another CPU that's about
9497 * to put the page on pcplist will either finish before the drain and the page
9498 * will be drained, or observe the new high limit and skip the pcplist.
9499 *
9500 * Must be paired with a call to zone_pcp_enable().
9501 */
9502void zone_pcp_disable(struct zone *zone)
9503{
9504 mutex_lock(&pcp_batch_high_lock);
9505 __zone_set_pageset_high_and_batch(zone, 0, 1);
9506 __drain_all_pages(zone, true);
9507}
9508
9509void zone_pcp_enable(struct zone *zone)
9510{
9511 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
9512 mutex_unlock(&pcp_batch_high_lock);
9513}
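
/*
 * Illustrative sketch (hypothetical): the disable/enable pairing described
 * above. example_with_pcp_disabled() is made up; a real caller is memory
 * offlining in mm/memory_hotplug.c.
 */
static void __maybe_unused example_with_pcp_disabled(struct zone *zone)
{
	zone_pcp_disable(zone);		/* no new pages parked on pcplists */

	/* ... isolate and operate on pageblocks without pcplist interference ... */

	zone_pcp_enable(zone);		/* restore the tuned high/batch values */
}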
9514
9515void zone_pcp_reset(struct zone *zone)
9516{
9517 int cpu;
9518 struct per_cpu_zonestat *pzstats;
9519
9520 if (zone->per_cpu_pageset != &boot_pageset) {
9521 for_each_online_cpu(cpu) {
9522 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
9523 drain_zonestat(zone, pzstats);
9524 }
9525 free_percpu(zone->per_cpu_pageset);
9526 zone->per_cpu_pageset = &boot_pageset;
9527 if (zone->per_cpu_zonestats != &boot_zonestats) {
9528 free_percpu(zone->per_cpu_zonestats);
9529 zone->per_cpu_zonestats = &boot_zonestats;
9530 }
9531 }
9532}
9533
9534#ifdef CONFIG_MEMORY_HOTREMOVE
9535/*
9536 * All pages in the range must be in a single zone, must not contain holes,
9537 * must span full sections, and must be isolated before calling this function.
9538 */
9539void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
9540{
9541 unsigned long pfn = start_pfn;
9542 struct page *page;
9543 struct zone *zone;
9544 unsigned int order;
9545 unsigned long flags;
9546
9547 offline_mem_sections(pfn, end_pfn);
9548 zone = page_zone(pfn_to_page(pfn));
9549 spin_lock_irqsave(&zone->lock, flags);
9550 while (pfn < end_pfn) {
9551 page = pfn_to_page(pfn);
9552 /*
9553		 * The HWPoisoned page may not be in the buddy system, and
9554 * page_count() is not 0.
9555 */
9556 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
9557 pfn++;
9558 continue;
9559 }
9560 /*
9561 * At this point all remaining PageOffline() pages have a
9562 * reference count of 0 and can simply be skipped.
9563 */
9564 if (PageOffline(page)) {
9565 BUG_ON(page_count(page));
9566 BUG_ON(PageBuddy(page));
9567 pfn++;
9568 continue;
9569 }
9570
9571 BUG_ON(page_count(page));
9572 BUG_ON(!PageBuddy(page));
9573 order = buddy_order(page);
9574 del_page_from_free_list(page, zone, order);
9575 pfn += (1 << order);
9576 }
9577 spin_unlock_irqrestore(&zone->lock, flags);
9578}
9579#endif
9580
9581/*
9582 * This function returns a stable result only if called under the zone lock.
9583 */
9584bool is_free_buddy_page(struct page *page)
9585{
9586 unsigned long pfn = page_to_pfn(page);
9587 unsigned int order;
9588
9589 for (order = 0; order < MAX_ORDER; order++) {
9590 struct page *page_head = page - (pfn & ((1 << order) - 1));
9591
9592 if (PageBuddy(page_head) &&
9593 buddy_order_unsafe(page_head) >= order)
9594 break;
9595 }
9596
9597 return order < MAX_ORDER;
9598}
9599EXPORT_SYMBOL(is_free_buddy_page);
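
/*
 * Illustrative sketch (hypothetical): getting a stable answer from
 * is_free_buddy_page() by holding the zone lock, per the comment above.
 * example_page_is_free() is made up for this example.
 */
static bool __maybe_unused example_page_is_free(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	bool free;

	spin_lock_irqsave(&zone->lock, flags);
	free = is_free_buddy_page(page);
	spin_unlock_irqrestore(&zone->lock, flags);

	return free;
}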
9600
9601#ifdef CONFIG_MEMORY_FAILURE
9602/*
9603 * Break down a higher-order page into sub-pages, and keep our target out
9604 * of the buddy allocator.
9605 */
9606static void break_down_buddy_pages(struct zone *zone, struct page *page,
9607 struct page *target, int low, int high,
9608 int migratetype)
9609{
9610 unsigned long size = 1 << high;
9611 struct page *current_buddy, *next_page;
9612
9613 while (high > low) {
9614 high--;
9615 size >>= 1;
9616
9617 if (target >= &page[size]) {
9618 next_page = page + size;
9619 current_buddy = page;
9620 } else {
9621 next_page = page;
9622 current_buddy = page + size;
9623 }
9624
9625 if (set_page_guard(zone, current_buddy, high, migratetype))
9626 continue;
9627
9628 if (current_buddy != target) {
9629 add_to_free_list(current_buddy, zone, high, migratetype);
9630 set_buddy_order(current_buddy, high);
9631 page = next_page;
9632 }
9633 }
9634}
9635
9636/*
9637 * Take a page that will be marked as poisoned off the buddy allocator.
9638 */
9639bool take_page_off_buddy(struct page *page)
9640{
9641 struct zone *zone = page_zone(page);
9642 unsigned long pfn = page_to_pfn(page);
9643 unsigned long flags;
9644 unsigned int order;
9645 bool ret = false;
9646
9647 spin_lock_irqsave(&zone->lock, flags);
9648 for (order = 0; order < MAX_ORDER; order++) {
9649 struct page *page_head = page - (pfn & ((1 << order) - 1));
9650 int page_order = buddy_order(page_head);
9651
9652 if (PageBuddy(page_head) && page_order >= order) {
9653 unsigned long pfn_head = page_to_pfn(page_head);
9654 int migratetype = get_pfnblock_migratetype(page_head,
9655 pfn_head);
9656
9657 del_page_from_free_list(page_head, zone, page_order);
9658 break_down_buddy_pages(zone, page_head, page, 0,
9659 page_order, migratetype);
9660 SetPageHWPoisonTakenOff(page);
9661 if (!is_migrate_isolate(migratetype))
9662 __mod_zone_freepage_state(zone, -1, migratetype);
9663 ret = true;
9664 break;
9665 }
9666 if (page_count(page_head) > 0)
9667 break;
9668 }
9669 spin_unlock_irqrestore(&zone->lock, flags);
9670 return ret;
9671}
9672
9673/*
9674 * Cancel takeoff done by take_page_off_buddy().
9675 */
9676bool put_page_back_buddy(struct page *page)
9677{
9678 struct zone *zone = page_zone(page);
9679 unsigned long pfn = page_to_pfn(page);
9680 unsigned long flags;
9681 int migratetype = get_pfnblock_migratetype(page, pfn);
9682 bool ret = false;
9683
9684 spin_lock_irqsave(&zone->lock, flags);
9685 if (put_page_testzero(page)) {
9686 ClearPageHWPoisonTakenOff(page);
9687 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
9688		if (TestClearPageHWPoison(page))
9689			ret = true;
9691 }
9692 spin_unlock_irqrestore(&zone->lock, flags);
9693
9694 return ret;
9695}
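
/*
 * Illustrative sketch (hypothetical): pulling a soon-to-be-poisoned page out
 * of the free lists. example_isolate_poisoned() is made up; the real logic,
 * including the reference counting that put_page_back_buddy() relies on,
 * lives in mm/memory-failure.c.
 */
static bool __maybe_unused example_isolate_poisoned(struct page *page)
{
	/* Remove the page from the buddy free lists so it cannot be handed out. */
	if (!take_page_off_buddy(page))
		return false;

	/*
	 * mm/memory-failure.c then takes a reference and marks the page;
	 * unpoisoning later calls put_page_back_buddy() to drop that last
	 * reference and release the page back into the buddy system.
	 */
	return true;
}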
9696#endif
9697
9698#ifdef CONFIG_ZONE_DMA
9699bool has_managed_dma(void)
9700{
9701 struct pglist_data *pgdat;
9702
9703 for_each_online_pgdat(pgdat) {
9704 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
9705
9706 if (managed_zone(zone))
9707 return true;
9708 }
9709 return false;
9710}
9711#endif /* CONFIG_ZONE_DMA */