Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_MMZONE_H
3#define _LINUX_MMZONE_H
4
5#ifndef __ASSEMBLY__
6#ifndef __GENERATING_BOUNDS_H
7
8#include <linux/spinlock.h>
9#include <linux/list.h>
10#include <linux/list_nulls.h>
11#include <linux/wait.h>
12#include <linux/bitops.h>
13#include <linux/cache.h>
14#include <linux/threads.h>
15#include <linux/numa.h>
16#include <linux/init.h>
17#include <linux/seqlock.h>
18#include <linux/nodemask.h>
19#include <linux/pageblock-flags.h>
20#include <linux/page-flags-layout.h>
21#include <linux/atomic.h>
22#include <linux/mm_types.h>
23#include <linux/page-flags.h>
24#include <linux/local_lock.h>
25#include <linux/zswap.h>
26#include <asm/page.h>
27
28/* Free memory management - zoned buddy allocator. */
29#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
30#define MAX_PAGE_ORDER 10
31#else
32#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
33#endif
34#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)
35
36#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
37
38#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
39
40/*
41 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
42 * costly to service. That is between allocation orders which should
43 * coalesce naturally under reasonable reclaim pressure and those which
44 * will not.
45 */
46#define PAGE_ALLOC_COSTLY_ORDER 3
47
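/*
 * Illustrative sketch (editorial example, not part of the original header):
 * how the order macros above relate. With MAX_PAGE_ORDER == 10 the largest
 * buddy block covers 1 << 10 == 1024 pages (4 MiB with 4 KiB pages), and an
 * allocation of order > PAGE_ALLOC_COSTLY_ORDER (3) is deemed costly.
 */
static inline unsigned long example_pages_for_order(unsigned int order)
{
	/* An order-N block always covers 2^N pages. */
	return 1UL << order;
}

static inline bool example_order_is_costly(unsigned int order)
{
	/* More than 8 contiguous pages is considered costly to service. */
	return order > PAGE_ALLOC_COSTLY_ORDER;
}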
48enum migratetype {
49 MIGRATE_UNMOVABLE,
50 MIGRATE_MOVABLE,
51 MIGRATE_RECLAIMABLE,
52 MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
53 MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
54#ifdef CONFIG_CMA
55 /*
56 * MIGRATE_CMA migration type is designed to mimic the way
57 * ZONE_MOVABLE works. Only movable pages can be allocated
58	 * from MIGRATE_CMA pageblocks, and the page allocator never
59	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
60	 *
61	 * The way to use it is to change the migratetype of a range of
62	 * pageblocks to MIGRATE_CMA, which can be done by the
63	 * __free_pageblock_cma() function.
64 */
65 MIGRATE_CMA,
66#endif
67#ifdef CONFIG_MEMORY_ISOLATION
68 MIGRATE_ISOLATE, /* can't allocate from here */
69#endif
70 MIGRATE_TYPES
71};
72
73/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
74extern const char * const migratetype_names[MIGRATE_TYPES];
75
76#ifdef CONFIG_CMA
77# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
78# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
79# define is_migrate_cma_folio(folio, pfn) (MIGRATE_CMA == \
80 get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
81#else
82# define is_migrate_cma(migratetype) false
83# define is_migrate_cma_page(_page) false
84# define is_migrate_cma_folio(folio, pfn) false
85#endif
86
87static inline bool is_migrate_movable(int mt)
88{
89 return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
90}
91
92/*
93 * Check whether a migratetype can be merged with another migratetype.
94 *
95 * It is only mergeable when it can fall back to other migratetypes for
96 * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c.
97 */
98static inline bool migratetype_is_mergeable(int mt)
99{
100 return mt < MIGRATE_PCPTYPES;
101}
102
103#define for_each_migratetype_order(order, type) \
104 for (order = 0; order < NR_PAGE_ORDERS; order++) \
105 for (type = 0; type < MIGRATE_TYPES; type++)
106
107extern int page_group_by_mobility_disabled;
108
109#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
110
111#define get_pageblock_migratetype(page) \
112 get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
113
114#define folio_migratetype(folio) \
115 get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
116 MIGRATETYPE_MASK)
117struct free_area {
118 struct list_head free_list[MIGRATE_TYPES];
119 unsigned long nr_free;
120};
121
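/*
 * Illustrative sketch (editorial example, not part of the original header):
 * how a per-order free_area[] array (such as the zone->free_area member
 * declared further below) is typically set up and summed. nr_free counts
 * free blocks of a given order across all migratetypes, so each block
 * contributes 1 << order pages.
 */
static inline void example_init_free_area(struct free_area *area)
{
	unsigned int order;
	int type;

	for_each_migratetype_order(order, type) {
		INIT_LIST_HEAD(&area[order].free_list[type]);
		area[order].nr_free = 0;
	}
}

static inline unsigned long example_sum_free_pages(const struct free_area *area)
{
	unsigned int order;
	unsigned long nr_pages = 0;

	for (order = 0; order < NR_PAGE_ORDERS; order++)
		nr_pages += area[order].nr_free << order;

	return nr_pages;
}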
122struct pglist_data;
123
124#ifdef CONFIG_NUMA
125enum numa_stat_item {
126 NUMA_HIT, /* allocated in intended node */
127 NUMA_MISS, /* allocated in non intended node */
128 NUMA_FOREIGN, /* was intended here, hit elsewhere */
129 NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
130 NUMA_LOCAL, /* allocation from local node */
131 NUMA_OTHER, /* allocation from other node */
132 NR_VM_NUMA_EVENT_ITEMS
133};
134#else
135#define NR_VM_NUMA_EVENT_ITEMS 0
136#endif
137
138enum zone_stat_item {
139 /* First 128 byte cacheline (assuming 64 bit words) */
140 NR_FREE_PAGES,
141 NR_FREE_PAGES_BLOCKS,
142 NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
143 NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
144 NR_ZONE_ACTIVE_ANON,
145 NR_ZONE_INACTIVE_FILE,
146 NR_ZONE_ACTIVE_FILE,
147 NR_ZONE_UNEVICTABLE,
148 NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
149 NR_MLOCK, /* mlock()ed pages found and moved off LRU */
150 /* Second 128 byte cacheline */
151 NR_BOUNCE,
152#if IS_ENABLED(CONFIG_ZSMALLOC)
153 NR_ZSPAGES, /* allocated in zsmalloc */
154#endif
155 NR_FREE_CMA_PAGES,
156#ifdef CONFIG_UNACCEPTED_MEMORY
157 NR_UNACCEPTED,
158#endif
159 NR_VM_ZONE_STAT_ITEMS };
160
161enum node_stat_item {
162 NR_LRU_BASE,
163 NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
164 NR_ACTIVE_ANON, /* " " " " " */
165 NR_INACTIVE_FILE, /* " " " " " */
166 NR_ACTIVE_FILE, /* " " " " " */
167 NR_UNEVICTABLE, /* " " " " " */
168 NR_SLAB_RECLAIMABLE_B,
169 NR_SLAB_UNRECLAIMABLE_B,
170 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
171 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
172 WORKINGSET_NODES,
173 WORKINGSET_REFAULT_BASE,
174 WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
175 WORKINGSET_REFAULT_FILE,
176 WORKINGSET_ACTIVATE_BASE,
177 WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
178 WORKINGSET_ACTIVATE_FILE,
179 WORKINGSET_RESTORE_BASE,
180 WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
181 WORKINGSET_RESTORE_FILE,
182 WORKINGSET_NODERECLAIM,
183 NR_ANON_MAPPED, /* Mapped anonymous pages */
184 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
185 only modified from process context */
186 NR_FILE_PAGES,
187 NR_FILE_DIRTY,
188 NR_WRITEBACK,
189 NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
190	NR_SHMEM,		/* shmem pages (including tmpfs/GEM pages) */
191 NR_SHMEM_THPS,
192 NR_SHMEM_PMDMAPPED,
193 NR_FILE_THPS,
194 NR_FILE_PMDMAPPED,
195 NR_ANON_THPS,
196 NR_VMSCAN_WRITE,
197 NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
198 NR_DIRTIED, /* page dirtyings since bootup */
199 NR_WRITTEN, /* page writings since bootup */
200 NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */
201 NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
202 NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
203 NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
204 NR_KERNEL_STACK_KB, /* measured in KiB */
205#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
206 NR_KERNEL_SCS_KB, /* measured in KiB */
207#endif
208 NR_PAGETABLE, /* used for pagetables */
209 NR_SECONDARY_PAGETABLE, /* secondary pagetables, KVM & IOMMU */
210#ifdef CONFIG_IOMMU_SUPPORT
211 NR_IOMMU_PAGES, /* # of pages allocated by IOMMU */
212#endif
213#ifdef CONFIG_SWAP
214 NR_SWAPCACHE,
215#endif
216#ifdef CONFIG_NUMA_BALANCING
217	PGPROMOTE_SUCCESS,	/* pages promoted successfully */
218 PGPROMOTE_CANDIDATE, /* candidate pages to promote */
219#endif
220 /* PGDEMOTE_*: pages demoted */
221 PGDEMOTE_KSWAPD,
222 PGDEMOTE_DIRECT,
223 PGDEMOTE_KHUGEPAGED,
224 PGDEMOTE_PROACTIVE,
225#ifdef CONFIG_HUGETLB_PAGE
226 NR_HUGETLB,
227#endif
228 NR_BALLOON_PAGES,
229 NR_VM_NODE_STAT_ITEMS
230};
231
232/*
233 * Returns true if the item should be printed in THPs: /proc/vmstat
234 * currently prints the number of anon, file and shmem THPs, but the
235 * item is charged in pages.
236 */
237static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
238{
239 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
240 return false;
241
242 return item == NR_ANON_THPS ||
243 item == NR_FILE_THPS ||
244 item == NR_SHMEM_THPS ||
245 item == NR_SHMEM_PMDMAPPED ||
246 item == NR_FILE_PMDMAPPED;
247}
248
249/*
250 * Returns true if the value is measured in bytes (most vmstat values are
251 * measured in pages). This defines the API part, the internal representation
252 * might be different.
253 */
254static __always_inline bool vmstat_item_in_bytes(int idx)
255{
256 /*
257 * Global and per-node slab counters track slab pages.
258 * It's expected that changes are multiples of PAGE_SIZE.
259 * Internally values are stored in pages.
260 *
261 * Per-memcg and per-lruvec counters track memory, consumed
262 * by individual slab objects. These counters are actually
263 * byte-precise.
264 */
265 return (idx == NR_SLAB_RECLAIMABLE_B ||
266 idx == NR_SLAB_UNRECLAIMABLE_B);
267}
268
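/*
 * Illustrative sketch (editorial example, not part of the original header):
 * consumers that want pages out of a stat item reported in bytes (the slab
 * counters above) divide by PAGE_SIZE; every other item is already counted
 * in pages. PAGE_SHIFT comes from <asm/page.h>, included above.
 */
static inline unsigned long example_node_stat_to_pages(int idx, unsigned long val)
{
	if (vmstat_item_in_bytes(idx))
		return val >> PAGE_SHIFT;	/* bytes -> pages */

	return val;				/* already in pages */
}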
269/*
270 * We do arithmetic on the LRU lists in various places in the code,
271 * so it is important to keep the active lists LRU_ACTIVE higher in
272 * the array than the corresponding inactive lists, and to keep
273 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
274 *
275 * This has to be kept in sync with the statistics in zone_stat_item
276 * above and the descriptions in vmstat_text in mm/vmstat.c
277 */
278#define LRU_BASE 0
279#define LRU_ACTIVE 1
280#define LRU_FILE 2
281
282enum lru_list {
283 LRU_INACTIVE_ANON = LRU_BASE,
284 LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
285 LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
286 LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
287 LRU_UNEVICTABLE,
288 NR_LRU_LISTS
289};
290
291enum vmscan_throttle_state {
292 VMSCAN_THROTTLE_WRITEBACK,
293 VMSCAN_THROTTLE_ISOLATED,
294 VMSCAN_THROTTLE_NOPROGRESS,
295 VMSCAN_THROTTLE_CONGESTED,
296 NR_VMSCAN_THROTTLE,
297};
298
299#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
300
301#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
302
303static inline bool is_file_lru(enum lru_list lru)
304{
305 return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
306}
307
308static inline bool is_active_lru(enum lru_list lru)
309{
310 return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
311}
312
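/*
 * Illustrative sketch (editorial example, not part of the original header):
 * the LRU index arithmetic described above. Adding LRU_ACTIVE maps an
 * inactive list to its active counterpart, and adding LRU_FILE maps an anon
 * list to the corresponding file list.
 */
static inline enum lru_list example_active_counterpart(enum lru_list lru)
{
	/* Only meaningful for LRU_INACTIVE_ANON and LRU_INACTIVE_FILE. */
	return lru + LRU_ACTIVE;
}

static inline enum lru_list example_file_counterpart(enum lru_list lru)
{
	/* Only meaningful for LRU_INACTIVE_ANON and LRU_ACTIVE_ANON. */
	return lru + LRU_FILE;
}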
313#define WORKINGSET_ANON 0
314#define WORKINGSET_FILE 1
315#define ANON_AND_FILE 2
316
317enum lruvec_flags {
318 /*
319 * An lruvec has many dirty pages backed by a congested BDI:
320 * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim.
321 * It can be cleared by cgroup reclaim or kswapd.
322 * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim.
323 * It can only be cleared by kswapd.
324 *
325 * Essentially, kswapd can unthrottle an lruvec throttled by cgroup
326 * reclaim, but not vice versa. This only applies to the root cgroup.
327	 * The goal is to prevent cgroup reclaim on the root cgroup (e.g.
328	 * memory.reclaim) from unthrottling an unbalanced node (that was
329	 * throttled by kswapd).
330 */
331 LRUVEC_CGROUP_CONGESTED,
332 LRUVEC_NODE_CONGESTED,
333};
334
335#endif /* !__GENERATING_BOUNDS_H */
336
337/*
338 * Evictable folios are divided into multiple generations. The youngest and the
339 * oldest generation numbers, max_seq and min_seq, are monotonically increasing.
340 * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
341 * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
342 * corresponding generation. The gen counter in folio->flags stores gen+1 while
343 * a folio is on one of lrugen->folios[]. Otherwise it stores 0.
344 *
345 * After a folio is faulted in, the aging needs to check the accessed bit at
346 * least twice before handing this folio over to the eviction. The first check
347 * clears the accessed bit from the initial fault; the second check makes sure
348 * this folio hasn't been used since then. This process, AKA second chance,
349 * requires a minimum of two generations, hence MIN_NR_GENS. And to maintain ABI
350 * compatibility with the active/inactive LRU, e.g., /proc/vmstat, these two
351 * generations are considered active; the rest of generations, if they exist,
352 * are considered inactive. See lru_gen_is_active().
353 *
354 * PG_active is always cleared while a folio is on one of lrugen->folios[] so
355 * that the sliding window need not worry about it. And it's set again when
356 * a folio considered active is isolated for non-reclaiming purposes, e.g.,
357 * migration. See lru_gen_add_folio() and lru_gen_del_folio().
358 *
359 * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
360 * number of categories of the active/inactive LRU when keeping track of
361 * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
362 * in folio->flags, masked by LRU_GEN_MASK.
363 */
364#define MIN_NR_GENS 2U
365#define MAX_NR_GENS 4U
366
367/*
368 * Each generation is divided into multiple tiers. A folio accessed N times
369 * through file descriptors is in tier order_base_2(N). A folio in the first
370 * tier (N=0,1) is marked by PG_referenced unless it was faulted in through page
371 * tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by
372 * PG_workingset. A folio in any other tier (1<N<5) between the first and last
373 * is marked by additional bits of LRU_REFS_WIDTH in folio->flags.
374 *
375 * In contrast to moving across generations which requires the LRU lock, moving
376 * across tiers only involves atomic operations on folio->flags and therefore
377 * has a negligible cost in the buffered access path. In the eviction path,
378 * comparisons of refaulted/(evicted+protected) from the first tier and the rest
379 * infer whether folios accessed multiple times through file descriptors are
380 * statistically hot and thus worth protecting.
381 *
382 * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
383 * number of categories of the active/inactive LRU when keeping track of
384 * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
385 * folio->flags, masked by LRU_REFS_MASK.
386 */
387#define MAX_NR_TIERS 4U
388
389#ifndef __GENERATING_BOUNDS_H
390
391#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
392#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
393
394/*
395 * For folios accessed multiple times through file descriptors,
396 * lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags
397 * after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its
398 * bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily
399 * promoted into the second oldest generation in the eviction path. And when
400 * folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that
401 * lru_gen_inc_refs() can start over. Note that for this case, LRU_REFS_MASK is
402 * only valid when PG_referenced is set.
403 *
404 * For folios accessed multiple times through page tables, folio_update_gen()
405 * from a page table walk or lru_gen_set_refs() from a rmap walk sets
406 * PG_referenced after the accessed bit is cleared for the first time.
407 * Thereafter, those two paths set PG_workingset and promote folios to the
408 * youngest generation. Like folio_inc_gen(), folio_update_gen() also clears
409 * PG_referenced. Note that for this case, LRU_REFS_MASK is not used.
410 *
411 * For both cases above, after PG_workingset is set on a folio, it remains until
412 * this folio is either reclaimed, or "deactivated" by lru_gen_clear_refs(). It
413 * can be set again if lru_gen_test_recent() returns true upon a refault.
414 */
415#define LRU_REFS_FLAGS (LRU_REFS_MASK | BIT(PG_referenced))
416
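/*
 * Illustrative sketch (editorial example, not part of the original header):
 * how the generation and tier numbers described above are derived. A
 * generation number (max_seq/min_seq) indexes lrugen->folios[] modulo
 * MAX_NR_GENS, and a folio accessed N times through file descriptors sits in
 * tier order_base_2(N), capped at MAX_NR_TIERS - 1. order_base_2() is
 * assumed to be available from <linux/log2.h>.
 */
static inline unsigned long example_lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

static inline unsigned int example_lru_tier_from_refs(unsigned int refs)
{
	/* N = 0,1 -> tier 0; 2 -> 1; 3,4 -> 2; 5..8 -> 3. */
	unsigned int tier = refs > 1 ? order_base_2(refs) : 0;

	return tier < MAX_NR_TIERS ? tier : MAX_NR_TIERS - 1;
}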
417struct lruvec;
418struct page_vma_mapped_walk;
419
420#ifdef CONFIG_LRU_GEN
421
422enum {
423 LRU_GEN_ANON,
424 LRU_GEN_FILE,
425};
426
427enum {
428 LRU_GEN_CORE,
429 LRU_GEN_MM_WALK,
430 LRU_GEN_NONLEAF_YOUNG,
431 NR_LRU_GEN_CAPS
432};
433
434#define MIN_LRU_BATCH BITS_PER_LONG
435#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)
436
437/* whether to keep historical stats from evicted generations */
438#ifdef CONFIG_LRU_GEN_STATS
439#define NR_HIST_GENS MAX_NR_GENS
440#else
441#define NR_HIST_GENS 1U
442#endif
443
444/*
445 * The youngest generation number is stored in max_seq for both anon and file
446 * types as they are aged on an equal footing. The oldest generation numbers are
447 * stored in min_seq[] separately for anon and file types so that they can be
448 * incremented independently. Ideally min_seq[] are kept in sync when both anon
449 * and file types are evictable. However, to adapt to situations like extreme
450 * swappiness, they are allowed to be out of sync by at most
451 * MAX_NR_GENS-MIN_NR_GENS-1.
452 *
453 * The number of pages in each generation is eventually consistent and therefore
454 * can be transiently negative when reset_batch_size() is pending.
455 */
456struct lru_gen_folio {
457 /* the aging increments the youngest generation number */
458 unsigned long max_seq;
459 /* the eviction increments the oldest generation numbers */
460 unsigned long min_seq[ANON_AND_FILE];
461 /* the birth time of each generation in jiffies */
462 unsigned long timestamps[MAX_NR_GENS];
463 /* the multi-gen LRU lists, lazily sorted on eviction */
464 struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
465 /* the multi-gen LRU sizes, eventually consistent */
466 long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
467 /* the exponential moving average of refaulted */
468 unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
469 /* the exponential moving average of evicted+protected */
470 unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
471 /* can only be modified under the LRU lock */
472 unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
473 /* can be modified without holding the LRU lock */
474 atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
475 atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
476 /* whether the multi-gen LRU is enabled */
477 bool enabled;
478 /* the memcg generation this lru_gen_folio belongs to */
479 u8 gen;
480 /* the list segment this lru_gen_folio belongs to */
481 u8 seg;
482 /* per-node lru_gen_folio list for global reclaim */
483 struct hlist_nulls_node list;
484};
485
486enum {
487 MM_LEAF_TOTAL, /* total leaf entries */
488 MM_LEAF_YOUNG, /* young leaf entries */
489 MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
490 MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
491 NR_MM_STATS
492};
493
494/* double-buffering Bloom filters */
495#define NR_BLOOM_FILTERS 2
496
497struct lru_gen_mm_state {
498 /* synced with max_seq after each iteration */
499 unsigned long seq;
500 /* where the current iteration continues after */
501 struct list_head *head;
502 /* where the last iteration ended before */
503 struct list_head *tail;
504 /* Bloom filters flip after each iteration */
505 unsigned long *filters[NR_BLOOM_FILTERS];
506 /* the mm stats for debugging */
507 unsigned long stats[NR_HIST_GENS][NR_MM_STATS];
508};
509
510struct lru_gen_mm_walk {
511 /* the lruvec under reclaim */
512 struct lruvec *lruvec;
513 /* max_seq from lru_gen_folio: can be out of date */
514 unsigned long seq;
515 /* the next address within an mm to scan */
516 unsigned long next_addr;
517 /* to batch promoted pages */
518 int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
519 /* to batch the mm stats */
520 int mm_stats[NR_MM_STATS];
521 /* total batched items */
522 int batched;
523 int swappiness;
524 bool force_scan;
525};
526
527/*
528 * For each node, memcgs are divided into two generations: the old and the
529 * young. For each generation, memcgs are randomly sharded into multiple bins
530 * to improve scalability. For each bin, the hlist_nulls is virtually divided
531 * into three segments: the head, the tail and the default.
532 *
533 * An onlining memcg is added to the tail of a random bin in the old generation.
534 * The eviction starts at the head of a random bin in the old generation. The
535 * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes
536 * the old generation, is incremented when all its bins become empty.
537 *
538 * There are four operations:
539 * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
540 * current generation (old or young) and updates its "seg" to "head";
541 * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
542 * current generation (old or young) and updates its "seg" to "tail";
543 * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
544 * generation, updates its "gen" to "old" and resets its "seg" to "default";
545 * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
546 * young generation, updates its "gen" to "young" and resets its "seg" to
547 * "default".
548 *
549 * The events that trigger the above operations are:
550 * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
551 * 2. The first attempt to reclaim a memcg below low, which triggers
552 * MEMCG_LRU_TAIL;
553 * 3. The first attempt to reclaim a memcg offlined or below reclaimable size
554 * threshold, which triggers MEMCG_LRU_TAIL;
555 * 4. The second attempt to reclaim a memcg offlined or below reclaimable size
556 * threshold, which triggers MEMCG_LRU_YOUNG;
557 * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
558 * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
559 * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
560 *
561 * Notes:
562 * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
563 * of their max_seq counters ensures the eventual fairness to all eligible
564 * memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
565 * 2. There are only two valid generations: old (seq) and young (seq+1).
566 * MEMCG_NR_GENS is set to three so that when reading the generation counter
567 *    locklessly, a stale value (seq-1) does not wrap around to young.
568 */
569#define MEMCG_NR_GENS 3
570#define MEMCG_NR_BINS 8
571
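/*
 * Illustrative sketch (editorial example, not part of the original header):
 * the per-node memcg generation counter selects a row of the fifo[][] array
 * below modulo MEMCG_NR_GENS, and a memcg lands in one of MEMCG_NR_BINS
 * randomly chosen bins. get_random_u32_below() is assumed to be available
 * from <linux/random.h>.
 */
static inline unsigned long example_memcg_old_gen(unsigned long seq)
{
	return seq % MEMCG_NR_GENS;	/* the old generation */
}

static inline unsigned int example_memcg_random_bin(void)
{
	return get_random_u32_below(MEMCG_NR_BINS);
}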
572struct lru_gen_memcg {
573 /* the per-node memcg generation counter */
574 unsigned long seq;
575 /* each memcg has one lru_gen_folio per node */
576 unsigned long nr_memcgs[MEMCG_NR_GENS];
577 /* per-node lru_gen_folio list for global reclaim */
578 struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS];
579 /* protects the above */
580 spinlock_t lock;
581};
582
583void lru_gen_init_pgdat(struct pglist_data *pgdat);
584void lru_gen_init_lruvec(struct lruvec *lruvec);
585bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
586
587void lru_gen_init_memcg(struct mem_cgroup *memcg);
588void lru_gen_exit_memcg(struct mem_cgroup *memcg);
589void lru_gen_online_memcg(struct mem_cgroup *memcg);
590void lru_gen_offline_memcg(struct mem_cgroup *memcg);
591void lru_gen_release_memcg(struct mem_cgroup *memcg);
592void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
593
594#else /* !CONFIG_LRU_GEN */
595
596static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
597{
598}
599
600static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
601{
602}
603
604static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
605{
606 return false;
607}
608
609static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
610{
611}
612
613static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
614{
615}
616
617static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
618{
619}
620
621static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
622{
623}
624
625static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
626{
627}
628
629static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
630{
631}
632
633#endif /* CONFIG_LRU_GEN */
634
635struct lruvec {
636 struct list_head lists[NR_LRU_LISTS];
637 /* per lruvec lru_lock for memcg */
638 spinlock_t lru_lock;
639 /*
640 * These track the cost of reclaiming one LRU - file or anon -
641 * over the other. As the observed cost of reclaiming one LRU
642 * increases, the reclaim scan balance tips toward the other.
643 */
644 unsigned long anon_cost;
645 unsigned long file_cost;
646 /* Non-resident age, driven by LRU movement */
647 atomic_long_t nonresident_age;
648 /* Refaults at the time of last reclaim cycle */
649 unsigned long refaults[ANON_AND_FILE];
650 /* Various lruvec state flags (enum lruvec_flags) */
651 unsigned long flags;
652#ifdef CONFIG_LRU_GEN
653 /* evictable pages divided into generations */
654 struct lru_gen_folio lrugen;
655#ifdef CONFIG_LRU_GEN_WALKS_MMU
656 /* to concurrently iterate lru_gen_mm_list */
657 struct lru_gen_mm_state mm_state;
658#endif
659#endif /* CONFIG_LRU_GEN */
660#ifdef CONFIG_MEMCG
661 struct pglist_data *pgdat;
662#endif
663 struct zswap_lruvec_state zswap_lruvec_state;
664};
665
666/* Isolate for asynchronous migration */
667#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
668/* Isolate unevictable pages */
669#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)
670
671/* LRU Isolation modes. */
672typedef unsigned __bitwise isolate_mode_t;
673
674enum zone_watermarks {
675 WMARK_MIN,
676 WMARK_LOW,
677 WMARK_HIGH,
678 WMARK_PROMO,
679 NR_WMARK
680};
681
682/*
683 * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
684 * are added for THP. One THP PCP list is used by movable allocations
685 * (__GFP_MOVABLE), and the other by unmovable and reclaimable allocations.
686 */
687#ifdef CONFIG_TRANSPARENT_HUGEPAGE
688#define NR_PCP_THP 2
689#else
690#define NR_PCP_THP 0
691#endif
692#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
693#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
694
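/*
 * Illustrative sketch (editorial example, not part of the original header):
 * how a (migratetype, order) pair maps onto one of the NR_LOWORDER_PCP_LISTS
 * low-order PCP lists. The authoritative mapping, including the extra THP
 * lists appended after these, lives in mm/page_alloc.c (order_to_pindex()).
 */
static inline unsigned int example_pcp_list_index(int migratetype,
						  unsigned int order)
{
	/* Only orders up to PAGE_ALLOC_COSTLY_ORDER have per-type lists. */
	return order * MIGRATE_PCPTYPES + migratetype;
}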
695/*
696 * Flags used in pcp->flags field.
697 *
698 * PCPF_PREV_FREE_HIGH_ORDER: a high-order page was freed in the
699 * previous page freeing. Used to avoid draining the PCP for an
700 * occasional high-order page freeing.
701 *
702 * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before
703 * draining it for consecutive high-order page freeing without
704 * allocation, if the CPU's data cache slice is large enough. This
705 * reduces zone lock contention and keeps cache-hot pages reusable.
706 */
707#define PCPF_PREV_FREE_HIGH_ORDER BIT(0)
708#define PCPF_FREE_HIGH_BATCH BIT(1)
709
710struct per_cpu_pages {
711 spinlock_t lock; /* Protects lists field */
712 int count; /* number of pages in the list */
713 int high; /* high watermark, emptying needed */
714 int high_min; /* min high watermark */
715 int high_max; /* max high watermark */
716 int batch; /* chunk size for buddy add/remove */
717 u8 flags; /* protected by pcp->lock */
718 u8 alloc_factor; /* batch scaling factor during allocate */
719#ifdef CONFIG_NUMA
720 u8 expire; /* When 0, remote pagesets are drained */
721#endif
722 short free_count; /* consecutive free count */
723
724 /* Lists of pages, one per migrate type stored on the pcp-lists */
725 struct list_head lists[NR_PCP_LISTS];
726} ____cacheline_aligned_in_smp;
727
728struct per_cpu_zonestat {
729#ifdef CONFIG_SMP
730 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
731 s8 stat_threshold;
732#endif
733#ifdef CONFIG_NUMA
734 /*
735 * Low priority inaccurate counters that are only folded
736 * on demand. Use a large type to avoid the overhead of
737 * folding during refresh_cpu_vm_stats.
738 */
739 unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
740#endif
741};
742
743struct per_cpu_nodestat {
744 s8 stat_threshold;
745 s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
746};
747
748#endif /* !__GENERATING_BOUNDS_H */
749
750enum zone_type {
751 /*
752 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
753 * to DMA to all of the addressable memory (ZONE_NORMAL).
754 * On architectures where this area covers the whole 32 bit address
755 * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
756 * DMA addressing constraints. This distinction is important as a 32bit
757 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
758 * platforms may need both zones as they support peripherals with
759 * different DMA addressing limitations.
760 */
761#ifdef CONFIG_ZONE_DMA
762 ZONE_DMA,
763#endif
764#ifdef CONFIG_ZONE_DMA32
765 ZONE_DMA32,
766#endif
767 /*
768 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
769 * performed on pages in ZONE_NORMAL if the DMA devices support
770 * transfers to all addressable memory.
771 */
772 ZONE_NORMAL,
773#ifdef CONFIG_HIGHMEM
774 /*
775 * A memory area that is only addressable by the kernel through
776 * mapping portions into its own address space. This is for example
777 * used by i386 to allow the kernel to address the memory beyond
778 * 900MB. The kernel will set up special mappings (page
779 * table entries on i386) for each page that the kernel needs to
780 * access.
781 */
782 ZONE_HIGHMEM,
783#endif
784 /*
785 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
786 * movable pages with few exceptional cases described below. Main use
787 * cases for ZONE_MOVABLE are to make memory offlining/unplug more
788 * likely to succeed, and to locally limit unmovable allocations - e.g.,
789 * to increase the number of THP/huge pages. Notable special cases are:
790 *
791 * 1. Pinned pages: (long-term) pinning of movable pages might
792 * essentially turn such pages unmovable. Therefore, we do not allow
793 * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
794 * faulted, they come from the right zone right away. However, it is
795	 * still possible that the address space already has pages in
796	 * ZONE_MOVABLE at the time when pages are pinned (i.e. the user has
797	 * touched that memory before pinning). In such a case we migrate them
798	 * to a different zone. If migration fails, pinning fails.
799 * 2. memblock allocations: kernelcore/movablecore setups might create
800 * situations where ZONE_MOVABLE contains unmovable allocations
801 * after boot. Memory offlining and allocations fail early.
802 * 3. Memory holes: kernelcore/movablecore setups might create very rare
803 * situations where ZONE_MOVABLE contains memory holes after boot,
804 * for example, if we have sections that are only partially
805 * populated. Memory offlining and allocations fail early.
806 * 4. PG_hwpoison pages: while poisoned pages can be skipped during
807 * memory offlining, such pages cannot be allocated.
808 * 5. Unmovable PG_offline pages: in paravirtualized environments,
809 * hotplugged memory blocks might only partially be managed by the
810 * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
811	 * parts not managed by the buddy are unmovable PG_offline pages. In
812	 * some cases (virtio-mem), such pages can be skipped during
813	 * memory offlining; however, they cannot be moved/allocated. These
814 * techniques might use alloc_contig_range() to hide previously
815 * exposed pages from the buddy again (e.g., to implement some sort
816 * of memory unplug in virtio-mem).
817	 * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create
818	 * situations where ZERO_PAGE(0), which is allocated differently
819	 * on different platforms, ends up in a movable zone. ZERO_PAGE(0)
820	 * cannot be migrated.
821 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
822 * memory to the MOVABLE zone, the vmemmap pages are also placed in
823 * such zone. Such pages cannot be really moved around as they are
824 * self-stored in the range, but they are treated as movable when
825 * the range they describe is about to be offlined.
826 *
827 * In general, no unmovable allocations that degrade memory offlining
828 * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
829 * have to expect that migrating pages in ZONE_MOVABLE can fail (even
830 * if has_unmovable_pages() states that there are no unmovable pages,
831 * there can be false negatives).
832 */
833 ZONE_MOVABLE,
834#ifdef CONFIG_ZONE_DEVICE
835 ZONE_DEVICE,
836#endif
837 __MAX_NR_ZONES
838
839};
840
841#ifndef __GENERATING_BOUNDS_H
842
843#define ASYNC_AND_SYNC 2
844
845struct zone {
846 /* Read-mostly fields */
847
848 /* zone watermarks, access with *_wmark_pages(zone) macros */
849 unsigned long _watermark[NR_WMARK];
850 unsigned long watermark_boost;
851
852 unsigned long nr_reserved_highatomic;
853 unsigned long nr_free_highatomic;
854
855 /*
856 * We don't know if the memory that we're going to allocate will be
857	 * freeable and/or whether it will be released eventually, so to avoid
858	 * totally wasting several GB of RAM we must reserve some of the lower
859	 * zone memory (otherwise we risk running OOM on the lower zones despite
860	 * there being tons of freeable RAM in the higher zones). This array is
861 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
862 * changes.
863 */
864 long lowmem_reserve[MAX_NR_ZONES];
865
866#ifdef CONFIG_NUMA
867 int node;
868#endif
869 struct pglist_data *zone_pgdat;
870 struct per_cpu_pages __percpu *per_cpu_pageset;
871 struct per_cpu_zonestat __percpu *per_cpu_zonestats;
872 /*
873 * the high and batch values are copied to individual pagesets for
874 * faster access
875 */
876 int pageset_high_min;
877 int pageset_high_max;
878 int pageset_batch;
879
880#ifndef CONFIG_SPARSEMEM
881 /*
882 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
883 * In SPARSEMEM, this map is stored in struct mem_section
884 */
885 unsigned long *pageblock_flags;
886#endif /* CONFIG_SPARSEMEM */
887
888 /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
889 unsigned long zone_start_pfn;
890
891 /*
892 * spanned_pages is the total pages spanned by the zone, including
893 * holes, which is calculated as:
894 * spanned_pages = zone_end_pfn - zone_start_pfn;
895 *
896 * present_pages is physical pages existing within the zone, which
897 * is calculated as:
898 * present_pages = spanned_pages - absent_pages(pages in holes);
899 *
900 * present_early_pages is present pages existing within the zone
901 * located on memory available since early boot, excluding hotplugged
902 * memory.
903 *
904 * managed_pages is present pages managed by the buddy system, which
905 * is calculated as (reserved_pages includes pages allocated by the
906 * bootmem allocator):
907 * managed_pages = present_pages - reserved_pages;
908 *
909 * cma pages is present pages that are assigned for CMA use
910 * (MIGRATE_CMA).
911 *
912 * So present_pages may be used by memory hotplug or memory power
913 * management logic to figure out unmanaged pages by checking
914 * (present_pages - managed_pages). And managed_pages should be used
915 * by page allocator and vm scanner to calculate all kinds of watermarks
916 * and thresholds.
917 *
918 * Locking rules:
919 *
920 * zone_start_pfn and spanned_pages are protected by span_seqlock.
921 * It is a seqlock because it has to be read outside of zone->lock,
922 * and it is done in the main allocator path. But, it is written
923 * quite infrequently.
924 *
925 * The span_seq lock is declared along with zone->lock because it is
926 * frequently read in proximity to zone->lock. It's good to
927 * give them a chance of being in the same cacheline.
928 *
929 * Write access to present_pages at runtime should be protected by
930	 * mem_hotplug_begin/done(). Any reader who can't tolerate drift of
931 * present_pages should use get_online_mems() to get a stable value.
932 */
933 atomic_long_t managed_pages;
934 unsigned long spanned_pages;
935 unsigned long present_pages;
936#if defined(CONFIG_MEMORY_HOTPLUG)
937 unsigned long present_early_pages;
938#endif
939#ifdef CONFIG_CMA
940 unsigned long cma_pages;
941#endif
942
943 const char *name;
944
945#ifdef CONFIG_MEMORY_ISOLATION
946 /*
947	 * Number of isolated pageblocks. It is used to solve the incorrect
948	 * freepage counting problem caused by racy retrieval of a pageblock's
949	 * migratetype. Protected by zone->lock.
950 */
951 unsigned long nr_isolate_pageblock;
952#endif
953
954#ifdef CONFIG_MEMORY_HOTPLUG
955 /* see spanned/present_pages for more description */
956 seqlock_t span_seqlock;
957#endif
958
959 int initialized;
960
961 /* Write-intensive fields used from the page allocator */
962 CACHELINE_PADDING(_pad1_);
963
964 /* free areas of different sizes */
965 struct free_area free_area[NR_PAGE_ORDERS];
966
967#ifdef CONFIG_UNACCEPTED_MEMORY
968 /* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
969 struct list_head unaccepted_pages;
970
971 /* To be called once the last page in the zone is accepted */
972 struct work_struct unaccepted_cleanup;
973#endif
974
975 /* zone flags, see below */
976 unsigned long flags;
977
978 /* Primarily protects free_area */
979 spinlock_t lock;
980
981 /* Pages to be freed when next trylock succeeds */
982 struct llist_head trylock_free_pages;
983
984 /* Write-intensive fields used by compaction and vmstats. */
985 CACHELINE_PADDING(_pad2_);
986
987 /*
988 * When free pages are below this point, additional steps are taken
989 * when reading the number of free pages to avoid per-cpu counter
990	 * drift that would allow watermarks to be breached
991 */
992 unsigned long percpu_drift_mark;
993
994#if defined CONFIG_COMPACTION || defined CONFIG_CMA
995 /* pfn where compaction free scanner should start */
996 unsigned long compact_cached_free_pfn;
997 /* pfn where compaction migration scanner should start */
998 unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC];
999 unsigned long compact_init_migrate_pfn;
1000 unsigned long compact_init_free_pfn;
1001#endif
1002
1003#ifdef CONFIG_COMPACTION
1004 /*
1005 * On compaction failure, 1<<compact_defer_shift compactions
1006 * are skipped before trying again. The number attempted since
1007 * last failure is tracked with compact_considered.
1008 * compact_order_failed is the minimum compaction failed order.
1009 */
1010 unsigned int compact_considered;
1011 unsigned int compact_defer_shift;
1012 int compact_order_failed;
1013#endif
1014
1015#if defined CONFIG_COMPACTION || defined CONFIG_CMA
1016 /* Set to true when the PG_migrate_skip bits should be cleared */
1017 bool compact_blockskip_flush;
1018#endif
1019
1020 bool contiguous;
1021
1022 CACHELINE_PADDING(_pad3_);
1023 /* Zone statistics */
1024 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
1025 atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
1026} ____cacheline_internodealigned_in_smp;
1027
1028enum pgdat_flags {
1029 PGDAT_DIRTY, /* reclaim scanning has recently found
1030 * many dirty file pages at the tail
1031 * of the LRU.
1032 */
1033 PGDAT_WRITEBACK, /* reclaim scanning has recently found
1034 * many pages under writeback
1035 */
1036 PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
1037};
1038
1039enum zone_flags {
1040 ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
1041 * Cleared when kswapd is woken.
1042 */
1043 ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
1044 ZONE_BELOW_HIGH, /* zone is below high watermark. */
1045};
1046
1047static inline unsigned long wmark_pages(const struct zone *z,
1048 enum zone_watermarks w)
1049{
1050 return z->_watermark[w] + z->watermark_boost;
1051}
1052
1053static inline unsigned long min_wmark_pages(const struct zone *z)
1054{
1055 return wmark_pages(z, WMARK_MIN);
1056}
1057
1058static inline unsigned long low_wmark_pages(const struct zone *z)
1059{
1060 return wmark_pages(z, WMARK_LOW);
1061}
1062
1063static inline unsigned long high_wmark_pages(const struct zone *z)
1064{
1065 return wmark_pages(z, WMARK_HIGH);
1066}
1067
1068static inline unsigned long promo_wmark_pages(const struct zone *z)
1069{
1070 return wmark_pages(z, WMARK_PROMO);
1071}
1072
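/*
 * Illustrative sketch (editorial example, not part of the original header):
 * a much simplified watermark check in the spirit of zone_watermark_ok()
 * (declared further below), ignoring lowmem_reserve, allocation flags and
 * per-order requirements; the caller supplies the current free page count.
 */
static inline bool example_zone_above_low_wmark(const struct zone *z,
						unsigned long free_pages)
{
	return free_pages > low_wmark_pages(z);
}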
1073static inline unsigned long zone_managed_pages(struct zone *zone)
1074{
1075 return (unsigned long)atomic_long_read(&zone->managed_pages);
1076}
1077
1078static inline unsigned long zone_cma_pages(struct zone *zone)
1079{
1080#ifdef CONFIG_CMA
1081 return zone->cma_pages;
1082#else
1083 return 0;
1084#endif
1085}
1086
1087static inline unsigned long zone_end_pfn(const struct zone *zone)
1088{
1089 return zone->zone_start_pfn + zone->spanned_pages;
1090}
1091
1092static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
1093{
1094 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
1095}
1096
1097static inline bool zone_is_initialized(struct zone *zone)
1098{
1099 return zone->initialized;
1100}
1101
1102static inline bool zone_is_empty(struct zone *zone)
1103{
1104 return zone->spanned_pages == 0;
1105}
1106
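/*
 * Illustrative sketch (editorial example, not part of the original header):
 * the page accounting relationship documented in struct zone above; pages
 * that are present but not managed were reserved away from the buddy
 * allocator (e.g. by the boot-time allocator).
 */
static inline unsigned long example_zone_unmanaged_pages(struct zone *zone)
{
	return zone->present_pages - zone_managed_pages(zone);
}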
1107#ifndef BUILD_VDSO32_64
1108/*
1109 * The zone field is never updated after free_area_init_core()
1110 * sets it, so none of the operations on it need to be atomic.
1111 */
1112
1113/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
1114#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
1115#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
1116#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
1117#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
1118#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
1119#define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
1120#define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH)
1121
1122/*
1123 * Define the bit shifts to access each section. For non-existent
1124 * sections we define the shift as 0; that plus a 0 mask ensures
1125 * the compiler will optimise away references to them.
1126 */
1127#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
1128#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
1129#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
1130#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
1131#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
1132
1133/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
1134#ifdef NODE_NOT_IN_PAGE_FLAGS
1135#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
1136#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \
1137 SECTIONS_PGOFF : ZONES_PGOFF)
1138#else
1139#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
1140#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? \
1141 NODES_PGOFF : ZONES_PGOFF)
1142#endif
1143
1144#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
1145
1146#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
1147#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
1148#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
1149#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
1150#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
1151#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
1152
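/*
 * Illustrative sketch (editorial example, not part of the original header):
 * how the buddy allocator's zone ID (NODE:ZONE or SECTION:ZONE) is read out
 * of page->flags using the ZONEID_* values above; the kernel's own helper
 * for this is page_zone_id() in <linux/mm.h>.
 */
static inline int example_page_zone_id(const struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}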
1153static inline enum zone_type page_zonenum(const struct page *page)
1154{
1155 ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
1156 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
1157}
1158
1159static inline enum zone_type folio_zonenum(const struct folio *folio)
1160{
1161 return page_zonenum(&folio->page);
1162}
1163
1164#ifdef CONFIG_ZONE_DEVICE
1165static inline bool is_zone_device_page(const struct page *page)
1166{
1167 return page_zonenum(page) == ZONE_DEVICE;
1168}
1169
1170static inline struct dev_pagemap *page_pgmap(const struct page *page)
1171{
1172 VM_WARN_ON_ONCE_PAGE(!is_zone_device_page(page), page);
1173 return page_folio(page)->pgmap;
1174}
1175
1176/*
1177 * Consecutive zone device pages should not be merged into the same sgl
1178 * or bvec segment with other types of pages or if they belong to different
1179 * pgmaps. Otherwise getting the pgmap of a given segment is not possible
1180 * without scanning the entire segment. This helper returns true either if
1181 * both pages are not zone device pages or both pages are zone device pages
1182 * with the same pgmap.
1183 */
1184static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
1185 const struct page *b)
1186{
1187 if (is_zone_device_page(a) != is_zone_device_page(b))
1188 return false;
1189 if (!is_zone_device_page(a))
1190 return true;
1191 return page_pgmap(a) == page_pgmap(b);
1192}
1193
1194extern void memmap_init_zone_device(struct zone *, unsigned long,
1195 unsigned long, struct dev_pagemap *);
1196#else
1197static inline bool is_zone_device_page(const struct page *page)
1198{
1199 return false;
1200}
1201static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
1202 const struct page *b)
1203{
1204 return true;
1205}
1206static inline struct dev_pagemap *page_pgmap(const struct page *page)
1207{
1208 return NULL;
1209}
1210#endif
1211
1212static inline bool folio_is_zone_device(const struct folio *folio)
1213{
1214 return is_zone_device_page(&folio->page);
1215}
1216
1217static inline bool is_zone_movable_page(const struct page *page)
1218{
1219 return page_zonenum(page) == ZONE_MOVABLE;
1220}
1221
1222static inline bool folio_is_zone_movable(const struct folio *folio)
1223{
1224 return folio_zonenum(folio) == ZONE_MOVABLE;
1225}
1226#endif
1227
1228/*
1229 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
1230 * intersection with the given zone
1231 */
1232static inline bool zone_intersects(struct zone *zone,
1233 unsigned long start_pfn, unsigned long nr_pages)
1234{
1235 if (zone_is_empty(zone))
1236 return false;
1237 if (start_pfn >= zone_end_pfn(zone) ||
1238 start_pfn + nr_pages <= zone->zone_start_pfn)
1239 return false;
1240
1241 return true;
1242}
1243
1244/*
1245 * The "priority" of VM scanning is how much of the queues we will scan in one
1246 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
1247 * queues ("queue_length >> 12") during an aging round.
1248 */
1249#define DEF_PRIORITY 12
1250
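/*
 * Illustrative sketch (editorial example, not part of the original header):
 * the priority-to-scan-size relationship described above. At DEF_PRIORITY
 * (12) a pass considers queue_length >> 12, i.e. 1/4096th of the queue, and
 * every lower priority value doubles the amount scanned.
 */
static inline unsigned long example_scan_size(unsigned long queue_length,
					      int priority)
{
	return queue_length >> priority;
}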
1251/* Maximum number of zones on a zonelist */
1252#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
1253
1254enum {
1255 ZONELIST_FALLBACK, /* zonelist with fallback */
1256#ifdef CONFIG_NUMA
1257 /*
1258 * The NUMA zonelists are doubled because we need zonelists that
1259 * restrict the allocations to a single node for __GFP_THISNODE.
1260 */
1261 ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */
1262#endif
1263 MAX_ZONELISTS
1264};
1265
1266/*
1267 * This struct contains information about a zone in a zonelist. It is stored
1268 * here to avoid dereferences into large structures and lookups of tables
1269 */
1270struct zoneref {
1271 struct zone *zone; /* Pointer to actual zone */
1272 int zone_idx; /* zone_idx(zoneref->zone) */
1273};
1274
1275/*
1276 * One allocation request operates on a zonelist. A zonelist
1277 * is a list of zones, the first one is the 'goal' of the
1278 * allocation, the other zones are fallback zones, in decreasing
1279 * priority.
1280 *
1281 * To speed the reading of the zonelist, the zonerefs contain the zone index
1282 * of the entry being read. Helper functions to access information given
1283 * a struct zoneref are
1284 *
1285 * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
1286 * zonelist_zone_idx() - Return the index of the zone for an entry
1287 * zonelist_node_idx() - Return the index of the node for an entry
1288 */
1289struct zonelist {
1290 struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
1291};
1292
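/*
 * Illustrative sketch (editorial example, not part of the original header):
 * walking a zonelist's zoneref cursors directly. The extra array slot holds
 * a terminating entry whose zone pointer is NULL; the kernel's iteration
 * helpers (e.g. for_each_zone_zonelist()) build on next_zones_zonelist()
 * rather than open-coding this.
 */
static inline unsigned int example_count_zonelist_zones(struct zonelist *zonelist)
{
	struct zoneref *z = zonelist->_zonerefs;
	unsigned int n = 0;

	while (z->zone) {
		n++;
		z++;
	}

	return n;
}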
1293/*
1294 * The array of struct pages for flatmem.
1295 * It must be declared for SPARSEMEM as well because there are configurations
1296 * that rely on that.
1297 */
1298extern struct page *mem_map;
1299
1300#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1301struct deferred_split {
1302 spinlock_t split_queue_lock;
1303 struct list_head split_queue;
1304 unsigned long split_queue_len;
1305};
1306#endif
1307
1308#ifdef CONFIG_MEMORY_FAILURE
1309/*
1310 * Per NUMA node memory failure handling statistics.
1311 */
1312struct memory_failure_stats {
1313 /*
1314 * Number of raw pages poisoned.
1315 * Cases not accounted: memory outside kernel control, offline page,
1316 * arch-specific memory_failure (SGX), hwpoison_filter() filtered
1317 * error events, and unpoison actions from hwpoison_unpoison.
1318 */
1319 unsigned long total;
1320 /*
1321 * Recovery results of poisoned raw pages handled by memory_failure,
1322 * in sync with mf_result.
1323 * total = ignored + failed + delayed + recovered.
1324 * total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted.
1325 */
1326 unsigned long ignored;
1327 unsigned long failed;
1328 unsigned long delayed;
1329 unsigned long recovered;
1330};
1331#endif
1332
1333/*
1334 * On NUMA machines, each NUMA node has a pg_data_t to describe
1335 * its memory layout. On UMA machines there is a single pglist_data which
1336 * describes the whole memory.
1337 *
1338 * Memory statistics and page replacement data structures are maintained on a
1339 * per-zone basis.
1340 */
1341typedef struct pglist_data {
1342 /*
1343 * node_zones contains just the zones for THIS node. Not all of the
1344 * zones may be populated, but it is the full list. It is referenced by
1345 * this node's node_zonelists as well as other node's node_zonelists.
1346 */
1347 struct zone node_zones[MAX_NR_ZONES];
1348
1349 /*
1350 * node_zonelists contains references to all zones in all nodes.
1351 * Generally the first zones will be references to this node's
1352 * node_zones.
1353 */
1354 struct zonelist node_zonelists[MAX_ZONELISTS];
1355
1356 int nr_zones; /* number of populated zones in this node */
1357#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */
1358 struct page *node_mem_map;
1359#ifdef CONFIG_PAGE_EXTENSION
1360 struct page_ext *node_page_ext;
1361#endif
1362#endif
1363#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
1364 /*
1365 * Must be held any time you expect node_start_pfn,
1366 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
1367 * Also synchronizes pgdat->first_deferred_pfn during deferred page
1368 * init.
1369 *
1370 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
1371 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
1372 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
1373 *
1374 * Nests above zone->lock and zone->span_seqlock
1375 */
1376 spinlock_t node_size_lock;
1377#endif
1378 unsigned long node_start_pfn;
1379 unsigned long node_present_pages; /* total number of physical pages */
1380 unsigned long node_spanned_pages; /* total size of physical page
1381 range, including holes */
1382 int node_id;
1383 wait_queue_head_t kswapd_wait;
1384 wait_queue_head_t pfmemalloc_wait;
1385
1386 /* workqueues for throttling reclaim for different reasons. */
1387 wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];
1388
1389 atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
1390 unsigned long nr_reclaim_start; /* nr pages written while throttled
1391 * when throttling started. */
1392#ifdef CONFIG_MEMORY_HOTPLUG
1393 struct mutex kswapd_lock;
1394#endif
1395 struct task_struct *kswapd; /* Protected by kswapd_lock */
1396 int kswapd_order;
1397 enum zone_type kswapd_highest_zoneidx;
1398
1399 int kswapd_failures; /* Number of 'reclaimed == 0' runs */
1400
1401#ifdef CONFIG_COMPACTION
1402 int kcompactd_max_order;
1403 enum zone_type kcompactd_highest_zoneidx;
1404 wait_queue_head_t kcompactd_wait;
1405 struct task_struct *kcompactd;
1406 bool proactive_compact_trigger;
1407#endif
1408 /*
1409 * This is a per-node reserve of pages that are not available
1410 * to userspace allocations.
1411 */
1412 unsigned long totalreserve_pages;
1413
1414#ifdef CONFIG_NUMA
1415 /*
1416 * node reclaim becomes active if more unmapped pages exist.
1417 */
1418 unsigned long min_unmapped_pages;
1419 unsigned long min_slab_pages;
1420#endif /* CONFIG_NUMA */
1421
1422 /* Write-intensive fields used by page reclaim */
1423 CACHELINE_PADDING(_pad1_);
1424
1425#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1426 /*
1427 * If memory initialisation on large machines is deferred then this
1428 * is the first PFN that needs to be initialised.
1429 */
1430 unsigned long first_deferred_pfn;
1431#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1432
1433#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1434 struct deferred_split deferred_split_queue;
1435#endif
1436
1437#ifdef CONFIG_NUMA_BALANCING
1438 /* start time in ms of current promote rate limit period */
1439 unsigned int nbp_rl_start;
1440 /* number of promote candidate pages at start time of current rate limit period */
1441 unsigned long nbp_rl_nr_cand;
1442 /* promote threshold in ms */
1443 unsigned int nbp_threshold;
1444 /* start time in ms of current promote threshold adjustment period */
1445 unsigned int nbp_th_start;
1446 /*
1447 * number of promote candidate pages at start time of current promote
1448 * threshold adjustment period
1449 */
1450 unsigned long nbp_th_nr_cand;
1451#endif
1452 /* Fields commonly accessed by the page reclaim scanner */
1453
1454 /*
1455 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
1456 *
1457 * Use mem_cgroup_lruvec() to look up lruvecs.
1458 */
1459 struct lruvec __lruvec;
1460
1461 unsigned long flags;
1462
1463#ifdef CONFIG_LRU_GEN
1464	/* kswapd mm walk data */
1465 struct lru_gen_mm_walk mm_walk;
1466 /* lru_gen_folio list */
1467 struct lru_gen_memcg memcg_lru;
1468#endif
1469
1470 CACHELINE_PADDING(_pad2_);
1471
1472 /* Per-node vmstats */
1473 struct per_cpu_nodestat __percpu *per_cpu_nodestats;
1474 atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
1475#ifdef CONFIG_NUMA
1476 struct memory_tier __rcu *memtier;
1477#endif
1478#ifdef CONFIG_MEMORY_FAILURE
1479 struct memory_failure_stats mf_stats;
1480#endif
1481} pg_data_t;
1482
1483#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
1484#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
1485
1486#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
1487#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
1488
1489static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
1490{
1491 return pgdat->node_start_pfn + pgdat->node_spanned_pages;
1492}
1493
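/*
 * Illustrative sketch (editorial example, not part of the original header):
 * a node spans the pfn range [node_start_pfn, pgdat_end_pfn()), so a
 * pfn-in-node check mirrors zone_spans_pfn() above.
 */
static inline bool example_pgdat_spans_pfn(pg_data_t *pgdat, unsigned long pfn)
{
	return pfn >= pgdat->node_start_pfn && pfn < pgdat_end_pfn(pgdat);
}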
1494#include <linux/memory_hotplug.h>
1495
1496void build_all_zonelists(pg_data_t *pgdat);
1497void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
1498 enum zone_type highest_zoneidx);
1499bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
1500 int highest_zoneidx, unsigned int alloc_flags,
1501 long free_pages);
1502bool zone_watermark_ok(struct zone *z, unsigned int order,
1503 unsigned long mark, int highest_zoneidx,
1504 unsigned int alloc_flags);
1505/*
1506 * Memory initialization context, use to differentiate memory added by
1507 * the platform statically or via memory hotplug interface.
1508 */
1509enum meminit_context {
1510 MEMINIT_EARLY,
1511 MEMINIT_HOTPLUG,
1512};
1513
1514extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
1515 unsigned long size);
1516
1517extern void lruvec_init(struct lruvec *lruvec);
1518
1519static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
1520{
1521#ifdef CONFIG_MEMCG
1522 return lruvec->pgdat;
1523#else
1524 return container_of(lruvec, struct pglist_data, __lruvec);
1525#endif
1526}
1527
1528#ifdef CONFIG_HAVE_MEMORYLESS_NODES
1529int local_memory_node(int node_id);
1530#else
1531static inline int local_memory_node(int node_id) { return node_id; };
1532#endif
1533
1534/*
1535 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
1536 */
1537#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
1538
1539#ifdef CONFIG_ZONE_DEVICE
1540static inline bool zone_is_zone_device(struct zone *zone)
1541{
1542 return zone_idx(zone) == ZONE_DEVICE;
1543}
1544#else
1545static inline bool zone_is_zone_device(struct zone *zone)
1546{
1547 return false;
1548}
1549#endif
1550
1551/*
1552 * Returns true if a zone has pages managed by the buddy allocator.
1553 * All the reclaim decisions have to use this function rather than
1554 * populated_zone(). If the whole zone is reserved then we can easily
1555 * end up with populated_zone() && !managed_zone().
1556 */
1557static inline bool managed_zone(struct zone *zone)
1558{
1559 return zone_managed_pages(zone);
1560}
1561
1562/* Returns true if a zone has memory */
1563static inline bool populated_zone(struct zone *zone)
1564{
1565 return zone->present_pages;
1566}
1567
1568#ifdef CONFIG_NUMA
1569static inline int zone_to_nid(struct zone *zone)
1570{
1571 return zone->node;
1572}
1573
1574static inline void zone_set_nid(struct zone *zone, int nid)
1575{
1576 zone->node = nid;
1577}
1578#else
1579static inline int zone_to_nid(struct zone *zone)
1580{
1581 return 0;
1582}
1583
1584static inline void zone_set_nid(struct zone *zone, int nid) {}
1585#endif
1586
1587extern int movable_zone;
1588
1589static inline int is_highmem_idx(enum zone_type idx)
1590{
1591#ifdef CONFIG_HIGHMEM
1592 return (idx == ZONE_HIGHMEM ||
1593 (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
1594#else
1595 return 0;
1596#endif
1597}
1598
1599/**
1600 * is_highmem - quickly check if a struct zone is a highmem zone.
1601 * @zone: pointer to struct zone variable
1602 *
1603 * Keeps references to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
1604 * Return: 1 for a highmem zone, 0 otherwise
1605 */
1606static inline int is_highmem(struct zone *zone)
1607{
1608 return is_highmem_idx(zone_idx(zone));
1609}
1610
1611#ifdef CONFIG_ZONE_DMA
1612bool has_managed_dma(void);
1613#else
1614static inline bool has_managed_dma(void)
1615{
1616 return false;
1617}
1618#endif
1619
1620
1621#ifndef CONFIG_NUMA
1622
1623extern struct pglist_data contig_page_data;
1624static inline struct pglist_data *NODE_DATA(int nid)
1625{
1626 return &contig_page_data;
1627}
1628
1629#else /* CONFIG_NUMA */
1630
1631#include <asm/mmzone.h>
1632
1633#endif /* !CONFIG_NUMA */
1634
1635extern struct pglist_data *first_online_pgdat(void);
1636extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
1637extern struct zone *next_zone(struct zone *zone);
1638
1639/**
1640 * for_each_online_pgdat - helper macro to iterate over all online nodes
1641 * @pgdat: pointer to a pg_data_t variable
1642 */
1643#define for_each_online_pgdat(pgdat) \
1644 for (pgdat = first_online_pgdat(); \
1645 pgdat; \
1646 pgdat = next_online_pgdat(pgdat))
1647/**
1648 * for_each_zone - helper macro to iterate over all memory zones
1649 * @zone: pointer to struct zone variable
1650 *
1651 * The user only needs to declare the zone variable, for_each_zone
1652 * fills it in.
1653 */
1654#define for_each_zone(zone) \
1655 for (zone = (first_online_pgdat())->node_zones; \
1656 zone; \
1657 zone = next_zone(zone))
1658
1659#define for_each_populated_zone(zone) \
1660 for (zone = (first_online_pgdat())->node_zones; \
1661 zone; \
1662 zone = next_zone(zone)) \
1663 if (!populated_zone(zone)) \
1664 ; /* do nothing */ \
1665 else
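
/*
 * Illustrative usage sketch, not part of this header: sum the pages
 * managed by the buddy allocator across every populated zone. The helper
 * name is hypothetical.
 */
static inline unsigned long example_total_managed_pages(void)
{
	struct zone *zone;
	unsigned long pages = 0;

	/* for_each_populated_zone() fills in @zone and skips empty zones */
	for_each_populated_zone(zone)
		pages += zone_managed_pages(zone);

	return pages;
}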
1666
1667static inline struct zone *zonelist_zone(struct zoneref *zoneref)
1668{
1669 return zoneref->zone;
1670}
1671
1672static inline int zonelist_zone_idx(struct zoneref *zoneref)
1673{
1674 return zoneref->zone_idx;
1675}
1676
1677static inline int zonelist_node_idx(struct zoneref *zoneref)
1678{
1679 return zone_to_nid(zoneref->zone);
1680}
1681
1682struct zoneref *__next_zones_zonelist(struct zoneref *z,
1683 enum zone_type highest_zoneidx,
1684 nodemask_t *nodes);
1685
1686/**
1687 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask, starting from the cursor @z
1688 * @z: The cursor used as a starting point for the search
1689 * @highest_zoneidx: The zone index of the highest zone to return
1690 * @nodes: An optional nodemask to filter the zonelist with
1691 *
1692 * This function returns the next zone at or below a given zone index that is
1693 * within the allowed nodemask using a cursor as the starting point for the
1694 * search. The zoneref returned is a cursor that represents the current zone
1695 * being examined. It should be advanced by one before calling
1696 * next_zones_zonelist again.
1697 *
1698 * Return: the next zone at or below highest_zoneidx within the allowed
1699 * nodemask using a cursor within a zonelist as a starting point
1700 */
1701static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
1702 enum zone_type highest_zoneidx,
1703 nodemask_t *nodes)
1704{
1705 if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
1706 return z;
1707 return __next_zones_zonelist(z, highest_zoneidx, nodes);
1708}
1709
1710/**
1711 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
1712 * @zonelist: The zonelist to search for a suitable zone
1713 * @highest_zoneidx: The zone index of the highest zone to return
1714 * @nodes: An optional nodemask to filter the zonelist with
1715 *
1716 * This function returns the first zone at or below a given zone index that is
1717 * within the allowed nodemask. The zoneref returned is a cursor that can be
1718 * used to iterate the zonelist with next_zones_zonelist by advancing it by
1719 * one before calling.
1720 *
1721 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
1722 * never NULL). This may happen either genuinely, or due to concurrent nodemask
1723 * update due to cpuset modification.
1724 *
1725 * Return: Zoneref pointer for the first suitable zone found
1726 */
1727static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
1728 enum zone_type highest_zoneidx,
1729 nodemask_t *nodes)
1730{
1731 return next_zones_zonelist(zonelist->_zonerefs,
1732 highest_zoneidx, nodes);
1733}
1734
1735/**
1736 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
1737 * @zone: The current zone in the iterator
1738 * @z: The current pointer within zonelist->_zonerefs being iterated
1739 * @zlist: The zonelist being iterated
1740 * @highidx: The zone index of the highest zone to return
1741 * @nodemask: Nodemask allowed by the allocator
1742 *
1743 * This iterator iterates through all zones at or below a given zone index
1744 * and within a given nodemask.
1745 */
1746#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
1747 for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
1748 zone; \
1749 z = next_zones_zonelist(++z, highidx, nodemask), \
1750 zone = zonelist_zone(z))
1751
1752#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
1753 for (zone = zonelist_zone(z); \
1754 zone; \
1755 z = next_zones_zonelist(++z, highidx, nodemask), \
1756 zone = zonelist_zone(z))
1757
1758
1759/**
1760 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
1761 * @zone: The current zone in the iterator
1762 * @z: The current pointer within zonelist->zones being iterated
1763 * @zlist: The zonelist being iterated
1764 * @highidx: The zone index of the highest zone to return
1765 *
1766 * This iterator iterates through all zones at or below a given zone index.
1767 */
1768#define for_each_zone_zonelist(zone, z, zlist, highidx) \
1769 for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
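
/*
 * Illustrative usage sketch, not part of this header: walk a node's
 * fallback zonelist much as the page allocator does and return the first
 * zone that passes a high-watermark check for an order-0 request. The
 * helper name and the choice of watermark are hypothetical.
 */
static inline struct zone *example_first_usable_zone(int nid,
						     enum zone_type highidx)
{
	struct zonelist *zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, highidx)
		if (zone_watermark_ok(zone, 0, high_wmark_pages(zone),
				      highidx, 0))
			return zone;

	return NULL;
}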
1770
1771/* Whether the 'nodes' are all movable nodes */
1772static inline bool movable_only_nodes(nodemask_t *nodes)
1773{
1774 struct zonelist *zonelist;
1775 struct zoneref *z;
1776 int nid;
1777
1778 if (nodes_empty(*nodes))
1779 return false;
1780
1781	/*
1782	 * We can choose an arbitrary node from the nodemask to get a
1783	 * zonelist, as the zonelists are interlinked. We just need to
1784	 * find at least one zone that can satisfy kernel allocations.
1785	 */
1786 nid = first_node(*nodes);
1787 zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
1788 z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
1789	return !zonelist_zone(z);
1790}
1791
1792
1793#ifdef CONFIG_SPARSEMEM
1794#include <asm/sparsemem.h>
1795#endif
1796
1797#ifdef CONFIG_FLATMEM
1798#define pfn_to_nid(pfn) (0)
1799#endif
1800
1801#ifdef CONFIG_SPARSEMEM
1802
1803/*
1804 * PA_SECTION_SHIFT physical address to/from section number
1805 * PFN_SECTION_SHIFT pfn to/from section number
1806 */
1807#define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
1808#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
1809
1810#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)
1811
1812#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
1813#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
1814
1815#define SECTION_BLOCKFLAGS_BITS \
1816 ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
1817
1818#if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
1819#error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE
1820#endif
1821
1822static inline unsigned long pfn_to_section_nr(unsigned long pfn)
1823{
1824 return pfn >> PFN_SECTION_SHIFT;
1825}
1826static inline unsigned long section_nr_to_pfn(unsigned long sec)
1827{
1828 return sec << PFN_SECTION_SHIFT;
1829}
1830
1831#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
1832#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
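
/*
 * Worked example (illustrative, assuming SECTION_SIZE_BITS == 27 and 4K
 * pages, as on x86_64): a section covers 1UL << 27 == 128M of physical
 * address space, PFN_SECTION_SHIFT == 27 - 12 == 15, PAGES_PER_SECTION ==
 * 32768, and pfn_to_section_nr(0x8000) == 1.
 */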
1833
1834#define SUBSECTION_SHIFT 21
1835#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)
1836
1837#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
1838#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
1839#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
1840
1841#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
1842#error Subsection size exceeds section size
1843#else
1844#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
1845#endif
1846
1847#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
1848#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
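
/*
 * Worked example (illustrative, assuming 4K pages and SECTION_SIZE_BITS ==
 * 27): a subsection spans 1UL << 21 == 2M, so PAGES_PER_SUBSECTION == 512
 * and SUBSECTIONS_PER_SECTION == 1UL << (27 - 21) == 64.
 */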
1849
1850struct mem_section_usage {
1851 struct rcu_head rcu;
1852#ifdef CONFIG_SPARSEMEM_VMEMMAP
1853 DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
1854#endif
1855 /* See declaration of similar field in struct zone */
1856	unsigned long pageblock_flags[];
1857};
1858
1859void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
1860
1861struct page;
1862struct page_ext;
1863struct mem_section {
1864 /*
1865 * This is, logically, a pointer to an array of struct
1866 * pages. However, it is stored with some other magic.
1867 * (see sparse.c::sparse_init_one_section())
1868 *
1869	 * Additionally, during early boot we encode the node id of
1870	 * the section's location here to guide allocation.
1871 * (see sparse.c::memory_present())
1872 *
1873 * Making it a UL at least makes someone do a cast
1874 * before using it wrong.
1875 */
1876 unsigned long section_mem_map;
1877
1878 struct mem_section_usage *usage;
1879#ifdef CONFIG_PAGE_EXTENSION
1880 /*
1881	 * With SPARSEMEM, pgdat doesn't have a page_ext pointer; the
1882	 * section carries it instead (see page_ext.h about this).
1883 */
1884 struct page_ext *page_ext;
1885 unsigned long pad;
1886#endif
1887 /*
1888 * WARNING: mem_section must be a power-of-2 in size for the
1889 * calculation and use of SECTION_ROOT_MASK to make sense.
1890 */
1891};
1892
1893#ifdef CONFIG_SPARSEMEM_EXTREME
1894#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
1895#else
1896#define SECTIONS_PER_ROOT 1
1897#endif
1898
1899#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
1900#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
1901#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
1902
1903#ifdef CONFIG_SPARSEMEM_EXTREME
1904extern struct mem_section **mem_section;
1905#else
1906extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
1907#endif
1908
1909static inline unsigned long *section_to_usemap(struct mem_section *ms)
1910{
1911 return ms->usage->pageblock_flags;
1912}
1913
1914static inline struct mem_section *__nr_to_section(unsigned long nr)
1915{
1916 unsigned long root = SECTION_NR_TO_ROOT(nr);
1917
1918 if (unlikely(root >= NR_SECTION_ROOTS))
1919 return NULL;
1920
1921#ifdef CONFIG_SPARSEMEM_EXTREME
1922 if (!mem_section || !mem_section[root])
1923 return NULL;
1924#endif
1925 return &mem_section[root][nr & SECTION_ROOT_MASK];
1926}
1927extern size_t mem_section_usage_size(void);
1928
1929/*
1930 * We use the lower bits of the mem_map pointer to store
1931 * a little bit of information. The pointer is calculated
1932 * as mem_map - section_nr_to_pfn(pnum). The result is
1933 * aligned to the minimum alignment of the two values:
1934 * 1. All mem_map arrays are page-aligned.
1935 * 2. section_nr_to_pfn() always clears the PFN_SECTION_SHIFT
1936 *    lowest bits. PFN_SECTION_SHIFT is arch-specific
1937 *    (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
1938 *    worst combination is powerpc with 256k pages,
1939 *    which results in PFN_SECTION_SHIFT equal to 6.
1940 * To sum it up, at least 6 bits are available on all architectures.
1941 * More bits are available on other architectures (e.g. 15 bits on
1942 * x86_64, and 13 bits in the worst case of 64K pages on arm64), so a
1943 * bit above the 6th may be used provided it is never needed on
1944 * powerpc.
1945 */
1946enum {
1947 SECTION_MARKED_PRESENT_BIT,
1948 SECTION_HAS_MEM_MAP_BIT,
1949 SECTION_IS_ONLINE_BIT,
1950 SECTION_IS_EARLY_BIT,
1951#ifdef CONFIG_ZONE_DEVICE
1952 SECTION_TAINT_ZONE_DEVICE_BIT,
1953#endif
1954#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
1955 SECTION_IS_VMEMMAP_PREINIT_BIT,
1956#endif
1957 SECTION_MAP_LAST_BIT,
1958};
1959
1960#define SECTION_MARKED_PRESENT BIT(SECTION_MARKED_PRESENT_BIT)
1961#define SECTION_HAS_MEM_MAP BIT(SECTION_HAS_MEM_MAP_BIT)
1962#define SECTION_IS_ONLINE BIT(SECTION_IS_ONLINE_BIT)
1963#define SECTION_IS_EARLY BIT(SECTION_IS_EARLY_BIT)
1964#ifdef CONFIG_ZONE_DEVICE
1965#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
1966#endif
1967#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
1968#define SECTION_IS_VMEMMAP_PREINIT BIT(SECTION_IS_VMEMMAP_PREINIT_BIT)
1969#endif
1970#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
1971#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT
1972
1973static inline struct page *__section_mem_map_addr(struct mem_section *section)
1974{
1975 unsigned long map = section->section_mem_map;
1976 map &= SECTION_MAP_MASK;
1977 return (struct page *)map;
1978}
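
/*
 * Illustrative sketch, not part of this header: during early boot, before
 * the encoded mem_map pointer is installed, the bits at and above
 * SECTION_NID_SHIFT hold the node id mentioned in struct mem_section. The
 * helper name is hypothetical and its result is meaningless once the
 * section's mem_map has been set up.
 */
static inline int example_section_early_nid(struct mem_section *section)
{
	return section->section_mem_map >> SECTION_NID_SHIFT;
}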
1979
1980static inline int present_section(struct mem_section *section)
1981{
1982 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
1983}
1984
1985static inline int present_section_nr(unsigned long nr)
1986{
1987 return present_section(__nr_to_section(nr));
1988}
1989
1990static inline int valid_section(struct mem_section *section)
1991{
1992 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
1993}
1994
1995static inline int early_section(struct mem_section *section)
1996{
1997 return (section && (section->section_mem_map & SECTION_IS_EARLY));
1998}
1999
2000static inline int valid_section_nr(unsigned long nr)
2001{
2002 return valid_section(__nr_to_section(nr));
2003}
2004
2005static inline int online_section(struct mem_section *section)
2006{
2007 return (section && (section->section_mem_map & SECTION_IS_ONLINE));
2008}
2009
2010#ifdef CONFIG_ZONE_DEVICE
2011static inline int online_device_section(struct mem_section *section)
2012{
2013 unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
2014
2015 return section && ((section->section_mem_map & flags) == flags);
2016}
2017#else
2018static inline int online_device_section(struct mem_section *section)
2019{
2020 return 0;
2021}
2022#endif
2023
2024#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
2025static inline int preinited_vmemmap_section(struct mem_section *section)
2026{
2027 return (section &&
2028 (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
2029}
2030
2031void sparse_vmemmap_init_nid_early(int nid);
2032void sparse_vmemmap_init_nid_late(int nid);
2033
2034#else
2035static inline int preinited_vmemmap_section(struct mem_section *section)
2036{
2037 return 0;
2038}
2039static inline void sparse_vmemmap_init_nid_early(int nid)
2040{
2041}
2042
2043static inline void sparse_vmemmap_init_nid_late(int nid)
2044{
2045}
2046#endif
2047
2048static inline int online_section_nr(unsigned long nr)
2049{
2050 return online_section(__nr_to_section(nr));
2051}
2052
2053#ifdef CONFIG_MEMORY_HOTPLUG
2054void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
2055void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
2056#endif
2057
2058static inline struct mem_section *__pfn_to_section(unsigned long pfn)
2059{
2060 return __nr_to_section(pfn_to_section_nr(pfn));
2061}
2062
2063extern unsigned long __highest_present_section_nr;
2064
2065static inline int subsection_map_index(unsigned long pfn)
2066{
2067 return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
2068}
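
/*
 * Worked example (illustrative, assuming PAGES_PER_SECTION == 32768 and
 * PAGES_PER_SUBSECTION == 512): pfn 0x8200 sits 0x200 == 512 pfns into
 * its section, so subsection_map_index() returns 1.
 */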
2069
2070#ifdef CONFIG_SPARSEMEM_VMEMMAP
2071static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
2072{
2073 int idx = subsection_map_index(pfn);
2074 struct mem_section_usage *usage = READ_ONCE(ms->usage);
2075
2076 return usage ? test_bit(idx, usage->subsection_map) : 0;
2077}
2078#else
2079static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
2080{
2081 return 1;
2082}
2083#endif
2084
2085void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,
2086 unsigned long flags);
2087
2088#ifndef CONFIG_HAVE_ARCH_PFN_VALID
2089/**
2090 * pfn_valid - check if there is a valid memory map entry for a PFN
2091 * @pfn: the page frame number to check
2092 *
2093 * Check if there is a valid memory map entry aka struct page for the @pfn.
2094 * Note, that availability of the memory map entry does not imply that
2095 * there is actual usable memory at that @pfn. The struct page may
2096 * represent a hole or an unusable page frame.
2097 *
2098 * Return: 1 for PFNs that have memory map entries and 0 otherwise
2099 */
2100static inline int pfn_valid(unsigned long pfn)
2101{
2102 struct mem_section *ms;
2103 int ret;
2104
2105 /*
2106 * Ensure the upper PAGE_SHIFT bits are clear in the
2107	 * pfn. Otherwise it might lead to false positives when
2108 * some of the upper bits are set, but the lower bits
2109 * match a valid pfn.
2110 */
2111 if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
2112 return 0;
2113
2114 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
2115 return 0;
2116 ms = __pfn_to_section(pfn);
2117 rcu_read_lock_sched();
2118 if (!valid_section(ms)) {
2119 rcu_read_unlock_sched();
2120 return 0;
2121 }
2122 /*
2123 * Traditionally early sections always returned pfn_valid() for
2124 * the entire section-sized span.
2125 */
2126 ret = early_section(ms) || pfn_section_valid(ms, pfn);
2127 rcu_read_unlock_sched();
2128
2129 return ret;
2130}
2131#endif
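
/*
 * Illustrative usage sketch (kept as a comment, since pfn_to_page() is
 * only available to consumers of this header): a PFN walker checks
 * pfn_valid() before touching the memory map, and must still cope with
 * the returned struct page describing a hole:
 *
 *	if (pfn_valid(pfn))
 *		page = pfn_to_page(pfn);
 */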
2132
2133static inline int pfn_in_present_section(unsigned long pfn)
2134{
2135 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
2136 return 0;
2137 return present_section(__pfn_to_section(pfn));
2138}
2139
2140static inline unsigned long next_present_section_nr(unsigned long section_nr)
2141{
2142 while (++section_nr <= __highest_present_section_nr) {
2143 if (present_section_nr(section_nr))
2144 return section_nr;
2145 }
2146
2147 return -1;
2148}
2149
2150#define for_each_present_section_nr(start, section_nr) \
2151 for (section_nr = next_present_section_nr(start - 1); \
2152 section_nr != -1; \
2153 section_nr = next_present_section_nr(section_nr))
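
/*
 * Illustrative usage sketch, not part of this header: count the present
 * sections by walking from section 0. The termination test relies on
 * next_present_section_nr() returning -1 (ULONG_MAX) when the walk is
 * done. The helper name is hypothetical.
 */
static inline unsigned long example_nr_present_sections(void)
{
	unsigned long section_nr, nr = 0;

	for_each_present_section_nr(0, section_nr)
		nr++;

	return nr;
}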
2154
2155/*
2156 * These are _only_ used during initialisation, therefore they
2157 * can use __initdata ... They could have names to indicate
2158 * this restriction.
2159 */
2160#ifdef CONFIG_NUMA
2161#define pfn_to_nid(pfn) \
2162({ \
2163 unsigned long __pfn_to_nid_pfn = (pfn); \
2164 page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
2165})
2166#else
2167#define pfn_to_nid(pfn) (0)
2168#endif
2169
2170void sparse_init(void);
2171#else
2172#define sparse_init() do {} while (0)
2173#define sparse_index_init(_sec, _nid) do {} while (0)
2174#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
2175#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
2176#define pfn_in_present_section pfn_valid
2177#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
2178#endif /* CONFIG_SPARSEMEM */
2179
2180#endif /* !__GENERATING_BOUNDS_H */
2181#endif /* !__ASSEMBLY__ */
2182#endif /* _LINUX_MMZONE_H */