Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_MMZONE_H
3#define _LINUX_MMZONE_H
4
5#ifndef __ASSEMBLY__
6#ifndef __GENERATING_BOUNDS_H
7
8#include <linux/spinlock.h>
9#include <linux/list.h>
10#include <linux/wait.h>
11#include <linux/bitops.h>
12#include <linux/cache.h>
13#include <linux/threads.h>
14#include <linux/numa.h>
15#include <linux/init.h>
16#include <linux/seqlock.h>
17#include <linux/nodemask.h>
18#include <linux/pageblock-flags.h>
19#include <linux/page-flags-layout.h>
20#include <linux/atomic.h>
21#include <linux/mm_types.h>
22#include <linux/page-flags.h>
23#include <linux/local_lock.h>
24#include <asm/page.h>
25
26/* Free memory management - zoned buddy allocator. */
27#ifndef CONFIG_FORCE_MAX_ZONEORDER
28#define MAX_ORDER 11
29#else
30#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
31#endif
32#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
33
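/*
 * Worked example (illustrative; assumes the default MAX_ORDER of 11 and a
 * 4 KiB PAGE_SIZE):
 *
 *	MAX_ORDER_NR_PAGES  = 1 << (11 - 1) = 1024 pages
 *	largest buddy block = 1024 * 4 KiB  = 4 MiB of contiguous memory
 *
 * Architectures that set CONFIG_FORCE_MAX_ZONEORDER scale accordingly.
 */
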
34/*
35 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
36 * costly to service. That is between allocation orders which should
37 * coalesce naturally under reasonable reclaim pressure and those which
38 * will not.
39 */
40#define PAGE_ALLOC_COSTLY_ORDER 3
41
42enum migratetype {
43 MIGRATE_UNMOVABLE,
44 MIGRATE_MOVABLE,
45 MIGRATE_RECLAIMABLE,
46 MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
47 MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
48#ifdef CONFIG_CMA
49 /*
50 * MIGRATE_CMA migration type is designed to mimic the way
51 * ZONE_MOVABLE works. Only movable pages can be allocated
52 * from MIGRATE_CMA pageblocks, and the page allocator never
53 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
54 *
55 * The way to use it is to change migratetype of a range of
56 * pageblocks to MIGRATE_CMA which can be done by
57 * __free_pageblock_cma() function. What is important though
58 * is that a range of pageblocks must be aligned to
59 * MAX_ORDER_NR_PAGES, should the biggest page be bigger than
60 * a single pageblock.
61 */
62 MIGRATE_CMA,
63#endif
64#ifdef CONFIG_MEMORY_ISOLATION
65 MIGRATE_ISOLATE, /* can't allocate from here */
66#endif
67 MIGRATE_TYPES
68};
69
70/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
71extern const char * const migratetype_names[MIGRATE_TYPES];
72
73#ifdef CONFIG_CMA
74# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
75# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
76#else
77# define is_migrate_cma(migratetype) false
78# define is_migrate_cma_page(_page) false
79#endif
80
81static inline bool is_migrate_movable(int mt)
82{
83 return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
84}
85
86#define for_each_migratetype_order(order, type) \
87 for (order = 0; order < MAX_ORDER; order++) \
88 for (type = 0; type < MIGRATE_TYPES; type++)
89
90extern int page_group_by_mobility_disabled;
91
92#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
93
94#define get_pageblock_migratetype(page) \
95 get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
96
97struct free_area {
98 struct list_head free_list[MIGRATE_TYPES];
99 unsigned long nr_free;
100};
101
102static inline struct page *get_page_from_free_area(struct free_area *area,
103 int migratetype)
104{
105 return list_first_entry_or_null(&area->free_list[migratetype],
106 struct page, lru);
107}
108
109static inline bool free_area_empty(struct free_area *area, int migratetype)
110{
111 return list_empty(&area->free_list[migratetype]);
112}
113
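/*
 * Illustrative sketch (not part of this header): finding the first free
 * page of a given migratetype in a zone using the helpers above. Assumes
 * the caller holds zone->lock; "zone" and "migratetype" are hypothetical
 * local variables.
 *
 *	struct page *page = NULL;
 *	unsigned int order;
 *
 *	for (order = 0; order < MAX_ORDER; order++) {
 *		struct free_area *area = &zone->free_area[order];
 *
 *		if (free_area_empty(area, migratetype))
 *			continue;
 *		page = get_page_from_free_area(area, migratetype);
 *		break;
 *	}
 */
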
114struct pglist_data;
115
116/*
117 * Add a wild amount of padding here to ensure data fall into separate
118 * cachelines. There are very few zone structures in the machine, so space
119 * consumption is not a concern here.
120 */
121#if defined(CONFIG_SMP)
122struct zone_padding {
123 char x[0];
124} ____cacheline_internodealigned_in_smp;
125#define ZONE_PADDING(name) struct zone_padding name;
126#else
127#define ZONE_PADDING(name)
128#endif
129
130#ifdef CONFIG_NUMA
131enum numa_stat_item {
132 NUMA_HIT, /* allocated in the intended node */
133 NUMA_MISS, /* allocated in a non-intended node */
134 NUMA_FOREIGN, /* was intended here, hit elsewhere */
135 NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
136 NUMA_LOCAL, /* allocation from local node */
137 NUMA_OTHER, /* allocation from other node */
138 NR_VM_NUMA_EVENT_ITEMS
139};
140#else
141#define NR_VM_NUMA_EVENT_ITEMS 0
142#endif
143
144enum zone_stat_item {
145 /* First 128 byte cacheline (assuming 64 bit words) */
146 NR_FREE_PAGES,
147 NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
148 NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
149 NR_ZONE_ACTIVE_ANON,
150 NR_ZONE_INACTIVE_FILE,
151 NR_ZONE_ACTIVE_FILE,
152 NR_ZONE_UNEVICTABLE,
153 NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
154 NR_MLOCK, /* mlock()ed pages found and moved off LRU */
155 /* Second 128 byte cacheline */
156 NR_BOUNCE,
157#if IS_ENABLED(CONFIG_ZSMALLOC)
158 NR_ZSPAGES, /* allocated in zsmalloc */
159#endif
160 NR_FREE_CMA_PAGES,
161 NR_VM_ZONE_STAT_ITEMS };
162
163enum node_stat_item {
164 NR_LRU_BASE,
165 NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
166 NR_ACTIVE_ANON, /* " " " " " */
167 NR_INACTIVE_FILE, /* " " " " " */
168 NR_ACTIVE_FILE, /* " " " " " */
169 NR_UNEVICTABLE, /* " " " " " */
170 NR_SLAB_RECLAIMABLE_B,
171 NR_SLAB_UNRECLAIMABLE_B,
172 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
173 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
174 WORKINGSET_NODES,
175 WORKINGSET_REFAULT_BASE,
176 WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
177 WORKINGSET_REFAULT_FILE,
178 WORKINGSET_ACTIVATE_BASE,
179 WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
180 WORKINGSET_ACTIVATE_FILE,
181 WORKINGSET_RESTORE_BASE,
182 WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
183 WORKINGSET_RESTORE_FILE,
184 WORKINGSET_NODERECLAIM,
185 NR_ANON_MAPPED, /* Mapped anonymous pages */
186 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
187 only modified from process context */
188 NR_FILE_PAGES,
189 NR_FILE_DIRTY,
190 NR_WRITEBACK,
191 NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
192 NR_SHMEM, /* shmem pages (includes tmpfs/GEM pages) */
193 NR_SHMEM_THPS,
194 NR_SHMEM_PMDMAPPED,
195 NR_FILE_THPS,
196 NR_FILE_PMDMAPPED,
197 NR_ANON_THPS,
198 NR_VMSCAN_WRITE,
199 NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
200 NR_DIRTIED, /* page dirtyings since bootup */
201 NR_WRITTEN, /* page writings since bootup */
202 NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
203 NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
204 NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
205 NR_KERNEL_STACK_KB, /* measured in KiB */
206#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
207 NR_KERNEL_SCS_KB, /* measured in KiB */
208#endif
209 NR_PAGETABLE, /* used for pagetables */
210#ifdef CONFIG_SWAP
211 NR_SWAPCACHE,
212#endif
213 NR_VM_NODE_STAT_ITEMS
214};
215
216/*
217 * Returns true if the item should be printed as a number of THPs
218 * (/proc/vmstat currently prints the number of anon, file and shmem THPs,
219 * but the item is charged in pages).
220 */
221static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
222{
223 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
224 return false;
225
226 return item == NR_ANON_THPS ||
227 item == NR_FILE_THPS ||
228 item == NR_SHMEM_THPS ||
229 item == NR_SHMEM_PMDMAPPED ||
230 item == NR_FILE_PMDMAPPED;
231}
232
233/*
234 * Returns true if the value is measured in bytes (most vmstat values are
235 * measured in pages). This defines the API part; the internal representation
236 * might be different.
237 */
238static __always_inline bool vmstat_item_in_bytes(int idx)
239{
240 /*
241 * Global and per-node slab counters track slab pages.
242 * It's expected that changes are multiples of PAGE_SIZE.
243 * Internally values are stored in pages.
244 *
245 * Per-memcg and per-lruvec counters track memory, consumed
246 * by individual slab objects. These counters are actually
247 * byte-precise.
248 */
249 return (idx == NR_SLAB_RECLAIMABLE_B ||
250 idx == NR_SLAB_UNRECLAIMABLE_B);
251}
252
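/*
 * Sketch of how a caller might normalise a byte-based item back to pages
 * when reporting (hypothetical helper, not defined in this header):
 *
 *	static unsigned long stat_value_in_pages(enum node_stat_item item,
 *						 unsigned long value)
 *	{
 *		if (vmstat_item_in_bytes(item))
 *			return value >> PAGE_SHIFT;
 *		return value;
 *	}
 */
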
253/*
254 * We do arithmetic on the LRU lists in various places in the code,
255 * so it is important to keep the active lists LRU_ACTIVE higher in
256 * the array than the corresponding inactive lists, and to keep
257 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
258 *
259 * This has to be kept in sync with the statistics in zone_stat_item
260 * above and the descriptions in vmstat_text in mm/vmstat.c
261 */
262#define LRU_BASE 0
263#define LRU_ACTIVE 1
264#define LRU_FILE 2
265
266enum lru_list {
267 LRU_INACTIVE_ANON = LRU_BASE,
268 LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
269 LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
270 LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
271 LRU_UNEVICTABLE,
272 NR_LRU_LISTS
273};
274
275#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
276
277#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
278
279static inline bool is_file_lru(enum lru_list lru)
280{
281 return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
282}
283
284static inline bool is_active_lru(enum lru_list lru)
285{
286 return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
287}
288
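/*
 * Sketch of the arithmetic this layout enables (cf. page_lru(); the flags
 * below are placeholders, not real page-flag accessors):
 *
 *	enum lru_list lru = LRU_BASE;
 *
 *	if (is_file)		// file-backed page?
 *		lru += LRU_FILE;
 *	if (is_active)		// on an active list?
 *		lru += LRU_ACTIVE;
 *
 * so LRU_BASE + LRU_FILE + LRU_ACTIVE == LRU_ACTIVE_FILE, and so on.
 */
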
289#define ANON_AND_FILE 2
290
291enum lruvec_flags {
292 LRUVEC_CONGESTED, /* lruvec has many dirty pages
293 * backed by a congested BDI
294 */
295};
296
297struct lruvec {
298 struct list_head lists[NR_LRU_LISTS];
299 /* per lruvec lru_lock for memcg */
300 spinlock_t lru_lock;
301 /*
302 * These track the cost of reclaiming one LRU - file or anon -
303 * over the other. As the observed cost of reclaiming one LRU
304 * increases, the reclaim scan balance tips toward the other.
305 */
306 unsigned long anon_cost;
307 unsigned long file_cost;
308 /* Non-resident age, driven by LRU movement */
309 atomic_long_t nonresident_age;
310 /* Refaults at the time of last reclaim cycle */
311 unsigned long refaults[ANON_AND_FILE];
312 /* Various lruvec state flags (enum lruvec_flags) */
313 unsigned long flags;
314#ifdef CONFIG_MEMCG
315 struct pglist_data *pgdat;
316#endif
317};
318
319/* Isolate unmapped pages */
320#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
321/* Isolate for asynchronous migration */
322#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
323/* Isolate unevictable pages */
324#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)
325
326/* LRU Isolation modes. */
327typedef unsigned __bitwise isolate_mode_t;
328
329enum zone_watermarks {
330 WMARK_MIN,
331 WMARK_LOW,
332 WMARK_HIGH,
333 NR_WMARK
334};
335
336/*
337 * One list per migratetype for each order up to PAGE_ALLOC_COSTLY_ORDER, plus
338 * one additional list per migratetype for pageblock-order (THP) if configured.
339 */
340#ifdef CONFIG_TRANSPARENT_HUGEPAGE
341#define NR_PCP_THP 1
342#else
343#define NR_PCP_THP 0
344#endif
345#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP))
346
347/*
348 * Shift to encode migratetype and order in the same integer, with order
349 * in the least significant bits.
350 */
351#define NR_PCP_ORDER_WIDTH 8
352#define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1)
353
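/*
 * Illustrative sketch of the encoding described above (order occupies the
 * low NR_PCP_ORDER_WIDTH bits, the migratetype sits above it):
 *
 *	unsigned int combined      = (migratetype << NR_PCP_ORDER_WIDTH) | order;
 *	unsigned int decoded_order = combined & NR_PCP_ORDER_MASK;
 *	unsigned int decoded_mt    = combined >> NR_PCP_ORDER_WIDTH;
 */
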
354#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
355#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
356#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
357#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
358
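/*
 * Illustrative only: a reclaim-side caller comparing the free-page count
 * against the (boosted) low watermark might look like this ("zone",
 * "gfp_mask", "order" and "highest_zoneidx" are hypothetical locals):
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
 */
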
359/* Fields and list protected by pagesets local_lock in page_alloc.c */
360struct per_cpu_pages {
361 int count; /* number of pages in the list */
362 int high; /* high watermark, emptying needed */
363 int batch; /* chunk size for buddy add/remove */
364 short free_factor; /* batch scaling factor during free */
365#ifdef CONFIG_NUMA
366 short expire; /* When 0, remote pagesets are drained */
367#endif
368
369 /* Lists of pages, one per migrate type stored on the pcp-lists */
370 struct list_head lists[NR_PCP_LISTS];
371};
372
373struct per_cpu_zonestat {
374#ifdef CONFIG_SMP
375 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
376 s8 stat_threshold;
377#endif
378#ifdef CONFIG_NUMA
379 /*
380 * Low priority inaccurate counters that are only folded
381 * on demand. Use a large type to avoid the overhead of
382 * folding during refresh_cpu_vm_stats.
383 */
384 unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
385#endif
386};
387
388struct per_cpu_nodestat {
389 s8 stat_threshold;
390 s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
391};
392
393#endif /* !__GENERATING_BOUNDS_H */
394
395enum zone_type {
396 /*
397 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
398 * to DMA to all of the addressable memory (ZONE_NORMAL).
399 * On architectures where this area covers the whole 32 bit address
400 * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
401 * DMA addressing constraints. This distinction is important as a 32bit
402 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
403 * platforms may need both zones as they support peripherals with
404 * different DMA addressing limitations.
405 */
406#ifdef CONFIG_ZONE_DMA
407 ZONE_DMA,
408#endif
409#ifdef CONFIG_ZONE_DMA32
410 ZONE_DMA32,
411#endif
412 /*
413 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
414 * performed on pages in ZONE_NORMAL if the DMA devices support
415 * transfers to all addressable memory.
416 */
417 ZONE_NORMAL,
418#ifdef CONFIG_HIGHMEM
419 /*
420 * A memory area that is only addressable by the kernel through
421 * mapping portions into its own address space. This is for example
422 * used by i386 to allow the kernel to address the memory beyond
423 * 900MB. The kernel will set up special mappings (page
424 * table entries on i386) for each page that the kernel needs to
425 * access.
426 */
427 ZONE_HIGHMEM,
428#endif
429 /*
430 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
431 * movable pages with few exceptional cases described below. Main use
432 * cases for ZONE_MOVABLE are to make memory offlining/unplug more
433 * likely to succeed, and to locally limit unmovable allocations - e.g.,
434 * to increase the number of THP/huge pages. Notable special cases are:
435 *
436 * 1. Pinned pages: (long-term) pinning of movable pages might
437 * essentially turn such pages unmovable. Therefore, we do not allow
438 * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
439 * faulted, they come from the right zone right away. However, it is
440 * still possible that the address space already has pages in
441 * ZONE_MOVABLE at the time when pages are pinned (i.e. the user has
442 * touched that memory before pinning). In that case we migrate them
443 * to a different zone. When migration fails - pinning fails.
444 * 2. memblock allocations: kernelcore/movablecore setups might create
445 * situations where ZONE_MOVABLE contains unmovable allocations
446 * after boot. Memory offlining and allocations fail early.
447 * 3. Memory holes: kernelcore/movablecore setups might create very rare
448 * situations where ZONE_MOVABLE contains memory holes after boot,
449 * for example, if we have sections that are only partially
450 * populated. Memory offlining and allocations fail early.
451 * 4. PG_hwpoison pages: while poisoned pages can be skipped during
452 * memory offlining, such pages cannot be allocated.
453 * 5. Unmovable PG_offline pages: in paravirtualized environments,
454 * hotplugged memory blocks might only partially be managed by the
455 * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
456 * parts not managed by the buddy are unmovable PG_offline pages. In
457 * some cases (virtio-mem), such pages can be skipped during
458 * memory offlining, however, cannot be moved/allocated. These
459 * techniques might use alloc_contig_range() to hide previously
460 * exposed pages from the buddy again (e.g., to implement some sort
461 * of memory unplug in virtio-mem).
462 * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create
463 * situations where ZERO_PAGE(0) which is allocated differently
464 * on different platforms may end up in a movable zone. ZERO_PAGE(0)
465 * cannot be migrated.
466 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
467 * memory to the MOVABLE zone, the vmemmap pages are also placed in
468 * such zone. Such pages cannot be really moved around as they are
469 * self-stored in the range, but they are treated as movable when
470 * the range they describe is about to be offlined.
471 *
472 * In general, no unmovable allocations that degrade memory offlining
473 * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
474 * have to expect that migrating pages in ZONE_MOVABLE can fail (even
475 * if has_unmovable_pages() states that there are no unmovable pages,
476 * there can be false negatives).
477 */
478 ZONE_MOVABLE,
479#ifdef CONFIG_ZONE_DEVICE
480 ZONE_DEVICE,
481#endif
482 __MAX_NR_ZONES
483
484};
485
486#ifndef __GENERATING_BOUNDS_H
487
488#define ASYNC_AND_SYNC 2
489
490struct zone {
491 /* Read-mostly fields */
492
493 /* zone watermarks, access with *_wmark_pages(zone) macros */
494 unsigned long _watermark[NR_WMARK];
495 unsigned long watermark_boost;
496
497 unsigned long nr_reserved_highatomic;
498
499 /*
500 * We don't know if the memory that we're going to allocate will be
501 * freeable and/or whether it will be released eventually, so to avoid
502 * totally wasting several GB of RAM we must reserve some of the lower zone
503 * memory (otherwise we risk running OOM on the lower zones despite
504 * there being tons of freeable RAM in the higher zones). This array is
505 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
506 * changes.
507 */
508 long lowmem_reserve[MAX_NR_ZONES];
509
510#ifdef CONFIG_NUMA
511 int node;
512#endif
513 struct pglist_data *zone_pgdat;
514 struct per_cpu_pages __percpu *per_cpu_pageset;
515 struct per_cpu_zonestat __percpu *per_cpu_zonestats;
516 /*
517 * the high and batch values are copied to individual pagesets for
518 * faster access
519 */
520 int pageset_high;
521 int pageset_batch;
522
523#ifndef CONFIG_SPARSEMEM
524 /*
525 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
526 * In SPARSEMEM, this map is stored in struct mem_section
527 */
528 unsigned long *pageblock_flags;
529#endif /* CONFIG_SPARSEMEM */
530
531 /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
532 unsigned long zone_start_pfn;
533
534 /*
535 * spanned_pages is the total pages spanned by the zone, including
536 * holes, which is calculated as:
537 * spanned_pages = zone_end_pfn - zone_start_pfn;
538 *
539 * present_pages is physical pages existing within the zone, which
540 * is calculated as:
541 * present_pages = spanned_pages - absent_pages(pages in holes);
542 *
543 * managed_pages is present pages managed by the buddy system, which
544 * is calculated as (reserved_pages includes pages allocated by the
545 * bootmem allocator):
546 * managed_pages = present_pages - reserved_pages;
547 *
548 * cma_pages is the number of present pages that are assigned for CMA use
549 * (MIGRATE_CMA).
550 *
551 * So present_pages may be used by memory hotplug or memory power
552 * management logic to figure out unmanaged pages by checking
553 * (present_pages - managed_pages). managed_pages should be used
554 * by the page allocator and VM scanner to calculate all kinds of watermarks
555 * and thresholds.
556 *
557 * Locking rules:
558 *
559 * zone_start_pfn and spanned_pages are protected by span_seqlock.
560 * It is a seqlock because it has to be read outside of zone->lock,
561 * and it is done in the main allocator path. But, it is written
562 * quite infrequently.
563 *
564 * The span_seqlock is declared along with zone->lock because it is
565 * frequently read in proximity to zone->lock. It's good to
566 * give them a chance of being in the same cacheline.
567 *
568 * Write access to present_pages at runtime should be protected by
569 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
570 * present_pages should use get_online_mems() to get a stable value.
571 */
572 atomic_long_t managed_pages;
573 unsigned long spanned_pages;
574 unsigned long present_pages;
575#ifdef CONFIG_CMA
576 unsigned long cma_pages;
577#endif
578
579 const char *name;
580
581#ifdef CONFIG_MEMORY_ISOLATION
582 /*
583 * Number of isolated pageblocks. It is used to solve the incorrect
584 * freepage counting problem caused by racy retrieval of a pageblock's
585 * migratetype. Protected by zone->lock.
586 */
587 unsigned long nr_isolate_pageblock;
588#endif
589
590#ifdef CONFIG_MEMORY_HOTPLUG
591 /* see spanned/present_pages for more description */
592 seqlock_t span_seqlock;
593#endif
594
595 int initialized;
596
597 /* Write-intensive fields used from the page allocator */
598 ZONE_PADDING(_pad1_)
599
600 /* free areas of different sizes */
601 struct free_area free_area[MAX_ORDER];
602
603 /* zone flags, see below */
604 unsigned long flags;
605
606 /* Primarily protects free_area */
607 spinlock_t lock;
608
609 /* Write-intensive fields used by compaction and vmstats. */
610 ZONE_PADDING(_pad2_)
611
612 /*
613 * When free pages are below this point, additional steps are taken
614 * when reading the number of free pages to avoid per-cpu counter
615 * drift that would allow watermarks to be breached.
616 */
617 unsigned long percpu_drift_mark;
618
619#if defined CONFIG_COMPACTION || defined CONFIG_CMA
620 /* pfn where compaction free scanner should start */
621 unsigned long compact_cached_free_pfn;
622 /* pfn where compaction migration scanner should start */
623 unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC];
624 unsigned long compact_init_migrate_pfn;
625 unsigned long compact_init_free_pfn;
626#endif
627
628#ifdef CONFIG_COMPACTION
629 /*
630 * On compaction failure, 1<<compact_defer_shift compactions
631 * are skipped before trying again. The number attempted since
632 * last failure is tracked with compact_considered.
633 * compact_order_failed is the minimum compaction failed order.
634 */
635 unsigned int compact_considered;
636 unsigned int compact_defer_shift;
637 int compact_order_failed;
638#endif
639
640#if defined CONFIG_COMPACTION || defined CONFIG_CMA
641 /* Set to true when the PG_migrate_skip bits should be cleared */
642 bool compact_blockskip_flush;
643#endif
644
645 bool contiguous;
646
647 ZONE_PADDING(_pad3_)
648 /* Zone statistics */
649 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
650 atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
651} ____cacheline_internodealigned_in_smp;
652
653enum pgdat_flags {
654 PGDAT_DIRTY, /* reclaim scanning has recently found
655 * many dirty file pages at the tail
656 * of the LRU.
657 */
658 PGDAT_WRITEBACK, /* reclaim scanning has recently found
659 * many pages under writeback
660 */
661 PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
662};
663
664enum zone_flags {
665 ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
666 * Cleared when kswapd is woken.
667 */
668 ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
669};
670
671static inline unsigned long zone_managed_pages(struct zone *zone)
672{
673 return (unsigned long)atomic_long_read(&zone->managed_pages);
674}
675
676static inline unsigned long zone_cma_pages(struct zone *zone)
677{
678#ifdef CONFIG_CMA
679 return zone->cma_pages;
680#else
681 return 0;
682#endif
683}
684
685static inline unsigned long zone_end_pfn(const struct zone *zone)
686{
687 return zone->zone_start_pfn + zone->spanned_pages;
688}
689
690static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
691{
692 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
693}
694
695static inline bool zone_is_initialized(struct zone *zone)
696{
697 return zone->initialized;
698}
699
700static inline bool zone_is_empty(struct zone *zone)
701{
702 return zone->spanned_pages == 0;
703}
704
705/*
706 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
707 * intersection with the given zone
708 */
709static inline bool zone_intersects(struct zone *zone,
710 unsigned long start_pfn, unsigned long nr_pages)
711{
712 if (zone_is_empty(zone))
713 return false;
714 if (start_pfn >= zone_end_pfn(zone) ||
715 start_pfn + nr_pages <= zone->zone_start_pfn)
716 return false;
717
718 return true;
719}
720
721/*
722 * The "priority" of VM scanning is how much of the queues we will scan in one
723 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
724 * queues ("queue_length >> 12") during an aging round.
725 */
726#define DEF_PRIORITY 12
727
728/* Maximum number of zones on a zonelist */
729#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
730
731enum {
732 ZONELIST_FALLBACK, /* zonelist with fallback */
733#ifdef CONFIG_NUMA
734 /*
735 * The NUMA zonelists are doubled because we need zonelists that
736 * restrict the allocations to a single node for __GFP_THISNODE.
737 */
738 ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */
739#endif
740 MAX_ZONELISTS
741};
742
743/*
744 * This struct contains information about a zone in a zonelist. It is stored
745 * here to avoid dereferences into large structures and lookups of tables
746 */
747struct zoneref {
748 struct zone *zone; /* Pointer to actual zone */
749 int zone_idx; /* zone_idx(zoneref->zone) */
750};
751
752/*
753 * One allocation request operates on a zonelist. A zonelist
754 * is a list of zones; the first one is the 'goal' of the
755 * allocation, and the other zones are fallback zones, in decreasing
756 * priority.
757 *
758 * To speed the reading of the zonelist, the zonerefs contain the zone index
759 * of the entry being read. Helper functions to access information given
760 * a struct zoneref are
761 *
762 * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
763 * zonelist_zone_idx() - Return the index of the zone for an entry
764 * zonelist_node_idx() - Return the index of the node for an entry
765 */
766struct zonelist {
767 struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
768};
769
770/*
771 * The array of struct pages for flatmem.
772 * It must be declared for SPARSEMEM as well because there are configurations
773 * that rely on that.
774 */
775extern struct page *mem_map;
776
777#ifdef CONFIG_TRANSPARENT_HUGEPAGE
778struct deferred_split {
779 spinlock_t split_queue_lock;
780 struct list_head split_queue;
781 unsigned long split_queue_len;
782};
783#endif
784
785/*
786 * On NUMA machines, each NUMA node has a pg_data_t to describe
787 * its memory layout. On UMA machines there is a single pglist_data which
788 * describes the whole memory.
789 *
790 * Memory statistics and page replacement data structures are maintained on a
791 * per-zone basis.
792 */
793typedef struct pglist_data {
794 /*
795 * node_zones contains just the zones for THIS node. Not all of the
796 * zones may be populated, but it is the full list. It is referenced by
797 * this node's node_zonelists as well as other nodes' node_zonelists.
798 */
799 struct zone node_zones[MAX_NR_ZONES];
800
801 /*
802 * node_zonelists contains references to all zones in all nodes.
803 * Generally the first zones will be references to this node's
804 * node_zones.
805 */
806 struct zonelist node_zonelists[MAX_ZONELISTS];
807
808 int nr_zones; /* number of populated zones in this node */
809#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */
810 struct page *node_mem_map;
811#ifdef CONFIG_PAGE_EXTENSION
812 struct page_ext *node_page_ext;
813#endif
814#endif
815#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
816 /*
817 * Must be held any time you expect node_start_pfn,
818 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
819 * Also synchronizes pgdat->first_deferred_pfn during deferred page
820 * init.
821 *
822 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
823 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
824 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
825 *
826 * Nests above zone->lock and zone->span_seqlock
827 */
828 spinlock_t node_size_lock;
829#endif
830 unsigned long node_start_pfn;
831 unsigned long node_present_pages; /* total number of physical pages */
832 unsigned long node_spanned_pages; /* total size of physical page
833 range, including holes */
834 int node_id;
835 wait_queue_head_t kswapd_wait;
836 wait_queue_head_t pfmemalloc_wait;
837 struct task_struct *kswapd; /* Protected by
838 mem_hotplug_begin/end() */
839 int kswapd_order;
840 enum zone_type kswapd_highest_zoneidx;
841
842 int kswapd_failures; /* Number of 'reclaimed == 0' runs */
843
844#ifdef CONFIG_COMPACTION
845 int kcompactd_max_order;
846 enum zone_type kcompactd_highest_zoneidx;
847 wait_queue_head_t kcompactd_wait;
848 struct task_struct *kcompactd;
849#endif
850 /*
851 * This is a per-node reserve of pages that are not available
852 * to userspace allocations.
853 */
854 unsigned long totalreserve_pages;
855
856#ifdef CONFIG_NUMA
857 /*
858 * node reclaim becomes active if more unmapped pages than min_unmapped_pages exist.
859 */
860 unsigned long min_unmapped_pages;
861 unsigned long min_slab_pages;
862#endif /* CONFIG_NUMA */
863
864 /* Write-intensive fields used by page reclaim */
865 ZONE_PADDING(_pad1_)
866
867#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
868 /*
869 * If memory initialisation on large machines is deferred then this
870 * is the first PFN that needs to be initialised.
871 */
872 unsigned long first_deferred_pfn;
873#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
874
875#ifdef CONFIG_TRANSPARENT_HUGEPAGE
876 struct deferred_split deferred_split_queue;
877#endif
878
879 /* Fields commonly accessed by the page reclaim scanner */
880
881 /*
882 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
883 *
884 * Use mem_cgroup_lruvec() to look up lruvecs.
885 */
886 struct lruvec __lruvec;
887
888 unsigned long flags;
889
890 ZONE_PADDING(_pad2_)
891
892 /* Per-node vmstats */
893 struct per_cpu_nodestat __percpu *per_cpu_nodestats;
894 atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
895} pg_data_t;
896
897#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
898#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
899#ifdef CONFIG_FLATMEM
900#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
901#else
902#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
903#endif
904#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
905
906#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
907#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
908
909static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
910{
911 return pgdat->node_start_pfn + pgdat->node_spanned_pages;
912}
913
914static inline bool pgdat_is_empty(pg_data_t *pgdat)
915{
916 return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
917}
918
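/*
 * Sketch: walking every pfn spanned by a node (the span may contain holes,
 * so callers typically also check pfn_valid(); illustrative only):
 *
 *	unsigned long pfn;
 *
 *	for (pfn = pgdat->node_start_pfn; pfn < pgdat_end_pfn(pgdat); pfn++) {
 *		if (!pfn_valid(pfn))
 *			continue;
 *		...
 *	}
 */
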
919#include <linux/memory_hotplug.h>
920
921void build_all_zonelists(pg_data_t *pgdat);
922void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
923 enum zone_type highest_zoneidx);
924bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
925 int highest_zoneidx, unsigned int alloc_flags,
926 long free_pages);
927bool zone_watermark_ok(struct zone *z, unsigned int order,
928 unsigned long mark, int highest_zoneidx,
929 unsigned int alloc_flags);
930bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
931 unsigned long mark, int highest_zoneidx);
932/*
933 * Memory initialization context, used to differentiate memory added by
934 * the platform statically from memory added via the memory hotplug interface.
935 */
936enum meminit_context {
937 MEMINIT_EARLY,
938 MEMINIT_HOTPLUG,
939};
940
941extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
942 unsigned long size);
943
944extern void lruvec_init(struct lruvec *lruvec);
945
946static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
947{
948#ifdef CONFIG_MEMCG
949 return lruvec->pgdat;
950#else
951 return container_of(lruvec, struct pglist_data, __lruvec);
952#endif
953}
954
955#ifdef CONFIG_HAVE_MEMORYLESS_NODES
956int local_memory_node(int node_id);
957#else
958static inline int local_memory_node(int node_id) { return node_id; };
959#endif
960
961/*
962 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
963 */
964#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
965
966#ifdef CONFIG_ZONE_DEVICE
967static inline bool zone_is_zone_device(struct zone *zone)
968{
969 return zone_idx(zone) == ZONE_DEVICE;
970}
971#else
972static inline bool zone_is_zone_device(struct zone *zone)
973{
974 return false;
975}
976#endif
977
978/*
979 * Returns true if a zone has pages managed by the buddy allocator.
980 * All the reclaim decisions have to use this function rather than
981 * populated_zone(). If the whole zone is reserved then we can easily
982 * end up with populated_zone() && !managed_zone().
983 */
984static inline bool managed_zone(struct zone *zone)
985{
986 return zone_managed_pages(zone);
987}
988
989/* Returns true if a zone has memory */
990static inline bool populated_zone(struct zone *zone)
991{
992 return zone->present_pages;
993}
994
995#ifdef CONFIG_NUMA
996static inline int zone_to_nid(struct zone *zone)
997{
998 return zone->node;
999}
1000
1001static inline void zone_set_nid(struct zone *zone, int nid)
1002{
1003 zone->node = nid;
1004}
1005#else
1006static inline int zone_to_nid(struct zone *zone)
1007{
1008 return 0;
1009}
1010
1011static inline void zone_set_nid(struct zone *zone, int nid) {}
1012#endif
1013
1014extern int movable_zone;
1015
1016static inline int is_highmem_idx(enum zone_type idx)
1017{
1018#ifdef CONFIG_HIGHMEM
1019 return (idx == ZONE_HIGHMEM ||
1020 (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
1021#else
1022 return 0;
1023#endif
1024}
1025
1026/**
1027 * is_highmem - helper function to quickly check if a struct zone is a
1028 * highmem zone or not. This is an attempt to keep references
1029 * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
1030 * @zone: pointer to struct zone variable
1031 * Return: 1 for a highmem zone, 0 otherwise
1032 */
1033static inline int is_highmem(struct zone *zone)
1034{
1035#ifdef CONFIG_HIGHMEM
1036 return is_highmem_idx(zone_idx(zone));
1037#else
1038 return 0;
1039#endif
1040}
1041
1042/* These functions are used to set up the per-zone pages min values */
1043struct ctl_table;
1044
1045int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
1046 loff_t *);
1047int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
1048 size_t *, loff_t *);
1049extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
1050int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
1051 size_t *, loff_t *);
1052int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int,
1053 void *, size_t *, loff_t *);
1054int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
1055 void *, size_t *, loff_t *);
1056int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
1057 void *, size_t *, loff_t *);
1058int numa_zonelist_order_handler(struct ctl_table *, int,
1059 void *, size_t *, loff_t *);
1060extern int percpu_pagelist_high_fraction;
1061extern char numa_zonelist_order[];
1062#define NUMA_ZONELIST_ORDER_LEN 16
1063
1064#ifndef CONFIG_NUMA
1065
1066extern struct pglist_data contig_page_data;
1067static inline struct pglist_data *NODE_DATA(int nid)
1068{
1069 return &contig_page_data;
1070}
1071#define NODE_MEM_MAP(nid) mem_map
1072
1073#else /* CONFIG_NUMA */
1074
1075#include <asm/mmzone.h>
1076
1077#endif /* !CONFIG_NUMA */
1078
1079extern struct pglist_data *first_online_pgdat(void);
1080extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
1081extern struct zone *next_zone(struct zone *zone);
1082
1083/**
1084 * for_each_online_pgdat - helper macro to iterate over all online nodes
1085 * @pgdat: pointer to a pg_data_t variable
1086 */
1087#define for_each_online_pgdat(pgdat) \
1088 for (pgdat = first_online_pgdat(); \
1089 pgdat; \
1090 pgdat = next_online_pgdat(pgdat))
1091/**
1092 * for_each_zone - helper macro to iterate over all memory zones
1093 * @zone: pointer to struct zone variable
1094 *
1095 * The user only needs to declare the zone variable; for_each_zone
1096 * fills it in.
1097 */
1098#define for_each_zone(zone) \
1099 for (zone = (first_online_pgdat())->node_zones; \
1100 zone; \
1101 zone = next_zone(zone))
1102
1103#define for_each_populated_zone(zone) \
1104 for (zone = (first_online_pgdat())->node_zones; \
1105 zone; \
1106 zone = next_zone(zone)) \
1107 if (!populated_zone(zone)) \
1108 ; /* do nothing */ \
1109 else
1110
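/*
 * Sketch: summing buddy-managed pages over all populated zones in the
 * system (illustrative only):
 *
 *	struct zone *zone;
 *	unsigned long managed = 0;
 *
 *	for_each_populated_zone(zone)
 *		managed += zone_managed_pages(zone);
 */
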
1111static inline struct zone *zonelist_zone(struct zoneref *zoneref)
1112{
1113 return zoneref->zone;
1114}
1115
1116static inline int zonelist_zone_idx(struct zoneref *zoneref)
1117{
1118 return zoneref->zone_idx;
1119}
1120
1121static inline int zonelist_node_idx(struct zoneref *zoneref)
1122{
1123 return zone_to_nid(zoneref->zone);
1124}
1125
1126struct zoneref *__next_zones_zonelist(struct zoneref *z,
1127 enum zone_type highest_zoneidx,
1128 nodemask_t *nodes);
1129
1130/**
1131 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
1132 * @z: The cursor used as a starting point for the search
1133 * @highest_zoneidx: The zone index of the highest zone to return
1134 * @nodes: An optional nodemask to filter the zonelist with
1135 *
1136 * This function returns the next zone at or below a given zone index that is
1137 * within the allowed nodemask using a cursor as the starting point for the
1138 * search. The zoneref returned is a cursor that represents the current zone
1139 * being examined. It should be advanced by one before calling
1140 * next_zones_zonelist again.
1141 *
1142 * Return: the next zone at or below highest_zoneidx within the allowed
1143 * nodemask using a cursor within a zonelist as a starting point
1144 */
1145static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
1146 enum zone_type highest_zoneidx,
1147 nodemask_t *nodes)
1148{
1149 if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
1150 return z;
1151 return __next_zones_zonelist(z, highest_zoneidx, nodes);
1152}
1153
1154/**
1155 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
1156 * @zonelist: The zonelist to search for a suitable zone
1157 * @highest_zoneidx: The zone index of the highest zone to return
1158 * @nodes: An optional nodemask to filter the zonelist with
1159 *
1160 * This function returns the first zone at or below a given zone index that is
1161 * within the allowed nodemask. The zoneref returned is a cursor that can be
1162 * used to iterate the zonelist with next_zones_zonelist by advancing it by
1163 * one before calling.
1164 *
1165 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
1166 * never NULL). This may happen either genuinely, or due to a concurrent
1167 * nodemask update caused by cpuset modification.
1168 *
1169 * Return: Zoneref pointer for the first suitable zone found
1170 */
1171static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
1172 enum zone_type highest_zoneidx,
1173 nodemask_t *nodes)
1174{
1175 return next_zones_zonelist(zonelist->_zonerefs,
1176 highest_zoneidx, nodes);
1177}
1178
1179/**
1180 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
1181 * @zone: The current zone in the iterator
1182 * @z: The current pointer within zonelist->_zonerefs being iterated
1183 * @zlist: The zonelist being iterated
1184 * @highidx: The zone index of the highest zone to return
1185 * @nodemask: Nodemask allowed by the allocator
1186 *
1187 * This iterator iterates through all zones at or below a given zone index and
1188 * within a given nodemask.
1189 */
1190#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
1191 for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
1192 zone; \
1193 z = next_zones_zonelist(++z, highidx, nodemask), \
1194 zone = zonelist_zone(z))
1195
1196#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
1197 for (zone = z->zone; \
1198 zone; \
1199 z = next_zones_zonelist(++z, highidx, nodemask), \
1200 zone = zonelist_zone(z))
1201
1202
1203/**
1204 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
1205 * @zone: The current zone in the iterator
1206 * @z: The current pointer within zonelist->zones being iterated
1207 * @zlist: The zonelist being iterated
1208 * @highidx: The zone index of the highest zone to return
1209 *
1210 * This iterator iterates through all zones at or below a given zone index.
1211 */
1212#define for_each_zone_zonelist(zone, z, zlist, highidx) \
1213 for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
1214
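/*
 * Sketch of an allocator-style walk over a zonelist, stopping at the first
 * zone whose low watermark is still met ("zonelist" and "order" are
 * hypothetical locals; illustrative only):
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, ZONE_NORMAL) {
 *		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				      ZONE_NORMAL, 0))
 *			break;	// candidate zone found
 *	}
 */
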
1215#ifdef CONFIG_SPARSEMEM
1216#include <asm/sparsemem.h>
1217#endif
1218
1219#ifdef CONFIG_FLATMEM
1220#define pfn_to_nid(pfn) (0)
1221#endif
1222
1223#ifdef CONFIG_SPARSEMEM
1224
1225/*
1226 * PA_SECTION_SHIFT physical address to/from section number
1227 * PFN_SECTION_SHIFT pfn to/from section number
1228 */
1229#define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
1230#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
1231
1232#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)
1233
1234#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
1235#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
1236
1237#define SECTION_BLOCKFLAGS_BITS \
1238 ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
1239
1240#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
1241#error Allocator MAX_ORDER exceeds SECTION_SIZE
1242#endif
1243
1244static inline unsigned long pfn_to_section_nr(unsigned long pfn)
1245{
1246 return pfn >> PFN_SECTION_SHIFT;
1247}
1248static inline unsigned long section_nr_to_pfn(unsigned long sec)
1249{
1250 return sec << PFN_SECTION_SHIFT;
1251}
1252
1253#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
1254#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
1255
1256#define SUBSECTION_SHIFT 21
1257#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)
1258
1259#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
1260#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
1261#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
1262
1263#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
1264#error Subsection size exceeds section size
1265#else
1266#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
1267#endif
1268
1269#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
1270#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
1271
1272struct mem_section_usage {
1273#ifdef CONFIG_SPARSEMEM_VMEMMAP
1274 DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
1275#endif
1276 /* See declaration of similar field in struct zone */
1277 unsigned long pageblock_flags[0];
1278};
1279
1280void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
1281
1282struct page;
1283struct page_ext;
1284struct mem_section {
1285 /*
1286 * This is, logically, a pointer to an array of struct
1287 * pages. However, it is stored with some other magic.
1288 * (see sparse.c::sparse_init_one_section())
1289 *
1290 * Additionally during early boot we encode node id of
1291 * the location of the section here to guide allocation.
1292 * (see sparse.c::memory_present())
1293 *
1294 * Making it a UL at least makes someone do a cast
1295 * before using it wrong.
1296 */
1297 unsigned long section_mem_map;
1298
1299 struct mem_section_usage *usage;
1300#ifdef CONFIG_PAGE_EXTENSION
1301 /*
1302 * If SPARSEMEM, pgdat doesn't have a page_ext pointer; we use the one
1303 * in mem_section instead. (see page_ext.h about this.)
1304 */
1305 struct page_ext *page_ext;
1306 unsigned long pad;
1307#endif
1308 /*
1309 * WARNING: mem_section must be a power-of-2 in size for the
1310 * calculation and use of SECTION_ROOT_MASK to make sense.
1311 */
1312};
1313
1314#ifdef CONFIG_SPARSEMEM_EXTREME
1315#define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section))
1316#else
1317#define SECTIONS_PER_ROOT 1
1318#endif
1319
1320#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
1321#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
1322#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
1323
1324#ifdef CONFIG_SPARSEMEM_EXTREME
1325extern struct mem_section **mem_section;
1326#else
1327extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
1328#endif
1329
1330static inline unsigned long *section_to_usemap(struct mem_section *ms)
1331{
1332 return ms->usage->pageblock_flags;
1333}
1334
1335static inline struct mem_section *__nr_to_section(unsigned long nr)
1336{
1337#ifdef CONFIG_SPARSEMEM_EXTREME
1338 if (!mem_section)
1339 return NULL;
1340#endif
1341 if (!mem_section[SECTION_NR_TO_ROOT(nr)])
1342 return NULL;
1343 return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
1344}
1345extern unsigned long __section_nr(struct mem_section *ms);
1346extern size_t mem_section_usage_size(void);
1347
1348/*
1349 * We use the lower bits of the mem_map pointer to store
1350 * a little bit of information. The pointer is calculated
1351 * as mem_map - section_nr_to_pfn(pnum). The result is
1352 * aligned to the minimum alignment of the two values:
1353 * 1. All mem_map arrays are page-aligned.
1354 * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
1355 * lowest bits. PFN_SECTION_SHIFT is arch-specific
1356 * (it equals SECTION_SIZE_BITS - PAGE_SHIFT), and the
1357 * worst combination is powerpc with 256k pages,
1358 * which results in PFN_SECTION_SHIFT equal to 6.
1359 * To sum it up, at least 6 bits are available.
1360 */
1361#define SECTION_MARKED_PRESENT (1UL<<0)
1362#define SECTION_HAS_MEM_MAP (1UL<<1)
1363#define SECTION_IS_ONLINE (1UL<<2)
1364#define SECTION_IS_EARLY (1UL<<3)
1365#define SECTION_TAINT_ZONE_DEVICE (1UL<<4)
1366#define SECTION_MAP_LAST_BIT (1UL<<5)
1367#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
1368#define SECTION_NID_SHIFT 3
1369
1370static inline struct page *__section_mem_map_addr(struct mem_section *section)
1371{
1372 unsigned long map = section->section_mem_map;
1373 map &= SECTION_MAP_MASK;
1374 return (struct page *)map;
1375}
1376
1377static inline int present_section(struct mem_section *section)
1378{
1379 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
1380}
1381
1382static inline int present_section_nr(unsigned long nr)
1383{
1384 return present_section(__nr_to_section(nr));
1385}
1386
1387static inline int valid_section(struct mem_section *section)
1388{
1389 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
1390}
1391
1392static inline int early_section(struct mem_section *section)
1393{
1394 return (section && (section->section_mem_map & SECTION_IS_EARLY));
1395}
1396
1397static inline int valid_section_nr(unsigned long nr)
1398{
1399 return valid_section(__nr_to_section(nr));
1400}
1401
1402static inline int online_section(struct mem_section *section)
1403{
1404 return (section && (section->section_mem_map & SECTION_IS_ONLINE));
1405}
1406
1407static inline int online_device_section(struct mem_section *section)
1408{
1409 unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
1410
1411 return section && ((section->section_mem_map & flags) == flags);
1412}
1413
1414static inline int online_section_nr(unsigned long nr)
1415{
1416 return online_section(__nr_to_section(nr));
1417}
1418
1419#ifdef CONFIG_MEMORY_HOTPLUG
1420void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
1421void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
1422#endif
1423
1424static inline struct mem_section *__pfn_to_section(unsigned long pfn)
1425{
1426 return __nr_to_section(pfn_to_section_nr(pfn));
1427}
1428
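/*
 * Sketch of the SPARSEMEM (non-vmemmap) pfn -> struct page path built on
 * the helpers above (simplified; the real __pfn_to_page() lives in
 * asm-generic/memory_model.h):
 *
 *	struct mem_section *ms = __pfn_to_section(pfn);
 *	struct page *page = __section_mem_map_addr(ms) + pfn;
 */
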
1429extern unsigned long __highest_present_section_nr;
1430
1431static inline int subsection_map_index(unsigned long pfn)
1432{
1433 return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
1434}
1435
1436#ifdef CONFIG_SPARSEMEM_VMEMMAP
1437static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
1438{
1439 int idx = subsection_map_index(pfn);
1440
1441 return test_bit(idx, ms->usage->subsection_map);
1442}
1443#else
1444static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
1445{
1446 return 1;
1447}
1448#endif
1449
1450#ifndef CONFIG_HAVE_ARCH_PFN_VALID
1451/**
1452 * pfn_valid - check if there is a valid memory map entry for a PFN
1453 * @pfn: the page frame number to check
1454 *
1455 * Check if there is a valid memory map entry aka struct page for the @pfn.
1456 * Note that the availability of the memory map entry does not imply that
1457 * there is actual usable memory at that @pfn. The struct page may
1458 * represent a hole or an unusable page frame.
1459 *
1460 * Return: 1 for PFNs that have memory map entries and 0 otherwise
1461 */
1462static inline int pfn_valid(unsigned long pfn)
1463{
1464 struct mem_section *ms;
1465
1466 /*
1467 * Ensure the upper PAGE_SHIFT bits are clear in the
1468 * pfn. Else it might lead to false positives when
1469 * some of the upper bits are set, but the lower bits
1470 * match a valid pfn.
1471 */
1472 if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
1473 return 0;
1474
1475 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
1476 return 0;
1477 ms = __nr_to_section(pfn_to_section_nr(pfn));
1478 if (!valid_section(ms))
1479 return 0;
1480 /*
1481 * Traditionally early sections always returned pfn_valid() for
1482 * the entire section-sized span.
1483 */
1484 return early_section(ms) || pfn_section_valid(ms, pfn);
1485}
1486#endif
1487
1488static inline int pfn_in_present_section(unsigned long pfn)
1489{
1490 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
1491 return 0;
1492 return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
1493}
1494
1495static inline unsigned long next_present_section_nr(unsigned long section_nr)
1496{
1497 while (++section_nr <= __highest_present_section_nr) {
1498 if (present_section_nr(section_nr))
1499 return section_nr;
1500 }
1501
1502 return -1;
1503}
1504
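/*
 * Sketch: walking all present sections (cf. for_each_present_section_nr()
 * in mm/sparse.c; illustrative only):
 *
 *	unsigned long nr;
 *
 *	for (nr = next_present_section_nr((unsigned long)-1); nr != -1UL;
 *	     nr = next_present_section_nr(nr))
 *		...;
 */
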
1505/*
1506 * These are _only_ used during initialisation, therefore they
1507 * can use __initdata ... They could have names to indicate
1508 * this restriction.
1509 */
1510#ifdef CONFIG_NUMA
1511#define pfn_to_nid(pfn) \
1512({ \
1513 unsigned long __pfn_to_nid_pfn = (pfn); \
1514 page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
1515})
1516#else
1517#define pfn_to_nid(pfn) (0)
1518#endif
1519
1520void sparse_init(void);
1521#else
1522#define sparse_init() do {} while (0)
1523#define sparse_index_init(_sec, _nid) do {} while (0)
1524#define pfn_in_present_section pfn_valid
1525#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
1526#endif /* CONFIG_SPARSEMEM */
1527
1528/*
1529 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
1530 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
1531 * pfn_valid_within() should be used in this case; we optimise this away
1532 * when we have no holes within a MAX_ORDER_NR_PAGES block.
1533 */
1534#ifdef CONFIG_HOLES_IN_ZONE
1535#define pfn_valid_within(pfn) pfn_valid(pfn)
1536#else
1537#define pfn_valid_within(pfn) (1)
1538#endif
1539
1540#endif /* !__GENERATING_BOUNDS_H */
1541#endif /* !__ASSEMBLY__ */
1542#endif /* _LINUX_MMZONE_H */