include/linux/mmzone.h at v5.2
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service. That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * The MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks, and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by the
	 * __free_pageblock_cma() function.  What is important, though, is
	 * that a range of pageblocks must be aligned to MAX_ORDER_NR_PAGES
	 * should the biggest page be bigger than a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)
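/*
 * Illustrative sketch (not part of the original header): the helpers above are
 * typically composed like the hypothetical macro below, which asks whether the
 * pageblock containing @page may be used for movable allocations.  It is
 * written as a macro, like is_migrate_cma_page() above, so it can sit here
 * before the pfn/page conversion helpers it indirectly relies on are defined.
 */
#define __example_pageblock_is_movable(page) \
	is_migrate_movable(get_pageblock_migratetype(page))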
struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

/* Used for pages not on another list */
static inline void add_to_free_area(struct page *page, struct free_area *area,
				    int migratetype)
{
	list_add(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
					 int migratetype)
{
	list_add_tail(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
/* Used to preserve page allocation order entropy */
void add_to_free_area_random(struct page *page, struct free_area *area,
			     int migratetype);
#else
static inline void add_to_free_area_random(struct page *page,
					   struct free_area *area, int migratetype)
{
	add_to_free_area(page, area, migratetype);
}
#endif

/* Used for pages which are on another list */
static inline void move_to_free_area(struct page *page, struct free_area *area,
				     int migratetype)
{
	list_move(&page->lru, &area->free_list[migratetype]);
}

static inline struct page *get_page_from_free_area(struct free_area *area,
						   int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline void del_page_from_free_area(struct page *page,
					   struct free_area *area)
{
	list_del(&page->lru);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	area->nr_free--;
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

struct pglist_data;
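/*
 * Illustrative sketch (not part of the original header): how a buddy-style
 * caller might scan the per-order free areas with the helpers above.  The
 * name is hypothetical, and locking (zone->lock) plus the splitting of a
 * larger page down to the requested order are deliberately omitted.
 */
static inline struct page *__example_smallest_free_page(struct free_area *areas,
							unsigned int order,
							int migratetype)
{
	/* Walk upward from the requested order to the largest one. */
	for (; order < MAX_ORDER; order++) {
		struct free_area *area = &areas[order];

		if (free_area_empty(area, migratetype))
			continue;
		/* A real allocator would also del_page_from_free_area() here. */
		return get_page_from_free_area(area, migratetype);
	}
	return NULL;
}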
/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in a non-intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,	/* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE,	/* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_RESTORE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   Only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
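/*
 * Illustrative sketch (not part of the original header): the index arithmetic
 * the comment above relies on.  Because the active lists sit exactly
 * LRU_ACTIVE above their inactive counterparts and the file lists exactly
 * LRU_FILE above the anon ones, an lru_list value can be built from two
 * flags.  The helper name is hypothetical; the real equivalents live in
 * <linux/mm_inline.h>.
 */
static inline enum lru_list __example_lru_index(bool file, bool active)
{
	enum lru_list lru = LRU_BASE;

	if (file)
		lru += LRU_FILE;	/* anon column -> file column */
	if (active)
		lru += LRU_ACTIVE;	/* inactive row -> active row */
	return lru;			/* e.g. file && active -> LRU_ACTIVE_FILE */
}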
struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};
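/*
 * Illustrative sketch (not part of the original header): the rotated/scanned
 * ratio described above, expressed as a percentage.  Index 0 holds the anon
 * LRU numbers and index 1 the file LRU numbers; vmscan.c uses this kind of
 * ratio to balance pressure between the two.  The helper name is
 * hypothetical.
 */
static inline unsigned long __example_recent_rotated_pct(struct zone_reclaim_stat *rs,
							 bool file)
{
	unsigned long scanned = rs->recent_scanned[file];

	if (!scanned)
		return 0;
	return rs->recent_rotated[file] * 100 / scanned;
}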
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS.H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390, powerpc	<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA to areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or whether it will be released eventually, so to
	 * avoid totally wasting several GB of ram we must reserve some of
	 * the lower zone memory (otherwise we risk running OOM on the lower
	 * zones despite there being tons of freeable ram on the higher
	 * zones). This array is recalculated at runtime if the
	 * sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who cannot tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieval of a pageblock's
	 * migratetype. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}
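/*
 * Illustrative sketch (not part of the original header): how the wmark
 * accessors defined earlier are meant to be read against a zone's free-page
 * count.  The vm_stat[] array is read directly only to keep the example
 * self-contained; real callers go through the zone_page_state() helpers in
 * <linux/vmstat.h> and the zone_watermark_ok() family declared later in this
 * file.  The helper name is hypothetical.
 */
static inline bool __example_zone_below_low_wmark(struct zone *z)
{
	unsigned long free = atomic_long_read(&z->vm_stat[NR_FREE_PAGES]);

	/* kswapd is normally woken once free pages drop under the low mark. */
	return free < low_wmark_pages(z);
}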
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif
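/*
 * Illustrative sketches (not part of the original header) tying together the
 * spanned/present/managed accounting documented in struct zone and the pfn
 * helpers above.  Both function names are hypothetical.
 */

/* Pages that exist in the zone but were never handed to the buddy allocator
 * (e.g. reserved during boot): present_pages - managed_pages. */
static inline unsigned long __example_zone_unmanaged_pages(struct zone *zone)
{
	return zone->present_pages - zone_managed_pages(zone);
}

/* A pfn range lies entirely inside a zone's span when both of its end pfns
 * do, since the span is a single [zone_start_pfn, zone_end_pfn) interval. */
static inline bool __example_zone_contains_range(struct zone *zone,
						 unsigned long start_pfn,
						 unsigned long nr_pages)
{
	return nr_pages &&
	       zone_spans_pfn(zone, start_pfn) &&
	       zone_spans_pfn(zone, start_pfn + nr_pages - 1);
}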
/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
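/*
 * Illustrative sketch (not part of the original header): node spans, like
 * zone spans, may contain holes, so this only says that @pfn falls inside the
 * node's [node_start_pfn, pgdat_end_pfn) range, not that memory is actually
 * present there (compare node_spanned_pages with node_present_pages above).
 * The helper name is hypothetical.
 */
static inline bool __example_pgdat_spans_pfn(pg_data_t *pgdat, unsigned long pfn)
{
	return pfn >= pgdat->node_start_pfn && pfn < pgdat_end_pfn(pgdat);
}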
#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		       unsigned long mark, int classzone_idx,
		       unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			    unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				      unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#if defined(CONFIG_SPARSEMEM)
void memblocks_present(void);
#else
static inline void memblocks_present(void) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}
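/*
 * Illustrative sketch (not part of the original header): reclaim-style walks
 * are expected to filter on managed_zone() rather than populated_zone(), as
 * the comment above explains, since a fully reserved zone can be populated
 * while leaving the buddy allocator nothing to work with.  The helper name is
 * hypothetical.
 */
static inline unsigned long __example_node_managed_pages(pg_data_t *pgdat)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = &pgdat->node_zones[i];

		if (!managed_zone(zone))
			continue;	/* empty or fully reserved zone */
		total += zone_managed_pages(zone);
	}
	return total;
}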
#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to set up the per-zone page min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
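/*
 * Illustrative sketch (not part of the original header): typical use of the
 * iterator above, summing buddy-managed pages over every populated zone in
 * the system.  The helper name is hypothetical.
 */
static inline unsigned long __example_total_managed_pages(void)
{
	struct zone *zone;
	unsigned long total = 0;

	for_each_populated_zone(zone)
		total += zone_managed_pages(zone);
	return total;
}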
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found (see below)
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to a concurrent
 * nodemask update caused by cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
					highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
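/*
 * Illustrative sketch (not part of the original header): walking a zonelist
 * with the iterator above, e.g. a node's fallback list taken from
 * pgdat->node_zonelists[ZONELIST_FALLBACK], and returning the first zone at
 * or below @highidx that has pages managed by the buddy allocator.  The
 * helper name is hypothetical.
 */
static inline struct zone *__example_first_managed_zone(struct zonelist *zonelist,
							enum zone_type highidx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, highidx) {
		if (managed_zone(zone))
			return zone;
	}
	return NULL;
}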
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears the PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal to 6.
 *      To sum it up, at least 6 bits are available.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
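/*
 * Illustrative sketch (not part of the original header, SPARSEMEM only): how
 * the encoded section_mem_map is used.  Because the stored value is
 * mem_map - section_nr_to_pfn(pnum), indexing the decoded pointer by the
 * absolute pfn yields the matching struct page; this mirrors what the classic
 * (non-vmemmap) SPARSEMEM __pfn_to_page() does in the asm-generic headers.
 * The helper name is hypothetical.
 */
static inline struct page *__example_sparse_pfn_to_page(unsigned long pfn)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	if (!valid_section(ms))
		return NULL;	/* no memmap behind this pfn */
	return __section_mem_map_addr(ms) + pfn;
}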
/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#define pfn_present pfn_valid
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. This means that a struct page exists for this
 * pfn. The caller cannot assume the page is fully initialized in general.
 * Hotplugable pages might not have been onlined yet. pfn_to_online_page()
 * will ensure the struct page is fully online and initialized. Special pages
 * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
 *
 * In FLATMEM, it is expected that holes always have valid memmap as long as
 * there are valid PFNs on either side of the hole. In SPARSEMEM, it is
 * assumed that a valid section has a memmap for the entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future, free
 * the memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
			 struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
				       struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */