/* linux/mmzone.h, as of v2.6.23 */

#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <asm/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is the boundary between allocation orders which
 * should coalesce naturally under reasonable reclaim pressure and those
 * which will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

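/*
 * For a sense of scale: with the default MAX_ORDER of 11 the buddy
 * allocator keeps free lists for orders 0..10, so the largest block it
 * hands out is MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages (4MB with 4KB
 * pages), and PAGE_ALLOC_COSTLY_ORDER draws the "costly" line at
 * anything larger than 8 contiguous pages.
 */
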
struct free_area {
	struct list_head	free_list;
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_INACTIVE,
	NR_ACTIVE,
	NR_ANON_PAGES,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	/* Second 128 byte cacheline */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_VM_ZONE_STAT_ITEMS };

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the list of pages */
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL).  Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples:
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA to areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL.  DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space.  This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB.  The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	MAX_NR_ZONES
};

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits.  See gfp_zone() in include/linux/gfp.h.
 */

/*
 * Count the active zones.  Note that the use of defined(X) outside
 * #if and family is not necessarily well-defined, so make sure we
 * cannot use it later.  Use __ZONE_COUNT to work out how many shift
 * bits we need.
 */
#define __ZONE_COUNT (			\
	  defined(CONFIG_ZONE_DMA)	\
	+ defined(CONFIG_ZONE_DMA32)	\
	+ 1				\
	+ defined(CONFIG_HIGHMEM)	\
	+ 1				\
)
#if __ZONE_COUNT < 2
#define ZONES_SHIFT 0
#elif __ZONE_COUNT <= 2
#define ZONES_SHIFT 1
#elif __ZONE_COUNT <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured, adjust calculation
#endif
#undef __ZONE_COUNT

struct zone {
	/* Fields commonly accessed by the page allocator */
	unsigned long		pages_min, pages_low, pages_high;
	/*
	 * We don't know whether the memory that we're going to allocate will
	 * be freeable or whether it will eventually be released, so to avoid
	 * totally wasting several GB of RAM we must reserve some of the lower
	 * zone memory (otherwise we risk running OOM on the lower zones even
	 * though there is plenty of freeable RAM on the higher zones).  This
	 * array is recalculated at runtime if the
	 * sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

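	/*
	 * Rough sketch of how the limits above combine (the real check is
	 * zone_watermark_ok(), declared later in this header and implemented
	 * in mm/page_alloc.c): an allocation whose preferred zone has index
	 * classzone_idx may only take pages from this zone while roughly
	 *
	 *	free_pages > pages_{min,low,high} + lowmem_reserve[classzone_idx]
	 *
	 * holds, the watermark chosen depending on how hard the caller may dig.
	 */
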
#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
	struct per_cpu_pageset	*pageset[NR_CPUS];
#else
	struct per_cpu_pageset	pageset[NR_CPUS];
#endif
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];


	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		nr_scan_active;
	unsigned long		nr_scan_inactive;
	unsigned long		pages_scanned;	   /* since last reclaim */
	int			all_unreclaimable; /* All pages pinned */

	/* A count of how many reclaimers are scanning this zone */
	atomic_t		reclaim_in_progress;

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim is
	 * under - it drives the swappiness decision: whether to unmap mapped
	 * pages.
	 *
	 * Access to this field is quite racy even on uniprocessor.  But
	 * it is expected to average out OK.
	 */
	int prev_priority;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible.  The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time.  So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd.  The cost of a
	 * collision is great, but given the expected load of the
	 * table, collisions should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs their initialization.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

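	/*
	 * Rough sketch of the lookup (the real code is page_waitqueue() in
	 * mm/filemap.c): a sleeper on a given page is parked on
	 *
	 *	&zone->wait_table[hash_ptr(page, zone->wait_table_bits)]
	 *
	 * so wait_table_bits is both the hash width and the log2 of
	 * wait_table_hash_nr_entries.
	 */
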
	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go.  A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

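/*
 * Put differently: at priority p the reclaim scanner considers roughly
 * list_length >> p pages per pass.  Scanning starts at DEF_PRIORITY
 * (1/4096th of each list) and, if the reclaim target is not met, retries
 * at progressively smaller priorities until, at priority 0, it is willing
 * to scan the entire list.
 */
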
/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_fullzone_zap)
 *    we zeroed fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zone's offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * re-examining zones for free memory when they came up low on
 * memory only moments ago.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache and, in the case of
 * MPOL_BIND zonelists (which will just set zlcache_ptr to NULL),
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 *  1) The full, fixed length version, shown below, and
 *  2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */


struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
struct zonelist_cache;
#endif

/*
 * One allocation request operates on a zonelist.  A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 */

struct zonelist {
	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
	struct zone *zones[MAX_ZONES_PER_ZONELIST + 1];      // NULL delimited
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			     // optional ...
#endif
};

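/*
 * Illustrative use only (the real consumer is get_page_from_freelist()
 * in mm/page_alloc.c): since zones[] is NULL delimited, a caller walks
 * the fallback order with something like
 *
 *	struct zone **z;
 *	for (z = zonelist->zones; *z != NULL; z++)
 *		if (zone_watermark_ok(*z, order, (*z)->pages_low,
 *				      zone_idx(zonelist->zones[0]), 0))
 *			break;
 *
 * and then allocates from *z if the loop found a zone with enough
 * free memory.
 */
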
#ifdef CONFIG_NUMA
/*
 * Only custom zonelists like MPOL_BIND need to be filtered as part of
 * policies.  As described in the comment for struct zonelist_cache, these
 * zonelists will not have a zlcache so zlcache_ptr will not be set.  Use
 * that to determine if the zonelist needs to be filtered or not.
 */
static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
{
	return !zonelist->zlcache_ptr;
}
#else
static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
{
	return 0;
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory area than a
 * single zone denotes.
 *
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_NR_ZONES];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	struct page *node_mem_map;
#endif
	struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
} pg_data_t;

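/*
 * On !CONFIG_NEED_MULTIPLE_NODES kernels there is exactly one of these,
 * contig_page_data, and the NODE_DATA() macro (both defined further down
 * in this header) always resolves to it.  NUMA configurations instead get
 * NODE_DATA() from <asm/mmzone.h>, typically one pg_data_t per node.
 */
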
#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid), (pagenr))

#include <linux/memory_hotplug.h>

void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_idx = zone - zone->zone_pgdat->node_zones;
	return zone_idx == ZONE_HIGHMEM ||
		(zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
#else
	return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}

/* These functions are used to set up the per zone pages_min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */

#include <linux/topology.h>
/* Returns the number of the current Node. */
#ifndef numa_node_id
#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#define MAX_NODES_SHIFT		1

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))

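/*
 * Typical usage (the caller just supplies the cursor variable):
 *
 *	struct zone *zone;
 *
 *	for_each_zone(zone) {
 *		if (!populated_zone(zone))
 *			continue;
 *		...
 *	}
 */
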
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if BITS_PER_LONG == 32
/*
 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
 * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
 */
#define FLAGS_RESERVED		9

#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define FLAGS_RESERVED		32

#else

#error BITS_PER_LONG not defined

#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
#define early_pfn_to_nid(nid)  (0UL)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

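/*
 * Worked example: an architecture that defines SECTION_SIZE_BITS as 27
 * (128MB sections) with 4KB pages gets PFN_SECTION_SHIFT = 27 - 12 = 15,
 * i.e. PAGES_PER_SECTION = 32768, and with MAX_PHYSMEM_BITS of, say, 46
 * there would be 1UL << 19 possible sections.  The #error above simply
 * ensures the largest buddy allocator block still fits inside a single
 * section.
 */
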
struct page;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int section_has_mem_map(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
#define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */