#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <asm/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

struct free_area {
        struct list_head free_list;
        unsigned long nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name) struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

struct per_cpu_pages {
        int count;              /* number of pages in the list */
        int high;               /* high watermark, emptying needed */
        int batch;              /* chunk size for buddy add/remove */
        struct list_head list;  /* the list of pages */
};

struct per_cpu_pageset {
        struct per_cpu_pages pcp[2];    /* 0: hot.  1: cold */
#ifdef CONFIG_NUMA
        unsigned long numa_hit;         /* allocated in intended node */
        unsigned long numa_miss;        /* allocated in non-intended node */
        unsigned long numa_foreign;     /* was intended here, hit elsewhere */
        unsigned long interleave_hit;   /* interleaver preferred this zone */
        unsigned long local_node;       /* allocation from local node */
        unsigned long other_node;       /* allocation from other node */
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif

#define ZONE_DMA        0
#define ZONE_DMA32      1
#define ZONE_NORMAL     2
#define ZONE_HIGHMEM    3

#define MAX_NR_ZONES    4       /* Sync this with ZONES_SHIFT */
#define ZONES_SHIFT     2       /* ceil(log2(MAX_NR_ZONES)) */


/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits.  GFP_ZONEMASK defines which bits within
 * the gfp_mask should be considered as zone modifiers.  Each valid
 * combination of the zone modifier bits has a corresponding list
 * of zones (in node_zonelists).  Thus for two zone modifiers there
 * will be a maximum of 4 (2 ** 2) zonelists, for 3 modifiers there will
 * be 8 (2 ** 3) zonelists.  GFP_ZONETYPES defines the number of possible
 * combinations of zone modifiers in "zone modifier space".
 *
 * As an optimisation any zone modifier bits which are only valid when
 * no other zone modifier bits are set (loners) should be placed in
 * the highest order bits of this field.  This allows us to reduce the
 * extent of the zonelists thus saving space.  For example in the case
 * of three zone modifier bits, we could require up to eight zonelists.
 * If the leftmost zone modifier is a "loner" then the highest valid
 * zonelist would be four, allowing us to allocate only five zonelists.
 * Use the first form for GFP_ZONETYPES when the leftmost bit is not
 * a "loner", otherwise use the second.
 *
 * NOTE! Make sure this matches the zones in <linux/gfp.h>
 */
#define GFP_ZONEMASK    0x07
/* #define GFP_ZONETYPES (GFP_ZONEMASK + 1) */          /* Non-loner */
#define GFP_ZONETYPES   ((GFP_ZONEMASK + 1) / 2 + 1)    /* Loner */

/*
 * On machines where it is needed (eg PCs) we divide physical memory
 * into multiple physical zones.  On a 32bit PC we have 4 zones:
 *
 * ZONE_DMA       < 16 MB       ISA DMA capable memory
 * ZONE_DMA32        0 MB       Empty
 * ZONE_NORMAL  16-896 MB       direct mapped by the kernel
 * ZONE_HIGHMEM  > 896 MB       only page cache and user processes
 */

struct zone {
        /* Fields commonly accessed by the page allocator */
        unsigned long free_pages;
        unsigned long pages_min, pages_low, pages_high;
        /*
         * We don't know whether the memory that we're going to allocate
         * will be freeable or whether it will eventually be released, so
         * to avoid wasting several GB of RAM we must reserve some of the
         * lower zone memory (otherwise we risk running OOM on the lower
         * zones even though there is plenty of freeable RAM in the higher
         * zones).  This array is recalculated at runtime if the
         * sysctl_lowmem_reserve_ratio sysctl changes.
         */
        unsigned long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
        struct per_cpu_pageset *pageset[NR_CPUS];
#else
        struct per_cpu_pageset pageset[NR_CPUS];
#endif
        /*
         * free areas of different sizes
         */
        spinlock_t lock;
#ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t span_seqlock;
#endif
        struct free_area free_area[MAX_ORDER];


        ZONE_PADDING(_pad1_)

        /* Fields commonly accessed by the page reclaim scanner */
        spinlock_t lru_lock;
        struct list_head active_list;
        struct list_head inactive_list;
        unsigned long nr_scan_active;
        unsigned long nr_scan_inactive;
        unsigned long nr_active;
        unsigned long nr_inactive;
        unsigned long pages_scanned;    /* since last reclaim */
        int all_unreclaimable;          /* All pages pinned */

        /* A count of how many reclaimers are scanning this zone */
        atomic_t reclaim_in_progress;

        /*
         * timestamp (in jiffies) of the last zone reclaim that did not
         * result in freeing of pages.  This is used to avoid repeated scans
         * if all memory in the zone is in use.
         */
        unsigned long last_unsuccessful_zone_reclaim;

        /*
         * prev_priority holds the scanning priority for this zone.  It is
         * defined as the scanning priority at which we achieved our reclaim
         * target at the previous try_to_free_pages() or balance_pgdat()
         * invocation.
         *
         * We use prev_priority as a measure of how much stress page reclaim
         * is under - it drives the swappiness decision: whether to unmap
         * mapped pages.
         *
         * temp_priority is used to remember the scanning priority at which
         * this zone was successfully refilled to free_pages == pages_high.
         *
         * Access to both these fields is quite racy even on uniprocessor.
         * But it is expected to average out OK.
         */
        int temp_priority;
        int prev_priority;


        ZONE_PADDING(_pad2_)
        /* Rarely used or read-mostly fields */

        /*
         * wait_table      -- the array holding the hash table
         * wait_table_size -- the size of the hash table array
         * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
         *
         * The purpose of all these is to keep track of the people
         * waiting for a page to become available and make them
         * runnable again when possible.  The trouble is that this
         * consumes a lot of space, especially when so few things
         * wait on pages at a given time.  So instead of using
         * per-page waitqueues, we use a waitqueue hash table.
         *
         * The bucket discipline is to sleep on the same queue when
         * colliding and wake all in that wait queue when removing.
         * When something wakes, it must check to be sure its page is
         * truly available, a la thundering herd.  The cost of a
         * collision is great, but given the expected load of the
         * table, they should be so rare as to be outweighed by the
         * benefits from the saved space.
         *
         * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
         * primary users of these fields, and in mm/page_alloc.c
         * free_area_init_core() performs the initialization of them.
         */
        wait_queue_head_t *wait_table;
        unsigned long wait_table_size;
        unsigned long wait_table_bits;

        /*
         * Discontig memory support fields.
         */
        struct pglist_data *zone_pgdat;
        /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
        unsigned long zone_start_pfn;

        /*
         * zone_start_pfn, spanned_pages and present_pages are all
         * protected by span_seqlock.  It is a seqlock because it has
         * to be read outside of zone->lock, and it is done in the main
         * allocator path.  But, it is written quite infrequently.
         *
         * The lock is declared along with zone->lock because it is
         * frequently read in proximity to zone->lock.  It's good to
         * give them a chance of being in the same cacheline.
         */
        unsigned long spanned_pages;    /* total size, including holes */
        unsigned long present_pages;    /* amount of memory (excluding holes) */

        /*
         * rarely used fields:
         */
        char *name;
} ____cacheline_internodealigned_in_smp;


/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go.  A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/*
 * One allocation request operates on a zonelist.  A zonelist is a list of
 * zones; the first one is the 'goal' of the allocation, and the other zones
 * are fallback zones, in decreasing priority.
 *
 * Right now a zonelist takes up less than a cacheline.  We never modify it
 * apart from boot-up, and only a few indices are used, so despite the
 * zonelist table being relatively big, the cache footprint of this construct
 * is very small.
 */
struct zonelist {
        struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; /* NULL delimited */
};


/*
 * The pg_data_t structure is used on machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to describe a higher-level grouping of memory
 * than a single zone.
 *
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];
        struct zonelist node_zonelists[GFP_ZONETYPES];
        int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
        struct page *node_mem_map;
#endif
        struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Must be held any time you expect node_start_pfn, node_present_pages
         * or node_spanned_pages to stay constant.  Holding this will also
         * guarantee that any pfn_valid() stays that way.
         *
         * Nests above zone->lock and zone->span_seqlock.
         */
        spinlock_t node_size_lock;
#endif
        unsigned long node_start_pfn;
        unsigned long node_present_pages; /* total number of physical pages */
        unsigned long node_spanned_pages; /* total size of physical page
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
        struct task_struct *kswapd;
        int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)    ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)    pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)        pgdat_page_nr(NODE_DATA(nid), (pagenr))

#include <linux/memory_hotplug.h>

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
                        unsigned long *free, struct pglist_data *pgdat);
void get_zone_counts(unsigned long *active, unsigned long *inactive,
                        unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                int classzone_idx, int alloc_flags);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for ZONE_DMA32,
 * 2 for ZONE_NORMAL, etc.
 */
#define zone_idx(zone)  ((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
        return (!!zone->present_pages);
}

static inline int is_highmem_idx(int idx)
{
        return (idx == ZONE_HIGHMEM);
}

static inline int is_normal_idx(int idx)
{
        return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
        return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
}

static inline int is_normal(struct zone *zone)
{
        return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
}

static inline int is_dma(struct zone *zone)
{
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
}

/* These functions are used to set up the per zone pages_min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);

#include <linux/topology.h>
/* Returns the number of the current Node. */
#ifndef numa_node_id
#define numa_node_id()  (cpu_to_node(raw_smp_processor_id()))
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)          (&contig_page_data)
#define NODE_MEM_MAP(nid)       mem_map
#define MAX_NODES_SHIFT         1

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)                    \
        for (pgdat = first_online_pgdat();              \
             pgdat;                                     \
             pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)                             \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if BITS_PER_LONG == 32
/*
 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
 * there are 4 zones (ZONES_SHIFT == 2 bits) and this leaves 9-2=7 bits
 * for nodes.
 */
#define FLAGS_RESERVED          9

#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define FLAGS_RESERVED          32

#else

#error BITS_PER_LONG not defined

#endif

#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
#define early_pfn_to_nid(nid)   (0UL)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)         (0)
#endif

#define pfn_to_section_nr(pfn)  ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec)  ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT       #bits space required to store a section #
 *
 * PA_SECTION_SHIFT     physical address to/from section number
 * PFN_SECTION_SHIFT    pfn to/from section number
 */
#define SECTIONS_SHIFT          (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT        (SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT       (SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS         (1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION-1))

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

struct page;
struct mem_section {
        /*
         * This is, logically, a pointer to an array of struct
         * pages.  However, it is stored with some other magic.
         * (see sparse.c::sparse_init_one_section())
         *
         * Making it a UL at least makes someone do a cast
         * before using it wrong.
         */
        unsigned long section_mem_map;
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT       1
#endif

#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS        (NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK       (SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
        if (!mem_section[SECTION_NR_TO_ROOT(nr)])
                return NULL;
        return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT  (1UL<<0)
#define SECTION_HAS_MEM_MAP     (1UL<<1)
#define SECTION_MAP_LAST_BIT    (1UL<<2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT-1))

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
        unsigned long map = section->section_mem_map;
        map &= SECTION_MAP_MASK;
        return (struct page *)map;
}

static inline int valid_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int section_has_mem_map(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
        return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
        return __nr_to_section(pfn_to_section_nr(pfn));
}

static inline int pfn_valid(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)                                         \
({                                                              \
        unsigned long __pfn_to_nid_pfn = (pfn);                 \
        page_to_nid(pfn_to_page(__pfn_to_nid_pfn));             \
})
#else
#define pfn_to_nid(pfn)         (0)
#endif

#define early_pfn_valid(pfn)    pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()   do {} while (0)
#define sparse_index_init(_sec, _nid)   do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)    (1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */
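
/*
 * Worked example for the buddy allocator limits near the top of this file
 * (a sketch only, assuming the common 4 KiB page size; PAGE_SIZE really
 * comes from <asm/page.h>): with the default MAX_ORDER of 11, free_area[]
 * covers orders 0..10, so the largest buddy block is
 * MAX_ORDER_NR_PAGES = 1 << (11 - 1) = 1024 contiguous pages,
 * i.e. 4 MiB with 4 KiB pages.
 */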
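
/*
 * Worked example for the GFP_ZONEMASK/GFP_ZONETYPES arithmetic.
 * GFP_ZONEMASK is 0x07, so there are three zone modifier bits and at most
 * 2^3 = 8 modifier combinations, which would need 8 zonelists per node
 * (the commented-out "non-loner" form).  In this kernel the highest
 * modifier bit (__GFP_DMA32 in <linux/gfp.h>) is a "loner": it is only
 * valid with no other modifier set, so the combinations 0x05, 0x06 and
 * 0x07 can never occur and only indices 0x00..0x04 need zonelists:
 *
 *      GFP_ZONETYPES = (GFP_ZONEMASK + 1) / 2 + 1 = (7 + 1) / 2 + 1 = 5
 */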
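
/*
 * Illustrative sketch (the helper name is made up, it is not part of this
 * header): zone_idx() is plain pointer arithmetic against the node's
 * node_zones[] array, so testing the index is equivalent to the pointer
 * comparison done by is_highmem() and friends.
 */
static inline int example_zone_is_highmem(struct zone *zone)
{
        /* same result as is_highmem(zone) */
        return zone_idx(zone) == ZONE_HIGHMEM;
}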
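
/*
 * Usage sketch for the pgdat/zone iterators (the function name is made up,
 * it is not part of this header): walk every zone of every online node
 * with for_each_zone() and sum the free page counts, skipping zones that
 * have no memory.
 */
static inline unsigned long example_total_free_pages(void)
{
        struct zone *zone;
        unsigned long free = 0;

        for_each_zone(zone) {
                if (populated_zone(zone))
                        free += zone->free_pages;
        }
        return free;
}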
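
/*
 * Worked example for the SPARSEMEM section arithmetic, assuming (for
 * illustration only, both values are architecture specific)
 * SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12:
 *
 *      PFN_SECTION_SHIFT = 27 - 12 = 15
 *      PAGES_PER_SECTION = 1 << 15 = 32768 pages = 128 MiB per section
 *      pfn_to_section_nr(0x2345678) = 0x2345678 >> 15 = 0x468
 *
 * With CONFIG_SPARSEMEM_EXTREME the section is then looked up in two
 * levels, mem_section[nr / SECTIONS_PER_ROOT][nr % SECTIONS_PER_ROOT],
 * which is exactly what __nr_to_section() computes with
 * SECTION_NR_TO_ROOT() and SECTION_ROOT_MASK.
 */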