Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm/page_alloc.c at 33bc227e4e48ddadcf2eacb381c19df338f0a6c8
/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
struct pglist_data *pgdat_list __read_mostly;
unsigned long totalram_pages __read_mostly;
unsigned long totalhigh_pages __read_mostly;
long nr_swap_pages;

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
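/*
 * Editor's note: an illustrative, standalone sketch (not kernel code,
 * disabled with #if 0) of the arithmetic implied by the ratios above,
 * using the comment's 1G split: 16M DMA, 784M normal, 224M highmem,
 * 4K pages.  A zone reserves sum(pages of higher zones)/ratio pages
 * against allocation classes that could also have used those higher
 * zones.  Zone names and counts here are invented for the example.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const char *name[3] = { "DMA", "Normal", "HighMem" };
	unsigned long pages[3] = { 4096, 200704, 57344 }; /* 16M/784M/224M */
	unsigned long ratio[3] = { 256, 32, 0 };	  /* per-zone sysctl */
	int i, j;

	for (i = 0; i < 2; i++) {		/* zone being protected */
		unsigned long higher = 0;

		for (j = i + 1; j < 3; j++) {	/* allocation class */
			higher += pages[j];
			printf("%s keeps %lu pages from %s allocations\n",
			       name[i], higher / ratio[i], name[j]);
		}
	}
	return 0;
}
#endif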
EXPORT_SYMBOL(totalram_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
int min_free_kbytes = 1024;

unsigned long __initdata nr_kernel_pages;
unsigned long __initdata nr_all_pages;

static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}

static void bad_page(const char *function, struct page *page)
{
	printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
		function, current->comm, page);
	printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
		(int)(2*sizeof(unsigned long)), (unsigned long)page->flags,
		page->mapping, page_mapcount(page), page_count(page));
	printk(KERN_EMERG "Backtrace:\n");
	dump_stack();
	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->mapping, if non-zero, holds the address of the
 * compound page's put_page() function.
 *
 * The order of the allocation is stored in the first tail page's ->index.
 * This is only for debug at present.  This usage means that zero-order pages
 * may not be compound.
 */
static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].mapping = NULL;
	page[1].index = order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (!PageCompound(page))
		return;

	if (page[1].index != order)
		bad_page(__FUNCTION__, page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (!PageCompound(p))
			bad_page(__FUNCTION__, page);
		if (page_private(p) != (unsigned long)page)
			bad_page(__FUNCTION__, page);
		ClearPageCompound(p);
	}
}
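/*
 * Editor's note: an illustrative, standalone sketch (not kernel code,
 * disabled with #if 0) of the layout prep_compound_page() leaves
 * behind for an order-2 (four page) block.  "toy_page" is a made-up
 * stand-in carrying only the two fields used above.
 */
#if 0
#include <assert.h>

struct toy_page {
	unsigned long private;	/* -> head page, as set_page_private() */
	unsigned long index;	/* the order, kept in page[1] only */
};

static void toy_prep_compound(struct toy_page *page, unsigned long order)
{
	unsigned long i;

	page[1].index = order;
	for (i = 0; i < (1UL << order); i++)
		page[i].private = (unsigned long)page;
}

int main(void)
{
	struct toy_page pages[4] = { { 0, 0 } };

	toy_prep_compound(pages, 2);
	assert(pages[3].private == (unsigned long)pages);  /* tail -> head */
	assert(pages[1].index == 2);			   /* stored order */
	return 0;
}
#endif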
/*
 * Functions for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPagePrivate(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPagePrivate(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
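/*
 * Editor's note: a standalone check (not kernel code, disabled with
 * #if 0) of the two identities in the comment above, using its own
 * numbers: page #8 at order 1.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned long order = 1;

	assert((8UL ^ (1UL << order)) == 10);	/* buddy of #8 is #10 */
	assert((8UL & ~(1UL << order)) == 8);	/* order-2 parent of #8 */
	assert((10UL & ~(1UL << order)) == 8);	/* same parent from #10 */
	return 0;
}
#endif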
/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is free &&
 * (b) the buddy is on the buddy system &&
 * (c) a page and its buddy have the same order.
 * For recording a page's order, we use page_private(page) and PG_private.
 */
static inline int page_is_buddy(struct page *page, int order)
{
	if (PagePrivate(page)           &&
	    (page_order(page) == order) &&
	     page_count(page) == 0)
		return 1;
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with PG_private.  A page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_pages_bulk (struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(order))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	BUG_ON(page_idx & (order_size - 1));
	BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		combined_idx = __find_combined_index(page_idx, order);
		buddy = __page_find_buddy(page, page_idx, order);

		if (bad_range(zone, buddy))
			break;
		if (!page_is_buddy(buddy, order))
			break;
		/* Move the buddy up one level. */
		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(const char *function, struct page *page)
{
	if (	page_mapcount(page) ||
		page->mapping != NULL ||
		page_count(page) != 0 ||
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved )))
		bad_page(function, page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static int
free_pages_bulk(struct zone *zone, int count,
		struct list_head *list, unsigned int order)
{
	unsigned long flags;
	struct page *page = NULL;
	int ret = 0;

	spin_lock_irqsave(&zone->lock, flags);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (!list_empty(list) && count--) {
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_pages_bulk list manipulates */
		list_del(&page->lru);
		__free_pages_bulk(page, zone, order);
		ret++;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}

void __free_pages_ok(struct page *page, unsigned int order)
{
	LIST_HEAD(list);
	int i;
	int reserved = 0;

	arch_free_page(page, order);

#ifndef CONFIG_MMU
	if (order > 0)
		for (i = 1 ; i < (1 << order) ; ++i)
			__put_page(page + i);
#endif

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(__FUNCTION__, page + i);
	if (reserved)
		return;

	list_add(&page->lru, &list);
	mod_page_state(pgfree, 1 << order);
	kernel_map_pages(page, 1 << order, 0);
	free_pages_bulk(page_zone(page), 1, &list, order);
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing.  Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function.  This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline struct page *
expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
	return page;
}
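/*
 * Editor's note: a standalone sketch (not kernel code, disabled with
 * #if 0) of the splitting expand() performs when an order-3 block
 * serves an order-0 request: each halving puts the upper half back on
 * the free list one order down, and the caller keeps the lowest page.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long size = 1UL << 3;	/* 8 pages, offsets 0..7 */
	int high = 3, low = 0;

	while (high > low) {
		high--;
		size >>= 1;
		printf("pages [%lu..%lu) freed at order %d\n",
		       size, 2 * size, high);
	}
	printf("pages [0..%lu) returned at order %d\n", size, low);
	return 0;
}
#endif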
void set_page_refs(struct page *page, int order)
{
#ifdef CONFIG_MMU
	set_page_count(page, 1);
#else
	int i;

	/*
	 * We need to reference all the pages for this order, otherwise if
	 * anyone accesses one of the pages with (get/put) it will be freed.
	 * - eg: access_process_vm()
	 */
	for (i = 0; i < (1 << order); i++)
		set_page_count(page + i, 1);
#endif /* CONFIG_MMU */
}

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order)
{
	if (	page_mapcount(page) ||
		page->mapping != NULL ||
		page_count(page) != 0 ||
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved )))
		bad_page(__FUNCTION__, page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refs(page, order);
	kernel_map_pages(page, 1 << order, 1);
	return 0;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area * area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		return expand(zone, page, order, current_order, area);
	}

	return NULL;
}
/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list)
{
	unsigned long flags;
	int i;
	int allocated = 0;
	struct page *page;

	spin_lock_irqsave(&zone->lock, flags);
	for (i = 0; i < count; ++i) {
		page = __rmqueue(zone, order);
		if (page == NULL)
			break;
		allocated++;
		list_add_tail(&page->lru, list);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return allocated;
}

#ifdef CONFIG_NUMA
/* Called from the slab reaper to drain remote pagesets */
void drain_remote_pages(void)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	local_irq_save(flags);
	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		/* Do not drain local pagesets */
		if (zone->zone_pgdat->node_id == numa_node_id())
			continue;

		pset = zone->pageset[smp_processor_id()];
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			if (pcp->count)
				pcp->count -= free_pages_bulk(zone, pcp->count,
						&pcp->list, 0);
		}
	}
	local_irq_restore(flags);
}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			pcp->count -= free_pages_bulk(zone, pcp->count,
						&pcp->list, 0);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long zone_pfn, flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long start_pfn, i;

			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));

			for (i = 0; i < (1 << order); i++)
				SetPageNosaveFree(pfn_to_page(start_pfn+i));
		}
	spin_unlock_irqrestore(&zone->lock, flags);
}
/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);
}
#endif /* CONFIG_PM */

static void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
#ifdef CONFIG_NUMA
	unsigned long flags;
	int cpu;
	pg_data_t *pg = z->zone_pgdat;
	pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
	struct per_cpu_pageset *p;

	local_irq_save(flags);
	cpu = smp_processor_id();
	p = zone_pcp(z, cpu);
	if (pg == orig) {
		p->numa_hit++;
	} else {
		p->numa_miss++;
		zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
	}
	if (pg == NODE_DATA(numa_node_id()))
		p->local_node++;
	else
		p->other_node++;
	local_irq_restore(flags);
#endif
}

/*
 * Free a 0-order page
 */
static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(__FUNCTION__, page))
		return;

	inc_page_state(pgfree);
	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high)
		pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
	local_irq_restore(flags);
	put_cpu();
}

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}
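/*
 * Editor's note: a standalone sketch (not kernel code, disabled with
 * #if 0) of the count/high/batch hysteresis above: frees accumulate
 * on the per-cpu list until "high", then "batch" pages go back to the
 * buddy lists in one locked operation, amortising the zone->lock
 * acquisition.  The numbers here are invented.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int count = 0, high = 6, batch = 2, i;

	for (i = 0; i < 10; i++) {
		count++;			/* free_hot_cold_page() */
		if (count >= high) {
			count -= batch;		/* free_pages_bulk() */
			printf("free %d: spilled %d, %d left on pcp\n",
			       i, batch, count);
		}
	}
	return 0;
}
#endif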
static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *
buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);

again:
	if (order == 0) {
		struct per_cpu_pages *pcp;

		page = NULL;
		pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
		local_irq_save(flags);
		if (pcp->count <= pcp->low)
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
		if (pcp->count) {
			page = list_entry(pcp->list.next, struct page, lru);
			list_del(&page->lru);
			pcp->count--;
		}
		local_irq_restore(flags);
		put_cpu();
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	if (page != NULL) {
		BUG_ON(bad_range(zone, page));
		mod_page_state_zone(zone, pgalloc, 1 << order);
		if (prep_new_page(page, order))
			goto again;

		if (gfp_flags & __GFP_ZERO)
			prep_zero_page(page, order, gfp_flags);

		if (order && (gfp_flags & __GFP_COMP))
			prep_compound_page(page, order);
	}
	return page;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_HARDER		0x02 /* try to alloc harder */
#define ALLOC_HIGH		0x04 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x08 /* check for correct cpuset */

/*
 * Return 1 if free pages are above 'mark'.  This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
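/*
 * Editor's note: a standalone walk-through (not kernel code, disabled
 * with #if 0) of zone_watermark_ok()'s loop on invented numbers.  An
 * order-2 request needs enough free memory overall *and* enough of it
 * in blocks of order >= 1 and >= 2, against a mark that halves per
 * order; the lowmem_reserve and ALLOC_* adjustments are omitted.
 */
#if 0
#include <stdio.h>

int main(void)
{
	long nr_free[4] = { 64, 16, 4, 1 };		/* per order 0..3 */
	long free_pages = 64 + 16 * 2 + 4 * 4 + 1 * 8;	/* 120 total */
	long min = 32, order = 2, o;

	free_pages -= (1 << order) - 1;
	if (free_pages <= min) {
		puts("fail before order loop");
		return 0;
	}
	for (o = 0; o < order; o++) {
		free_pages -= nr_free[o] << o;	/* too small from here on */
		min >>= 1;
		if (free_pages <= min) {
			printf("fail at order %ld\n", o + 1);
			return 0;
		}
	}
	puts("watermark ok");	/* this is what the numbers above print */
	return 0;
}
#endif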
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, int alloc_flags)
{
	struct zone **z = zonelist->zones;
	struct page *page = NULL;
	int classzone_idx = zone_idx(*z);

	/*
	 * Go through the zonelist once, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	do {
		if ((alloc_flags & ALLOC_CPUSET) &&
				!cpuset_zone_allowed(*z, gfp_mask))
			continue;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			if (!zone_watermark_ok(*z, order, (*z)->pages_low,
				    classzone_idx, alloc_flags))
				continue;
		}

		page = buffered_rmqueue(*z, order, gfp_mask);
		if (page) {
			zone_statistics(zonelist, *z);
			break;
		}
	} while (*(++z) != NULL);
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct zone **z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	int did_some_progress;

	might_sleep_if(wait);

restart:
	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(*z == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_CPUSET);
	if (page)
		goto got_pg;

	do {
		wakeup_kswapd(*z, order);
	} while (*(++z));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy.
	 */
	alloc_flags = 0;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	if (wait)
		alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, order,
				zonelist, ALLOC_NO_WATERMARKS|ALLOC_CPUSET);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				blk_congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

rebalance:
	cond_resched();

	/* We now go into synchronous reclaim */
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, order,
						zonelist, alloc_flags);
		if (page)
			goto got_pg;
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_CPUSET);
		if (page)
			goto got_pg;

		out_of_memory(gfp_mask, order);
		goto restart;
	}
	/*
	 * Don't let big-order allocations loop unless the caller explicitly
	 * requests that.  Wait for some write requests to complete then retry.
	 *
	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
	 * <= 3, but that may not be true in other implementations.
	 */
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
			do_retry = 1;
		if (gfp_mask & __GFP_NOFAIL)
			do_retry = 1;
	}
	if (do_retry) {
		blk_congestion_wait(WRITE, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
got_pg:
	return page;
}

EXPORT_SYMBOL(__alloc_pages);

/*
 * Common helper functions.
 */
fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page * page;
	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);

fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	struct page * page;

	/*
	 * get_zeroed_page() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

fastcall void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

fastcall void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

/*
 * Total amount of free (allocatable) RAM:
 */
unsigned int nr_free_pages(void)
{
	unsigned int sum = 0;
	struct zone *zone;

	for_each_zone(zone)
		sum += zone->free_pages;

	return sum;
}

EXPORT_SYMBOL(nr_free_pages);

#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
	unsigned int i, sum = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		sum += pgdat->node_zones[i].free_pages;

	return sum;
}
#endif

static unsigned int nr_free_zone_pages(int offset)
{
	/* Just pick one node, since fallback list is circular */
	pg_data_t *pgdat = NODE_DATA(numa_node_id());
	unsigned int sum = 0;

	struct zonelist *zonelist = pgdat->node_zonelists + offset;
	struct zone **zonep = zonelist->zones;
	struct zone *zone;

	for (zone = *zonep++; zone; zone = *zonep++) {
		unsigned long size = zone->present_pages;
		unsigned long high = zone->pages_high;
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
}

#ifdef CONFIG_HIGHMEM
unsigned int nr_free_highpages (void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_pgdat(pgdat)
		pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;

	return pages;
}
#endif

#ifdef CONFIG_NUMA
static void show_node(struct zone *zone)
{
	printk("Node %d ", zone->zone_pgdat->node_id);
}
#else
#define show_node(zone)	do { } while (0)
#endif

/*
 * Accumulate the page_state information across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
static DEFINE_PER_CPU(struct page_state, page_states) = {0};

atomic_t nr_pagecache = ATOMIC_INIT(0);
EXPORT_SYMBOL(nr_pagecache);
#ifdef CONFIG_SMP
DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
#endif

void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
	int cpu = 0;

	memset(ret, 0, sizeof(*ret));
	cpus_and(*cpumask, *cpumask, cpu_online_map);

	cpu = first_cpu(*cpumask);
	while (cpu < NR_CPUS) {
		unsigned long *in, *out, off;

		in = (unsigned long *)&per_cpu(page_states, cpu);

		cpu = next_cpu(cpu, *cpumask);

		if (cpu < NR_CPUS)
			prefetch(&per_cpu(page_states, cpu));

		out = (unsigned long *)ret;
		for (off = 0; off < nr; off++)
			*out++ += *in++;
	}
}

void get_page_state_node(struct page_state *ret, int node)
{
	int nr;
	cpumask_t mask = node_to_cpumask(node);

	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
	nr /= sizeof(unsigned long);

	__get_page_state(ret, nr+1, &mask);
}

void get_page_state(struct page_state *ret)
{
	int nr;
	cpumask_t mask = CPU_MASK_ALL;

	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
	nr /= sizeof(unsigned long);

	__get_page_state(ret, nr + 1, &mask);
}

void get_full_page_state(struct page_state *ret)
{
	cpumask_t mask = CPU_MASK_ALL;

	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
}

unsigned long __read_page_state(unsigned long offset)
{
	unsigned long ret = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		unsigned long in;

		in = (unsigned long)&per_cpu(page_states, cpu) + offset;
		ret += *((unsigned long *)in);
	}
	return ret;
}

void __mod_page_state(unsigned long offset, unsigned long delta)
{
	unsigned long flags;
	void* ptr;

	local_irq_save(flags);
	ptr = &__get_cpu_var(page_states);
	*(unsigned long*)(ptr + offset) += delta;
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__mod_page_state);
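/*
 * Editor's note: a standalone demonstration (not kernel code, disabled
 * with #if 0) of the trick __get_page_state(), __read_page_state() and
 * __mod_page_state() rely on: struct page_state is a plain run of
 * unsigned longs, so an offsetof() can be turned into an array index.
 * "toy_state" is a made-up two-field stand-in.
 */
#if 0
#include <assert.h>
#include <stddef.h>

struct toy_state {
	unsigned long pgalloc;
	unsigned long pgfree;
};

int main(void)
{
	struct toy_state s = { 3, 5 };
	unsigned long *base = (unsigned long *)&s;
	size_t off = offsetof(struct toy_state, pgfree);

	assert(base[off / sizeof(unsigned long)] == 5);
	return 0;
}
#endif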
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*active += zones[i].nr_active;
		*inactive += zones[i].nr_inactive;
		*free += zones[i].free_pages;
	}
}

void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for_each_pgdat(pgdat) {
		unsigned long l, m, n;
		__get_zone_counts(&l, &m, &n, pgdat);
		*active += l;
		*inactive += m;
		*free += n;
	}
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = nr_blockdev_pages();
#ifdef CONFIG_HIGHMEM
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = nr_free_pages_pgdat(pgdat);
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
	val->mem_unit = PAGE_SIZE;
}
#endif

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
void show_free_areas(void)
{
	struct page_state ps;
	int cpu, temperature;
	unsigned long active;
	unsigned long inactive;
	unsigned long free;
	struct zone *zone;

	for_each_zone(zone) {
		show_node(zone);
		printk("%s per-cpu:", zone->name);

		if (!zone->present_pages) {
			printk(" empty\n");
			continue;
		} else
			printk("\n");

		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = zone_pcp(zone, cpu);

			for (temperature = 0; temperature < 2; temperature++)
				printk("cpu %d %s: low %d, high %d, batch %d used:%d\n",
					cpu,
					temperature ? "cold" : "hot",
					pageset->pcp[temperature].low,
					pageset->pcp[temperature].high,
					pageset->pcp[temperature].batch,
					pageset->pcp[temperature].count);
		}
	}

	get_page_state(&ps);
	get_zone_counts(&active, &inactive, &free);

	printk("Free pages: %11ukB (%ukB HighMem)\n",
		K(nr_free_pages()),
		K(nr_free_highpages()));

	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
		"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
		active,
		inactive,
		ps.nr_dirty,
		ps.nr_writeback,
		ps.nr_unstable,
		nr_free_pages(),
		ps.nr_slab,
		ps.nr_mapped,
		ps.nr_page_table_pages);

	for_each_zone(zone) {
		int i;

		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active:%lukB"
			" inactive:%lukB"
			" present:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone->free_pages),
			K(zone->pages_min),
			K(zone->pages_low),
			K(zone->pages_high),
			K(zone->nr_active),
			K(zone->nr_inactive),
			K(zone->present_pages),
			zone->pages_scanned,
			(zone->all_unreclaimable ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_zone(zone) {
		unsigned long nr, flags, order, total = 0;

		show_node(zone);
		printk("%s: ", zone->name);
		if (!zone->present_pages) {
			printk("empty\n");
			continue;
		}

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr = zone->free_area[order].nr_free;
			total += nr << order;
			printk("%lu*%lukB ", nr, K(1UL) << order);
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		printk("= %lukB\n", K(total));
	}

	show_swap_cache_info();
}
"yes" : "no") 1394 ); 1395 printk("lowmem_reserve[]:"); 1396 for (i = 0; i < MAX_NR_ZONES; i++) 1397 printk(" %lu", zone->lowmem_reserve[i]); 1398 printk("\n"); 1399 } 1400 1401 for_each_zone(zone) { 1402 unsigned long nr, flags, order, total = 0; 1403 1404 show_node(zone); 1405 printk("%s: ", zone->name); 1406 if (!zone->present_pages) { 1407 printk("empty\n"); 1408 continue; 1409 } 1410 1411 spin_lock_irqsave(&zone->lock, flags); 1412 for (order = 0; order < MAX_ORDER; order++) { 1413 nr = zone->free_area[order].nr_free; 1414 total += nr << order; 1415 printk("%lu*%lukB ", nr, K(1UL) << order); 1416 } 1417 spin_unlock_irqrestore(&zone->lock, flags); 1418 printk("= %lukB\n", K(total)); 1419 } 1420 1421 show_swap_cache_info(); 1422} 1423 1424/* 1425 * Builds allocation fallback zone lists. 1426 */ 1427static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k) 1428{ 1429 switch (k) { 1430 struct zone *zone; 1431 default: 1432 BUG(); 1433 case ZONE_HIGHMEM: 1434 zone = pgdat->node_zones + ZONE_HIGHMEM; 1435 if (zone->present_pages) { 1436#ifndef CONFIG_HIGHMEM 1437 BUG(); 1438#endif 1439 zonelist->zones[j++] = zone; 1440 } 1441 case ZONE_NORMAL: 1442 zone = pgdat->node_zones + ZONE_NORMAL; 1443 if (zone->present_pages) 1444 zonelist->zones[j++] = zone; 1445 case ZONE_DMA32: 1446 zone = pgdat->node_zones + ZONE_DMA32; 1447 if (zone->present_pages) 1448 zonelist->zones[j++] = zone; 1449 case ZONE_DMA: 1450 zone = pgdat->node_zones + ZONE_DMA; 1451 if (zone->present_pages) 1452 zonelist->zones[j++] = zone; 1453 } 1454 1455 return j; 1456} 1457 1458static inline int highest_zone(int zone_bits) 1459{ 1460 int res = ZONE_NORMAL; 1461 if (zone_bits & (__force int)__GFP_HIGHMEM) 1462 res = ZONE_HIGHMEM; 1463 if (zone_bits & (__force int)__GFP_DMA32) 1464 res = ZONE_DMA32; 1465 if (zone_bits & (__force int)__GFP_DMA) 1466 res = ZONE_DMA; 1467 return res; 1468} 1469 1470#ifdef CONFIG_NUMA 1471#define MAX_NODE_LOAD (num_online_nodes()) 1472static int __initdata node_load[MAX_NUMNODES]; 1473/** 1474 * find_next_best_node - find the next node that should appear in a given node's fallback list 1475 * @node: node whose fallback list we're appending 1476 * @used_node_mask: nodemask_t of already used nodes 1477 * 1478 * We use a number of factors to determine which is the next node that should 1479 * appear on a given node's fallback list. The node should not have appeared 1480 * already in @node's fallback list, and it should be the next closest node 1481 * according to the distance array (which contains arbitrary distance values 1482 * from each node to each node in the system), and should also prefer nodes 1483 * with no CPUs, since presumably they'll have very little allocation pressure 1484 * on them otherwise. 1485 * It returns -1 if no node is found. 
static inline int highest_zone(int zone_bits)
{
	int res = ZONE_NORMAL;
	if (zone_bits & (__force int)__GFP_HIGHMEM)
		res = ZONE_HIGHMEM;
	if (zone_bits & (__force int)__GFP_DMA32)
		res = ZONE_DMA32;
	if (zone_bits & (__force int)__GFP_DMA)
		res = ZONE_DMA;
	return res;
}

#ifdef CONFIG_NUMA
#define MAX_NODE_LOAD (num_online_nodes())
static int __initdata node_load[MAX_NUMNODES];
/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns -1 if no node is found.
 */
static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int i, n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(i) {
		cpumask_t tmp;

		/* Start from local node */
		n = (node+i) % num_online_nodes();

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the local node if we haven't already */
		if (!node_isset(node, *used_node_mask)) {
			best_node = node;
			break;
		}

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Give preference to headless and unused nodes */
		tmp = node_to_cpumask(n);
		if (!cpus_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}

static void __init build_zonelists(pg_data_t *pgdat)
{
	int i, j, k, node, local_node;
	int prev_node, load;
	struct zonelist *zonelist;
	nodemask_t used_mask;

	/* initialize zonelists */
	for (i = 0; i < GFP_ZONETYPES; i++) {
		zonelist = pgdat->node_zonelists + i;
		zonelist->zones[0] = NULL;
	}

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = num_online_nodes();
	prev_node = local_node;
	nodes_clear(used_mask);
	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		/*
		 * We don't want to pressure a particular node.
		 * So adding penalty to the first node in same
		 * distance group to make it round-robin.
		 */
		if (node_distance(local_node, node) !=
				node_distance(local_node, prev_node))
			node_load[node] += load;
		prev_node = node;
		load--;
		for (i = 0; i < GFP_ZONETYPES; i++) {
			zonelist = pgdat->node_zonelists + i;
			for (j = 0; zonelist->zones[j] != NULL; j++);

			k = highest_zone(i);

			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
			zonelist->zones[j] = NULL;
		}
	}
}
#else	/* CONFIG_NUMA */

static void __init build_zonelists(pg_data_t *pgdat)
{
	int i, j, k, node, local_node;

	local_node = pgdat->node_id;
	for (i = 0; i < GFP_ZONETYPES; i++) {
		struct zonelist *zonelist;

		zonelist = pgdat->node_zonelists + i;

		j = 0;
		k = highest_zone(i);
		j = build_zonelists_node(pgdat, zonelist, j, k);
		/*
		 * Now we build the zonelist so that it contains the zones
		 * of all the other nodes.
		 * We don't want to pressure a particular node, so when
		 * building the zones for node N, we make sure that the
		 * zones coming right after the local ones are those from
		 * node N+1 (modulo N)
		 */
		for (node = local_node + 1; node < MAX_NUMNODES; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
		}
		for (node = 0; node < local_node; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
		}

		zonelist->zones[j] = NULL;
	}
}

#endif	/* CONFIG_NUMA */

void __init build_all_zonelists(void)
{
	int i;

	for_each_online_node(i)
		build_zonelists(NODE_DATA(i));
	printk("Built %i zonelists\n", num_online_nodes());
	cpuset_init_current_mems_allowed();
}

/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200. So this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the waitq table given the number of pages.
 */
#define PAGES_PER_WAITQUEUE	256

static inline unsigned long wait_table_size(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;

	while (size < pages)
		size <<= 1;

	/*
	 * Once we have dozens or even hundreds of threads sleeping
	 * on IO we've got bigger problems than wait queue collision.
	 * Limit the size of the wait table to a reasonable size.
	 */
	size = min(size, 4096UL);

	return max(size, 4UL);
}

/*
 * This is an integer logarithm so that shifts can be used later
 * to extract the more random high bits from the multiplicative
 * hash function before the remainder is taken.
 */
static inline unsigned long wait_table_bits(unsigned long size)
{
	return ffz(~size);
}
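/*
 * Editor's note: a standalone check (not kernel code, disabled with
 * #if 0) of the two helpers above for a 1GB zone of 4K pages:
 * wait_table_size() rounds 262144/256 up to a power of two, and
 * wait_table_bits() recovers log2 of it (ffz(~size) is the index of
 * the lowest zero bit of ~size, i.e. the lowest set bit of size).
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned long pages = 262144 / 256;	/* PAGES_PER_WAITQUEUE */
	unsigned long size = 1;

	while (size < pages)
		size <<= 1;
	if (size > 4096)
		size = 4096;
	assert(size == 1024);
	assert(__builtin_ctzl(size) == 10);	/* == ffz(~size) */
	return 0;
}
#endif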
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long realtotalpages, totalpages = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zones_size[i];
	pgdat->node_spanned_pages = totalpages;

	realtotalpages = totalpages;
	if (zholes_size)
		for (i = 0; i < MAX_NR_ZONES; i++)
			realtotalpages -= zholes_size[i];
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}


/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 */
void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn)
{
	struct page *page;
	unsigned long end_pfn = start_pfn + size;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) {
		if (!early_pfn_valid(pfn))
			continue;
		if (!early_pfn_in_nid(pfn, nid))
			continue;
		page = pfn_to_page(pfn);
		set_page_links(page, zone, nid, pfn);
		set_page_count(page, 1);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
		if (!is_highmem_idx(zone))
			set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
	}
}

void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
				unsigned long size)
{
	int order;
	for (order = 0; order < MAX_ORDER ; order++) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list);
		zone->free_area[order].nr_free = 0;
	}
}

#define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr)
void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
		unsigned long size)
{
	unsigned long snum = pfn_to_section_nr(pfn);
	unsigned long end = pfn_to_section_nr(pfn + size);

	if (FLAGS_HAS_NODE)
		zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
	else
		for (; snum <= end; snum++)
			zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
	memmap_init_zone((size), (nid), (zone), (start_pfn))
#endif

static int __devinit zone_batchsize(struct zone *zone)
{
	int batch;

	/*
	 * The per-cpu-pages pools are set to around 1000th of the
	 * size of the zone.  But no more than 1/2 of a meg.
	 *
	 * OK, so we don't know how big the cache is.  So guess.
	 */
	batch = zone->present_pages / 1024;
	if (batch * PAGE_SIZE > 512 * 1024)
		batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * We will be trying to allocate bigger chunks of contiguous
	 * memory of the order of fls(batch).  This should result in
	 * better cache coloring.
	 *
	 * A sanity check also to ensure that batch is still in limits.
	 */
	batch = (1 << fls(batch + batch/2));

	if (fls(batch) >= (PAGE_SHIFT + MAX_ORDER - 2))
		batch = PAGE_SHIFT + ((MAX_ORDER - 1 - PAGE_SHIFT)/2);

	return batch;
}
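/*
 * Editor's note: a standalone walk-through (not kernel code, disabled
 * with #if 0) of zone_batchsize() for a 512MB zone of 4K pages.
 * fls(x) is modelled with __builtin_clz; the final MAX_ORDER clamp is
 * omitted since it does not fire for these numbers.
 */
#if 0
#include <assert.h>

int main(void)
{
	int batch = 131072 / 1024;		/* 128 pages = 512K */

	if (batch * 4096 > 512 * 1024)
		batch = (512 * 1024) / 4096;	/* no-op here */
	batch /= 4;				/* 32 */
	if (batch < 1)
		batch = 1;
	/* 1 << fls(batch + batch/2): fls(48) == 6, so batch becomes 64 */
	batch = 1 << (32 - __builtin_clz(batch + batch / 2));
	assert(batch == 64);
	return 0;
}
#endif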
inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
	struct per_cpu_pages *pcp;

	memset(p, 0, sizeof(*p));

	pcp = &p->pcp[0];		/* hot */
	pcp->count = 0;
	pcp->low = 0;
	pcp->high = 6 * batch;
	pcp->batch = max(1UL, 1 * batch);
	INIT_LIST_HEAD(&pcp->list);

	pcp = &p->pcp[1];		/* cold */
	pcp->count = 0;
	pcp->low = 0;
	pcp->high = 2 * batch;
	pcp->batch = max(1UL, batch/2);
	INIT_LIST_HEAD(&pcp->list);
}

#ifdef CONFIG_NUMA
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * Some NUMA counter updates may also be caught by the boot pagesets.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static struct per_cpu_pageset
	boot_pageset[NR_CPUS];

/*
 * Dynamically allocate memory for the
 * per cpu pageset array in struct zone.
 */
static int __devinit process_zones(int cpu)
{
	struct zone *zone, *dzone;

	for_each_zone(zone) {

		zone->pageset[cpu] = kmalloc_node(sizeof(struct per_cpu_pageset),
					 GFP_KERNEL, cpu_to_node(cpu));
		if (!zone->pageset[cpu])
			goto bad;

		setup_pageset(zone->pageset[cpu], zone_batchsize(zone));
	}

	return 0;
bad:
	for_each_zone(dzone) {
		if (dzone == zone)
			break;
		kfree(dzone->pageset[cpu]);
		dzone->pageset[cpu] = NULL;
	}
	return -ENOMEM;
}

static inline void free_zone_pagesets(int cpu)
{
#ifdef CONFIG_NUMA
	struct zone *zone;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);

		zone_pcp(zone, cpu) = NULL;
		kfree(pset);
	}
#endif
}

static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	int cpu = (long)hcpu;
	int ret = NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
		if (process_zones(cpu))
			ret = NOTIFY_BAD;
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		free_zone_pagesets(cpu);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block pageset_notifier =
	{ &pageset_cpuup_callback, NULL, 0 };

void __init setup_per_cpu_pageset(void)
{
	int err;

	/* Initialize per_cpu_pageset for cpu 0.
	 * A cpuup callback will do this for every cpu
	 * as it comes online
	 */
	err = process_zones(smp_processor_id());
	BUG_ON(err);
	register_cpu_notifier(&pageset_notifier);
}

#endif
static __devinit
void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
	int i;
	struct pglist_data *pgdat = zone->zone_pgdat;

	/*
	 * The per-page waitqueue mechanism uses hashed waitqueues
	 * per zone.
	 */
	zone->wait_table_size = wait_table_size(zone_size_pages);
	zone->wait_table_bits =	wait_table_bits(zone->wait_table_size);
	zone->wait_table = (wait_queue_head_t *)
		alloc_bootmem_node(pgdat, zone->wait_table_size
					* sizeof(wait_queue_head_t));

	for (i = 0; i < zone->wait_table_size; ++i)
		init_waitqueue_head(zone->wait_table + i);
}

static __devinit void zone_pcp_init(struct zone *zone)
{
	int cpu;
	unsigned long batch = zone_batchsize(zone);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
#ifdef CONFIG_NUMA
		/* Early boot. Slab allocator not functional yet */
		zone->pageset[cpu] = &boot_pageset[cpu];
		setup_pageset(&boot_pageset[cpu], 0);
#else
		setup_pageset(zone_pcp(zone, cpu), batch);
#endif
	}
	printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
		zone->name, zone->present_pages, batch);
}

static __devinit void init_currently_empty_zone(struct zone *zone,
		unsigned long zone_start_pfn, unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	zone_wait_table_init(zone, size);
	pgdat->nr_zones = zone_idx(zone) + 1;

	zone->zone_mem_map = pfn_to_page(zone_start_pfn);
	zone->zone_start_pfn = zone_start_pfn;

	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);

	zone_init_free_lists(pgdat, zone, zone->spanned_pages);
}

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
static void __init free_area_init_core(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long j;
	int nid = pgdat->node_id;
	unsigned long zone_start_pfn = pgdat->node_start_pfn;

	pgdat_resize_init(pgdat);
	pgdat->nr_zones = 0;
	init_waitqueue_head(&pgdat->kswapd_wait);
	pgdat->kswapd_max_order = 0;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize;

		realsize = size = zones_size[j];
		if (zholes_size)
			realsize -= zholes_size[j];

		if (j < ZONE_HIGHMEM)
			nr_kernel_pages += realsize;
		nr_all_pages += realsize;

		zone->spanned_pages = size;
		zone->present_pages = realsize;
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
		spin_lock_init(&zone->lru_lock);
		zone_seqlock_init(zone);
		zone->zone_pgdat = pgdat;
		zone->free_pages = 0;

		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;

		zone_pcp_init(zone);
		INIT_LIST_HEAD(&zone->active_list);
		INIT_LIST_HEAD(&zone->inactive_list);
		zone->nr_scan_active = 0;
		zone->nr_scan_inactive = 0;
		zone->nr_active = 0;
		zone->nr_inactive = 0;
		atomic_set(&zone->reclaim_in_progress, 0);
		if (!size)
			continue;

		zonetable_add(zone, nid, j, zone_start_pfn, size);
		init_currently_empty_zone(zone, zone_start_pfn, size);
		zone_start_pfn += size;
	}
}

static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size;
		struct page *map;

		size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = alloc_bootmem_node(pgdat, size);
		pgdat->node_mem_map = map;
	}
#ifdef CONFIG_FLATMEM
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0))
		mem_map = NODE_DATA(0)->node_mem_map;
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}

void __init free_area_init_node(int nid, struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long node_start_pfn,
		unsigned long *zholes_size)
{
	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_zone_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);

	free_area_init_core(pgdat, zones_size, zholes_size);
}
void __init free_area_init_node(int nid, struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long node_start_pfn,
		unsigned long *zholes_size)
{
	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_zone_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);

	free_area_init_core(pgdat, zones_size, zholes_size);
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static bootmem_data_t contig_bootmem_data;
struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };

EXPORT_SYMBOL(contig_page_data);
#endif

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, NODE_DATA(0), zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return pgdat->pgdat_next;
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!zone->present_pages)
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};
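/*
 * Illustration (not part of the original file): frag_show() backs
 * /proc/buddyinfo.  With MAX_ORDER == 11, its output might look like:
 *
 *   Node 0, zone      DMA      3      5      5      2      1 ...
 *   Node 0, zone   Normal    421    305    143     64     21 ...
 *
 * one free-block count per order; the values here are invented for the
 * example.
 */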
2131 */ 2132static int zoneinfo_show(struct seq_file *m, void *arg) 2133{ 2134 pg_data_t *pgdat = arg; 2135 struct zone *zone; 2136 struct zone *node_zones = pgdat->node_zones; 2137 unsigned long flags; 2138 2139 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) { 2140 int i; 2141 2142 if (!zone->present_pages) 2143 continue; 2144 2145 spin_lock_irqsave(&zone->lock, flags); 2146 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name); 2147 seq_printf(m, 2148 "\n pages free %lu" 2149 "\n min %lu" 2150 "\n low %lu" 2151 "\n high %lu" 2152 "\n active %lu" 2153 "\n inactive %lu" 2154 "\n scanned %lu (a: %lu i: %lu)" 2155 "\n spanned %lu" 2156 "\n present %lu", 2157 zone->free_pages, 2158 zone->pages_min, 2159 zone->pages_low, 2160 zone->pages_high, 2161 zone->nr_active, 2162 zone->nr_inactive, 2163 zone->pages_scanned, 2164 zone->nr_scan_active, zone->nr_scan_inactive, 2165 zone->spanned_pages, 2166 zone->present_pages); 2167 seq_printf(m, 2168 "\n protection: (%lu", 2169 zone->lowmem_reserve[0]); 2170 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) 2171 seq_printf(m, ", %lu", zone->lowmem_reserve[i]); 2172 seq_printf(m, 2173 ")" 2174 "\n pagesets"); 2175 for (i = 0; i < ARRAY_SIZE(zone->pageset); i++) { 2176 struct per_cpu_pageset *pageset; 2177 int j; 2178 2179 pageset = zone_pcp(zone, i); 2180 for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) { 2181 if (pageset->pcp[j].count) 2182 break; 2183 } 2184 if (j == ARRAY_SIZE(pageset->pcp)) 2185 continue; 2186 for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) { 2187 seq_printf(m, 2188 "\n cpu: %i pcp: %i" 2189 "\n count: %i" 2190 "\n low: %i" 2191 "\n high: %i" 2192 "\n batch: %i", 2193 i, j, 2194 pageset->pcp[j].count, 2195 pageset->pcp[j].low, 2196 pageset->pcp[j].high, 2197 pageset->pcp[j].batch); 2198 } 2199#ifdef CONFIG_NUMA 2200 seq_printf(m, 2201 "\n numa_hit: %lu" 2202 "\n numa_miss: %lu" 2203 "\n numa_foreign: %lu" 2204 "\n interleave_hit: %lu" 2205 "\n local_node: %lu" 2206 "\n other_node: %lu", 2207 pageset->numa_hit, 2208 pageset->numa_miss, 2209 pageset->numa_foreign, 2210 pageset->interleave_hit, 2211 pageset->local_node, 2212 pageset->other_node); 2213#endif 2214 } 2215 seq_printf(m, 2216 "\n all_unreclaimable: %u" 2217 "\n prev_priority: %i" 2218 "\n temp_priority: %i" 2219 "\n start_pfn: %lu", 2220 zone->all_unreclaimable, 2221 zone->prev_priority, 2222 zone->temp_priority, 2223 zone->zone_start_pfn); 2224 spin_unlock_irqrestore(&zone->lock, flags); 2225 seq_putc(m, '\n'); 2226 } 2227 return 0; 2228} 2229 2230struct seq_operations zoneinfo_op = { 2231 .start = frag_start, /* iterate over all zones. The same as in 2232 * fragmentation. 
struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static char *vmstat_text[] = {
	"nr_dirty",
	"nr_writeback",
	"nr_unstable",
	"nr_page_table_pages",
	"nr_mapped",
	"nr_slab",

	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",
	"pgalloc_high",

	"pgalloc_normal",
	"pgalloc_dma",
	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",
	"pgrefill_high",
	"pgrefill_normal",
	"pgrefill_dma",

	"pgsteal_high",
	"pgsteal_normal",
	"pgsteal_dma",
	"pgscan_kswapd_high",
	"pgscan_kswapd_normal",

	"pgscan_kswapd_dma",
	"pgscan_direct_high",
	"pgscan_direct_normal",
	"pgscan_direct_dma",
	"pginodesteal",

	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
	"nr_bounce",
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	struct page_state *ps;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
	m->private = ps;
	if (!ps)
		return ERR_PTR(-ENOMEM);
	get_full_page_state(ps);
	ps->pgpgin /= 2;		/* sectors -> kbytes */
	ps->pgpgout /= 2;
	return (unsigned long *)ps + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};
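/*
 * Illustration (not part of the original file): vmstat_show() emits one
 * "name value" pair per /proc/vmstat line, in vmstat_text[] order:
 *
 *   nr_dirty 12
 *   nr_writeback 0
 *   pgpgin 430292
 *   ...
 *
 * values invented for the example; pgpgin/pgpgout appear in kbytes
 * because vmstat_start() halves the raw sector counts.
 */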
2379 */ 2380static void setup_per_zone_lowmem_reserve(void) 2381{ 2382 struct pglist_data *pgdat; 2383 int j, idx; 2384 2385 for_each_pgdat(pgdat) { 2386 for (j = 0; j < MAX_NR_ZONES; j++) { 2387 struct zone *zone = pgdat->node_zones + j; 2388 unsigned long present_pages = zone->present_pages; 2389 2390 zone->lowmem_reserve[j] = 0; 2391 2392 for (idx = j-1; idx >= 0; idx--) { 2393 struct zone *lower_zone; 2394 2395 if (sysctl_lowmem_reserve_ratio[idx] < 1) 2396 sysctl_lowmem_reserve_ratio[idx] = 1; 2397 2398 lower_zone = pgdat->node_zones + idx; 2399 lower_zone->lowmem_reserve[j] = present_pages / 2400 sysctl_lowmem_reserve_ratio[idx]; 2401 present_pages += lower_zone->present_pages; 2402 } 2403 } 2404 } 2405} 2406 2407/* 2408 * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures 2409 * that the pages_{min,low,high} values for each zone are set correctly 2410 * with respect to min_free_kbytes. 2411 */ 2412void setup_per_zone_pages_min(void) 2413{ 2414 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 2415 unsigned long lowmem_pages = 0; 2416 struct zone *zone; 2417 unsigned long flags; 2418 2419 /* Calculate total number of !ZONE_HIGHMEM pages */ 2420 for_each_zone(zone) { 2421 if (!is_highmem(zone)) 2422 lowmem_pages += zone->present_pages; 2423 } 2424 2425 for_each_zone(zone) { 2426 unsigned long tmp; 2427 spin_lock_irqsave(&zone->lru_lock, flags); 2428 tmp = (pages_min * zone->present_pages) / lowmem_pages; 2429 if (is_highmem(zone)) { 2430 /* 2431 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 2432 * need highmem pages, so cap pages_min to a small 2433 * value here. 2434 * 2435 * The (pages_high-pages_low) and (pages_low-pages_min) 2436 * deltas controls asynch page reclaim, and so should 2437 * not be capped for highmem. 2438 */ 2439 int min_pages; 2440 2441 min_pages = zone->present_pages / 1024; 2442 if (min_pages < SWAP_CLUSTER_MAX) 2443 min_pages = SWAP_CLUSTER_MAX; 2444 if (min_pages > 128) 2445 min_pages = 128; 2446 zone->pages_min = min_pages; 2447 } else { 2448 /* 2449 * If it's a lowmem zone, reserve a number of pages 2450 * proportionate to the zone's size. 2451 */ 2452 zone->pages_min = tmp; 2453 } 2454 2455 zone->pages_low = zone->pages_min + tmp / 4; 2456 zone->pages_high = zone->pages_min + tmp / 2; 2457 spin_unlock_irqrestore(&zone->lru_lock, flags); 2458 } 2459} 2460 2461/* 2462 * Initialise min_free_kbytes. 2463 * 2464 * For small machines we want it small (128k min). For large machines 2465 * we want it large (64MB max). But it is not linear, because network 2466 * bandwidth does not increase linearly with machine size. 
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_pages_min(void)
{
	unsigned long lowmem_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;
	setup_per_zone_pages_min();
	setup_per_zone_lowmem_reserve();
	return 0;
}
module_init(init_per_zone_pages_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call a helper function whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	setup_per_zone_pages_min();
	return 0;
}

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the pages_min watermarks; it is
 * only meaningful as a function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

__initdata int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif
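/*
 * Usage note (not part of the original file): on NUMA kernels, booting
 * with "hashdist=1" makes alloc_large_system_hash() below fall back to
 * __vmalloc(), so the table's pages need not come from one node's
 * contiguous low memory and can be spread across nodes.
 */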
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = (flags & HASH_HIGHMEM) ? nr_all_pages :
						      nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);
	}
	/* rounded up to nearest power of 2 in size */
	numentries = 1UL << (long_log2(numentries) + 1);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}

	if (numentries > max)
		numentries = max;

	log2qty = long_log2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = alloc_bootmem(size);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			unsigned long order;
			for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
				;
			table = (void *) __get_free_pages(GFP_ATOMIC, order);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
	       tablename,
	       (1U << log2qty),
	       long_log2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
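/*
 * Worked example (not part of the original file), for a hypothetical
 * caller passing bucketsize == 8, scale == 14, numentries == 0 on a
 * machine with 1 GiB of lowmem (nr_kernel_pages == 262144, 4 KiB pages):
 * the megabyte rounding leaves 262144 pages, scale > PAGE_SHIFT shifts
 * that down by 2 to 65536 buckets, and the power-of-2 step doubles it
 * to 131072.  That gives log2qty == 17, size == 8 << 17 == 1 MiB
 * (order 8), and a *_hash_mask of 0x1ffff.
 */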