/* mm/page_alloc.c at v4.4-rc4 */
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION    (8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);        /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_POSSIBLE] = NODE_MASK_ALL,
        [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
        [N_MEMORY] = { { [0] = 1UL } },
#endif
        [N_CPU] = { { [0] = 1UL } },
#endif  /* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory.  This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
        return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
        page->index = migratetype;
}
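/*
 * Illustration (editor's sketch, not part of the original file): the cached
 * value above lets the free path skip the pageblock bitmap lookup. A rough
 * trace for a CMA page, which is parked on the MOVABLE pcplist because CMA
 * has no pcplist of its own:
 *
 *      free_hot_cold_page(page, false);
 *              migratetype = get_pfnblock_migratetype(page, pfn); // MIGRATE_CMA
 *              set_pcppage_migratetype(page, migratetype);        // cached
 *              ...                             // queued on MOVABLE pcplist
 *      free_pcppages_bulk(zone, count, pcp);
 *              mt = get_pcppage_migratetype(page);                // still CMA
 *              __free_one_page(page, pfn, zone, 0, mt);           // CMA freelist
 */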
#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        if (saved_gfp_mask) {
                gfp_allowed_mask = saved_gfp_mask;
                saved_gfp_mask = 0;
        }
}

void pm_restrict_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        WARN_ON(saved_gfp_mask);
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
        if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
                return false;
        return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *      1G machine -> (16M dma, 784M normal, 224M high)
 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
         256,
#endif
#ifdef CONFIG_ZONE_DMA32
         256,
#endif
#ifdef CONFIG_HIGHMEM
         32,
#endif
         32,
};

EXPORT_SYMBOL(totalram_pages);
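/*
 * Worked example of the ratios above (editor's note): for the second 1G
 * split, a NORMAL allocation leaves 784M/256 ~= 3M of ZONE_DMA reserved,
 * and a HIGHMEM allocation leaves 224M/32 = 7M of ZONE_NORMAL plus
 * (224M+784M)/256 ~= 4M of ZONE_DMA reserved. In general, each lower zone
 * holds back (managed pages of the zones above it, up to the allocating
 * zone) / ratio pages, so scarce low memory is not consumed by allocations
 * that a higher zone could have satisfied.
 */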
static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
         "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
         "DMA32",
#endif
         "Normal",
#ifdef CONFIG_HIGHMEM
         "HighMem",
#endif
         "Movable",
#ifdef CONFIG_ZONE_DEVICE
         "Device",
#endif
};

static void free_compound_page(struct page *page);
compound_page_dtor * const compound_page_dtors[] = {
        NULL,
        free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
        free_huge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
        pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
        if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
                return true;

        return false;
}

static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
{
        if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
                return true;

        return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
                                unsigned long pfn, unsigned long zone_end,
                                unsigned long *nr_initialised)
{
        /* Always populate low zones for address-constrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;

        /* Initialise at least 2G of the highest zone */
        (*nr_initialised)++;
        if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                pgdat->first_deferred_pfn = pfn;
                return false;
        }

        return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
        return false;
}

static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
{
        return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
                                unsigned long pfn, unsigned long zone_end,
                                unsigned long *nr_initialised)
{
        return true;
}
#endif
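/*
 * Editor's arithmetic for the threshold above: 2UL << (30 - PAGE_SHIFT) is
 * the number of pages in 2GiB; with 4K pages (PAGE_SHIFT == 12) that is
 * 2 << 18 = 524288 struct pages. Deferral also waits for a section
 * boundary (pfn aligned to PAGES_PER_SECTION), so at least the first 2GiB
 * of the highest zone is always initialised during early boot.
 */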
void set_pageblock_migratetype(struct page *page, int migratetype)
{
        if (unlikely(page_group_by_mobility_disabled &&
                     migratetype < MIGRATE_PCPTYPES))
                migratetype = MIGRATE_UNMOVABLE;

        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
        int ret = 0;
        unsigned seq;
        unsigned long pfn = page_to_pfn(page);
        unsigned long sp, start_pfn;

        do {
                seq = zone_span_seqbegin(zone);
                start_pfn = zone->zone_start_pfn;
                sp = zone->spanned_pages;
                if (!zone_spans_pfn(zone, pfn))
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));

        if (ret)
                pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
                        pfn, zone_to_nid(zone), zone->name,
                        start_pfn, start_pfn + sp);

        return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
        if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
        if (zone != page_zone(page))
                return 0;

        return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
        if (page_outside_zone_boundaries(zone, page))
                return 1;
        if (!page_is_consistent(zone, page))
                return 1;

        return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
        return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
                unsigned long bad_flags)
{
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /* Don't complain about poisoned pages */
        if (PageHWPoison(page)) {
                page_mapcount_reset(page); /* remove PageBuddy */
                return;
        }

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        goto out;
                }
                if (nr_unshown) {
                        printk(KERN_ALERT
                              "BUG: Bad page state: %lu messages suppressed\n",
                                nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
                current->comm, page_to_pfn(page));
        dump_page_badflags(page, reason, bad_flags);

        print_modules();
        dump_stack();
out:
        /* Leave bad fields for debug, except PageBuddy could make trouble */
        page_mapcount_reset(page); /* remove PageBuddy */
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
        __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
        int i;
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
                set_page_count(p, 0);
                set_compound_head(p, page);
        }
}
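/*
 * Illustration of the tail-page encoding described above (editor's sketch;
 * the real helpers live in include/linux/page-flags.h and may differ in
 * detail). Bit 0 of ->compound_head tags a tail page and the remaining
 * bits locate the head, which works because a valid struct page pointer
 * always has bit 0 clear:
 *
 *      set_compound_head(p, head):
 *              WRITE_ONCE(p->compound_head, (unsigned long)head + 1);
 *
 *      compound_head(p):
 *              if (p->compound_head & 1)
 *                      return (struct page *)(p->compound_head - 1);
 *              return p;
 */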
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly;
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
        if (!buf)
                return -EINVAL;

        if (strcmp(buf, "on") == 0)
                _debug_pagealloc_enabled = true;

        return 0;
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
        /* If we don't use debug_pagealloc, we don't need guard page */
        if (!debug_pagealloc_enabled())
                return false;

        return true;
}

static void init_debug_guardpage(void)
{
        if (!debug_pagealloc_enabled())
                return;

        _debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
        .need = need_debug_guardpage,
        .init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
        unsigned long res;

        if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
                printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
                return 0;
        }
        _debug_guardpage_minorder = res;
        printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
        return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype)
{
        struct page_ext *page_ext;

        if (!debug_guardpage_enabled())
                return;

        page_ext = lookup_page_ext(page);
        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

        INIT_LIST_HEAD(&page->lru);
        set_page_private(page, order);
        /* Guard pages are not available for any usage */
        __mod_zone_freepage_state(zone, -(1 << order), migratetype);
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype)
{
        struct page_ext *page_ext;

        if (!debug_guardpage_enabled())
                return;

        page_ext = lookup_page_ext(page);
        __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

        set_page_private(page, 0);
        if (!is_migrate_isolate(migratetype))
                __mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops = { NULL, };
static inline void set_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype) {}
static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
        __ClearPageBuddy(page);
        set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                        unsigned int order)
{
        if (!pfn_valid_within(page_to_pfn(buddy)))
                return 0;

        if (page_is_guard(buddy) && page_order(buddy) == order) {
                if (page_zone_id(page) != page_zone_id(buddy))
                        return 0;

                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

                return 1;
        }

        if (PageBuddy(buddy) && page_order(buddy) == order) {
                /*
                 * zone check is done late to avoid uselessly
                 * calculating zone/node ids for pages that could
                 * never merge.
                 */
                if (page_zone_id(page) != page_zone_id(buddy))
                        return 0;

                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

                return 1;
        }
        return 0;
}
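/*
 * Editor's sketch of the PAGE_BUDDY_MAPCOUNT_VALUE convention used by
 * PageBuddy() above (the helpers are in include/linux/mm.h; reproduced
 * from memory, treat as approximate). A free page in the buddy system
 * stores a sentinel in ->_mapcount that can never be a real mapcount:
 *
 *      #define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
 *
 *      PageBuddy(page) is
 *              atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE
 *
 * __SetPageBuddy()/__ClearPageBuddy(), called from set_page_order() and
 * rmv_page_order(), write and reset that sentinel under zone->lock.
 */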
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in the
 * page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
                unsigned long pfn,
                struct zone *zone, unsigned int order,
                int migratetype)
{
        unsigned long page_idx;
        unsigned long combined_idx;
        unsigned long uninitialized_var(buddy_idx);
        struct page *buddy;
        unsigned int max_order = MAX_ORDER;

        VM_BUG_ON(!zone_is_initialized(zone));
        VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

        VM_BUG_ON(migratetype == -1);
        if (is_migrate_isolate(migratetype)) {
                /*
                 * We restrict max order of merging to prevent merge
                 * between freepages on isolate pageblock and normal
                 * pageblock. Without this, pageblock isolation
                 * could cause incorrect freepage accounting.
                 */
                max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
        } else {
                __mod_zone_freepage_state(zone, 1 << order, migratetype);
        }

        page_idx = pfn & ((1 << max_order) - 1);

        VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
        VM_BUG_ON_PAGE(bad_range(zone, page), page);

        while (order < max_order - 1) {
                buddy_idx = __find_buddy_index(page_idx, order);
                buddy = page + (buddy_idx - page_idx);
                if (!page_is_buddy(page, buddy, order))
                        break;
                /*
                 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
                 * merge with it and move up one order.
                 */
                if (page_is_guard(buddy)) {
                        clear_page_guard(zone, buddy, order, migratetype);
                } else {
                        list_del(&buddy->lru);
                        zone->free_area[order].nr_free--;
                        rmv_page_order(buddy);
                }
                combined_idx = buddy_idx & page_idx;
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
                order++;
        }
        set_page_order(page, order);

        /*
         * If this is not the largest possible page, check if the buddy
         * of the next-highest order is free. If it is, it's possible
         * that pages are being freed that will coalesce soon. In case that
         * is happening, add the free page to the tail of the list
         * so it's less likely to be used soon and more likely to be merged
         * as a higher order page
         */
        if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
                struct page *higher_page, *higher_buddy;
                combined_idx = buddy_idx & page_idx;
                higher_page = page + (combined_idx - page_idx);
                buddy_idx = __find_buddy_index(combined_idx, order + 1);
                higher_buddy = higher_page + (buddy_idx - combined_idx);
                if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
                        list_add_tail(&page->lru,
                                &zone->free_area[order].free_list[migratetype]);
                        goto out;
                }
        }

        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
        zone->free_area[order].nr_free++;
}
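/*
 * Worked example (editor's note): __find_buddy_index() in mm/internal.h is
 * an XOR of the order bit, and combined_idx is the lower index of the pair:
 *
 *      buddy_idx    = page_idx ^ (1 << order);
 *      combined_idx = buddy_idx & page_idx;
 *
 * Freeing page_idx 12 at order 2: buddy_idx = 12 ^ 4 = 8. If the order-2
 * page at index 8 is also free, the pair merges into the order-3 page at
 * combined_idx = 8 & 12 = 8 and the loop retries one order higher.
 */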
static inline int free_pages_check(struct page *page)
{
        const char *bad_reason = NULL;
        unsigned long bad_flags = 0;

        if (unlikely(page_mapcount(page)))
                bad_reason = "nonzero mapcount";
        if (unlikely(page->mapping != NULL))
                bad_reason = "non-NULL mapping";
        if (unlikely(atomic_read(&page->_count) != 0))
                bad_reason = "nonzero _count";
        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
                bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
                bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
        }
#ifdef CONFIG_MEMCG
        if (unlikely(page->mem_cgroup))
                bad_reason = "page still charged to cgroup";
#endif
        if (unlikely(bad_reason)) {
                bad_page(page, bad_reason, bad_flags);
                return 1;
        }
        page_cpupid_reset_last(page);
        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        return 0;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                                        struct per_cpu_pages *pcp)
{
        int migratetype = 0;
        int batch_free = 0;
        int to_free = count;
        unsigned long nr_scanned;

        spin_lock(&zone->lock);
        nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
        if (nr_scanned)
                __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

        while (to_free) {
                struct page *page;
                struct list_head *list;

                /*
                 * Remove pages from lists in a round-robin fashion. A
                 * batch_free count is maintained that is incremented when an
                 * empty list is encountered.  This is so more pages are freed
                 * off fuller lists instead of spinning excessively around empty
                 * lists
                 */
                do {
                        batch_free++;
                        if (++migratetype == MIGRATE_PCPTYPES)
                                migratetype = 0;
                        list = &pcp->lists[migratetype];
                } while (list_empty(list));

                /* This is the only non-empty list. Free them all. */
                if (batch_free == MIGRATE_PCPTYPES)
                        batch_free = to_free;

                do {
                        int mt; /* migratetype of the to-be-freed page */

                        page = list_entry(list->prev, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);

                        mt = get_pcppage_migratetype(page);
                        /* MIGRATE_ISOLATE page should not go to pcplists */
                        VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
                        /* Pageblock could have been isolated meanwhile */
                        if (unlikely(has_isolate_pageblock(zone)))
                                mt = get_pageblock_migratetype(page);

                        __free_one_page(page, page_to_pfn(page), zone, 0, mt);
                        trace_mm_page_pcpu_drain(page, 0, mt);
                } while (--to_free && --batch_free && !list_empty(list));
        }
        spin_unlock(&zone->lock);
}
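/*
 * Editor's trace of the round-robin above (illustrative): suppose only the
 * MOVABLE pcplist is non-empty. The inner do/while walks past the two
 * empty lists, leaving batch_free == 3 == MIGRATE_PCPTYPES, which widens
 * the batch to everything still to be freed (to_free), so the single
 * non-empty list is drained in one pass instead of one page per lap. When
 * several lists are populated, fuller lists receive proportionally larger
 * batches because every skipped empty list bumps batch_free.
 */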
static void free_one_page(struct zone *zone,
                                struct page *page, unsigned long pfn,
                                unsigned int order,
                                int migratetype)
{
        unsigned long nr_scanned;
        spin_lock(&zone->lock);
        nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
        if (nr_scanned)
                __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

        if (unlikely(has_isolate_pageblock(zone) ||
                is_migrate_isolate(migratetype))) {
                migratetype = get_pfnblock_migratetype(page, pfn);
        }
        __free_one_page(page, pfn, zone, order, migratetype);
        spin_unlock(&zone->lock);
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
        int ret = 1;

        /*
         * We rely on page->lru.next never having bit 0 set, unless the page
         * is PageTail(). Let's make sure that's true even for poisoned ->lru.
         */
        BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

        if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
                ret = 0;
                goto out;
        }
        if (unlikely(!PageTail(page))) {
                bad_page(page, "PageTail not set", 0);
                goto out;
        }
        if (unlikely(compound_head(page) != head_page)) {
                bad_page(page, "compound_head not consistent", 0);
                goto out;
        }
        ret = 0;
out:
        clear_compound_head(page);
        return ret;
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
                                unsigned long zone, int nid)
{
        set_page_links(page, zone, nid, pfn);
        init_page_count(page);
        page_mapcount_reset(page);
        page_cpupid_reset_last(page);

        INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
        /* The shift won't overflow because ZONE_NORMAL is below 4G. */
        if (!is_highmem_idx(zone))
                set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
                                        int nid)
{
        return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void init_reserved_page(unsigned long pfn)
{
        pg_data_t *pgdat;
        int nid, zid;

        if (!early_page_uninitialised(pfn))
                return;

        nid = early_pfn_to_nid(pfn);
        pgdat = NODE_DATA(nid);

        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                struct zone *zone = &pgdat->node_zones[zid];

                if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
                        break;
        }
        __init_single_pfn(pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
{
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long end_pfn = PFN_UP(end);

        for (; start_pfn < end_pfn; start_pfn++) {
                if (pfn_valid(start_pfn)) {
                        struct page *page = pfn_to_page(start_pfn);

                        init_reserved_page(start_pfn);

                        /* Avoid false-positive PageTail() */
                        INIT_LIST_HEAD(&page->lru);

                        SetPageReserved(page);
                }
        }
}
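/*
 * Editor's note on the rounding above: PFN_DOWN()/PFN_UP() come from
 * include/linux/pfn.h:
 *
 *      PFN_DOWN(x) == (x) >> PAGE_SHIFT
 *      PFN_UP(x)   == ((x) + PAGE_SIZE - 1) >> PAGE_SHIFT
 *
 * With 4K pages, reserve_bootmem_region(0x1800, 0x3800) therefore reserves
 * pfns 1, 2 and 3: every page frame the byte range [start, end) touches,
 * including the partial first and last pages.
 */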
static bool free_pages_prepare(struct page *page, unsigned int order)
{
        bool compound = PageCompound(page);
        int i, bad = 0;

        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

        trace_mm_page_free(page, order);
        kmemcheck_free_shadow(page, order);
        kasan_free_pages(page, order);

        if (PageAnon(page))
                page->mapping = NULL;
        bad += free_pages_check(page);
        for (i = 1; i < (1 << order); i++) {
                if (compound)
                        bad += free_tail_pages_check(page, page + i);
                bad += free_pages_check(page + i);
        }
        if (bad)
                return false;

        reset_page_owner(page, order);

        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),
                                           PAGE_SIZE << order);
                debug_check_no_obj_freed(page_address(page),
                                           PAGE_SIZE << order);
        }
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);

        return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
        unsigned long flags;
        int migratetype;
        unsigned long pfn = page_to_pfn(page);

        if (!free_pages_prepare(page, order))
                return;

        migratetype = get_pfnblock_migratetype(page, pfn);
        local_irq_save(flags);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, pfn, order, migratetype);
        local_irq_restore(flags);
}

static void __init __free_pages_boot_core(struct page *page,
                                        unsigned long pfn, unsigned int order)
{
        unsigned int nr_pages = 1 << order;
        struct page *p = page;
        unsigned int loop;

        prefetchw(p);
        for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
                prefetchw(p + 1);
                __ClearPageReserved(p);
                set_page_count(p, 0);
        }
        __ClearPageReserved(p);
        set_page_count(p, 0);

        page_zone(page)->managed_pages += nr_pages;
        set_page_refcounted(page);
        __free_pages(page, order);
}

#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
        defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

int __meminit early_pfn_to_nid(unsigned long pfn)
{
        static DEFINE_SPINLOCK(early_pfn_lock);
        int nid;

        spin_lock(&early_pfn_lock);
        nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
        if (nid < 0)
                nid = 0;
        spin_unlock(&early_pfn_lock);

        return nid;
}
#endif

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
                                        struct mminit_pfnnid_cache *state)
{
        int nid;

        nid = __early_pfn_to_nid(pfn, state);
        if (nid >= 0 && nid != node)
                return false;
        return true;
}

/* Only safe to use early in boot when initialisation is single-threaded */
static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
        return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
}

#else

static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
        return true;
}
static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
                                        struct mminit_pfnnid_cache *state)
{
        return true;
}
#endif
void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
                                                        unsigned int order)
{
        if (early_page_uninitialised(pfn))
                return;
        return __free_pages_boot_core(page, pfn, order);
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(struct page *page,
                                        unsigned long pfn, int nr_pages)
{
        int i;

        if (!page)
                return;

        /* Free a large naturally-aligned chunk if possible */
        if (nr_pages == MAX_ORDER_NR_PAGES &&
            (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
                set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                __free_pages_boot_core(page, pfn, MAX_ORDER-1);
                return;
        }

        for (i = 0; i < nr_pages; i++, page++, pfn++)
                __free_pages_boot_core(page, pfn, 0);
}
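/*
 * Editor's arithmetic for the fast path above: MAX_ORDER_NR_PAGES is
 * 1 << (MAX_ORDER - 1), i.e. 1024 pages (a naturally aligned 4MiB chunk)
 * with the default MAX_ORDER of 11 and 4K pages. Such a chunk is handed to
 * the buddy allocator as one MAX_ORDER-1 page rather than 1024 separate
 * order-0 frees.
 */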
/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
        if (atomic_dec_and_test(&pgdat_init_n_undone))
                complete(&pgdat_init_all_done_comp);
}

/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
        pg_data_t *pgdat = data;
        int nid = pgdat->node_id;
        struct mminit_pfnnid_cache nid_init_state = { };
        unsigned long start = jiffies;
        unsigned long nr_pages = 0;
        unsigned long walk_start, walk_end;
        int i, zid;
        struct zone *zone;
        unsigned long first_init_pfn = pgdat->first_deferred_pfn;
        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

        if (first_init_pfn == ULONG_MAX) {
                pgdat_init_report_one_done();
                return 0;
        }

        /* Bind memory initialisation thread to a local node if possible */
        if (!cpumask_empty(cpumask))
                set_cpus_allowed_ptr(current, cpumask);

        /* Sanity check boundaries */
        BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
        BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
        pgdat->first_deferred_pfn = ULONG_MAX;

        /* Only the highest zone is deferred so find it */
        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                zone = pgdat->node_zones + zid;
                if (first_init_pfn < zone_end_pfn(zone))
                        break;
        }

        for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
                unsigned long pfn, end_pfn;
                struct page *page = NULL;
                struct page *free_base_page = NULL;
                unsigned long free_base_pfn = 0;
                int nr_to_free = 0;

                end_pfn = min(walk_end, zone_end_pfn(zone));
                pfn = first_init_pfn;
                if (pfn < walk_start)
                        pfn = walk_start;
                if (pfn < zone->zone_start_pfn)
                        pfn = zone->zone_start_pfn;

                for (; pfn < end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                goto free_range;

                        /*
                         * Ensure pfn_valid is checked every
                         * MAX_ORDER_NR_PAGES for memory holes
                         */
                        if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
                                if (!pfn_valid(pfn)) {
                                        page = NULL;
                                        goto free_range;
                                }
                        }

                        if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
                                page = NULL;
                                goto free_range;
                        }

                        /* Minimise pfn page lookups and scheduler checks */
                        if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
                                page++;
                        } else {
                                nr_pages += nr_to_free;
                                deferred_free_range(free_base_page,
                                                free_base_pfn, nr_to_free);
                                free_base_page = NULL;
                                free_base_pfn = nr_to_free = 0;

                                page = pfn_to_page(pfn);
                                cond_resched();
                        }

                        if (page->flags) {
                                VM_BUG_ON(page_zone(page) != zone);
                                goto free_range;
                        }

                        __init_single_page(page, pfn, zid, nid);
                        if (!free_base_page) {
                                free_base_page = page;
                                free_base_pfn = pfn;
                                nr_to_free = 0;
                        }
                        nr_to_free++;

                        /* Where possible, batch up pages for a single free */
                        continue;
free_range:
                        /* Free the current block of pages to allocator */
                        nr_pages += nr_to_free;
                        deferred_free_range(free_base_page, free_base_pfn,
                                                nr_to_free);
                        free_base_page = NULL;
                        free_base_pfn = nr_to_free = 0;
                }

                first_init_pfn = max(end_pfn, first_init_pfn);
        }

        /* Sanity check that the next zone really is unpopulated */
        WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

        pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
                                        jiffies_to_msecs(jiffies - start));

        pgdat_init_report_one_done();
        return 0;
}

void __init page_alloc_init_late(void)
{
        int nid;

        /* There will be num_node_state(N_MEMORY) threads */
        atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
        for_each_node_state(nid, N_MEMORY) {
                kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
        }

        /* Block until all are initialised */
        wait_for_completion(&pgdat_init_all_done_comp);

        /* Reinit limits that are based on free pages after the kernel is up */
        files_maxfiles_init();
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
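/*
 * Editor's summary of the synchronisation above (illustrative): one
 * "pgdatinit%d" kthread runs per N_MEMORY node; each decrements
 * pgdat_init_n_undone via pgdat_init_report_one_done(), and the last
 * worker's atomic_dec_and_test() fires pgdat_init_all_done_comp,
 * releasing page_alloc_init_late() from wait_for_completion().
 */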
#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
        unsigned i = pageblock_nr_pages;
        struct page *p = page;

        do {
                __ClearPageReserved(p);
                set_page_count(p, 0);
        } while (++p, --i);

        set_pageblock_migratetype(page, MIGRATE_CMA);

        if (pageblock_order >= MAX_ORDER) {
                i = pageblock_nr_pages;
                p = page;
                do {
                        set_page_refcounted(p);
                        __free_pages(p, MAX_ORDER - 1);
                        p += MAX_ORDER_NR_PAGES;
                } while (i -= MAX_ORDER_NR_PAGES);
        } else {
                set_page_refcounted(page);
                __free_pages(page, pageblock_order);
        }

        adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area,
        int migratetype)
{
        unsigned long size = 1 << high;

        while (high > low) {
                area--;
                high--;
                size >>= 1;
                VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

                if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
                        debug_guardpage_enabled() &&
                        high < debug_guardpage_minorder()) {
                        /*
                         * Mark as guard pages (or a single page); this
                         * allows them to be merged back into the allocator
                         * when the buddy is freed. The corresponding page
                         * table entries are not touched; the pages stay
                         * not-present in the virtual address space.
                         */
                        set_page_guard(zone, &page[size], high, migratetype);
                        continue;
                }
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
        }
}
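/*
 * Worked example (editor's sketch): expand(zone, page, low = 2, high = 5,
 * area, mt) carves a requested order-2 page out of an order-5 block of 32
 * pages, freeing the upper half at each step:
 *
 *      size 32 -> 16: page[16..31] queued as an order-4 free block
 *      size 16 ->  8: page[8..15]  queued as an order-3 free block
 *      size  8 ->  4: page[4..7]   queued as an order-2 free block
 *
 * page[0..3] remains as the order-2 allocation returned to the caller.
 */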
/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
        const char *bad_reason = NULL;
        unsigned long bad_flags = 0;

        if (unlikely(page_mapcount(page)))
                bad_reason = "nonzero mapcount";
        if (unlikely(page->mapping != NULL))
                bad_reason = "non-NULL mapping";
        if (unlikely(atomic_read(&page->_count) != 0))
                bad_reason = "nonzero _count";
        if (unlikely(page->flags & __PG_HWPOISON)) {
                bad_reason = "HWPoisoned (hardware-corrupted)";
                bad_flags = __PG_HWPOISON;
        }
        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
                bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
                bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
        }
#ifdef CONFIG_MEMCG
        if (unlikely(page->mem_cgroup))
                bad_reason = "page still charged to cgroup";
#endif
        if (unlikely(bad_reason)) {
                bad_page(page, bad_reason, bad_flags);
                return 1;
        }
        return 0;
}

static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
                                                                int alloc_flags)
{
        int i;

        for (i = 0; i < (1 << order); i++) {
                struct page *p = page + i;
                if (unlikely(check_new_page(p)))
                        return 1;
        }

        set_page_private(page, 0);
        set_page_refcounted(page);

        arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);
        kasan_alloc_pages(page, order);

        if (gfp_flags & __GFP_ZERO)
                for (i = 0; i < (1 << order); i++)
                        clear_highpage(page + i);

        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);

        set_page_owner(page, order, gfp_flags);

        /*
         * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
         * allocate the page. The expectation is that the caller is taking
         * steps that will free more memory. The caller should avoid the page
         * being used for !PFMEMALLOC purposes.
         */
        if (alloc_flags & ALLOC_NO_WATERMARKS)
                set_page_pfmemalloc(page);
        else
                clear_page_pfmemalloc(page);

        return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        unsigned int current_order;
        struct free_area *area;
        struct page *page;

        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
                if (list_empty(&area->free_list[migratetype]))
                        continue;

                page = list_entry(area->free_list[migratetype].next,
                                                        struct page, lru);
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
                expand(zone, page, order, current_order, area, migratetype);
                set_pcppage_migratetype(page, migratetype);
                return page;
        }

        return NULL;
}


/*
 * This array describes the order in which the free lists of other migrate
 * types are fallen back on when the free lists for the desired migrate
 * type are depleted.
 */
static int fallbacks[MIGRATE_TYPES][4] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
#ifdef CONFIG_CMA
        [MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
#endif
#ifdef CONFIG_MEMORY_ISOLATION
        [MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
#endif
};

#ifdef CONFIG_CMA
static struct page *__rmqueue_cma_fallback(struct zone *zone,
                                        unsigned int order)
{
        return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
                                        unsigned int order) { return NULL; }
#endif
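/*
 * Editor's illustration of how the table above is consumed (see
 * find_suitable_fallback() further down): an UNMOVABLE request scans
 * RECLAIMABLE, then MOVABLE, and gives up at the MIGRATE_TYPES sentinel:
 *
 *      for (i = 0;; i++) {
 *              fallback_mt = fallbacks[MIGRATE_UNMOVABLE][i];
 *              if (fallback_mt == MIGRATE_TYPES)
 *                      break;                  // no fallback left
 *              ...
 *      }
 *
 * The CMA and ISOLATE rows hold only the sentinel because they are never
 * generic fallback sources; CMA is reached solely through
 * __rmqueue_cma_fallback().
 */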
/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
                          struct page *start_page, struct page *end_page,
                          int migratetype)
{
        struct page *page;
        unsigned int order;
        int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
        /*
         * page_zone is not safe to call in this context when
         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
         * anyway as we check zone boundaries in move_freepages_block().
         * Remove at a later date when no bug reports exist related to
         * grouping pages by mobility
         */
        VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

        for (page = start_page; page <= end_page;) {
                /* Make sure we are not inadvertently changing nodes */
                VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }

                if (!PageBuddy(page)) {
                        page++;
                        continue;
                }

                order = page_order(page);
                list_move(&page->lru,
                          &zone->free_area[order].free_list[migratetype]);
                page += 1 << order;
                pages_moved += 1 << order;
        }

        return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
                                int migratetype)
{
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;

        start_pfn = page_to_pfn(page);
        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
        start_page = pfn_to_page(start_pfn);
        end_page = start_page + pageblock_nr_pages - 1;
        end_pfn = start_pfn + pageblock_nr_pages - 1;

        /* Do not cross zone boundaries */
        if (!zone_spans_pfn(zone, start_pfn))
                start_page = page;
        if (!zone_spans_pfn(zone, end_pfn))
                return 0;

        return move_freepages(zone, start_page, end_page, migratetype);
}
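/*
 * Editor's arithmetic for the alignment above: with pageblock_order == 9
 * (512-page, 2MiB pageblocks on x86), a page at pfn 1000 gives
 *
 *      start_pfn = 1000 & ~511 = 512   (block spans pfns 512..1023)
 *      end_pfn   = 512 + 512 - 1 = 1023
 *
 * If the block straddles the start of the zone, the range is clamped to
 * the passed-in page; if it runs past the end of the zone, nothing is
 * moved.
 */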
static void change_pageblock_range(struct page *pageblock_page,
                                        int start_order, int migratetype)
{
        int nr_pageblocks = 1 << (start_order - pageblock_order);

        while (nr_pageblocks--) {
                set_pageblock_migratetype(pageblock_page, migratetype);
                pageblock_page += pageblock_nr_pages;
        }
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
        /*
         * Leaving this order check here is intentional, even though the
         * check below is a more relaxed one. We can actually steal the
         * whole pageblock if this condition is met, but the check below
         * does not guarantee it; it is merely a heuristic and could be
         * changed at any time.
         */
        if (order >= pageblock_order)
                return true;

        if (order >= pageblock_order / 2 ||
                start_mt == MIGRATE_RECLAIMABLE ||
                start_mt == MIGRATE_UNMOVABLE ||
                page_group_by_mobility_disabled)
                return true;

        return false;
}

/*
 * This function implements the actual steal behaviour. If the order is
 * large enough, we can steal the whole pageblock. If not, we first move
 * the freepages in this pageblock and check whether half of the pages were
 * moved. If they were, we can change the migratetype of the pageblock and
 * permanently use its pages as the requested migratetype in the future.
 */
static void steal_suitable_fallback(struct zone *zone, struct page *page,
                                                          int start_type)
{
        unsigned int current_order = page_order(page);
        int pages;

        /* Take ownership for orders >= pageblock_order */
        if (current_order >= pageblock_order) {
                change_pageblock_range(page, current_order, start_type);
                return;
        }

        pages = move_freepages_block(zone, page, start_type);

        /* Claim the whole block if over half of it is free */
        if (pages >= (1 << (pageblock_order-1)) ||
                        page_group_by_mobility_disabled)
                set_pageblock_migratetype(page, start_type);
}

/*
 * Check whether there is a suitable fallback freepage with requested order.
 * If only_stealable is true, this function returns fallback_mt only if
 * we can steal other freepages all together. This would help to reduce
 * fragmentation due to mixed migratetype pages in one pageblock.
 */
int find_suitable_fallback(struct free_area *area, unsigned int order,
                        int migratetype, bool only_stealable, bool *can_steal)
{
        int i;
        int fallback_mt;

        if (area->nr_free == 0)
                return -1;

        *can_steal = false;
        for (i = 0;; i++) {
                fallback_mt = fallbacks[migratetype][i];
                if (fallback_mt == MIGRATE_TYPES)
                        break;

                if (list_empty(&area->free_list[fallback_mt]))
                        continue;

                if (can_steal_fallback(order, migratetype))
                        *can_steal = true;

                if (!only_stealable)
                        return fallback_mt;

                if (*can_steal)
                        return fallback_mt;
        }

        return -1;
}

/*
 * Reserve a pageblock for exclusive use of high-order atomic allocations if
 * there are no empty page blocks that contain a page with a suitable order
 */
static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
                                unsigned int alloc_order)
{
        int mt;
        unsigned long max_managed, flags;

        /*
         * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
         * Check is race-prone but harmless.
         */
        max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
        if (zone->nr_reserved_highatomic >= max_managed)
                return;

        spin_lock_irqsave(&zone->lock, flags);

        /* Recheck the nr_reserved_highatomic limit under the lock */
        if (zone->nr_reserved_highatomic >= max_managed)
                goto out_unlock;

        /* Yoink! */
        mt = get_pageblock_migratetype(page);
        if (mt != MIGRATE_HIGHATOMIC &&
                        !is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
                zone->nr_reserved_highatomic += pageblock_nr_pages;
                set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
                move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
        }

out_unlock:
        spin_unlock_irqrestore(&zone->lock, flags);
}
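/*
 * Worked example (editor's sketch): a zone with 4GiB managed (1048576 4K
 * pages) and 512-page pageblocks gives max_managed = 10485 + 512 = 10997
 * pages. Reservations grow in whole pageblocks, so at most about 21-22
 * blocks (roughly 1% of the zone plus one block of slack) ever sit in
 * MIGRATE_HIGHATOMIC.
 */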
/*
 * Used when an allocation is about to fail under memory pressure. This
 * potentially hurts the reliability of high-order allocations when under
 * intense memory pressure but failed atomic allocations should be easier
 * to recover from than an OOM.
 */
static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
{
        struct zonelist *zonelist = ac->zonelist;
        unsigned long flags;
        struct zoneref *z;
        struct zone *zone;
        struct page *page;
        int order;

        for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
                                                                ac->nodemask) {
                /* Preserve at least one pageblock */
                if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                for (order = 0; order < MAX_ORDER; order++) {
                        struct free_area *area = &(zone->free_area[order]);

                        if (list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
                                continue;

                        page = list_entry(area->free_list[MIGRATE_HIGHATOMIC].next,
                                                struct page, lru);

                        /*
                         * It should never happen but changes to locking could
                         * inadvertently allow a per-cpu drain to add pages
                         * to MIGRATE_HIGHATOMIC while unreserving so be safe
                         * and watch for underflows.
                         */
                        zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
                                zone->nr_reserved_highatomic);

                        /*
                         * Convert to ac->migratetype and avoid the normal
                         * pageblock stealing heuristics. Minimally, the caller
                         * is doing the work and needs the pages. More
                         * importantly, if the block was always converted to
                         * MIGRATE_UNMOVABLE or another type then the number
                         * of pageblocks that cannot be completely freed
                         * may increase.
                         */
                        set_pageblock_migratetype(page, ac->migratetype);
                        move_freepages_block(zone, page, ac->migratetype);
                        spin_unlock_irqrestore(&zone->lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
{
        struct free_area *area;
        unsigned int current_order;
        struct page *page;
        int fallback_mt;
        bool can_steal;

        /* Find the largest possible block of pages in the other list */
        for (current_order = MAX_ORDER-1;
                                current_order >= order && current_order <= MAX_ORDER-1;
                                --current_order) {
                area = &(zone->free_area[current_order]);
                fallback_mt = find_suitable_fallback(area, current_order,
                                start_migratetype, false, &can_steal);
                if (fallback_mt == -1)
                        continue;

                page = list_entry(area->free_list[fallback_mt].next,
                                                struct page, lru);
                if (can_steal)
                        steal_suitable_fallback(zone, page, start_migratetype);

                /* Remove the page from the freelists */
                area->nr_free--;
                list_del(&page->lru);
                rmv_page_order(page);

                expand(zone, page, order, current_order, area,
                                        start_migratetype);
                /*
                 * The pcppage_migratetype may differ from pageblock's
                 * migratetype depending on the decisions in
                 * find_suitable_fallback(). This is OK as long as it does not
                 * differ for MIGRATE_CMA pageblocks. Those can be used as
                 * fallback only via special __rmqueue_cma_fallback() function
                 */
                set_pcppage_migratetype(page, start_migratetype);

                trace_mm_page_alloc_extfrag(page, order, current_order,
                        start_migratetype, fallback_mt);

                return page;
        }

        return NULL;
}
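/*
 * Editor's illustration of the CMA caveat above: after a fallback steal the
 * cached pcppage migratetype is the *requested* type even if the pageblock
 * keeps its old markings, which is harmless for the normal types. It must
 * never happen for MIGRATE_CMA, though, because rmqueue_bulk() keys the
 * NR_FREE_CMA_PAGES accounting off the cached value:
 *
 *      if (is_migrate_cma(get_pcppage_migratetype(page)))
 *              __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, -(1 << order));
 *
 * CMA pageblocks are therefore only taken via __rmqueue_cma_fallback(),
 * which leaves the cached type as MIGRATE_CMA.
 */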
/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
                                int migratetype, gfp_t gfp_flags)
{
        struct page *page;

        page = __rmqueue_smallest(zone, order, migratetype);
        if (unlikely(!page)) {
                if (migratetype == MIGRATE_MOVABLE)
                        page = __rmqueue_cma_fallback(zone, order);

                if (!page)
                        page = __rmqueue_fallback(zone, order, migratetype);
        }

        trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
                        int migratetype, bool cold)
{
        int i;

        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype, 0);
                if (unlikely(page == NULL))
                        break;

                /*
                 * Split buddy pages returned by expand() are received here
                 * in physical page order. The page is added to the caller's
                 * list and the list head then moves forward. From the
                 * caller's perspective, the linked list is ordered by page
                 * number under some conditions. This is useful for IO
                 * devices that can merge IO requests if the physical pages
                 * are ordered properly.
                 */
                if (likely(!cold))
                        list_add(&page->lru, list);
                else
                        list_add_tail(&page->lru, list);
                list = &page->lru;
                if (is_migrate_cma(get_pcppage_migratetype(page)))
                        __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
                                              -(1 << order));
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return i;
}
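/*
 * Editor's trace of the list building above (illustrative): for hot
 * (!cold) requests, list_add() behind the advancing cursor chains
 * physically consecutive pages P0 -> P1 -> P2 from the head of the list;
 * for cold requests, list_add_tail() before the cursor yields the same
 * ascending order when read from the tail, which is the end the
 * allocation path consumes cold pages from. Either way consumers see
 * pages in roughly physical order, which is what the comment above
 * promises to IO devices.
 */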
1903 */ 1904static void drain_pages(unsigned int cpu) 1905{ 1906 struct zone *zone; 1907 1908 for_each_populated_zone(zone) { 1909 drain_pages_zone(cpu, zone); 1910 } 1911} 1912 1913/* 1914 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 1915 * 1916 * The CPU has to be pinned. When zone parameter is non-NULL, spill just 1917 * the single zone's pages. 1918 */ 1919void drain_local_pages(struct zone *zone) 1920{ 1921 int cpu = smp_processor_id(); 1922 1923 if (zone) 1924 drain_pages_zone(cpu, zone); 1925 else 1926 drain_pages(cpu); 1927} 1928 1929/* 1930 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 1931 * 1932 * When zone parameter is non-NULL, spill just the single zone's pages. 1933 * 1934 * Note that this code is protected against sending an IPI to an offline 1935 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs: 1936 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but 1937 * nothing keeps CPUs from showing up after we populated the cpumask and 1938 * before the call to on_each_cpu_mask(). 1939 */ 1940void drain_all_pages(struct zone *zone) 1941{ 1942 int cpu; 1943 1944 /* 1945 * Allocate in the BSS so we wont require allocation in 1946 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 1947 */ 1948 static cpumask_t cpus_with_pcps; 1949 1950 /* 1951 * We don't care about racing with CPU hotplug event 1952 * as offline notification will cause the notified 1953 * cpu to drain that CPU pcps and on_each_cpu_mask 1954 * disables preemption as part of its processing 1955 */ 1956 for_each_online_cpu(cpu) { 1957 struct per_cpu_pageset *pcp; 1958 struct zone *z; 1959 bool has_pcps = false; 1960 1961 if (zone) { 1962 pcp = per_cpu_ptr(zone->pageset, cpu); 1963 if (pcp->pcp.count) 1964 has_pcps = true; 1965 } else { 1966 for_each_populated_zone(z) { 1967 pcp = per_cpu_ptr(z->pageset, cpu); 1968 if (pcp->pcp.count) { 1969 has_pcps = true; 1970 break; 1971 } 1972 } 1973 } 1974 1975 if (has_pcps) 1976 cpumask_set_cpu(cpu, &cpus_with_pcps); 1977 else 1978 cpumask_clear_cpu(cpu, &cpus_with_pcps); 1979 } 1980 on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages, 1981 zone, 1); 1982} 1983 1984#ifdef CONFIG_HIBERNATION 1985 1986void mark_free_pages(struct zone *zone) 1987{ 1988 unsigned long pfn, max_zone_pfn; 1989 unsigned long flags; 1990 unsigned int order, t; 1991 struct list_head *curr; 1992 1993 if (zone_is_empty(zone)) 1994 return; 1995 1996 spin_lock_irqsave(&zone->lock, flags); 1997 1998 max_zone_pfn = zone_end_pfn(zone); 1999 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 2000 if (pfn_valid(pfn)) { 2001 struct page *page = pfn_to_page(pfn); 2002 2003 if (!swsusp_page_is_forbidden(page)) 2004 swsusp_unset_page_free(page); 2005 } 2006 2007 for_each_migratetype_order(order, t) { 2008 list_for_each(curr, &zone->free_area[order].free_list[t]) { 2009 unsigned long i; 2010 2011 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 2012 for (i = 0; i < (1UL << order); i++) 2013 swsusp_set_page_free(pfn_to_page(pfn + i)); 2014 } 2015 } 2016 spin_unlock_irqrestore(&zone->lock, flags); 2017} 2018#endif /* CONFIG_PM */ 2019 2020/* 2021 * Free a 0-order page 2022 * cold == true ? 
free a cold page : free a hot page 2023 */ 2024void free_hot_cold_page(struct page *page, bool cold) 2025{ 2026 struct zone *zone = page_zone(page); 2027 struct per_cpu_pages *pcp; 2028 unsigned long flags; 2029 unsigned long pfn = page_to_pfn(page); 2030 int migratetype; 2031 2032 if (!free_pages_prepare(page, 0)) 2033 return; 2034 2035 migratetype = get_pfnblock_migratetype(page, pfn); 2036 set_pcppage_migratetype(page, migratetype); 2037 local_irq_save(flags); 2038 __count_vm_event(PGFREE); 2039 2040 /* 2041 * We only track unmovable, reclaimable and movable on pcp lists. 2042 * Free ISOLATE pages back to the allocator because they are being 2043 * offlined but treat RESERVE as movable pages so we can get those 2044 * areas back if necessary. Otherwise, we may have to free 2045 * excessively into the page allocator 2046 */ 2047 if (migratetype >= MIGRATE_PCPTYPES) { 2048 if (unlikely(is_migrate_isolate(migratetype))) { 2049 free_one_page(zone, page, pfn, 0, migratetype); 2050 goto out; 2051 } 2052 migratetype = MIGRATE_MOVABLE; 2053 } 2054 2055 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2056 if (!cold) 2057 list_add(&page->lru, &pcp->lists[migratetype]); 2058 else 2059 list_add_tail(&page->lru, &pcp->lists[migratetype]); 2060 pcp->count++; 2061 if (pcp->count >= pcp->high) { 2062 unsigned long batch = READ_ONCE(pcp->batch); 2063 free_pcppages_bulk(zone, batch, pcp); 2064 pcp->count -= batch; 2065 } 2066 2067out: 2068 local_irq_restore(flags); 2069} 2070 2071/* 2072 * Free a list of 0-order pages 2073 */ 2074void free_hot_cold_page_list(struct list_head *list, bool cold) 2075{ 2076 struct page *page, *next; 2077 2078 list_for_each_entry_safe(page, next, list, lru) { 2079 trace_mm_page_free_batched(page, cold); 2080 free_hot_cold_page(page, cold); 2081 } 2082} 2083 2084/* 2085 * split_page takes a non-compound higher-order page, and splits it into 2086 * n (1<<order) sub-pages: page[0..n] 2087 * Each sub-page must be freed individually. 2088 * 2089 * Note: this is probably too low level an operation for use in drivers. 2090 * Please consult with lkml before using this in your driver. 2091 */ 2092void split_page(struct page *page, unsigned int order) 2093{ 2094 int i; 2095 gfp_t gfp_mask; 2096 2097 VM_BUG_ON_PAGE(PageCompound(page), page); 2098 VM_BUG_ON_PAGE(!page_count(page), page); 2099 2100#ifdef CONFIG_KMEMCHECK 2101 /* 2102 * Split shadow pages too, because free(page[0]) would 2103 * otherwise free the whole shadow. 
2104 */ 2105 if (kmemcheck_page_is_tracked(page)) 2106 split_page(virt_to_page(page[0].shadow), order); 2107#endif 2108 2109 gfp_mask = get_page_owner_gfp(page); 2110 set_page_owner(page, 0, gfp_mask); 2111 for (i = 1; i < (1 << order); i++) { 2112 set_page_refcounted(page + i); 2113 set_page_owner(page + i, 0, gfp_mask); 2114 } 2115} 2116EXPORT_SYMBOL_GPL(split_page); 2117 2118int __isolate_free_page(struct page *page, unsigned int order) 2119{ 2120 unsigned long watermark; 2121 struct zone *zone; 2122 int mt; 2123 2124 BUG_ON(!PageBuddy(page)); 2125 2126 zone = page_zone(page); 2127 mt = get_pageblock_migratetype(page); 2128 2129 if (!is_migrate_isolate(mt)) { 2130 /* Obey watermarks as if the page was being allocated */ 2131 watermark = low_wmark_pages(zone) + (1 << order); 2132 if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) 2133 return 0; 2134 2135 __mod_zone_freepage_state(zone, -(1UL << order), mt); 2136 } 2137 2138 /* Remove page from free list */ 2139 list_del(&page->lru); 2140 zone->free_area[order].nr_free--; 2141 rmv_page_order(page); 2142 2143 set_page_owner(page, order, __GFP_MOVABLE); 2144 2145 /* Set the pageblock if the isolated page is at least a pageblock */ 2146 if (order >= pageblock_order - 1) { 2147 struct page *endpage = page + (1 << order) - 1; 2148 for (; page < endpage; page += pageblock_nr_pages) { 2149 int mt = get_pageblock_migratetype(page); 2150 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)) 2151 set_pageblock_migratetype(page, 2152 MIGRATE_MOVABLE); 2153 } 2154 } 2155 2156 2157 return 1UL << order; 2158} 2159 2160/* 2161 * Similar to split_page except the page is already free. As this is only 2162 * being used for migration, the migratetype of the block also changes. 2163 * As this is called with interrupts disabled, the caller is responsible 2164 * for calling arch_alloc_page() and kernel_map_page() after interrupts 2165 * are enabled. 2166 * 2167 * Note: this is probably too low level an operation for use in drivers. 2168 * Please consult with lkml before using this in your driver. 2169 */ 2170int split_free_page(struct page *page) 2171{ 2172 unsigned int order; 2173 int nr_pages; 2174 2175 order = page_order(page); 2176 2177 nr_pages = __isolate_free_page(page, order); 2178 if (!nr_pages) 2179 return 0; 2180 2181 /* Split into individual pages */ 2182 set_page_refcounted(page); 2183 split_page(page, order); 2184 return nr_pages; 2185} 2186 2187/* 2188 * Allocate a page from the given zone. Use pcplists for order-0 allocations. 2189 */ 2190static inline 2191struct page *buffered_rmqueue(struct zone *preferred_zone, 2192 struct zone *zone, unsigned int order, 2193 gfp_t gfp_flags, int alloc_flags, int migratetype) 2194{ 2195 unsigned long flags; 2196 struct page *page; 2197 bool cold = ((gfp_flags & __GFP_COLD) != 0); 2198 2199 if (likely(order == 0)) { 2200 struct per_cpu_pages *pcp; 2201 struct list_head *list; 2202 2203 local_irq_save(flags); 2204 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2205 list = &pcp->lists[migratetype]; 2206 if (list_empty(list)) { 2207 pcp->count += rmqueue_bulk(zone, 0, 2208 pcp->batch, list, 2209 migratetype, cold); 2210 if (unlikely(list_empty(list))) 2211 goto failed; 2212 } 2213 2214 if (cold) 2215 page = list_entry(list->prev, struct page, lru); 2216 else 2217 page = list_entry(list->next, struct page, lru); 2218 2219 list_del(&page->lru); 2220 pcp->count--; 2221 } else { 2222 if (unlikely(gfp_flags & __GFP_NOFAIL)) { 2223 /* 2224 * __GFP_NOFAIL is not to be used in new code. 
2225 * 2226 * All __GFP_NOFAIL callers should be fixed so that they 2227 * properly detect and handle allocation failures. 2228 * 2229 * We most definitely don't want callers attempting to 2230 * allocate greater than order-1 page units with 2231 * __GFP_NOFAIL. 2232 */ 2233 WARN_ON_ONCE(order > 1); 2234 } 2235 spin_lock_irqsave(&zone->lock, flags); 2236 2237 page = NULL; 2238 if (alloc_flags & ALLOC_HARDER) { 2239 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2240 if (page) 2241 trace_mm_page_alloc_zone_locked(page, order, migratetype); 2242 } 2243 if (!page) 2244 page = __rmqueue(zone, order, migratetype, gfp_flags); 2245 spin_unlock(&zone->lock); 2246 if (!page) 2247 goto failed; 2248 __mod_zone_freepage_state(zone, -(1 << order), 2249 get_pcppage_migratetype(page)); 2250 } 2251 2252 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); 2253 if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && 2254 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) 2255 set_bit(ZONE_FAIR_DEPLETED, &zone->flags); 2256 2257 __count_zone_vm_events(PGALLOC, zone, 1 << order); 2258 zone_statistics(preferred_zone, zone, gfp_flags); 2259 local_irq_restore(flags); 2260 2261 VM_BUG_ON_PAGE(bad_range(zone, page), page); 2262 return page; 2263 2264failed: 2265 local_irq_restore(flags); 2266 return NULL; 2267} 2268 2269#ifdef CONFIG_FAIL_PAGE_ALLOC 2270 2271static struct { 2272 struct fault_attr attr; 2273 2274 bool ignore_gfp_highmem; 2275 bool ignore_gfp_reclaim; 2276 u32 min_order; 2277} fail_page_alloc = { 2278 .attr = FAULT_ATTR_INITIALIZER, 2279 .ignore_gfp_reclaim = true, 2280 .ignore_gfp_highmem = true, 2281 .min_order = 1, 2282}; 2283 2284static int __init setup_fail_page_alloc(char *str) 2285{ 2286 return setup_fault_attr(&fail_page_alloc.attr, str); 2287} 2288__setup("fail_page_alloc=", setup_fail_page_alloc); 2289 2290static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2291{ 2292 if (order < fail_page_alloc.min_order) 2293 return false; 2294 if (gfp_mask & __GFP_NOFAIL) 2295 return false; 2296 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 2297 return false; 2298 if (fail_page_alloc.ignore_gfp_reclaim && 2299 (gfp_mask & __GFP_DIRECT_RECLAIM)) 2300 return false; 2301 2302 return should_fail(&fail_page_alloc.attr, 1 << order); 2303} 2304 2305#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 2306 2307static int __init fail_page_alloc_debugfs(void) 2308{ 2309 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 2310 struct dentry *dir; 2311 2312 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 2313 &fail_page_alloc.attr); 2314 if (IS_ERR(dir)) 2315 return PTR_ERR(dir); 2316 2317 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir, 2318 &fail_page_alloc.ignore_gfp_reclaim)) 2319 goto fail; 2320 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir, 2321 &fail_page_alloc.ignore_gfp_highmem)) 2322 goto fail; 2323 if (!debugfs_create_u32("min-order", mode, dir, 2324 &fail_page_alloc.min_order)) 2325 goto fail; 2326 2327 return 0; 2328fail: 2329 debugfs_remove_recursive(dir); 2330 2331 return -ENOMEM; 2332} 2333 2334late_initcall(fail_page_alloc_debugfs); 2335 2336#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 2337 2338#else /* CONFIG_FAIL_PAGE_ALLOC */ 2339 2340static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2341{ 2342 return false; 2343} 2344 2345#endif /* CONFIG_FAIL_PAGE_ALLOC */ 2346 2347/* 2348 * Return true if free base pages are above 'mark'. 
For high-order checks it 2349 * will return true of the order-0 watermark is reached and there is at least 2350 * one free page of a suitable size. Checking now avoids taking the zone lock 2351 * to check in the allocation paths if no pages are free. 2352 */ 2353static bool __zone_watermark_ok(struct zone *z, unsigned int order, 2354 unsigned long mark, int classzone_idx, int alloc_flags, 2355 long free_pages) 2356{ 2357 long min = mark; 2358 int o; 2359 const int alloc_harder = (alloc_flags & ALLOC_HARDER); 2360 2361 /* free_pages may go negative - that's OK */ 2362 free_pages -= (1 << order) - 1; 2363 2364 if (alloc_flags & ALLOC_HIGH) 2365 min -= min / 2; 2366 2367 /* 2368 * If the caller does not have rights to ALLOC_HARDER then subtract 2369 * the high-atomic reserves. This will over-estimate the size of the 2370 * atomic reserve but it avoids a search. 2371 */ 2372 if (likely(!alloc_harder)) 2373 free_pages -= z->nr_reserved_highatomic; 2374 else 2375 min -= min / 4; 2376 2377#ifdef CONFIG_CMA 2378 /* If allocation can't use CMA areas don't use free CMA pages */ 2379 if (!(alloc_flags & ALLOC_CMA)) 2380 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); 2381#endif 2382 2383 /* 2384 * Check watermarks for an order-0 allocation request. If these 2385 * are not met, then a high-order request also cannot go ahead 2386 * even if a suitable page happened to be free. 2387 */ 2388 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 2389 return false; 2390 2391 /* If this is an order-0 request then the watermark is fine */ 2392 if (!order) 2393 return true; 2394 2395 /* For a high-order request, check at least one suitable page is free */ 2396 for (o = order; o < MAX_ORDER; o++) { 2397 struct free_area *area = &z->free_area[o]; 2398 int mt; 2399 2400 if (!area->nr_free) 2401 continue; 2402 2403 if (alloc_harder) 2404 return true; 2405 2406 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 2407 if (!list_empty(&area->free_list[mt])) 2408 return true; 2409 } 2410 2411#ifdef CONFIG_CMA 2412 if ((alloc_flags & ALLOC_CMA) && 2413 !list_empty(&area->free_list[MIGRATE_CMA])) { 2414 return true; 2415 } 2416#endif 2417 } 2418 return false; 2419} 2420 2421bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2422 int classzone_idx, int alloc_flags) 2423{ 2424 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 2425 zone_page_state(z, NR_FREE_PAGES)); 2426} 2427 2428bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 2429 unsigned long mark, int classzone_idx) 2430{ 2431 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2432 2433 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 2434 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 2435 2436 return __zone_watermark_ok(z, order, mark, classzone_idx, 0, 2437 free_pages); 2438} 2439 2440#ifdef CONFIG_NUMA 2441static bool zone_local(struct zone *local_zone, struct zone *zone) 2442{ 2443 return local_zone->node == zone->node; 2444} 2445 2446static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2447{ 2448 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) < 2449 RECLAIM_DISTANCE; 2450} 2451#else /* CONFIG_NUMA */ 2452static bool zone_local(struct zone *local_zone, struct zone *zone) 2453{ 2454 return true; 2455} 2456 2457static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2458{ 2459 return true; 2460} 2461#endif /* CONFIG_NUMA */ 2462 2463static void reset_alloc_batches(struct zone *preferred_zone) 2464{ 2465 struct 
zone *zone = preferred_zone->zone_pgdat->node_zones; 2466 2467 do { 2468 mod_zone_page_state(zone, NR_ALLOC_BATCH, 2469 high_wmark_pages(zone) - low_wmark_pages(zone) - 2470 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); 2471 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags); 2472 } while (zone++ != preferred_zone); 2473} 2474 2475/* 2476 * get_page_from_freelist goes through the zonelist trying to allocate 2477 * a page. 2478 */ 2479static struct page * 2480get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 2481 const struct alloc_context *ac) 2482{ 2483 struct zonelist *zonelist = ac->zonelist; 2484 struct zoneref *z; 2485 struct page *page = NULL; 2486 struct zone *zone; 2487 int nr_fair_skipped = 0; 2488 bool zonelist_rescan; 2489 2490zonelist_scan: 2491 zonelist_rescan = false; 2492 2493 /* 2494 * Scan zonelist, looking for a zone with enough free. 2495 * See also __cpuset_node_allowed() comment in kernel/cpuset.c. 2496 */ 2497 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, 2498 ac->nodemask) { 2499 unsigned long mark; 2500 2501 if (cpusets_enabled() && 2502 (alloc_flags & ALLOC_CPUSET) && 2503 !cpuset_zone_allowed(zone, gfp_mask)) 2504 continue; 2505 /* 2506 * Distribute pages in proportion to the individual 2507 * zone size to ensure fair page aging. The zone a 2508 * page was allocated in should have no effect on the 2509 * time the page has in memory before being reclaimed. 2510 */ 2511 if (alloc_flags & ALLOC_FAIR) { 2512 if (!zone_local(ac->preferred_zone, zone)) 2513 break; 2514 if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) { 2515 nr_fair_skipped++; 2516 continue; 2517 } 2518 } 2519 /* 2520 * When allocating a page cache page for writing, we 2521 * want to get it from a zone that is within its dirty 2522 * limit, such that no single zone holds more than its 2523 * proportional share of globally allowed dirty pages. 2524 * The dirty limits take into account the zone's 2525 * lowmem reserves and high watermark so that kswapd 2526 * should be able to balance it without having to 2527 * write pages from its LRU list. 2528 * 2529 * This may look like it could increase pressure on 2530 * lower zones by failing allocations in higher zones 2531 * before they are full. But the pages that do spill 2532 * over are limited as the lower zones are protected 2533 * by this very same mechanism. It should not become 2534 * a practical burden to them. 2535 * 2536 * XXX: For now, allow allocations to potentially 2537 * exceed the per-zone dirty limit in the slowpath 2538 * (spread_dirty_pages unset) before going into reclaim, 2539 * which is important when on a NUMA setup the allowed 2540 * zones are together not big enough to reach the 2541 * global limit. The proper fix for these situations 2542 * will require awareness of zones in the 2543 * dirty-throttling and the flusher threads. 
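 *
 * (spread_dirty_pages is only set for __GFP_WRITE allocations on the
 * fast path and is cleared before entering the slowpath; see
 * __alloc_pages_nodemask() below.)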
2544 */ 2545 if (ac->spread_dirty_pages && !zone_dirty_ok(zone)) 2546 continue; 2547 2548 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; 2549 if (!zone_watermark_ok(zone, order, mark, 2550 ac->classzone_idx, alloc_flags)) { 2551 int ret; 2552 2553 /* Checked here to keep the fast path fast */ 2554 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 2555 if (alloc_flags & ALLOC_NO_WATERMARKS) 2556 goto try_this_zone; 2557 2558 if (zone_reclaim_mode == 0 || 2559 !zone_allows_reclaim(ac->preferred_zone, zone)) 2560 continue; 2561 2562 ret = zone_reclaim(zone, gfp_mask, order); 2563 switch (ret) { 2564 case ZONE_RECLAIM_NOSCAN: 2565 /* did not scan */ 2566 continue; 2567 case ZONE_RECLAIM_FULL: 2568 /* scanned but unreclaimable */ 2569 continue; 2570 default: 2571 /* did we reclaim enough */ 2572 if (zone_watermark_ok(zone, order, mark, 2573 ac->classzone_idx, alloc_flags)) 2574 goto try_this_zone; 2575 2576 continue; 2577 } 2578 } 2579 2580try_this_zone: 2581 page = buffered_rmqueue(ac->preferred_zone, zone, order, 2582 gfp_mask, alloc_flags, ac->migratetype); 2583 if (page) { 2584 if (prep_new_page(page, order, gfp_mask, alloc_flags)) 2585 goto try_this_zone; 2586 2587 /* 2588 * If this is a high-order atomic allocation then check 2589 * if the pageblock should be reserved for the future 2590 */ 2591 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) 2592 reserve_highatomic_pageblock(page, zone, order); 2593 2594 return page; 2595 } 2596 } 2597 2598 /* 2599 * The first pass makes sure allocations are spread fairly within the 2600 * local node. However, the local node might have free pages left 2601 * after the fairness batches are exhausted, and remote zones haven't 2602 * even been considered yet. Try once more without fairness, and 2603 * include remote zones now, before entering the slowpath and waking 2604 * kswapd: prefer spilling to a remote zone over swapping locally. 2605 */ 2606 if (alloc_flags & ALLOC_FAIR) { 2607 alloc_flags &= ~ALLOC_FAIR; 2608 if (nr_fair_skipped) { 2609 zonelist_rescan = true; 2610 reset_alloc_batches(ac->preferred_zone); 2611 } 2612 if (nr_online_nodes > 1) 2613 zonelist_rescan = true; 2614 } 2615 2616 if (zonelist_rescan) 2617 goto zonelist_scan; 2618 2619 return NULL; 2620} 2621 2622/* 2623 * Large machines with many possible nodes should not always dump per-node 2624 * meminfo in irq context. 2625 */ 2626static inline bool should_suppress_show_mem(void) 2627{ 2628 bool ret = false; 2629 2630#if NODES_SHIFT > 8 2631 ret = in_interrupt(); 2632#endif 2633 return ret; 2634} 2635 2636static DEFINE_RATELIMIT_STATE(nopage_rs, 2637 DEFAULT_RATELIMIT_INTERVAL, 2638 DEFAULT_RATELIMIT_BURST); 2639 2640void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...) 2641{ 2642 unsigned int filter = SHOW_MEM_FILTER_NODES; 2643 2644 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || 2645 debug_guardpage_minorder() > 0) 2646 return; 2647 2648 /* 2649 * This documents exceptions given to allocations in certain 2650 * contexts that are allowed to allocate outside current's set 2651 * of allowed nodes. 
2652 */ 2653 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2654 if (test_thread_flag(TIF_MEMDIE) || 2655 (current->flags & (PF_MEMALLOC | PF_EXITING))) 2656 filter &= ~SHOW_MEM_FILTER_NODES; 2657 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 2658 filter &= ~SHOW_MEM_FILTER_NODES; 2659 2660 if (fmt) { 2661 struct va_format vaf; 2662 va_list args; 2663 2664 va_start(args, fmt); 2665 2666 vaf.fmt = fmt; 2667 vaf.va = &args; 2668 2669 pr_warn("%pV", &vaf); 2670 2671 va_end(args); 2672 } 2673 2674 pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n", 2675 current->comm, order, gfp_mask); 2676 2677 dump_stack(); 2678 if (!should_suppress_show_mem()) 2679 show_mem(filter); 2680} 2681 2682static inline struct page * 2683__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 2684 const struct alloc_context *ac, unsigned long *did_some_progress) 2685{ 2686 struct oom_control oc = { 2687 .zonelist = ac->zonelist, 2688 .nodemask = ac->nodemask, 2689 .gfp_mask = gfp_mask, 2690 .order = order, 2691 }; 2692 struct page *page; 2693 2694 *did_some_progress = 0; 2695 2696 /* 2697 * Acquire the oom lock. If that fails, somebody else is 2698 * making progress for us. 2699 */ 2700 if (!mutex_trylock(&oom_lock)) { 2701 *did_some_progress = 1; 2702 schedule_timeout_uninterruptible(1); 2703 return NULL; 2704 } 2705 2706 /* 2707 * Go through the zonelist yet one more time, keep very high watermark 2708 * here, this is only to catch a parallel oom killing, we must fail if 2709 * we're still under heavy pressure. 2710 */ 2711 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, 2712 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 2713 if (page) 2714 goto out; 2715 2716 if (!(gfp_mask & __GFP_NOFAIL)) { 2717 /* Coredumps can quickly deplete all memory reserves */ 2718 if (current->flags & PF_DUMPCORE) 2719 goto out; 2720 /* The OOM killer will not help higher order allocs */ 2721 if (order > PAGE_ALLOC_COSTLY_ORDER) 2722 goto out; 2723 /* The OOM killer does not needlessly kill tasks for lowmem */ 2724 if (ac->high_zoneidx < ZONE_NORMAL) 2725 goto out; 2726 /* The OOM killer does not compensate for IO-less reclaim */ 2727 if (!(gfp_mask & __GFP_FS)) { 2728 /* 2729 * XXX: Page reclaim didn't yield anything, 2730 * and the OOM killer can't be invoked, but 2731 * keep looping as per tradition. 
2732 */ 2733 *did_some_progress = 1; 2734 goto out; 2735 } 2736 if (pm_suspended_storage()) 2737 goto out; 2738 /* The OOM killer may not free memory on a specific node */ 2739 if (gfp_mask & __GFP_THISNODE) 2740 goto out; 2741 } 2742 /* Exhausted what can be done so it's blamo time */ 2743 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) 2744 *did_some_progress = 1; 2745out: 2746 mutex_unlock(&oom_lock); 2747 return page; 2748} 2749 2750#ifdef CONFIG_COMPACTION 2751/* Try memory compaction for high-order allocations before reclaim */ 2752static struct page * 2753__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2754 int alloc_flags, const struct alloc_context *ac, 2755 enum migrate_mode mode, int *contended_compaction, 2756 bool *deferred_compaction) 2757{ 2758 unsigned long compact_result; 2759 struct page *page; 2760 2761 if (!order) 2762 return NULL; 2763 2764 current->flags |= PF_MEMALLOC; 2765 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 2766 mode, contended_compaction); 2767 current->flags &= ~PF_MEMALLOC; 2768 2769 switch (compact_result) { 2770 case COMPACT_DEFERRED: 2771 *deferred_compaction = true; 2772 /* fall-through */ 2773 case COMPACT_SKIPPED: 2774 return NULL; 2775 default: 2776 break; 2777 } 2778 2779 /* 2780 * At least in one zone compaction wasn't deferred or skipped, so let's 2781 * count a compaction stall 2782 */ 2783 count_vm_event(COMPACTSTALL); 2784 2785 page = get_page_from_freelist(gfp_mask, order, 2786 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); 2787 2788 if (page) { 2789 struct zone *zone = page_zone(page); 2790 2791 zone->compact_blockskip_flush = false; 2792 compaction_defer_reset(zone, order, true); 2793 count_vm_event(COMPACTSUCCESS); 2794 return page; 2795 } 2796 2797 /* 2798 * It's bad if compaction run occurs and fails. The most likely reason 2799 * is that pages exist, but not enough to satisfy watermarks. 
2800 */ 2801 count_vm_event(COMPACTFAIL); 2802 2803 cond_resched(); 2804 2805 return NULL; 2806} 2807#else 2808static inline struct page * 2809__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2810 int alloc_flags, const struct alloc_context *ac, 2811 enum migrate_mode mode, int *contended_compaction, 2812 bool *deferred_compaction) 2813{ 2814 return NULL; 2815} 2816#endif /* CONFIG_COMPACTION */ 2817 2818/* Perform direct synchronous page reclaim */ 2819static int 2820__perform_reclaim(gfp_t gfp_mask, unsigned int order, 2821 const struct alloc_context *ac) 2822{ 2823 struct reclaim_state reclaim_state; 2824 int progress; 2825 2826 cond_resched(); 2827 2828 /* We now go into synchronous reclaim */ 2829 cpuset_memory_pressure_bump(); 2830 current->flags |= PF_MEMALLOC; 2831 lockdep_set_current_reclaim_state(gfp_mask); 2832 reclaim_state.reclaimed_slab = 0; 2833 current->reclaim_state = &reclaim_state; 2834 2835 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 2836 ac->nodemask); 2837 2838 current->reclaim_state = NULL; 2839 lockdep_clear_current_reclaim_state(); 2840 current->flags &= ~PF_MEMALLOC; 2841 2842 cond_resched(); 2843 2844 return progress; 2845} 2846 2847/* The really slow allocator path where we enter direct reclaim */ 2848static inline struct page * 2849__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 2850 int alloc_flags, const struct alloc_context *ac, 2851 unsigned long *did_some_progress) 2852{ 2853 struct page *page = NULL; 2854 bool drained = false; 2855 2856 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 2857 if (unlikely(!(*did_some_progress))) 2858 return NULL; 2859 2860retry: 2861 page = get_page_from_freelist(gfp_mask, order, 2862 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); 2863 2864 /* 2865 * If an allocation failed after direct reclaim, it could be because 2866 * pages are pinned on the per-cpu lists or in high alloc reserves. 2867 * Shrink them them and try again 2868 */ 2869 if (!page && !drained) { 2870 unreserve_highatomic_pageblock(ac); 2871 drain_all_pages(NULL); 2872 drained = true; 2873 goto retry; 2874 } 2875 2876 return page; 2877} 2878 2879/* 2880 * This is called in the allocator slow-path if the allocation request is of 2881 * sufficient urgency to ignore watermarks and take other desperate measures 2882 */ 2883static inline struct page * 2884__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, 2885 const struct alloc_context *ac) 2886{ 2887 struct page *page; 2888 2889 do { 2890 page = get_page_from_freelist(gfp_mask, order, 2891 ALLOC_NO_WATERMARKS, ac); 2892 2893 if (!page && gfp_mask & __GFP_NOFAIL) 2894 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, 2895 HZ/50); 2896 } while (!page && (gfp_mask & __GFP_NOFAIL)); 2897 2898 return page; 2899} 2900 2901static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac) 2902{ 2903 struct zoneref *z; 2904 struct zone *zone; 2905 2906 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 2907 ac->high_zoneidx, ac->nodemask) 2908 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone)); 2909} 2910 2911static inline int 2912gfp_to_alloc_flags(gfp_t gfp_mask) 2913{ 2914 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 2915 2916 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. 
*/ 2917 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 2918 2919 /* 2920 * The caller may dip into page reserves a bit more if the caller 2921 * cannot run direct reclaim, or if the caller has realtime scheduling 2922 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 2923 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). 2924 */ 2925 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); 2926 2927 if (gfp_mask & __GFP_ATOMIC) { 2928 /* 2929 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 2930 * if it can't schedule. 2931 */ 2932 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2933 alloc_flags |= ALLOC_HARDER; 2934 /* 2935 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 2936 * comment for __cpuset_node_allowed(). 2937 */ 2938 alloc_flags &= ~ALLOC_CPUSET; 2939 } else if (unlikely(rt_task(current)) && !in_interrupt()) 2940 alloc_flags |= ALLOC_HARDER; 2941 2942 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { 2943 if (gfp_mask & __GFP_MEMALLOC) 2944 alloc_flags |= ALLOC_NO_WATERMARKS; 2945 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 2946 alloc_flags |= ALLOC_NO_WATERMARKS; 2947 else if (!in_interrupt() && 2948 ((current->flags & PF_MEMALLOC) || 2949 unlikely(test_thread_flag(TIF_MEMDIE)))) 2950 alloc_flags |= ALLOC_NO_WATERMARKS; 2951 } 2952#ifdef CONFIG_CMA 2953 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 2954 alloc_flags |= ALLOC_CMA; 2955#endif 2956 return alloc_flags; 2957} 2958 2959bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 2960{ 2961 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS); 2962} 2963 2964static inline bool is_thp_gfp_mask(gfp_t gfp_mask) 2965{ 2966 return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE; 2967} 2968 2969static inline struct page * 2970__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 2971 struct alloc_context *ac) 2972{ 2973 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 2974 struct page *page = NULL; 2975 int alloc_flags; 2976 unsigned long pages_reclaimed = 0; 2977 unsigned long did_some_progress; 2978 enum migrate_mode migration_mode = MIGRATE_ASYNC; 2979 bool deferred_compaction = false; 2980 int contended_compaction = COMPACT_CONTENDED_NONE; 2981 2982 /* 2983 * In the slowpath, we sanity check order to avoid ever trying to 2984 * reclaim >= MAX_ORDER areas which will never succeed. Callers may 2985 * be using allocators in order of preference for an area that is 2986 * too large. 2987 */ 2988 if (order >= MAX_ORDER) { 2989 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); 2990 return NULL; 2991 } 2992 2993 /* 2994 * We also sanity check to catch abuse of atomic reserves being used by 2995 * callers that are not in atomic context. 2996 */ 2997 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == 2998 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 2999 gfp_mask &= ~__GFP_ATOMIC; 3000 3001 /* 3002 * If this allocation cannot block and it is for a specific node, then 3003 * fail early. There's no need to wakeup kswapd or retry for a 3004 * speculative node-specific allocation. 3005 */ 3006 if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !can_direct_reclaim) 3007 goto nopage; 3008 3009retry: 3010 if (gfp_mask & __GFP_KSWAPD_RECLAIM) 3011 wake_all_kswapds(order, ac); 3012 3013 /* 3014 * OK, we're below the kswapd watermark and have kicked background 3015 * reclaim. Now things get more complex, so set up alloc_flags according 3016 * to how we want to proceed. 
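 *
 * As a rough illustration derived from gfp_to_alloc_flags() above (not an
 * exhaustive list): a plain GFP_KERNEL request from a non-realtime task
 * keeps ALLOC_WMARK_MIN|ALLOC_CPUSET, whereas a GFP_ATOMIC request ends
 * up with ALLOC_WMARK_MIN|ALLOC_HIGH|ALLOC_HARDER and ALLOC_CPUSET
 * cleared.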
3017 */ 3018 alloc_flags = gfp_to_alloc_flags(gfp_mask); 3019 3020 /* 3021 * Find the true preferred zone if the allocation is unconstrained by 3022 * cpusets. 3023 */ 3024 if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) { 3025 struct zoneref *preferred_zoneref; 3026 preferred_zoneref = first_zones_zonelist(ac->zonelist, 3027 ac->high_zoneidx, NULL, &ac->preferred_zone); 3028 ac->classzone_idx = zonelist_zone_idx(preferred_zoneref); 3029 } 3030 3031 /* This is the last chance, in general, before the goto nopage. */ 3032 page = get_page_from_freelist(gfp_mask, order, 3033 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); 3034 if (page) 3035 goto got_pg; 3036 3037 /* Allocate without watermarks if the context allows */ 3038 if (alloc_flags & ALLOC_NO_WATERMARKS) { 3039 /* 3040 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds 3041 * the allocation is high priority and these type of 3042 * allocations are system rather than user orientated 3043 */ 3044 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); 3045 3046 page = __alloc_pages_high_priority(gfp_mask, order, ac); 3047 3048 if (page) { 3049 goto got_pg; 3050 } 3051 } 3052 3053 /* Caller is not willing to reclaim, we can't balance anything */ 3054 if (!can_direct_reclaim) { 3055 /* 3056 * All existing users of the deprecated __GFP_NOFAIL are 3057 * blockable, so warn of any new users that actually allow this 3058 * type of allocation to fail. 3059 */ 3060 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL); 3061 goto nopage; 3062 } 3063 3064 /* Avoid recursion of direct reclaim */ 3065 if (current->flags & PF_MEMALLOC) 3066 goto nopage; 3067 3068 /* Avoid allocations with no watermarks from looping endlessly */ 3069 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) 3070 goto nopage; 3071 3072 /* 3073 * Try direct compaction. The first pass is asynchronous. Subsequent 3074 * attempts after direct reclaim are synchronous 3075 */ 3076 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 3077 migration_mode, 3078 &contended_compaction, 3079 &deferred_compaction); 3080 if (page) 3081 goto got_pg; 3082 3083 /* Checks for THP-specific high-order allocations */ 3084 if (is_thp_gfp_mask(gfp_mask)) { 3085 /* 3086 * If compaction is deferred for high-order allocations, it is 3087 * because sync compaction recently failed. If this is the case 3088 * and the caller requested a THP allocation, we do not want 3089 * to heavily disrupt the system, so we fail the allocation 3090 * instead of entering direct reclaim. 3091 */ 3092 if (deferred_compaction) 3093 goto nopage; 3094 3095 /* 3096 * In all zones where compaction was attempted (and not 3097 * deferred or skipped), lock contention has been detected. 3098 * For THP allocation we do not want to disrupt the others 3099 * so we fallback to base pages instead. 3100 */ 3101 if (contended_compaction == COMPACT_CONTENDED_LOCK) 3102 goto nopage; 3103 3104 /* 3105 * If compaction was aborted due to need_resched(), we do not 3106 * want to further increase allocation latency, unless it is 3107 * khugepaged trying to collapse. 3108 */ 3109 if (contended_compaction == COMPACT_CONTENDED_SCHED 3110 && !(current->flags & PF_KTHREAD)) 3111 goto nopage; 3112 } 3113 3114 /* 3115 * It can become very expensive to allocate transparent hugepages at 3116 * fault, so use asynchronous memory compaction for THP unless it is 3117 * khugepaged trying to collapse. 
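 *
 * In other words, the check below upgrades every request except a THP
 * page fault to MIGRATE_SYNC_LIGHT for the retries; khugepaged, being a
 * kernel thread (PF_KTHREAD), is upgraded as well.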
3118 */ 3119 if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD)) 3120 migration_mode = MIGRATE_SYNC_LIGHT; 3121 3122 /* Try direct reclaim and then allocating */ 3123 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 3124 &did_some_progress); 3125 if (page) 3126 goto got_pg; 3127 3128 /* Do not loop if specifically requested */ 3129 if (gfp_mask & __GFP_NORETRY) 3130 goto noretry; 3131 3132 /* Keep reclaiming pages as long as there is reasonable progress */ 3133 pages_reclaimed += did_some_progress; 3134 if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) || 3135 ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) { 3136 /* Wait for some write requests to complete then retry */ 3137 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50); 3138 goto retry; 3139 } 3140 3141 /* Reclaim has failed us, start killing things */ 3142 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 3143 if (page) 3144 goto got_pg; 3145 3146 /* Retry as long as the OOM killer is making progress */ 3147 if (did_some_progress) 3148 goto retry; 3149 3150noretry: 3151 /* 3152 * High-order allocations do not necessarily loop after 3153 * direct reclaim and reclaim/compaction depends on compaction 3154 * being called after reclaim so call directly if necessary 3155 */ 3156 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, 3157 ac, migration_mode, 3158 &contended_compaction, 3159 &deferred_compaction); 3160 if (page) 3161 goto got_pg; 3162nopage: 3163 warn_alloc_failed(gfp_mask, order, NULL); 3164got_pg: 3165 return page; 3166} 3167 3168/* 3169 * This is the 'heart' of the zoned buddy allocator. 3170 */ 3171struct page * 3172__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 3173 struct zonelist *zonelist, nodemask_t *nodemask) 3174{ 3175 struct zoneref *preferred_zoneref; 3176 struct page *page = NULL; 3177 unsigned int cpuset_mems_cookie; 3178 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR; 3179 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */ 3180 struct alloc_context ac = { 3181 .high_zoneidx = gfp_zone(gfp_mask), 3182 .nodemask = nodemask, 3183 .migratetype = gfpflags_to_migratetype(gfp_mask), 3184 }; 3185 3186 gfp_mask &= gfp_allowed_mask; 3187 3188 lockdep_trace_alloc(gfp_mask); 3189 3190 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); 3191 3192 if (should_fail_alloc_page(gfp_mask, order)) 3193 return NULL; 3194 3195 /* 3196 * Check the zones suitable for the gfp_mask contain at least one 3197 * valid zone. It's possible to have an empty zonelist as a result 3198 * of __GFP_THISNODE and a memoryless node 3199 */ 3200 if (unlikely(!zonelist->_zonerefs->zone)) 3201 return NULL; 3202 3203 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) 3204 alloc_flags |= ALLOC_CMA; 3205 3206retry_cpuset: 3207 cpuset_mems_cookie = read_mems_allowed_begin(); 3208 3209 /* We set it here, as __alloc_pages_slowpath might have changed it */ 3210 ac.zonelist = zonelist; 3211 3212 /* Dirty zone balancing only done in the fast path */ 3213 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); 3214 3215 /* The preferred zone is used for statistics later */ 3216 preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx, 3217 ac.nodemask ? 
: &cpuset_current_mems_allowed, 3218 &ac.preferred_zone); 3219 if (!ac.preferred_zone) 3220 goto out; 3221 ac.classzone_idx = zonelist_zone_idx(preferred_zoneref); 3222 3223 /* First allocation attempt */ 3224 alloc_mask = gfp_mask|__GFP_HARDWALL; 3225 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); 3226 if (unlikely(!page)) { 3227 /* 3228 * Runtime PM, block IO and its error handling path 3229 * can deadlock because I/O on the device might not 3230 * complete. 3231 */ 3232 alloc_mask = memalloc_noio_flags(gfp_mask); 3233 ac.spread_dirty_pages = false; 3234 3235 page = __alloc_pages_slowpath(alloc_mask, order, &ac); 3236 } 3237 3238 if (kmemcheck_enabled && page) 3239 kmemcheck_pagealloc_alloc(page, order, gfp_mask); 3240 3241 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); 3242 3243out: 3244 /* 3245 * When updating a task's mems_allowed, it is possible to race with 3246 * parallel threads in such a way that an allocation can fail while 3247 * the mask is being updated. If a page allocation is about to fail, 3248 * check if the cpuset changed during allocation and if so, retry. 3249 */ 3250 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 3251 goto retry_cpuset; 3252 3253 return page; 3254} 3255EXPORT_SYMBOL(__alloc_pages_nodemask); 3256 3257/* 3258 * Common helper functions. 3259 */ 3260unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 3261{ 3262 struct page *page; 3263 3264 /* 3265 * __get_free_pages() returns a 32-bit address, which cannot represent 3266 * a highmem page 3267 */ 3268 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 3269 3270 page = alloc_pages(gfp_mask, order); 3271 if (!page) 3272 return 0; 3273 return (unsigned long) page_address(page); 3274} 3275EXPORT_SYMBOL(__get_free_pages); 3276 3277unsigned long get_zeroed_page(gfp_t gfp_mask) 3278{ 3279 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 3280} 3281EXPORT_SYMBOL(get_zeroed_page); 3282 3283void __free_pages(struct page *page, unsigned int order) 3284{ 3285 if (put_page_testzero(page)) { 3286 if (order == 0) 3287 free_hot_cold_page(page, false); 3288 else 3289 __free_pages_ok(page, order); 3290 } 3291} 3292 3293EXPORT_SYMBOL(__free_pages); 3294 3295void free_pages(unsigned long addr, unsigned int order) 3296{ 3297 if (addr != 0) { 3298 VM_BUG_ON(!virt_addr_valid((void *)addr)); 3299 __free_pages(virt_to_page((void *)addr), order); 3300 } 3301} 3302 3303EXPORT_SYMBOL(free_pages); 3304 3305/* 3306 * Page Fragment: 3307 * An arbitrary-length arbitrary-offset area of memory which resides 3308 * within a 0 or higher order page. Multiple fragments within that page 3309 * are individually refcounted, in the page's reference counter. 3310 * 3311 * The page_frag functions below provide a simple allocation framework for 3312 * page fragments. This is used by the network stack and network device 3313 * drivers to provide a backing region of memory for use as either an 3314 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 3315 */ 3316static struct page *__page_frag_refill(struct page_frag_cache *nc, 3317 gfp_t gfp_mask) 3318{ 3319 struct page *page = NULL; 3320 gfp_t gfp = gfp_mask; 3321 3322#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 3323 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 3324 __GFP_NOMEMALLOC; 3325 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 3326 PAGE_FRAG_CACHE_MAX_ORDER); 3327 nc->size = page ? 
PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 3328#endif 3329 if (unlikely(!page)) 3330 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 3331 3332 nc->va = page ? page_address(page) : NULL; 3333 3334 return page; 3335} 3336 3337void *__alloc_page_frag(struct page_frag_cache *nc, 3338 unsigned int fragsz, gfp_t gfp_mask) 3339{ 3340 unsigned int size = PAGE_SIZE; 3341 struct page *page; 3342 int offset; 3343 3344 if (unlikely(!nc->va)) { 3345refill: 3346 page = __page_frag_refill(nc, gfp_mask); 3347 if (!page) 3348 return NULL; 3349 3350#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 3351 /* if size can vary use size else just use PAGE_SIZE */ 3352 size = nc->size; 3353#endif 3354 /* Even if we own the page, we do not use atomic_set(). 3355 * This would break get_page_unless_zero() users. 3356 */ 3357 atomic_add(size - 1, &page->_count); 3358 3359 /* reset page count bias and offset to start of new frag */ 3360 nc->pfmemalloc = page_is_pfmemalloc(page); 3361 nc->pagecnt_bias = size; 3362 nc->offset = size; 3363 } 3364 3365 offset = nc->offset - fragsz; 3366 if (unlikely(offset < 0)) { 3367 page = virt_to_page(nc->va); 3368 3369 if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count)) 3370 goto refill; 3371 3372#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 3373 /* if size can vary use size else just use PAGE_SIZE */ 3374 size = nc->size; 3375#endif 3376 /* OK, page count is 0, we can safely set it */ 3377 atomic_set(&page->_count, size); 3378 3379 /* reset page count bias and offset to start of new frag */ 3380 nc->pagecnt_bias = size; 3381 offset = size - fragsz; 3382 } 3383 3384 nc->pagecnt_bias--; 3385 nc->offset = offset; 3386 3387 return nc->va + offset; 3388} 3389EXPORT_SYMBOL(__alloc_page_frag); 3390 3391/* 3392 * Frees a page fragment allocated out of either a compound or order 0 page. 3393 */ 3394void __free_page_frag(void *addr) 3395{ 3396 struct page *page = virt_to_head_page(addr); 3397 3398 if (unlikely(put_page_testzero(page))) 3399 __free_pages_ok(page, compound_order(page)); 3400} 3401EXPORT_SYMBOL(__free_page_frag); 3402 3403/* 3404 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter 3405 * of the current memory cgroup. 3406 * 3407 * It should be used when the caller would like to use kmalloc, but since the 3408 * allocation is large, it has to fall back to the page allocator. 3409 */ 3410struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) 3411{ 3412 struct page *page; 3413 3414 page = alloc_pages(gfp_mask, order); 3415 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { 3416 __free_pages(page, order); 3417 page = NULL; 3418 } 3419 return page; 3420} 3421 3422struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) 3423{ 3424 struct page *page; 3425 3426 page = alloc_pages_node(nid, gfp_mask, order); 3427 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { 3428 __free_pages(page, order); 3429 page = NULL; 3430 } 3431 return page; 3432} 3433 3434/* 3435 * __free_kmem_pages and free_kmem_pages will free pages allocated with 3436 * alloc_kmem_pages. 
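 *
 * A minimal usage sketch (illustrative only, not a caller that exists in
 * this file):
 *
 *	struct page *page = alloc_kmem_pages(GFP_KERNEL, 2);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	... use page_address(page) ...
 *	__free_kmem_pages(page, 2);
 *
 * The order passed to __free_kmem_pages() must match the one used for the
 * allocation.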
3437 */ 3438void __free_kmem_pages(struct page *page, unsigned int order) 3439{ 3440 memcg_kmem_uncharge(page, order); 3441 __free_pages(page, order); 3442} 3443 3444void free_kmem_pages(unsigned long addr, unsigned int order) 3445{ 3446 if (addr != 0) { 3447 VM_BUG_ON(!virt_addr_valid((void *)addr)); 3448 __free_kmem_pages(virt_to_page((void *)addr), order); 3449 } 3450} 3451 3452static void *make_alloc_exact(unsigned long addr, unsigned int order, 3453 size_t size) 3454{ 3455 if (addr) { 3456 unsigned long alloc_end = addr + (PAGE_SIZE << order); 3457 unsigned long used = addr + PAGE_ALIGN(size); 3458 3459 split_page(virt_to_page((void *)addr), order); 3460 while (used < alloc_end) { 3461 free_page(used); 3462 used += PAGE_SIZE; 3463 } 3464 } 3465 return (void *)addr; 3466} 3467 3468/** 3469 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 3470 * @size: the number of bytes to allocate 3471 * @gfp_mask: GFP flags for the allocation 3472 * 3473 * This function is similar to alloc_pages(), except that it allocates the 3474 * minimum number of pages to satisfy the request. alloc_pages() can only 3475 * allocate memory in power-of-two pages. 3476 * 3477 * This function is also limited by MAX_ORDER. 3478 * 3479 * Memory allocated by this function must be released by free_pages_exact(). 3480 */ 3481void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 3482{ 3483 unsigned int order = get_order(size); 3484 unsigned long addr; 3485 3486 addr = __get_free_pages(gfp_mask, order); 3487 return make_alloc_exact(addr, order, size); 3488} 3489EXPORT_SYMBOL(alloc_pages_exact); 3490 3491/** 3492 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 3493 * pages on a node. 3494 * @nid: the preferred node ID where memory should be allocated 3495 * @size: the number of bytes to allocate 3496 * @gfp_mask: GFP flags for the allocation 3497 * 3498 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 3499 * back. 3500 */ 3501void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 3502{ 3503 unsigned int order = get_order(size); 3504 struct page *p = alloc_pages_node(nid, gfp_mask, order); 3505 if (!p) 3506 return NULL; 3507 return make_alloc_exact((unsigned long)page_address(p), order, size); 3508} 3509 3510/** 3511 * free_pages_exact - release memory allocated via alloc_pages_exact() 3512 * @virt: the value returned by alloc_pages_exact. 3513 * @size: size of allocation, same value as passed to alloc_pages_exact(). 3514 * 3515 * Release the memory allocated by a previous call to alloc_pages_exact. 3516 */ 3517void free_pages_exact(void *virt, size_t size) 3518{ 3519 unsigned long addr = (unsigned long)virt; 3520 unsigned long end = addr + PAGE_ALIGN(size); 3521 3522 while (addr < end) { 3523 free_page(addr); 3524 addr += PAGE_SIZE; 3525 } 3526} 3527EXPORT_SYMBOL(free_pages_exact); 3528 3529/** 3530 * nr_free_zone_pages - count number of pages beyond high watermark 3531 * @offset: The zone index of the highest zone 3532 * 3533 * nr_free_zone_pages() counts the number of counts pages which are beyond the 3534 * high watermark within all zones at or below a given zone index. 
For each 3535 * zone, the number of pages is calculated as: 3536 * managed_pages - high_pages 3537 */ 3538static unsigned long nr_free_zone_pages(int offset) 3539{ 3540 struct zoneref *z; 3541 struct zone *zone; 3542 3543 /* Just pick one node, since fallback list is circular */ 3544 unsigned long sum = 0; 3545 3546 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 3547 3548 for_each_zone_zonelist(zone, z, zonelist, offset) { 3549 unsigned long size = zone->managed_pages; 3550 unsigned long high = high_wmark_pages(zone); 3551 if (size > high) 3552 sum += size - high; 3553 } 3554 3555 return sum; 3556} 3557 3558/** 3559 * nr_free_buffer_pages - count number of pages beyond high watermark 3560 * 3561 * nr_free_buffer_pages() counts the number of pages which are beyond the high 3562 * watermark within ZONE_DMA and ZONE_NORMAL. 3563 */ 3564unsigned long nr_free_buffer_pages(void) 3565{ 3566 return nr_free_zone_pages(gfp_zone(GFP_USER)); 3567} 3568EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 3569 3570/** 3571 * nr_free_pagecache_pages - count number of pages beyond high watermark 3572 * 3573 * nr_free_pagecache_pages() counts the number of pages which are beyond the 3574 * high watermark within all zones. 3575 */ 3576unsigned long nr_free_pagecache_pages(void) 3577{ 3578 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 3579} 3580 3581static inline void show_node(struct zone *zone) 3582{ 3583 if (IS_ENABLED(CONFIG_NUMA)) 3584 printk("Node %d ", zone_to_nid(zone)); 3585} 3586 3587void si_meminfo(struct sysinfo *val) 3588{ 3589 val->totalram = totalram_pages; 3590 val->sharedram = global_page_state(NR_SHMEM); 3591 val->freeram = global_page_state(NR_FREE_PAGES); 3592 val->bufferram = nr_blockdev_pages(); 3593 val->totalhigh = totalhigh_pages; 3594 val->freehigh = nr_free_highpages(); 3595 val->mem_unit = PAGE_SIZE; 3596} 3597 3598EXPORT_SYMBOL(si_meminfo); 3599 3600#ifdef CONFIG_NUMA 3601void si_meminfo_node(struct sysinfo *val, int nid) 3602{ 3603 int zone_type; /* needs to be signed */ 3604 unsigned long managed_pages = 0; 3605 pg_data_t *pgdat = NODE_DATA(nid); 3606 3607 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 3608 managed_pages += pgdat->node_zones[zone_type].managed_pages; 3609 val->totalram = managed_pages; 3610 val->sharedram = node_page_state(nid, NR_SHMEM); 3611 val->freeram = node_page_state(nid, NR_FREE_PAGES); 3612#ifdef CONFIG_HIGHMEM 3613 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages; 3614 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 3615 NR_FREE_PAGES); 3616#else 3617 val->totalhigh = 0; 3618 val->freehigh = 0; 3619#endif 3620 val->mem_unit = PAGE_SIZE; 3621} 3622#endif 3623 3624/* 3625 * Determine whether the node should be displayed or not, depending on whether 3626 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 
3627 */ 3628bool skip_free_areas_node(unsigned int flags, int nid) 3629{ 3630 bool ret = false; 3631 unsigned int cpuset_mems_cookie; 3632 3633 if (!(flags & SHOW_MEM_FILTER_NODES)) 3634 goto out; 3635 3636 do { 3637 cpuset_mems_cookie = read_mems_allowed_begin(); 3638 ret = !node_isset(nid, cpuset_current_mems_allowed); 3639 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 3640out: 3641 return ret; 3642} 3643 3644#define K(x) ((x) << (PAGE_SHIFT-10)) 3645 3646static void show_migration_types(unsigned char type) 3647{ 3648 static const char types[MIGRATE_TYPES] = { 3649 [MIGRATE_UNMOVABLE] = 'U', 3650 [MIGRATE_RECLAIMABLE] = 'E', 3651 [MIGRATE_MOVABLE] = 'M', 3652#ifdef CONFIG_CMA 3653 [MIGRATE_CMA] = 'C', 3654#endif 3655#ifdef CONFIG_MEMORY_ISOLATION 3656 [MIGRATE_ISOLATE] = 'I', 3657#endif 3658 }; 3659 char tmp[MIGRATE_TYPES + 1]; 3660 char *p = tmp; 3661 int i; 3662 3663 for (i = 0; i < MIGRATE_TYPES; i++) { 3664 if (type & (1 << i)) 3665 *p++ = types[i]; 3666 } 3667 3668 *p = '\0'; 3669 printk("(%s) ", tmp); 3670} 3671 3672/* 3673 * Show free area list (used inside shift_scroll-lock stuff) 3674 * We also calculate the percentage fragmentation. We do this by counting the 3675 * memory on each free list with the exception of the first item on the list. 3676 * 3677 * Bits in @filter: 3678 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 3679 * cpuset. 3680 */ 3681void show_free_areas(unsigned int filter) 3682{ 3683 unsigned long free_pcp = 0; 3684 int cpu; 3685 struct zone *zone; 3686 3687 for_each_populated_zone(zone) { 3688 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3689 continue; 3690 3691 for_each_online_cpu(cpu) 3692 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 3693 } 3694 3695 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 3696 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 3697 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n" 3698 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 3699 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 3700 " free:%lu free_pcp:%lu free_cma:%lu\n", 3701 global_page_state(NR_ACTIVE_ANON), 3702 global_page_state(NR_INACTIVE_ANON), 3703 global_page_state(NR_ISOLATED_ANON), 3704 global_page_state(NR_ACTIVE_FILE), 3705 global_page_state(NR_INACTIVE_FILE), 3706 global_page_state(NR_ISOLATED_FILE), 3707 global_page_state(NR_UNEVICTABLE), 3708 global_page_state(NR_FILE_DIRTY), 3709 global_page_state(NR_WRITEBACK), 3710 global_page_state(NR_UNSTABLE_NFS), 3711 global_page_state(NR_SLAB_RECLAIMABLE), 3712 global_page_state(NR_SLAB_UNRECLAIMABLE), 3713 global_page_state(NR_FILE_MAPPED), 3714 global_page_state(NR_SHMEM), 3715 global_page_state(NR_PAGETABLE), 3716 global_page_state(NR_BOUNCE), 3717 global_page_state(NR_FREE_PAGES), 3718 free_pcp, 3719 global_page_state(NR_FREE_CMA_PAGES)); 3720 3721 for_each_populated_zone(zone) { 3722 int i; 3723 3724 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3725 continue; 3726 3727 free_pcp = 0; 3728 for_each_online_cpu(cpu) 3729 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 3730 3731 show_node(zone); 3732 printk("%s" 3733 " free:%lukB" 3734 " min:%lukB" 3735 " low:%lukB" 3736 " high:%lukB" 3737 " active_anon:%lukB" 3738 " inactive_anon:%lukB" 3739 " active_file:%lukB" 3740 " inactive_file:%lukB" 3741 " unevictable:%lukB" 3742 " isolated(anon):%lukB" 3743 " isolated(file):%lukB" 3744 " present:%lukB" 3745 " managed:%lukB" 3746 " mlocked:%lukB" 3747 " dirty:%lukB" 3748 " writeback:%lukB" 3749 " mapped:%lukB" 3750 " 
shmem:%lukB" 3751 " slab_reclaimable:%lukB" 3752 " slab_unreclaimable:%lukB" 3753 " kernel_stack:%lukB" 3754 " pagetables:%lukB" 3755 " unstable:%lukB" 3756 " bounce:%lukB" 3757 " free_pcp:%lukB" 3758 " local_pcp:%ukB" 3759 " free_cma:%lukB" 3760 " writeback_tmp:%lukB" 3761 " pages_scanned:%lu" 3762 " all_unreclaimable? %s" 3763 "\n", 3764 zone->name, 3765 K(zone_page_state(zone, NR_FREE_PAGES)), 3766 K(min_wmark_pages(zone)), 3767 K(low_wmark_pages(zone)), 3768 K(high_wmark_pages(zone)), 3769 K(zone_page_state(zone, NR_ACTIVE_ANON)), 3770 K(zone_page_state(zone, NR_INACTIVE_ANON)), 3771 K(zone_page_state(zone, NR_ACTIVE_FILE)), 3772 K(zone_page_state(zone, NR_INACTIVE_FILE)), 3773 K(zone_page_state(zone, NR_UNEVICTABLE)), 3774 K(zone_page_state(zone, NR_ISOLATED_ANON)), 3775 K(zone_page_state(zone, NR_ISOLATED_FILE)), 3776 K(zone->present_pages), 3777 K(zone->managed_pages), 3778 K(zone_page_state(zone, NR_MLOCK)), 3779 K(zone_page_state(zone, NR_FILE_DIRTY)), 3780 K(zone_page_state(zone, NR_WRITEBACK)), 3781 K(zone_page_state(zone, NR_FILE_MAPPED)), 3782 K(zone_page_state(zone, NR_SHMEM)), 3783 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), 3784 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), 3785 zone_page_state(zone, NR_KERNEL_STACK) * 3786 THREAD_SIZE / 1024, 3787 K(zone_page_state(zone, NR_PAGETABLE)), 3788 K(zone_page_state(zone, NR_UNSTABLE_NFS)), 3789 K(zone_page_state(zone, NR_BOUNCE)), 3790 K(free_pcp), 3791 K(this_cpu_read(zone->pageset->pcp.count)), 3792 K(zone_page_state(zone, NR_FREE_CMA_PAGES)), 3793 K(zone_page_state(zone, NR_WRITEBACK_TEMP)), 3794 K(zone_page_state(zone, NR_PAGES_SCANNED)), 3795 (!zone_reclaimable(zone) ? "yes" : "no") 3796 ); 3797 printk("lowmem_reserve[]:"); 3798 for (i = 0; i < MAX_NR_ZONES; i++) 3799 printk(" %ld", zone->lowmem_reserve[i]); 3800 printk("\n"); 3801 } 3802 3803 for_each_populated_zone(zone) { 3804 unsigned int order; 3805 unsigned long nr[MAX_ORDER], flags, total = 0; 3806 unsigned char types[MAX_ORDER]; 3807 3808 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3809 continue; 3810 show_node(zone); 3811 printk("%s: ", zone->name); 3812 3813 spin_lock_irqsave(&zone->lock, flags); 3814 for (order = 0; order < MAX_ORDER; order++) { 3815 struct free_area *area = &zone->free_area[order]; 3816 int type; 3817 3818 nr[order] = area->nr_free; 3819 total += nr[order] << order; 3820 3821 types[order] = 0; 3822 for (type = 0; type < MIGRATE_TYPES; type++) { 3823 if (!list_empty(&area->free_list[type])) 3824 types[order] |= 1 << type; 3825 } 3826 } 3827 spin_unlock_irqrestore(&zone->lock, flags); 3828 for (order = 0; order < MAX_ORDER; order++) { 3829 printk("%lu*%lukB ", nr[order], K(1UL) << order); 3830 if (nr[order]) 3831 show_migration_types(types[order]); 3832 } 3833 printk("= %lukB\n", K(total)); 3834 } 3835 3836 hugetlb_show_meminfo(); 3837 3838 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 3839 3840 show_swap_cache_info(); 3841} 3842 3843static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 3844{ 3845 zoneref->zone = zone; 3846 zoneref->zone_idx = zone_idx(zone); 3847} 3848 3849/* 3850 * Builds allocation fallback zone lists. 3851 * 3852 * Add all populated zones of a node to the zonelist. 
3853 */ 3854static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 3855 int nr_zones) 3856{ 3857 struct zone *zone; 3858 enum zone_type zone_type = MAX_NR_ZONES; 3859 3860 do { 3861 zone_type--; 3862 zone = pgdat->node_zones + zone_type; 3863 if (populated_zone(zone)) { 3864 zoneref_set_zone(zone, 3865 &zonelist->_zonerefs[nr_zones++]); 3866 check_highest_zone(zone_type); 3867 } 3868 } while (zone_type); 3869 3870 return nr_zones; 3871} 3872 3873 3874/* 3875 * zonelist_order: 3876 * 0 = automatic detection of better ordering. 3877 * 1 = order by ([node] distance, -zonetype) 3878 * 2 = order by (-zonetype, [node] distance) 3879 * 3880 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create 3881 * the same zonelist. So only NUMA can configure this param. 3882 */ 3883#define ZONELIST_ORDER_DEFAULT 0 3884#define ZONELIST_ORDER_NODE 1 3885#define ZONELIST_ORDER_ZONE 2 3886 3887/* zonelist order in the kernel. 3888 * set_zonelist_order() will set this to NODE or ZONE. 3889 */ 3890static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; 3891static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; 3892 3893 3894#ifdef CONFIG_NUMA 3895/* The value user specified ....changed by config */ 3896static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; 3897/* string for sysctl */ 3898#define NUMA_ZONELIST_ORDER_LEN 16 3899char numa_zonelist_order[16] = "default"; 3900 3901/* 3902 * interface for configure zonelist ordering. 3903 * command line option "numa_zonelist_order" 3904 * = "[dD]efault - default, automatic configuration. 3905 * = "[nN]ode - order by node locality, then by zone within node 3906 * = "[zZ]one - order by zone, then by locality within zone 3907 */ 3908 3909static int __parse_numa_zonelist_order(char *s) 3910{ 3911 if (*s == 'd' || *s == 'D') { 3912 user_zonelist_order = ZONELIST_ORDER_DEFAULT; 3913 } else if (*s == 'n' || *s == 'N') { 3914 user_zonelist_order = ZONELIST_ORDER_NODE; 3915 } else if (*s == 'z' || *s == 'Z') { 3916 user_zonelist_order = ZONELIST_ORDER_ZONE; 3917 } else { 3918 printk(KERN_WARNING 3919 "Ignoring invalid numa_zonelist_order value: " 3920 "%s\n", s); 3921 return -EINVAL; 3922 } 3923 return 0; 3924} 3925 3926static __init int setup_numa_zonelist_order(char *s) 3927{ 3928 int ret; 3929 3930 if (!s) 3931 return 0; 3932 3933 ret = __parse_numa_zonelist_order(s); 3934 if (ret == 0) 3935 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN); 3936 3937 return ret; 3938} 3939early_param("numa_zonelist_order", setup_numa_zonelist_order); 3940 3941/* 3942 * sysctl handler for numa_zonelist_order 3943 */ 3944int numa_zonelist_order_handler(struct ctl_table *table, int write, 3945 void __user *buffer, size_t *length, 3946 loff_t *ppos) 3947{ 3948 char saved_string[NUMA_ZONELIST_ORDER_LEN]; 3949 int ret; 3950 static DEFINE_MUTEX(zl_order_mutex); 3951 3952 mutex_lock(&zl_order_mutex); 3953 if (write) { 3954 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) { 3955 ret = -EINVAL; 3956 goto out; 3957 } 3958 strcpy(saved_string, (char *)table->data); 3959 } 3960 ret = proc_dostring(table, write, buffer, length, ppos); 3961 if (ret) 3962 goto out; 3963 if (write) { 3964 int oldval = user_zonelist_order; 3965 3966 ret = __parse_numa_zonelist_order((char *)table->data); 3967 if (ret) { 3968 /* 3969 * bogus value. 
restore saved string 3970 */ 3971 strncpy((char *)table->data, saved_string, 3972 NUMA_ZONELIST_ORDER_LEN); 3973 user_zonelist_order = oldval; 3974 } else if (oldval != user_zonelist_order) { 3975 mutex_lock(&zonelists_mutex); 3976 build_all_zonelists(NULL, NULL); 3977 mutex_unlock(&zonelists_mutex); 3978 } 3979 } 3980out: 3981 mutex_unlock(&zl_order_mutex); 3982 return ret; 3983} 3984 3985 3986#define MAX_NODE_LOAD (nr_online_nodes) 3987static int node_load[MAX_NUMNODES]; 3988 3989/** 3990 * find_next_best_node - find the next node that should appear in a given node's fallback list 3991 * @node: node whose fallback list we're appending 3992 * @used_node_mask: nodemask_t of already used nodes 3993 * 3994 * We use a number of factors to determine which is the next node that should 3995 * appear on a given node's fallback list. The node should not have appeared 3996 * already in @node's fallback list, and it should be the next closest node 3997 * according to the distance array (which contains arbitrary distance values 3998 * from each node to each node in the system), and should also prefer nodes 3999 * with no CPUs, since presumably they'll have very little allocation pressure 4000 * on them otherwise. 4001 * It returns -1 if no node is found. 4002 */ 4003static int find_next_best_node(int node, nodemask_t *used_node_mask) 4004{ 4005 int n, val; 4006 int min_val = INT_MAX; 4007 int best_node = NUMA_NO_NODE; 4008 const struct cpumask *tmp = cpumask_of_node(0); 4009 4010 /* Use the local node if we haven't already */ 4011 if (!node_isset(node, *used_node_mask)) { 4012 node_set(node, *used_node_mask); 4013 return node; 4014 } 4015 4016 for_each_node_state(n, N_MEMORY) { 4017 4018 /* Don't want a node to appear more than once */ 4019 if (node_isset(n, *used_node_mask)) 4020 continue; 4021 4022 /* Use the distance array to find the distance */ 4023 val = node_distance(node, n); 4024 4025 /* Penalize nodes under us ("prefer the next node") */ 4026 val += (n < node); 4027 4028 /* Give preference to headless and unused nodes */ 4029 tmp = cpumask_of_node(n); 4030 if (!cpumask_empty(tmp)) 4031 val += PENALTY_FOR_NODE_WITH_CPUS; 4032 4033 /* Slight preference for less loaded node */ 4034 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 4035 val += node_load[n]; 4036 4037 if (val < min_val) { 4038 min_val = val; 4039 best_node = n; 4040 } 4041 } 4042 4043 if (best_node >= 0) 4044 node_set(best_node, *used_node_mask); 4045 4046 return best_node; 4047} 4048 4049 4050/* 4051 * Build zonelists ordered by node and zones within node. 4052 * This results in maximum locality--normal zone overflows into local 4053 * DMA zone, if any--but risks exhausting DMA zone. 4054 */ 4055static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 4056{ 4057 int j; 4058 struct zonelist *zonelist; 4059 4060 zonelist = &pgdat->node_zonelists[0]; 4061 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 4062 ; 4063 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 4064 zonelist->_zonerefs[j].zone = NULL; 4065 zonelist->_zonerefs[j].zone_idx = 0; 4066} 4067 4068/* 4069 * Build gfp_thisnode zonelists 4070 */ 4071static void build_thisnode_zonelists(pg_data_t *pgdat) 4072{ 4073 int j; 4074 struct zonelist *zonelist; 4075 4076 zonelist = &pgdat->node_zonelists[1]; 4077 j = build_zonelists_node(pgdat, zonelist, 0); 4078 zonelist->_zonerefs[j].zone = NULL; 4079 zonelist->_zonerefs[j].zone_idx = 0; 4080} 4081 4082/* 4083 * Build zonelists ordered by zone and nodes within zones. 
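 * (A rough sketch, assuming two nodes that each populate ZONE_DMA and
 *  ZONE_NORMAL, with node 0 local: the resulting fallback order is
 *  Normal[node0], Normal[node1], DMA[node0], DMA[node1].)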
4084 * This results in conserving DMA zone[s] until all Normal memory is 4085 * exhausted, but results in overflowing to remote node while memory 4086 * may still exist in local DMA zone. 4087 */ 4088static int node_order[MAX_NUMNODES]; 4089 4090static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) 4091{ 4092 int pos, j, node; 4093 int zone_type; /* needs to be signed */ 4094 struct zone *z; 4095 struct zonelist *zonelist; 4096 4097 zonelist = &pgdat->node_zonelists[0]; 4098 pos = 0; 4099 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) { 4100 for (j = 0; j < nr_nodes; j++) { 4101 node = node_order[j]; 4102 z = &NODE_DATA(node)->node_zones[zone_type]; 4103 if (populated_zone(z)) { 4104 zoneref_set_zone(z, 4105 &zonelist->_zonerefs[pos++]); 4106 check_highest_zone(zone_type); 4107 } 4108 } 4109 } 4110 zonelist->_zonerefs[pos].zone = NULL; 4111 zonelist->_zonerefs[pos].zone_idx = 0; 4112} 4113 4114#if defined(CONFIG_64BIT) 4115/* 4116 * Devices that require DMA32/DMA are relatively rare and do not justify a 4117 * penalty to every machine in case the specialised case applies. Default 4118 * to Node-ordering on 64-bit NUMA machines 4119 */ 4120static int default_zonelist_order(void) 4121{ 4122 return ZONELIST_ORDER_NODE; 4123} 4124#else 4125/* 4126 * On 32-bit, the Normal zone needs to be preserved for allocations accessible 4127 * by the kernel. If processes running on node 0 deplete the low memory zone 4128 * then reclaim will occur more frequency increasing stalls and potentially 4129 * be easier to OOM if a large percentage of the zone is under writeback or 4130 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set. 4131 * Hence, default to zone ordering on 32-bit. 4132 */ 4133static int default_zonelist_order(void) 4134{ 4135 return ZONELIST_ORDER_ZONE; 4136} 4137#endif /* CONFIG_64BIT */ 4138 4139static void set_zonelist_order(void) 4140{ 4141 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 4142 current_zonelist_order = default_zonelist_order(); 4143 else 4144 current_zonelist_order = user_zonelist_order; 4145} 4146 4147static void build_zonelists(pg_data_t *pgdat) 4148{ 4149 int j, node, load; 4150 enum zone_type i; 4151 nodemask_t used_mask; 4152 int local_node, prev_node; 4153 struct zonelist *zonelist; 4154 unsigned int order = current_zonelist_order; 4155 4156 /* initialize zonelists */ 4157 for (i = 0; i < MAX_ZONELISTS; i++) { 4158 zonelist = pgdat->node_zonelists + i; 4159 zonelist->_zonerefs[0].zone = NULL; 4160 zonelist->_zonerefs[0].zone_idx = 0; 4161 } 4162 4163 /* NUMA-aware ordering of nodes */ 4164 local_node = pgdat->node_id; 4165 load = nr_online_nodes; 4166 prev_node = local_node; 4167 nodes_clear(used_mask); 4168 4169 memset(node_order, 0, sizeof(node_order)); 4170 j = 0; 4171 4172 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 4173 /* 4174 * We don't want to pressure a particular node. 4175 * So adding penalty to the first node in same 4176 * distance group to make it round-robin. 4177 */ 4178 if (node_distance(local_node, node) != 4179 node_distance(local_node, prev_node)) 4180 node_load[node] = load; 4181 4182 prev_node = node; 4183 load--; 4184 if (order == ZONELIST_ORDER_NODE) 4185 build_zonelists_in_node_order(pgdat, node); 4186 else 4187 node_order[j++] = node; /* remember order */ 4188 } 4189 4190 if (order == ZONELIST_ORDER_ZONE) { 4191 /* calculate node order -- i.e., DMA last! 
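		 *
		 * At this point node_order[0..j-1] holds the local node first
		 * and then the remaining nodes as picked by
		 * find_next_best_node(), i.e. roughly by increasing NUMA
		 * distance; the node_load penalty assigned above implements
		 * the round-robin within an equal-distance group that the
		 * comment in the loop describes.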
*/ 4192 build_zonelists_in_zone_order(pgdat, j); 4193 } 4194 4195 build_thisnode_zonelists(pgdat); 4196} 4197 4198#ifdef CONFIG_HAVE_MEMORYLESS_NODES 4199/* 4200 * Return node id of node used for "local" allocations. 4201 * I.e., first node id of first zone in arg node's generic zonelist. 4202 * Used for initializing percpu 'numa_mem', which is used primarily 4203 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 4204 */ 4205int local_memory_node(int node) 4206{ 4207 struct zone *zone; 4208 4209 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 4210 gfp_zone(GFP_KERNEL), 4211 NULL, 4212 &zone); 4213 return zone->node; 4214} 4215#endif 4216 4217#else /* CONFIG_NUMA */ 4218 4219static void set_zonelist_order(void) 4220{ 4221 current_zonelist_order = ZONELIST_ORDER_ZONE; 4222} 4223 4224static void build_zonelists(pg_data_t *pgdat) 4225{ 4226 int node, local_node; 4227 enum zone_type j; 4228 struct zonelist *zonelist; 4229 4230 local_node = pgdat->node_id; 4231 4232 zonelist = &pgdat->node_zonelists[0]; 4233 j = build_zonelists_node(pgdat, zonelist, 0); 4234 4235 /* 4236 * Now we build the zonelist so that it contains the zones 4237 * of all the other nodes. 4238 * We don't want to pressure a particular node, so when 4239 * building the zones for node N, we make sure that the 4240 * zones coming right after the local ones are those from 4241 * node N+1 (modulo N) 4242 */ 4243 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 4244 if (!node_online(node)) 4245 continue; 4246 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 4247 } 4248 for (node = 0; node < local_node; node++) { 4249 if (!node_online(node)) 4250 continue; 4251 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 4252 } 4253 4254 zonelist->_zonerefs[j].zone = NULL; 4255 zonelist->_zonerefs[j].zone_idx = 0; 4256} 4257 4258#endif /* CONFIG_NUMA */ 4259 4260/* 4261 * Boot pageset table. One per cpu which is going to be used for all 4262 * zones and all nodes. The parameters will be set in such a way 4263 * that an item put on a list will immediately be handed over to 4264 * the buddy list. This is safe since pageset manipulation is done 4265 * with interrupts disabled. 4266 * 4267 * The boot_pagesets must be kept even after bootup is complete for 4268 * unused processors and/or zones. They do play a role for bootstrapping 4269 * hotplugged processors. 4270 * 4271 * zoneinfo_show() and maybe other functions do 4272 * not check if the processor is online before following the pageset pointer. 4273 * Other parts of the kernel may not check if the zone is available. 4274 */ 4275static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); 4276static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); 4277static void setup_zone_pageset(struct zone *zone); 4278 4279/* 4280 * Global mutex to protect against size modification of zonelists 4281 * as well as to serialize pageset setup for the new populated zone. 
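 *
 * Typical use outside early boot (a sketch mirroring the
 * numa_zonelist_order sysctl handler above):
 *
 *	mutex_lock(&zonelists_mutex);
 *	build_all_zonelists(NULL, NULL);
 *	mutex_unlock(&zonelists_mutex);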
4282 */ 4283DEFINE_MUTEX(zonelists_mutex); 4284 4285/* return values int ....just for stop_machine() */ 4286static int __build_all_zonelists(void *data) 4287{ 4288 int nid; 4289 int cpu; 4290 pg_data_t *self = data; 4291 4292#ifdef CONFIG_NUMA 4293 memset(node_load, 0, sizeof(node_load)); 4294#endif 4295 4296 if (self && !node_online(self->node_id)) { 4297 build_zonelists(self); 4298 } 4299 4300 for_each_online_node(nid) { 4301 pg_data_t *pgdat = NODE_DATA(nid); 4302 4303 build_zonelists(pgdat); 4304 } 4305 4306 /* 4307 * Initialize the boot_pagesets that are going to be used 4308 * for bootstrapping processors. The real pagesets for 4309 * each zone will be allocated later when the per cpu 4310 * allocator is available. 4311 * 4312 * boot_pagesets are used also for bootstrapping offline 4313 * cpus if the system is already booted because the pagesets 4314 * are needed to initialize allocators on a specific cpu too. 4315 * F.e. the percpu allocator needs the page allocator which 4316 * needs the percpu allocator in order to allocate its pagesets 4317 * (a chicken-egg dilemma). 4318 */ 4319 for_each_possible_cpu(cpu) { 4320 setup_pageset(&per_cpu(boot_pageset, cpu), 0); 4321 4322#ifdef CONFIG_HAVE_MEMORYLESS_NODES 4323 /* 4324 * We now know the "local memory node" for each node-- 4325 * i.e., the node of the first zone in the generic zonelist. 4326 * Set up numa_mem percpu variable for on-line cpus. During 4327 * boot, only the boot cpu should be on-line; we'll init the 4328 * secondary cpus' numa_mem as they come on-line. During 4329 * node/memory hotplug, we'll fixup all on-line cpus. 4330 */ 4331 if (cpu_online(cpu)) 4332 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 4333#endif 4334 } 4335 4336 return 0; 4337} 4338 4339static noinline void __init 4340build_all_zonelists_init(void) 4341{ 4342 __build_all_zonelists(NULL); 4343 mminit_verify_zonelist(); 4344 cpuset_init_current_mems_allowed(); 4345} 4346 4347/* 4348 * Called with zonelists_mutex held always 4349 * unless system_state == SYSTEM_BOOTING. 4350 * 4351 * __ref due to (1) call of __meminit annotated setup_zone_pageset 4352 * [we're only called with non-NULL zone through __meminit paths] and 4353 * (2) call of __init annotated helper build_all_zonelists_init 4354 * [protected by SYSTEM_BOOTING]. 4355 */ 4356void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) 4357{ 4358 set_zonelist_order(); 4359 4360 if (system_state == SYSTEM_BOOTING) { 4361 build_all_zonelists_init(); 4362 } else { 4363#ifdef CONFIG_MEMORY_HOTPLUG 4364 if (zone) 4365 setup_zone_pageset(zone); 4366#endif 4367 /* we have to stop all cpus to guarantee there is no user 4368 of zonelist */ 4369 stop_machine(__build_all_zonelists, pgdat, NULL); 4370 /* cpuset refresh routine should be here */ 4371 } 4372 vm_total_pages = nr_free_pagecache_pages(); 4373 /* 4374 * Disable grouping by mobility if the number of pages in the 4375 * system is too low to allow the mechanism to work. It would be 4376 * more accurate, but expensive to check per-zone. This check is 4377 * made on memory-hotadd so a system can start with mobility 4378 * disabled and enable it later 4379 */ 4380 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 4381 page_group_by_mobility_disabled = 1; 4382 else 4383 page_group_by_mobility_disabled = 0; 4384 4385 pr_info("Built %i zonelists in %s order, mobility grouping %s. " 4386 "Total pages: %ld\n", 4387 nr_online_nodes, 4388 zonelist_order_name[current_zonelist_order], 4389 page_group_by_mobility_disabled ? 
"off" : "on", 4390 vm_total_pages); 4391#ifdef CONFIG_NUMA 4392 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 4393#endif 4394} 4395 4396/* 4397 * Helper functions to size the waitqueue hash table. 4398 * Essentially these want to choose hash table sizes sufficiently 4399 * large so that collisions trying to wait on pages are rare. 4400 * But in fact, the number of active page waitqueues on typical 4401 * systems is ridiculously low, less than 200. So this is even 4402 * conservative, even though it seems large. 4403 * 4404 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 4405 * waitqueues, i.e. the size of the waitq table given the number of pages. 4406 */ 4407#define PAGES_PER_WAITQUEUE 256 4408 4409#ifndef CONFIG_MEMORY_HOTPLUG 4410static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 4411{ 4412 unsigned long size = 1; 4413 4414 pages /= PAGES_PER_WAITQUEUE; 4415 4416 while (size < pages) 4417 size <<= 1; 4418 4419 /* 4420 * Once we have dozens or even hundreds of threads sleeping 4421 * on IO we've got bigger problems than wait queue collision. 4422 * Limit the size of the wait table to a reasonable size. 4423 */ 4424 size = min(size, 4096UL); 4425 4426 return max(size, 4UL); 4427} 4428#else 4429/* 4430 * A zone's size might be changed by hot-add, so it is not possible to determine 4431 * a suitable size for its wait_table. So we use the maximum size now. 4432 * 4433 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 4434 * 4435 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 4436 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 4437 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 4438 * 4439 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 4440 * or more by the traditional way. (See above). It equals: 4441 * 4442 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 4443 * ia64(16K page size) : = ( 8G + 4M)byte. 4444 * powerpc (64K page size) : = (32G +16M)byte. 4445 */ 4446static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 4447{ 4448 return 4096UL; 4449} 4450#endif 4451 4452/* 4453 * This is an integer logarithm so that shifts can be used later 4454 * to extract the more random high bits from the multiplicative 4455 * hash function before the remainder is taken. 4456 */ 4457static inline unsigned long wait_table_bits(unsigned long size) 4458{ 4459 return ffz(~size); 4460} 4461 4462/* 4463 * Initially all pages are reserved - free ones are freed 4464 * up by free_all_bootmem() once the early boot process is 4465 * done. Non-atomic initialization, single-pass. 4466 */ 4467void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 4468 unsigned long start_pfn, enum memmap_context context) 4469{ 4470 pg_data_t *pgdat = NODE_DATA(nid); 4471 unsigned long end_pfn = start_pfn + size; 4472 unsigned long pfn; 4473 struct zone *z; 4474 unsigned long nr_initialised = 0; 4475 4476 if (highest_memmap_pfn < end_pfn - 1) 4477 highest_memmap_pfn = end_pfn - 1; 4478 4479 z = &pgdat->node_zones[zone]; 4480 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 4481 /* 4482 * There can be holes in boot-time mem_map[]s 4483 * handed to this function. They do not 4484 * exist on hotplugged memory. 
4485 */ 4486 if (context == MEMMAP_EARLY) { 4487 if (!early_pfn_valid(pfn)) 4488 continue; 4489 if (!early_pfn_in_nid(pfn, nid)) 4490 continue; 4491 if (!update_defer_init(pgdat, pfn, end_pfn, 4492 &nr_initialised)) 4493 break; 4494 } 4495 4496 /* 4497 * Mark the block movable so that blocks are reserved for 4498 * movable at startup. This will force kernel allocations 4499 * to reserve their blocks rather than leaking throughout 4500 * the address space during boot when many long-lived 4501 * kernel allocations are made. 4502 * 4503 * bitmap is created for zone's valid pfn range. but memmap 4504 * can be created for invalid pages (for alignment) 4505 * check here not to call set_pageblock_migratetype() against 4506 * pfn out of zone. 4507 */ 4508 if (!(pfn & (pageblock_nr_pages - 1))) { 4509 struct page *page = pfn_to_page(pfn); 4510 4511 __init_single_page(page, pfn, zone, nid); 4512 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4513 } else { 4514 __init_single_pfn(pfn, zone, nid); 4515 } 4516 } 4517} 4518 4519static void __meminit zone_init_free_lists(struct zone *zone) 4520{ 4521 unsigned int order, t; 4522 for_each_migratetype_order(order, t) { 4523 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 4524 zone->free_area[order].nr_free = 0; 4525 } 4526} 4527 4528#ifndef __HAVE_ARCH_MEMMAP_INIT 4529#define memmap_init(size, nid, zone, start_pfn) \ 4530 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 4531#endif 4532 4533static int zone_batchsize(struct zone *zone) 4534{ 4535#ifdef CONFIG_MMU 4536 int batch; 4537 4538 /* 4539 * The per-cpu-pages pools are set to around 1000th of the 4540 * size of the zone. But no more than 1/2 of a meg. 4541 * 4542 * OK, so we don't know how big the cache is. So guess. 4543 */ 4544 batch = zone->managed_pages / 1024; 4545 if (batch * PAGE_SIZE > 512 * 1024) 4546 batch = (512 * 1024) / PAGE_SIZE; 4547 batch /= 4; /* We effectively *= 4 below */ 4548 if (batch < 1) 4549 batch = 1; 4550 4551 /* 4552 * Clamp the batch to a 2^n - 1 value. Having a power 4553 * of 2 value was found to be more likely to have 4554 * suboptimal cache aliasing properties in some cases. 4555 * 4556 * For example if 2 tasks are alternately allocating 4557 * batches of pages, one task can end up with a lot 4558 * of pages of one half of the possible page colors 4559 * and the other with pages of the other colors. 4560 */ 4561 batch = rounddown_pow_of_two(batch + batch/2) - 1; 4562 4563 return batch; 4564 4565#else 4566 /* The deferral and batching of frees should be suppressed under NOMMU 4567 * conditions. 4568 * 4569 * The problem is that NOMMU needs to be able to allocate large chunks 4570 * of contiguous memory as there's no hardware page translation to 4571 * assemble apparent contiguous memory from discontiguous pages. 4572 * 4573 * Queueing large contiguous runs of pages for batching, however, 4574 * causes the pages to actually be freed in smaller chunks. As there 4575 * can be a significant delay between the individual batches being 4576 * recycled, this leads to the once large chunks of space being 4577 * fragmented and becoming unavailable for high-order allocations. 4578 */ 4579 return 0; 4580#endif 4581} 4582 4583/* 4584 * pcp->high and pcp->batch values are related and dependent on one another: 4585 * ->batch must never be higher then ->high. 4586 * The following function updates them in a safe manner without read side 4587 * locking. 
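 *
 * pageset_update() first drops ->batch to a failsafe value of 1 and only
 * then writes ->high, presumably so that a free path sampling both fields
 * is unlikely to observe ->batch larger than ->high mid-update.
 *
 * (A worked example of the sizing: a zone with 1GiB of managed 4K pages
 *  has managed_pages = 262144, so zone_batchsize() computes 262144/1024 =
 *  256, caps it at 512KiB/4KiB = 128, divides by 4 to get 32, and rounds
 *  32 + 16 down to a 2^n - 1 value, giving batch = 31; pageset_set_batch()
 *  then sets ->high = 6 * 31 = 186.)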
4588 * 4589 * Any new users of pcp->batch and pcp->high should ensure they can cope with 4590 * those fields changing asynchronously (acording the the above rule). 4591 * 4592 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 4593 * outside of boot time (or some other assurance that no concurrent updaters 4594 * exist). 4595 */ 4596static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 4597 unsigned long batch) 4598{ 4599 /* start with a fail safe value for batch */ 4600 pcp->batch = 1; 4601 smp_wmb(); 4602 4603 /* Update high, then batch, in order */ 4604 pcp->high = high; 4605 smp_wmb(); 4606 4607 pcp->batch = batch; 4608} 4609 4610/* a companion to pageset_set_high() */ 4611static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch) 4612{ 4613 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch)); 4614} 4615 4616static void pageset_init(struct per_cpu_pageset *p) 4617{ 4618 struct per_cpu_pages *pcp; 4619 int migratetype; 4620 4621 memset(p, 0, sizeof(*p)); 4622 4623 pcp = &p->pcp; 4624 pcp->count = 0; 4625 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) 4626 INIT_LIST_HEAD(&pcp->lists[migratetype]); 4627} 4628 4629static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 4630{ 4631 pageset_init(p); 4632 pageset_set_batch(p, batch); 4633} 4634 4635/* 4636 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist 4637 * to the value high for the pageset p. 4638 */ 4639static void pageset_set_high(struct per_cpu_pageset *p, 4640 unsigned long high) 4641{ 4642 unsigned long batch = max(1UL, high / 4); 4643 if ((high / 4) > (PAGE_SHIFT * 8)) 4644 batch = PAGE_SHIFT * 8; 4645 4646 pageset_update(&p->pcp, high, batch); 4647} 4648 4649static void pageset_set_high_and_batch(struct zone *zone, 4650 struct per_cpu_pageset *pcp) 4651{ 4652 if (percpu_pagelist_fraction) 4653 pageset_set_high(pcp, 4654 (zone->managed_pages / 4655 percpu_pagelist_fraction)); 4656 else 4657 pageset_set_batch(pcp, zone_batchsize(zone)); 4658} 4659 4660static void __meminit zone_pageset_init(struct zone *zone, int cpu) 4661{ 4662 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); 4663 4664 pageset_init(pcp); 4665 pageset_set_high_and_batch(zone, pcp); 4666} 4667 4668static void __meminit setup_zone_pageset(struct zone *zone) 4669{ 4670 int cpu; 4671 zone->pageset = alloc_percpu(struct per_cpu_pageset); 4672 for_each_possible_cpu(cpu) 4673 zone_pageset_init(zone, cpu); 4674} 4675 4676/* 4677 * Allocate per cpu pagesets and initialize them. 4678 * Before this call only boot pagesets were available. 4679 */ 4680void __init setup_per_cpu_pageset(void) 4681{ 4682 struct zone *zone; 4683 4684 for_each_populated_zone(zone) 4685 setup_zone_pageset(zone); 4686} 4687 4688static noinline __init_refok 4689int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 4690{ 4691 int i; 4692 size_t alloc_size; 4693 4694 /* 4695 * The per-page waitqueue mechanism uses hashed waitqueues 4696 * per zone. 
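	 *
	 * (A rough example, assuming 4K pages and !CONFIG_MEMORY_HOTPLUG: a
	 *  4GiB zone has ~1M pages, and 1M / PAGES_PER_WAITQUEUE(256) = 4096,
	 *  which is also the cap, so the table gets 4096 wait_queue_head_t
	 *  entries and wait_table_bits() comes out as 12.  With memory
	 *  hotplug the maximum of 4096 entries is always used instead.)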
4697 */ 4698 zone->wait_table_hash_nr_entries = 4699 wait_table_hash_nr_entries(zone_size_pages); 4700 zone->wait_table_bits = 4701 wait_table_bits(zone->wait_table_hash_nr_entries); 4702 alloc_size = zone->wait_table_hash_nr_entries 4703 * sizeof(wait_queue_head_t); 4704 4705 if (!slab_is_available()) { 4706 zone->wait_table = (wait_queue_head_t *) 4707 memblock_virt_alloc_node_nopanic( 4708 alloc_size, zone->zone_pgdat->node_id); 4709 } else { 4710 /* 4711 * This case means that a zone whose size was 0 gets new memory 4712 * via memory hot-add. 4713 * But it may be the case that a new node was hot-added. In 4714 * this case vmalloc() will not be able to use this new node's 4715 * memory - this wait_table must be initialized to use this new 4716 * node itself as well. 4717 * To use this new node's memory, further consideration will be 4718 * necessary. 4719 */ 4720 zone->wait_table = vmalloc(alloc_size); 4721 } 4722 if (!zone->wait_table) 4723 return -ENOMEM; 4724 4725 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i) 4726 init_waitqueue_head(zone->wait_table + i); 4727 4728 return 0; 4729} 4730 4731static __meminit void zone_pcp_init(struct zone *zone) 4732{ 4733 /* 4734 * per cpu subsystem is not up at this point. The following code 4735 * relies on the ability of the linker to provide the 4736 * offset of a (static) per cpu variable into the per cpu area. 4737 */ 4738 zone->pageset = &boot_pageset; 4739 4740 if (populated_zone(zone)) 4741 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", 4742 zone->name, zone->present_pages, 4743 zone_batchsize(zone)); 4744} 4745 4746int __meminit init_currently_empty_zone(struct zone *zone, 4747 unsigned long zone_start_pfn, 4748 unsigned long size) 4749{ 4750 struct pglist_data *pgdat = zone->zone_pgdat; 4751 int ret; 4752 ret = zone_wait_table_init(zone, size); 4753 if (ret) 4754 return ret; 4755 pgdat->nr_zones = zone_idx(zone) + 1; 4756 4757 zone->zone_start_pfn = zone_start_pfn; 4758 4759 mminit_dprintk(MMINIT_TRACE, "memmap_init", 4760 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 4761 pgdat->node_id, 4762 (unsigned long)zone_idx(zone), 4763 zone_start_pfn, (zone_start_pfn + size)); 4764 4765 zone_init_free_lists(zone); 4766 4767 return 0; 4768} 4769 4770#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4771#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 4772 4773/* 4774 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 4775 */ 4776int __meminit __early_pfn_to_nid(unsigned long pfn, 4777 struct mminit_pfnnid_cache *state) 4778{ 4779 unsigned long start_pfn, end_pfn; 4780 int nid; 4781 4782 if (state->last_start <= pfn && pfn < state->last_end) 4783 return state->last_nid; 4784 4785 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 4786 if (nid != -1) { 4787 state->last_start = start_pfn; 4788 state->last_end = end_pfn; 4789 state->last_nid = nid; 4790 } 4791 4792 return nid; 4793} 4794#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 4795 4796/** 4797 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range 4798 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 4799 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid 4800 * 4801 * If an architecture guarantees that all ranges registered contain no holes 4802 * and may be freed, this this function may be used instead of calling 4803 * memblock_free_early_nid() manually. 
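 *
 * (Illustrative numbers only: for an active range of pfns
 *  [0x10000, 0x40000) with max_low_pfn = 0x20000, only
 *  [0x10000, 0x20000) is handed to memblock_free_early_nid(); a range
 *  lying entirely above max_low_pfn is skipped, since start_pfn is no
 *  longer below end_pfn after the clamping below.)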
4804 */ 4805void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) 4806{ 4807 unsigned long start_pfn, end_pfn; 4808 int i, this_nid; 4809 4810 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) { 4811 start_pfn = min(start_pfn, max_low_pfn); 4812 end_pfn = min(end_pfn, max_low_pfn); 4813 4814 if (start_pfn < end_pfn) 4815 memblock_free_early_nid(PFN_PHYS(start_pfn), 4816 (end_pfn - start_pfn) << PAGE_SHIFT, 4817 this_nid); 4818 } 4819} 4820 4821/** 4822 * sparse_memory_present_with_active_regions - Call memory_present for each active range 4823 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 4824 * 4825 * If an architecture guarantees that all ranges registered contain no holes and may 4826 * be freed, this function may be used instead of calling memory_present() manually. 4827 */ 4828void __init sparse_memory_present_with_active_regions(int nid) 4829{ 4830 unsigned long start_pfn, end_pfn; 4831 int i, this_nid; 4832 4833 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) 4834 memory_present(this_nid, start_pfn, end_pfn); 4835} 4836 4837/** 4838 * get_pfn_range_for_nid - Return the start and end page frames for a node 4839 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 4840 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 4841 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 4842 * 4843 * It returns the start and end page frame of a node based on information 4844 * provided by memblock_set_node(). If called for a node 4845 * with no available memory, a warning is printed and the start and end 4846 * PFNs will be 0. 4847 */ 4848void __meminit get_pfn_range_for_nid(unsigned int nid, 4849 unsigned long *start_pfn, unsigned long *end_pfn) 4850{ 4851 unsigned long this_start_pfn, this_end_pfn; 4852 int i; 4853 4854 *start_pfn = -1UL; 4855 *end_pfn = 0; 4856 4857 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 4858 *start_pfn = min(*start_pfn, this_start_pfn); 4859 *end_pfn = max(*end_pfn, this_end_pfn); 4860 } 4861 4862 if (*start_pfn == -1UL) 4863 *start_pfn = 0; 4864} 4865 4866/* 4867 * This finds a zone that can be used for ZONE_MOVABLE pages. The 4868 * assumption is made that zones within a node are ordered in monotonic 4869 * increasing memory addresses so that the "highest" populated zone is used 4870 */ 4871static void __init find_usable_zone_for_movable(void) 4872{ 4873 int zone_index; 4874 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 4875 if (zone_index == ZONE_MOVABLE) 4876 continue; 4877 4878 if (arch_zone_highest_possible_pfn[zone_index] > 4879 arch_zone_lowest_possible_pfn[zone_index]) 4880 break; 4881 } 4882 4883 VM_BUG_ON(zone_index == -1); 4884 movable_zone = zone_index; 4885} 4886 4887/* 4888 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 4889 * because it is sized independent of architecture. Unlike the other zones, 4890 * the starting point for ZONE_MOVABLE is not fixed. It may be different 4891 * in each node depending on the size of each node and how evenly kernelcore 4892 * is distributed. This helper function adjusts the zone ranges 4893 * provided by the architecture for a given node by using the end of the 4894 * highest usable zone for ZONE_MOVABLE. 
This preserves the assumption that
 * zones within a node are in monotonically increasing order of memory addresses.
 */
static void __meminit adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
				*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}

/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *ignored)
{
	unsigned long zone_start_pfn, zone_end_pfn;

	/* When hotadding a new node from cpu_up(), the node should be empty */
	if (!node_start_pfn && !node_end_pfn)
		return 0;

	/* Get the start and end of the zone */
	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
	adjust_zone_range_for_zone_movable(nid, zone_type,
				node_start_pfn, node_end_pfn,
				&zone_start_pfn, &zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
	zone_start_pfn = max(zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return zone_end_pfn - zone_start_pfn;
}

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __meminit __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
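 *
 * (For instance, if memblock only reports [0x1000, 0x2000) and
 *  [0x3000, 0x4000) inside a requested range of [0x1000, 0x4000), the
 *  result is 0x3000 - 0x1000 - 0x1000 = 0x1000 absent page frames.)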
4984 */ 4985unsigned long __init absent_pages_in_range(unsigned long start_pfn, 4986 unsigned long end_pfn) 4987{ 4988 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 4989} 4990 4991/* Return the number of page frames in holes in a zone on a node */ 4992static unsigned long __meminit zone_absent_pages_in_node(int nid, 4993 unsigned long zone_type, 4994 unsigned long node_start_pfn, 4995 unsigned long node_end_pfn, 4996 unsigned long *ignored) 4997{ 4998 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 4999 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 5000 unsigned long zone_start_pfn, zone_end_pfn; 5001 5002 /* When hotadd a new node from cpu_up(), the node should be empty */ 5003 if (!node_start_pfn && !node_end_pfn) 5004 return 0; 5005 5006 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 5007 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 5008 5009 adjust_zone_range_for_zone_movable(nid, zone_type, 5010 node_start_pfn, node_end_pfn, 5011 &zone_start_pfn, &zone_end_pfn); 5012 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 5013} 5014 5015#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5016static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 5017 unsigned long zone_type, 5018 unsigned long node_start_pfn, 5019 unsigned long node_end_pfn, 5020 unsigned long *zones_size) 5021{ 5022 return zones_size[zone_type]; 5023} 5024 5025static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 5026 unsigned long zone_type, 5027 unsigned long node_start_pfn, 5028 unsigned long node_end_pfn, 5029 unsigned long *zholes_size) 5030{ 5031 if (!zholes_size) 5032 return 0; 5033 5034 return zholes_size[zone_type]; 5035} 5036 5037#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5038 5039static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 5040 unsigned long node_start_pfn, 5041 unsigned long node_end_pfn, 5042 unsigned long *zones_size, 5043 unsigned long *zholes_size) 5044{ 5045 unsigned long realtotalpages = 0, totalpages = 0; 5046 enum zone_type i; 5047 5048 for (i = 0; i < MAX_NR_ZONES; i++) { 5049 struct zone *zone = pgdat->node_zones + i; 5050 unsigned long size, real_size; 5051 5052 size = zone_spanned_pages_in_node(pgdat->node_id, i, 5053 node_start_pfn, 5054 node_end_pfn, 5055 zones_size); 5056 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i, 5057 node_start_pfn, node_end_pfn, 5058 zholes_size); 5059 zone->spanned_pages = size; 5060 zone->present_pages = real_size; 5061 5062 totalpages += size; 5063 realtotalpages += real_size; 5064 } 5065 5066 pgdat->node_spanned_pages = totalpages; 5067 pgdat->node_present_pages = realtotalpages; 5068 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 5069 realtotalpages); 5070} 5071 5072#ifndef CONFIG_SPARSEMEM 5073/* 5074 * Calculate the size of the zone->blockflags rounded to an unsigned long 5075 * Start by making sure zonesize is a multiple of pageblock_order by rounding 5076 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 5077 * round what is now in bits to nearest long in bits, then return it in 5078 * bytes. 
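 *
 * (A ballpark example, assuming 2MiB pageblocks (order 9 with 4K pages)
 *  and NR_PAGEBLOCK_BITS = 4: a pageblock-aligned 1GiB zone spans
 *  262144 pages = 512 pageblocks, needing 512 * 4 = 2048 bits, i.e.
 *  256 bytes of pageblock_flags.)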
5079 */ 5080static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 5081{ 5082 unsigned long usemapsize; 5083 5084 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 5085 usemapsize = roundup(zonesize, pageblock_nr_pages); 5086 usemapsize = usemapsize >> pageblock_order; 5087 usemapsize *= NR_PAGEBLOCK_BITS; 5088 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 5089 5090 return usemapsize / 8; 5091} 5092 5093static void __init setup_usemap(struct pglist_data *pgdat, 5094 struct zone *zone, 5095 unsigned long zone_start_pfn, 5096 unsigned long zonesize) 5097{ 5098 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); 5099 zone->pageblock_flags = NULL; 5100 if (usemapsize) 5101 zone->pageblock_flags = 5102 memblock_virt_alloc_node_nopanic(usemapsize, 5103 pgdat->node_id); 5104} 5105#else 5106static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, 5107 unsigned long zone_start_pfn, unsigned long zonesize) {} 5108#endif /* CONFIG_SPARSEMEM */ 5109 5110#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 5111 5112/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 5113void __paginginit set_pageblock_order(void) 5114{ 5115 unsigned int order; 5116 5117 /* Check that pageblock_nr_pages has not already been setup */ 5118 if (pageblock_order) 5119 return; 5120 5121 if (HPAGE_SHIFT > PAGE_SHIFT) 5122 order = HUGETLB_PAGE_ORDER; 5123 else 5124 order = MAX_ORDER - 1; 5125 5126 /* 5127 * Assume the largest contiguous order of interest is a huge page. 5128 * This value may be variable depending on boot parameters on IA64 and 5129 * powerpc. 5130 */ 5131 pageblock_order = order; 5132} 5133#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 5134 5135/* 5136 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 5137 * is unused as pageblock_order is set at compile-time. See 5138 * include/linux/pageblock-flags.h for the values of pageblock_order based on 5139 * the kernel config 5140 */ 5141void __paginginit set_pageblock_order(void) 5142{ 5143} 5144 5145#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 5146 5147static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages, 5148 unsigned long present_pages) 5149{ 5150 unsigned long pages = spanned_pages; 5151 5152 /* 5153 * Provide a more accurate estimation if there are holes within 5154 * the zone and SPARSEMEM is in use. If there are holes within the 5155 * zone, each populated memory region may cost us one or two extra 5156 * memmap pages due to alignment because memmap pages for each 5157 * populated regions may not naturally algined on page boundary. 5158 * So the (present_pages >> 4) heuristic is a tradeoff for that. 5159 */ 5160 if (spanned_pages > present_pages + (present_pages >> 4) && 5161 IS_ENABLED(CONFIG_SPARSEMEM)) 5162 pages = present_pages; 5163 5164 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 5165} 5166 5167/* 5168 * Set up the zone data structures: 5169 * - mark all pages reserved 5170 * - mark all memory queues empty 5171 * - clear the memory bitmaps 5172 * 5173 * NOTE: pgdat should get zeroed by caller. 
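 *
 * (Rough memmap cost accounted for below, assuming 4K pages and a
 *  64-byte struct page: calc_memmap_size() charges about 1/64 of the
 *  zone, e.g. ~4096 pages (16MiB) of memmap for a 1GiB zone.)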
5174 */ 5175static void __paginginit free_area_init_core(struct pglist_data *pgdat) 5176{ 5177 enum zone_type j; 5178 int nid = pgdat->node_id; 5179 unsigned long zone_start_pfn = pgdat->node_start_pfn; 5180 int ret; 5181 5182 pgdat_resize_init(pgdat); 5183#ifdef CONFIG_NUMA_BALANCING 5184 spin_lock_init(&pgdat->numabalancing_migrate_lock); 5185 pgdat->numabalancing_migrate_nr_pages = 0; 5186 pgdat->numabalancing_migrate_next_window = jiffies; 5187#endif 5188 init_waitqueue_head(&pgdat->kswapd_wait); 5189 init_waitqueue_head(&pgdat->pfmemalloc_wait); 5190 pgdat_page_ext_init(pgdat); 5191 5192 for (j = 0; j < MAX_NR_ZONES; j++) { 5193 struct zone *zone = pgdat->node_zones + j; 5194 unsigned long size, realsize, freesize, memmap_pages; 5195 5196 size = zone->spanned_pages; 5197 realsize = freesize = zone->present_pages; 5198 5199 /* 5200 * Adjust freesize so that it accounts for how much memory 5201 * is used by this zone for memmap. This affects the watermark 5202 * and per-cpu initialisations 5203 */ 5204 memmap_pages = calc_memmap_size(size, realsize); 5205 if (!is_highmem_idx(j)) { 5206 if (freesize >= memmap_pages) { 5207 freesize -= memmap_pages; 5208 if (memmap_pages) 5209 printk(KERN_DEBUG 5210 " %s zone: %lu pages used for memmap\n", 5211 zone_names[j], memmap_pages); 5212 } else 5213 printk(KERN_WARNING 5214 " %s zone: %lu pages exceeds freesize %lu\n", 5215 zone_names[j], memmap_pages, freesize); 5216 } 5217 5218 /* Account for reserved pages */ 5219 if (j == 0 && freesize > dma_reserve) { 5220 freesize -= dma_reserve; 5221 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 5222 zone_names[0], dma_reserve); 5223 } 5224 5225 if (!is_highmem_idx(j)) 5226 nr_kernel_pages += freesize; 5227 /* Charge for highmem memmap if there are enough kernel pages */ 5228 else if (nr_kernel_pages > memmap_pages * 2) 5229 nr_kernel_pages -= memmap_pages; 5230 nr_all_pages += freesize; 5231 5232 /* 5233 * Set an approximate value for lowmem here, it will be adjusted 5234 * when the bootmem allocator frees pages into the buddy system. 5235 * And all highmem pages will be managed by the buddy system. 5236 */ 5237 zone->managed_pages = is_highmem_idx(j) ? 
realsize : freesize; 5238#ifdef CONFIG_NUMA 5239 zone->node = nid; 5240 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio) 5241 / 100; 5242 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100; 5243#endif 5244 zone->name = zone_names[j]; 5245 spin_lock_init(&zone->lock); 5246 spin_lock_init(&zone->lru_lock); 5247 zone_seqlock_init(zone); 5248 zone->zone_pgdat = pgdat; 5249 zone_pcp_init(zone); 5250 5251 /* For bootup, initialized properly in watermark setup */ 5252 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages); 5253 5254 lruvec_init(&zone->lruvec); 5255 if (!size) 5256 continue; 5257 5258 set_pageblock_order(); 5259 setup_usemap(pgdat, zone, zone_start_pfn, size); 5260 ret = init_currently_empty_zone(zone, zone_start_pfn, size); 5261 BUG_ON(ret); 5262 memmap_init(size, nid, j, zone_start_pfn); 5263 zone_start_pfn += size; 5264 } 5265} 5266 5267static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 5268{ 5269 unsigned long __maybe_unused start = 0; 5270 unsigned long __maybe_unused offset = 0; 5271 5272 /* Skip empty nodes */ 5273 if (!pgdat->node_spanned_pages) 5274 return; 5275 5276#ifdef CONFIG_FLAT_NODE_MEM_MAP 5277 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 5278 offset = pgdat->node_start_pfn - start; 5279 /* ia64 gets its own node_mem_map, before this, without bootmem */ 5280 if (!pgdat->node_mem_map) { 5281 unsigned long size, end; 5282 struct page *map; 5283 5284 /* 5285 * The zone's endpoints aren't required to be MAX_ORDER 5286 * aligned but the node_mem_map endpoints must be in order 5287 * for the buddy allocator to function correctly. 5288 */ 5289 end = pgdat_end_pfn(pgdat); 5290 end = ALIGN(end, MAX_ORDER_NR_PAGES); 5291 size = (end - start) * sizeof(struct page); 5292 map = alloc_remap(pgdat->node_id, size); 5293 if (!map) 5294 map = memblock_virt_alloc_node_nopanic(size, 5295 pgdat->node_id); 5296 pgdat->node_mem_map = map + offset; 5297 } 5298#ifndef CONFIG_NEED_MULTIPLE_NODES 5299 /* 5300 * With no DISCONTIG, the global mem_map is just set as node 0's 5301 */ 5302 if (pgdat == NODE_DATA(0)) { 5303 mem_map = NODE_DATA(0)->node_mem_map; 5304#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM) 5305 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 5306 mem_map -= offset; 5307#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5308 } 5309#endif 5310#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 5311} 5312 5313void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 5314 unsigned long node_start_pfn, unsigned long *zholes_size) 5315{ 5316 pg_data_t *pgdat = NODE_DATA(nid); 5317 unsigned long start_pfn = 0; 5318 unsigned long end_pfn = 0; 5319 5320 /* pg_data_t should be reset to zero when it's allocated */ 5321 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); 5322 5323 reset_deferred_meminit(pgdat); 5324 pgdat->node_id = nid; 5325 pgdat->node_start_pfn = node_start_pfn; 5326#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5327 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 5328 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 5329 (u64)start_pfn << PAGE_SHIFT, 5330 end_pfn ? 
((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 5331#endif 5332 calculate_node_totalpages(pgdat, start_pfn, end_pfn, 5333 zones_size, zholes_size); 5334 5335 alloc_node_mem_map(pgdat); 5336#ifdef CONFIG_FLAT_NODE_MEM_MAP 5337 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 5338 nid, (unsigned long)pgdat, 5339 (unsigned long)pgdat->node_mem_map); 5340#endif 5341 5342 free_area_init_core(pgdat); 5343} 5344 5345#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5346 5347#if MAX_NUMNODES > 1 5348/* 5349 * Figure out the number of possible node ids. 5350 */ 5351void __init setup_nr_node_ids(void) 5352{ 5353 unsigned int highest; 5354 5355 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 5356 nr_node_ids = highest + 1; 5357} 5358#endif 5359 5360/** 5361 * node_map_pfn_alignment - determine the maximum internode alignment 5362 * 5363 * This function should be called after node map is populated and sorted. 5364 * It calculates the maximum power of two alignment which can distinguish 5365 * all the nodes. 5366 * 5367 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 5368 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 5369 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 5370 * shifted, 1GiB is enough and this function will indicate so. 5371 * 5372 * This is used to test whether pfn -> nid mapping of the chosen memory 5373 * model has fine enough granularity to avoid incorrect mapping for the 5374 * populated node map. 5375 * 5376 * Returns the determined alignment in pfn's. 0 if there is no alignment 5377 * requirement (single node). 5378 */ 5379unsigned long __init node_map_pfn_alignment(void) 5380{ 5381 unsigned long accl_mask = 0, last_end = 0; 5382 unsigned long start, end, mask; 5383 int last_nid = -1; 5384 int i, nid; 5385 5386 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 5387 if (!start || last_nid < 0 || last_nid == nid) { 5388 last_nid = nid; 5389 last_end = end; 5390 continue; 5391 } 5392 5393 /* 5394 * Start with a mask granular enough to pin-point to the 5395 * start pfn and tick off bits one-by-one until it becomes 5396 * too coarse to separate the current node from the last. 5397 */ 5398 mask = ~((1 << __ffs(start)) - 1); 5399 while (mask && last_end <= (start & (mask << 1))) 5400 mask <<= 1; 5401 5402 /* accumulate all internode masks */ 5403 accl_mask |= mask; 5404 } 5405 5406 /* convert mask to number of pages */ 5407 return ~accl_mask + 1; 5408} 5409 5410/* Find the lowest pfn for a node */ 5411static unsigned long __init find_min_pfn_for_node(int nid) 5412{ 5413 unsigned long min_pfn = ULONG_MAX; 5414 unsigned long start_pfn; 5415 int i; 5416 5417 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL) 5418 min_pfn = min(min_pfn, start_pfn); 5419 5420 if (min_pfn == ULONG_MAX) { 5421 printk(KERN_WARNING 5422 "Could not find start_pfn for node %d\n", nid); 5423 return 0; 5424 } 5425 5426 return min_pfn; 5427} 5428 5429/** 5430 * find_min_pfn_with_active_regions - Find the minimum PFN registered 5431 * 5432 * It returns the minimum PFN based on information provided via 5433 * memblock_set_node(). 5434 */ 5435unsigned long __init find_min_pfn_with_active_regions(void) 5436{ 5437 return find_min_pfn_for_node(MAX_NUMNODES); 5438} 5439 5440/* 5441 * early_calculate_totalpages() 5442 * Sum pages in active regions for movable zone. 5443 * Populate N_MEMORY for calculating usable_nodes. 
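 *
 * (usable_nodes in the caller below is simply
 *  nodes_weight(node_states[N_MEMORY]); required_kernelcore is divided
 *  by it so that kernelcore is spread only over nodes that actually
 *  have memory.)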
5444 */ 5445static unsigned long __init early_calculate_totalpages(void) 5446{ 5447 unsigned long totalpages = 0; 5448 unsigned long start_pfn, end_pfn; 5449 int i, nid; 5450 5451 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 5452 unsigned long pages = end_pfn - start_pfn; 5453 5454 totalpages += pages; 5455 if (pages) 5456 node_set_state(nid, N_MEMORY); 5457 } 5458 return totalpages; 5459} 5460 5461/* 5462 * Find the PFN the Movable zone begins in each node. Kernel memory 5463 * is spread evenly between nodes as long as the nodes have enough 5464 * memory. When they don't, some nodes will have more kernelcore than 5465 * others 5466 */ 5467static void __init find_zone_movable_pfns_for_nodes(void) 5468{ 5469 int i, nid; 5470 unsigned long usable_startpfn; 5471 unsigned long kernelcore_node, kernelcore_remaining; 5472 /* save the state before borrow the nodemask */ 5473 nodemask_t saved_node_state = node_states[N_MEMORY]; 5474 unsigned long totalpages = early_calculate_totalpages(); 5475 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 5476 struct memblock_region *r; 5477 5478 /* Need to find movable_zone earlier when movable_node is specified. */ 5479 find_usable_zone_for_movable(); 5480 5481 /* 5482 * If movable_node is specified, ignore kernelcore and movablecore 5483 * options. 5484 */ 5485 if (movable_node_is_enabled()) { 5486 for_each_memblock(memory, r) { 5487 if (!memblock_is_hotpluggable(r)) 5488 continue; 5489 5490 nid = r->nid; 5491 5492 usable_startpfn = PFN_DOWN(r->base); 5493 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 5494 min(usable_startpfn, zone_movable_pfn[nid]) : 5495 usable_startpfn; 5496 } 5497 5498 goto out2; 5499 } 5500 5501 /* 5502 * If movablecore=nn[KMG] was specified, calculate what size of 5503 * kernelcore that corresponds so that memory usable for 5504 * any allocation type is evenly spread. If both kernelcore 5505 * and movablecore are specified, then the value of kernelcore 5506 * will be used for required_kernelcore if it's greater than 5507 * what movablecore would have allowed. 5508 */ 5509 if (required_movablecore) { 5510 unsigned long corepages; 5511 5512 /* 5513 * Round-up so that ZONE_MOVABLE is at least as large as what 5514 * was requested by the user 5515 */ 5516 required_movablecore = 5517 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 5518 required_movablecore = min(totalpages, required_movablecore); 5519 corepages = totalpages - required_movablecore; 5520 5521 required_kernelcore = max(required_kernelcore, corepages); 5522 } 5523 5524 /* 5525 * If kernelcore was not specified or kernelcore size is larger 5526 * than totalpages, there is no ZONE_MOVABLE. 
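	 *
	 * (Illustrative numbers for the arithmetic above: booting a 16GiB
	 *  machine with movablecore=4G makes corepages = 16GiB - 4GiB =
	 *  12GiB, so required_kernelcore becomes at least 12GiB worth of
	 *  pages and roughly 4GiB in total, spread across the nodes with
	 *  memory, is left for ZONE_MOVABLE by the loop below.)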
5527 */ 5528 if (!required_kernelcore || required_kernelcore >= totalpages) 5529 goto out; 5530 5531 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 5532 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 5533 5534restart: 5535 /* Spread kernelcore memory as evenly as possible throughout nodes */ 5536 kernelcore_node = required_kernelcore / usable_nodes; 5537 for_each_node_state(nid, N_MEMORY) { 5538 unsigned long start_pfn, end_pfn; 5539 5540 /* 5541 * Recalculate kernelcore_node if the division per node 5542 * now exceeds what is necessary to satisfy the requested 5543 * amount of memory for the kernel 5544 */ 5545 if (required_kernelcore < kernelcore_node) 5546 kernelcore_node = required_kernelcore / usable_nodes; 5547 5548 /* 5549 * As the map is walked, we track how much memory is usable 5550 * by the kernel using kernelcore_remaining. When it is 5551 * 0, the rest of the node is usable by ZONE_MOVABLE 5552 */ 5553 kernelcore_remaining = kernelcore_node; 5554 5555 /* Go through each range of PFNs within this node */ 5556 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 5557 unsigned long size_pages; 5558 5559 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 5560 if (start_pfn >= end_pfn) 5561 continue; 5562 5563 /* Account for what is only usable for kernelcore */ 5564 if (start_pfn < usable_startpfn) { 5565 unsigned long kernel_pages; 5566 kernel_pages = min(end_pfn, usable_startpfn) 5567 - start_pfn; 5568 5569 kernelcore_remaining -= min(kernel_pages, 5570 kernelcore_remaining); 5571 required_kernelcore -= min(kernel_pages, 5572 required_kernelcore); 5573 5574 /* Continue if range is now fully accounted */ 5575 if (end_pfn <= usable_startpfn) { 5576 5577 /* 5578 * Push zone_movable_pfn to the end so 5579 * that if we have to rebalance 5580 * kernelcore across nodes, we will 5581 * not double account here 5582 */ 5583 zone_movable_pfn[nid] = end_pfn; 5584 continue; 5585 } 5586 start_pfn = usable_startpfn; 5587 } 5588 5589 /* 5590 * The usable PFN range for ZONE_MOVABLE is from 5591 * start_pfn->end_pfn. Calculate size_pages as the 5592 * number of pages used as kernelcore 5593 */ 5594 size_pages = end_pfn - start_pfn; 5595 if (size_pages > kernelcore_remaining) 5596 size_pages = kernelcore_remaining; 5597 zone_movable_pfn[nid] = start_pfn + size_pages; 5598 5599 /* 5600 * Some kernelcore has been met, update counts and 5601 * break if the kernelcore for this node has been 5602 * satisfied 5603 */ 5604 required_kernelcore -= min(required_kernelcore, 5605 size_pages); 5606 kernelcore_remaining -= size_pages; 5607 if (!kernelcore_remaining) 5608 break; 5609 } 5610 } 5611 5612 /* 5613 * If there is still required_kernelcore, we do another pass with one 5614 * less node in the count. This will push zone_movable_pfn[nid] further 5615 * along on the nodes that still have memory until kernelcore is 5616 * satisfied 5617 */ 5618 usable_nodes--; 5619 if (usable_nodes && required_kernelcore > usable_nodes) 5620 goto restart; 5621 5622out2: 5623 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 5624 for (nid = 0; nid < MAX_NUMNODES; nid++) 5625 zone_movable_pfn[nid] = 5626 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 5627 5628out: 5629 /* restore the node_state */ 5630 node_states[N_MEMORY] = saved_node_state; 5631} 5632 5633/* Any regular or high memory on that node ? 
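 *
 * (i.e. walk the zones below ZONE_MOVABLE and, on the first populated
 *  one, set N_HIGH_MEMORY for this node in node_states[], plus
 *  N_NORMAL_MEMORY when that zone is at or below ZONE_NORMAL.)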
/* Any regular or high memory on that node? */
static void check_for_memory(pg_data_t *pgdat, int nid)
{
	enum zone_type zone_type;

	if (N_MEMORY == N_NORMAL_MEMORY)
		return;

	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];
		if (populated_zone(zone)) {
			node_set_state(nid, N_HIGH_MEMORY);
			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
			    zone_type <= ZONE_NORMAL)
				node_set_state(nid, N_NORMAL_MEMORY);
			break;
		}
	}
}

/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones matches, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));
	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
	for (i = 1; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		arch_zone_lowest_possible_pfn[i] =
			arch_zone_highest_possible_pfn[i-1];
		arch_zone_highest_possible_pfn[i] =
			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
	}
	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;

	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
	find_zone_movable_pfns_for_nodes();

	/* Print out the zone ranges */
	pr_info("Zone ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		pr_info("  %-8s ", zone_names[i]);
		if (arch_zone_lowest_possible_pfn[i] ==
				arch_zone_highest_possible_pfn[i])
			pr_cont("empty\n");
		else
			pr_cont("[mem %#018Lx-%#018Lx]\n",
				(u64)arch_zone_lowest_possible_pfn[i]
					<< PAGE_SHIFT,
				((u64)arch_zone_highest_possible_pfn[i]
					<< PAGE_SHIFT) - 1);
	}

	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
	pr_info("Movable zone start for each node\n");
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (zone_movable_pfn[i])
			pr_info("  Node %d: %#018Lx\n", i,
			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
	}

	/* Print out the early node map */
	pr_info("Early memory node ranges\n");
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			((u64)end_pfn << PAGE_SHIFT) - 1);

	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		free_area_init_node(nid, NULL,
				find_min_pfn_for_node(nid), NULL);

		/* Any memory on that node? */
		if (pgdat->node_present_pages)
			node_set_state(nid, N_MEMORY);
		check_for_memory(pgdat, nid);
	}
}
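/*
 * Example caller (a sketch, loosely modelled on what architectures do in
 * their zone_sizes_init(); the PFN limits below are illustrative, not a
 * definitive arch implementation):
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES];
 *
 *	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 *	max_zone_pfns[ZONE_DMA]    = MAX_DMA_PFN;
 *	max_zone_pfns[ZONE_DMA32]  = MAX_DMA32_PFN;
 *	max_zone_pfns[ZONE_NORMAL] = max_pfn;
 *	free_area_init_nodes(max_zone_pfns);
 */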
static int __init cmdline_parse_core(char *p, unsigned long *core)
{
	unsigned long long coremem;
	if (!p)
		return -EINVAL;

	coremem = memparse(p, &p);
	*core = coremem >> PAGE_SHIFT;

	/* Paranoid check that UL is enough for the coremem value */
	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

	return 0;
}

/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	return cmdline_parse_core(p, &required_kernelcore);
}

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore);
}

early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

void adjust_managed_page_count(struct page *page, long count)
{
	spin_lock(&managed_page_count_lock);
	page_zone(page)->managed_pages += count;
	totalram_pages += count;
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages += count;
#endif
	spin_unlock(&managed_page_count_lock);
}
EXPORT_SYMBOL(adjust_managed_page_count);

unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
{
	void *pos;
	unsigned long pages = 0;

	start = (void *)PAGE_ALIGN((unsigned long)start);
	end = (void *)((unsigned long)end & PAGE_MASK);
	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
		if ((unsigned int)poison <= 0xFF)
			memset(pos, poison, PAGE_SIZE);
		free_reserved_page(virt_to_page(pos));
	}

	if (pages && s)
		pr_info("Freeing %s memory: %ldK (%p - %p)\n",
			s, pages << (PAGE_SHIFT - 10), start, end);

	return pages;
}
EXPORT_SYMBOL(free_reserved_area);

#ifdef CONFIG_HIGHMEM
void free_highmem_page(struct page *page)
{
	__free_reserved_page(page);
	totalram_pages++;
	page_zone(page)->managed_pages++;
	totalhigh_pages++;
}
#endif
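/*
 * Example usage (sketch): free_reserved_area() is how generic code hands
 * the __init sections back to the buddy allocator once boot is done; cf.
 * free_initmem_default() in include/linux/mm.h, which boils down to:
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 *
 * A poison byte in [0, 0xFF] overwrites the pages before they are freed,
 * so a stale user of __init data faults on garbage rather than silently
 * reading old contents; a negative poison skips the memset().
 */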
void __init mem_init_print_info(const char *str)
{
	unsigned long physpages, codesize, datasize, rosize, bss_size;
	unsigned long init_code_size, init_data_size;

	physpages = get_num_physpages();
	codesize = _etext - _stext;
	datasize = _edata - _sdata;
	rosize = __end_rodata - __start_rodata;
	bss_size = __bss_stop - __bss_start;
	init_data_size = __init_end - __init_begin;
	init_code_size = _einittext - _sinittext;

	/*
	 * Detect special cases and adjust section sizes accordingly:
	 * 1) .init.* may be embedded into .data sections
	 * 2) .init.text.* may be out of [__init_begin, __init_end],
	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
	 * 3) .rodata.* may be embedded into .text or .data sections.
	 */
#define adj_init_size(start, end, size, pos, adj) \
	do { \
		if (start <= pos && pos < end && size > adj) \
			size -= adj; \
	} while (0)

	adj_init_size(__init_begin, __init_end, init_data_size,
		     _sinittext, init_code_size);
	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);

#undef	adj_init_size

	pr_info("Memory: %luK/%luK available "
	       "(%luK kernel code, %luK rwdata, %luK rodata, "
	       "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
#ifdef	CONFIG_HIGHMEM
	       ", %luK highmem"
#endif
	       "%s%s)\n",
	       nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
	       codesize >> 10, datasize >> 10, rosize >> 10,
	       (init_data_size + init_code_size) >> 10, bss_size >> 10,
	       (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
	       totalcma_pages << (PAGE_SHIFT-10),
#ifdef	CONFIG_HIGHMEM
	       totalhigh_pages << (PAGE_SHIFT-10),
#endif
	       str ? ", " : "", str ? str : "");
}

/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
 * In the DMA zone, a significant percentage may be consumed by the kernel
 * image and other unfreeable allocations, which can skew the watermarks
 * badly. This function may optionally be used to account for unfreeable
 * pages in the first zone (e.g., ZONE_DMA). The effect will be lower
 * watermarks and a smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}

static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		lru_add_drain_cpu(cpu);
		drain_pages(cpu);

		/*
		 * Spill the event counters of the dead processor
		 * into the current processor's event counters.
		 * This artificially elevates the count of the current
		 * processor.
		 */
		vm_events_fold_cpu(cpu);

		/*
		 * Zero the differential counters of the dead processor
		 * so that the vm statistics are consistent.
		 *
		 * This is only okay since the processor is dead and cannot
		 * race with what we are doing.
		 */
		cpu_vm_stats_fold(cpu);
	}
	return NOTIFY_OK;
}

void __init page_alloc_init(void)
{
	hotcpu_notifier(page_alloc_cpu_notify, 0);
}
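/*
 * Example (sketch): a UMA architecture with one flat memory bank would
 * size its zones and hand them to free_area_init() above from its
 * paging_init(); the bounds below are illustrative, not any specific
 * arch's code:
 *
 *	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
 *
 *	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
 *	free_area_init(zones_size);
 */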
/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > zone->managed_pages)
				max = zone->managed_pages;
			reserve_pages += max;
			/*
			 * Lowmem reserves are not available to
			 * GFP_HIGHUSER page cache allocations and
			 * kswapd tries to balance zones to their high
			 * watermark. As a result, neither should be
			 * regarded as dirtyable memory, to prevent a
			 * situation where reclaim has to clean pages
			 * in order to balance the zones.
			 */
			zone->dirty_balance_reserve = max;
		}
	}
	dirty_balance_reserve = reserve_pages;
	totalreserve_pages = reserve_pages;
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes. Ensures that each zone
 *	has a correct number of reserved pages, so that an adequate
 *	number of pages is left in the zone after a successful
 *	__alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long managed_pages = zone->managed_pages;

			zone->lowmem_reserve[j] = 0;

			idx = j;
			while (idx) {
				struct zone *lower_zone;

				idx--;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = managed_pages /
					sysctl_lowmem_reserve_ratio[idx];
				managed_pages += lower_zone->managed_pages;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
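/*
 * Worked example (illustrative): with the default reserve ratio of 256
 * for the zones below ZONE_NORMAL, a node with a 4GB ZONE_NORMAL sitting
 * above ZONE_DMA32 makes DMA32 hold back 1048576 / 256 = 4096 pages
 * (16MB) from allocations that could equally have been satisfied from
 * ZONE_NORMAL; in terms of the loop above:
 *
 *	dma32->lowmem_reserve[ZONE_NORMAL] =
 *		normal_managed_pages / sysctl_lowmem_reserve_ratio[ZONE_DMA32];
 */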
static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->managed_pages;
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone->managed_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			unsigned long min_pages;

			min_pages = zone->managed_pages / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->watermark[WMARK_MIN] = tmp;
		}

		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);

		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
			high_wmark_pages(zone) - low_wmark_pages(zone) -
			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	mutex_lock(&zonelists_mutex);
	__setup_per_zone_wmarks();
	mutex_unlock(&zonelists_mutex);
}
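/*
 * Worked example (illustrative): with min_free_kbytes = 4096 and 4K
 * pages, pages_min is 1024 pages. A machine whose only lowmem zone
 * manages 1GB gets all of that share, so tmp = 1024 and
 *
 *	WMARK_MIN  = 1024 pages (4MB)
 *	WMARK_LOW  = 1024 + 1024/4 = 1280 pages
 *	WMARK_HIGH = 1024 + 1024/2 = 1536 pages
 *
 * On a multi-zone machine each lowmem zone instead gets a share of
 * pages_min proportional to its managed_pages.
 */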
/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the
 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
 * the anonymous pages are kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive anon
 * -------------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31         3GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 */
static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
{
	unsigned int gb, ratio;

	/* Zone size in gigabytes */
	gb = zone->managed_pages >> (30 - PAGE_SHIFT);
	if (gb)
		ratio = int_sqrt(10 * gb);
	else
		ratio = 1;

	zone->inactive_ratio = ratio;
}

static void __meminit setup_per_zone_inactive_ratio(void)
{
	struct zone *zone;

	for_each_zone(zone)
		calculate_zone_inactive_ratio(zone);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (64MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
int __meminit init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes) {
		min_free_kbytes = new_min_free_kbytes;
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 65536)
			min_free_kbytes = 65536;
	} else {
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
				new_min_free_kbytes, user_min_free_kbytes);
	}
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();
	setup_per_zone_inactive_ratio();
	return 0;
}
module_init(init_per_zone_wmark_min)
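/*
 * Worked example (illustrative): on a machine with 8GB of lowmem,
 * lowmem_kbytes = 8388608, so
 *
 *	min_free_kbytes = int_sqrt(8388608 * 16)
 *			= int_sqrt(134217728) = 11585
 *
 * in line with the ~11584k row of the table above (the table values are
 * rounded). The result is then clamped into [128, 65536], and a value
 * written by the admin via /proc/sys/vm/min_free_kbytes always wins.
 */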
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->managed_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->managed_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks. The lowmem reserve ratio only makes sense
 * as a function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int old_percpu_pagelist_fraction;
	int ret;

	mutex_lock(&pcp_batch_high_lock);
	old_percpu_pagelist_fraction = percpu_pagelist_fraction;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || ret < 0)
		goto out;

	/* Sanity checking to avoid pcp imbalance */
	if (percpu_pagelist_fraction &&
	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
		ret = -EINVAL;
		goto out;
	}

	/* No change? */
	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
		goto out;

	for_each_populated_zone(zone) {
		unsigned int cpu;

		for_each_possible_cpu(cpu)
			pageset_set_high_and_batch(zone,
					per_cpu_ptr(zone->pageset, cpu));
	}
out:
	mutex_unlock(&pcp_batch_high_lock);
	return ret;
}
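/*
 * Worked example (illustrative): writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction on a zone that manages 1GB of 4K
 * pages (262144 pages) sets each per-cpu list's high mark to
 * 262144 / 8 = 32768 pages, with the bulk-free batch derived from the
 * high mark in pageset_set_high_and_batch(). The
 * MIN_PERCPU_PAGELIST_FRACTION check above rejects fractions below 8,
 * so no single CPU can cache more than 1/8 of a zone.
 */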
#ifdef CONFIG_NUMA
int hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SHIFT < 20)
			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = memblock_virt_alloc_nopanic(size, 0);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of the hash table, which
			 * alloc_pages_exact() does automatically.
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
	       tablename,
	       (1UL << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
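/*
 * Example caller (a sketch, loosely modelled on how fs code sizes its
 * boot-time hash tables; all identifiers below are illustrative):
 *
 *	static unsigned int my_hash_shift __read_mostly;
 *	static unsigned int my_hash_mask __read_mostly;
 *	static struct hlist_head *my_hashtable __read_mostly;
 *
 *	my_hashtable = alloc_large_system_hash("My-cache",
 *					sizeof(struct hlist_head),
 *					0,	// size from memory
 *					14,	// 1 bucket per 16KB of RAM
 *					HASH_EARLY,
 *					&my_hash_shift, &my_hash_mask,
 *					0, 0);
 */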
/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
					unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	zone = page_zone(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	zone = page_zone(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}
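/*
 * Example (sketch): the migratetype accessors used throughout this file
 * are thin wrappers over the two functions above; cf. the definitions in
 * include/linux/mmzone.h, roughly:
 *
 *	#define get_pageblock_migratetype(page)				\
 *		get_pfnblock_flags_mask(page, page_to_pfn(page),	\
 *					PB_migrate_end, MIGRATETYPE_MASK)
 *
 * The cmpxchg() loop in set_pfnblock_flags_mask() lets a pageblock's
 * flags be updated without taking zone->lock.
 */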
/*
 * This function checks whether the pageblock includes unmovable pages or
 * not. If @count is not zero, it is okay to include up to @count unmovable
 * pages.
 *
 * A PageLRU check without isolation or lru_lock can race, so a
 * MIGRATE_MOVABLE block might include unmovable pages. This means you
 * can't expect this function to be exact.
 */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 bool skip_hwpoisoned_pages)
{
	unsigned long pfn, iter, found;
	int mt;

	/*
	 * To avoid noisy data, lru_add_drain_all() should be called first.
	 *
	 * If this is ZONE_MOVABLE, the zone never contains unmovable pages.
	 */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return false;
	mt = get_pageblock_migratetype(page);
	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
		return false;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);

		/*
		 * Hugepages are not in LRU lists, but they're movable.
		 * We need not scan over tail pages because we don't
		 * handle each tail page individually in migration.
		 */
		if (PageHuge(page)) {
			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
			continue;
		}

		/*
		 * We can't use page_count without pinning the page
		 * because another CPU can free the compound page.
		 * This check already skips compound tails of THP
		 * because their page->_count is zero at all times.
		 */
		if (!atomic_read(&page->_count)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}

		/*
		 * The HWPoisoned page may not be in the buddy system,
		 * and page_count() is not 0.
		 */
		if (skip_hwpoisoned_pages && PageHWPoison(page))
			continue;

		if (!PageLRU(page))
			found++;
		/*
		 * If there are RECLAIMABLE pages, we need to check them.
		 * But for now, memory offline itself doesn't call
		 * shrink_node_slabs(), and that still needs to be fixed.
		 */
		/*
		 * If the page is not RAM, page_count() should be 0.
		 * We don't need any further checks: this is a _used_,
		 * not-movable page.
		 *
		 * The problematic thing here is PG_reserved pages. PG_reserved
		 * is set on both a memory hole page and a _used_ kernel
		 * page at boot.
		 */
		if (found > count)
			return true;
	}
	return false;
}

bool is_pageblock_removable_nolock(struct page *page)
{
	struct zone *zone;
	unsigned long pfn;

	/*
	 * We have to be careful here because we are iterating over memory
	 * sections which are not zone aware, so we might end up outside of
	 * the zone but still within the section.
	 * We also have to take care about the node. If the node is offline,
	 * its NODE_DATA will be NULL - see page_zone().
	 */
	if (!node_online(page_to_nid(page)))
		return false;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	if (!zone_spans_pfn(zone, pfn))
		return false;

	return !has_unmovable_pages(zone, page, 0, true);
}
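/*
 * Example caller (sketch): memory hot-remove asks this question for every
 * pageblock in the range it wants to offline, roughly in the style of
 * is_mem_section_removable() in mm/memory_hotplug.c:
 *
 *	for (; page < end_page; page += pageblock_nr_pages)
 *		if (!is_pageblock_removable_nolock(page))
 *			return false;
 */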
#ifdef CONFIG_CMA

static unsigned long pfn_max_align_down(unsigned long pfn)
{
	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
			     pageblock_nr_pages) - 1);
}

static unsigned long pfn_max_align_up(unsigned long pfn)
{
	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
				pageblock_nr_pages));
}

/* [start, end) must belong to a single zone. */
static int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned long nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;

	migrate_prep();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			pfn = isolate_migratepages_range(cc, pfn, end);
			if (!pfn) {
				ret = -EINTR;
				break;
			}
			tries = 0;
		} else if (++tries == 5) {
			ret = ret < 0 ? ret : -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							&cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
				    NULL, 0, cc->mode, MR_CMA);
	}
	if (ret < 0) {
		putback_movable_pages(&cc->migratepages);
		return ret;
	}
	return 0;
}

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned. However, it's the caller's responsibility to guarantee that
 * we are the only thread that changes the migrate type of the pageblocks
 * the pages fall in.
 *
 * The PFN range must belong to a single zone.
 *
 * Returns zero on success or a negative error code. On success, all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype)
{
	unsigned long outer_start, outer_end;
	unsigned int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is we mark all pageblocks in range as
	 * MIGRATE_ISOLATE. Because pageblock and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, we align the range to the bigger of the two so
	 * that the page allocator won't try to merge buddies from
	 * different pageblocks and change MIGRATE_ISOLATE to some
	 * other migration type.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (i.e. the pages that
	 * we are interested in). This will put all the pages in
	 * range back to the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from the page
	 * allocator, removing them from the buddy system. This way
	 * the page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to the page allocator so that buddy can use them.
	 */

	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype,
				       false);
	if (ret)
		return ret;

	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret)
		goto done;

	/*
	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
	 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
	 * more, all pages in [start, end) are free in the page allocator.
	 * What we are going to do is to allocate all pages from
	 * [start, end) (that is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of the interesting range may not be aligned with pages
	 * that the page allocator holds, i.e. they can be part of higher
	 * order pages. Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */

	lru_add_drain_all();
	drain_all_pages(cc.zone);

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			ret = -EBUSY;
			goto done;
		}
		outer_start &= ~0UL << order;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, false)) {
		pr_info("%s: [%lx, %lx) PFNs busy\n",
			__func__, outer_start, end);
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	return ret;
}
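/*
 * Example caller (sketch): CMA is the main user of this interface; a
 * cma_alloc()-style allocator would do roughly
 *
 *	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
 *	if (ret == 0)
 *		page = pfn_to_page(pfn);
 *	...
 *	free_contig_range(pfn, count);	// defined just below
 *
 * retrying at the next candidate PFN on -EBUSY, since a transient pin can
 * make an otherwise movable range fail isolation.
 */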
void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
	unsigned cpu;
	mutex_lock(&pcp_batch_high_lock);
	for_each_possible_cpu(cpu)
		pageset_set_high_and_batch(zone,
				per_cpu_ptr(zone->pageset, cpu));
	mutex_unlock(&pcp_batch_high_lock);
}
#endif

void zone_pcp_reset(struct zone *zone)
{
	unsigned long flags;
	int cpu;
	struct per_cpu_pageset *pset;

	/* avoid races with drain_pages() */
	local_irq_save(flags);
	if (zone->pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
		zone->pageset = &boot_pageset;
	}
	local_irq_restore(flags);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	unsigned int order, i;
	unsigned long pfn;
	unsigned long flags;
	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system,
		 * and page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			SetPageReserved(page);
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		printk(KERN_INFO "remove from free list %lx %d %lx\n",
		       pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

#ifdef CONFIG_MEMORY_FAILURE
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
#endif