/*
 * bootmem - A boot-time physical memory allocator and configurator
 *
 * Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

#ifndef CONFIG_NO_BOOTMEM
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
        bootmem_debug = 1;
        return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({                         \
        if (unlikely(bootmem_debug))                    \
                printk(KERN_INFO                        \
                        "bootmem::%s " fmt,             \
                        __func__, ## args);             \
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
        unsigned long bytes = (pages + 7) / 8;

        return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
        unsigned long bytes = bootmap_bytes(pages);

        return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
        struct list_head *iter;

        list_for_each(iter, &bdata_list) {
                bootmem_data_t *ent;

                ent = list_entry(iter, bootmem_data_t, list);
                if (bdata->node_min_pfn < ent->node_min_pfn)
                        break;
        }
        list_add_tail(&bdata->list, iter);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
        unsigned long mapstart, unsigned long start, unsigned long end)
{
        unsigned long mapsize;

        mminit_validate_memmodel_limits(&start, &end);
        bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
        bdata->node_min_pfn = start;
        bdata->node_low_pfn = end;
        link_bootmem(bdata);

        /*
         * Initially all pages are reserved - setup_arch() has to
         * register free RAM areas explicitly.
         */
        mapsize = bootmap_bytes(end - start);
        memset(bdata->node_bootmem_map, 0xff, mapsize);

        bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
                bdata - bootmem_node_data, start, mapstart, end, mapsize);

        return mapsize;
}

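/*
 * Worked example (illustrative numbers, assuming 4 KiB pages and one bit
 * per page): a node spanning 128 MiB covers 32768 pages, so
 *
 *      bootmap_bytes(32768) = ALIGN(32768 / 8, sizeof(long)) = 4096 bytes
 *      bootmem_bootmap_pages(32768) = 1 page
 *
 * i.e. the arch code must set aside a single page at @mapstart before
 * calling init_bootmem_core() through the wrappers below.
 */
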
/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
                                unsigned long startpfn, unsigned long endpfn)
{
        return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
        max_low_pfn = pages;
        min_low_pfn = start;
        return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
#endif

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator; no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
        unsigned long cursor, end;

        kmemleak_free_part(__va(addr), size);

        cursor = PFN_UP(addr);
        end = PFN_DOWN(addr + size);

        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), 0);
                totalram_pages++;
        }
}

#ifdef CONFIG_NO_BOOTMEM
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
        int i;
        unsigned long start_aligned, end_aligned;
        int order = ilog2(BITS_PER_LONG);

        start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
        end_aligned = end & ~(BITS_PER_LONG - 1);

        if (end_aligned <= start_aligned) {
                for (i = start; i < end; i++)
                        __free_pages_bootmem(pfn_to_page(i), 0);

                return;
        }

        for (i = start; i < start_aligned; i++)
                __free_pages_bootmem(pfn_to_page(i), 0);

        for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
                __free_pages_bootmem(pfn_to_page(i), order);

        for (i = end_aligned; i < end; i++)
                __free_pages_bootmem(pfn_to_page(i), 0);
}

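/*
 * Worked example for __free_pages_memory() (illustrative, assuming
 * BITS_PER_LONG == 64): freeing pfns [10, 200) splits into
 *
 *      pfns  10..63  - 54 order-0 frees (unaligned head)
 *      pfns  64..191 - 2 order-6 frees of 64 pages each (aligned middle)
 *      pfns 192..199 - 8 order-0 frees (tail)
 *
 * Handing whole word-sized blocks to __free_pages_bootmem() keeps the
 * number of calls into the page allocator small.
 */
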
unsigned long __init free_all_memory_core_early(int nodeid)
{
        int i;
        u64 start, end;
        unsigned long count = 0;
        struct range *range = NULL;
        int nr_range;

        nr_range = get_free_all_memory_range(&range, nodeid);

        for (i = 0; i < nr_range; i++) {
                start = range[i].start;
                end = range[i].end;
                count += end - start;
                __free_pages_memory(start, end);
        }

        return count;
}
#else
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
        int aligned;
        struct page *page;
        unsigned long start, end, pages, count = 0;

        if (!bdata->node_bootmem_map)
                return 0;

        start = bdata->node_min_pfn;
        end = bdata->node_low_pfn;

        /*
         * If the start is aligned to the machine's word size, we might
         * be able to free pages in bulks of that order.
         */
        aligned = !(start & (BITS_PER_LONG - 1));

        bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
                bdata - bootmem_node_data, start, end, aligned);

        while (start < end) {
                unsigned long *map, idx, vec;

                map = bdata->node_bootmem_map;
                idx = start - bdata->node_min_pfn;
                vec = ~map[idx / BITS_PER_LONG];

                if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
                        int order = ilog2(BITS_PER_LONG);

                        __free_pages_bootmem(pfn_to_page(start), order);
                        count += BITS_PER_LONG;
                } else {
                        unsigned long off = 0;

                        while (vec && off < BITS_PER_LONG) {
                                if (vec & 1) {
                                        page = pfn_to_page(start + off);
                                        __free_pages_bootmem(page, 0);
                                        count++;
                                }
                                vec >>= 1;
                                off++;
                        }
                }
                start += BITS_PER_LONG;
        }

        page = virt_to_page(bdata->node_bootmem_map);
        pages = bdata->node_low_pfn - bdata->node_min_pfn;
        pages = bootmem_bootmap_pages(pages);
        count += pages;
        while (pages--)
                __free_pages_bootmem(page++, 0);

        bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

        return count;
}
#endif

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
        register_page_bootmem_info_node(pgdat);
#ifdef CONFIG_NO_BOOTMEM
        /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
        return 0;
#else
        return free_all_bootmem_core(pgdat->bdata);
#endif
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
#ifdef CONFIG_NO_BOOTMEM
        /*
         * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
         * because in some cases node 0 has no RAM installed and the low
         * memory is on node 1.  Using MAX_NUMNODES makes sure that all
         * ranges in early_node_map[] are used, not only those on node 0.
         */
        return free_all_memory_core_early(MAX_NUMNODES);
#else
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;

        list_for_each_entry(bdata, &bdata_list, list)
                total_pages += free_all_bootmem_core(bdata);

        return total_pages;
#endif
}

#ifndef CONFIG_NO_BOOTMEM
static void __init __free(bootmem_data_t *bdata,
                        unsigned long sidx, unsigned long eidx)
{
        unsigned long idx;

        bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn);

        if (bdata->hint_idx > sidx)
                bdata->hint_idx = sidx;

        for (idx = sidx; idx < eidx; idx++)
                if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
                        BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
                        unsigned long eidx, int flags)
{
        unsigned long idx;
        int exclusive = flags & BOOTMEM_EXCLUSIVE;

        bdebug("nid=%td start=%lx end=%lx flags=%x\n",
                bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn,
                flags);

        for (idx = sidx; idx < eidx; idx++)
                if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
                        if (exclusive) {
                                __free(bdata, sidx, idx);
                                return -EBUSY;
                        }
                        bdebug("silent double reserve of PFN %lx\n",
                                idx + bdata->node_min_pfn);
                }
        return 0;
}

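/*
 * Example (illustrative): __reserve() makes double reservations visible.
 * Reserving an already-reserved range with BOOTMEM_DEFAULT merely logs a
 * "silent double reserve" per already-set bit and succeeds, while with
 * BOOTMEM_EXCLUSIVE the first already-set bit makes the call undo the bits
 * it set itself via __free() and return -EBUSY, so a caller that must not
 * share memory with anyone can detect the overlap.
 */
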
static int __init mark_bootmem_node(bootmem_data_t *bdata,
                                unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long sidx, eidx;

        bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
                bdata - bootmem_node_data, start, end, reserve, flags);

        BUG_ON(start < bdata->node_min_pfn);
        BUG_ON(end > bdata->node_low_pfn);

        sidx = start - bdata->node_min_pfn;
        eidx = end - bdata->node_min_pfn;

        if (reserve)
                return __reserve(bdata, sidx, eidx, flags);
        else
                __free(bdata, sidx, eidx);
        return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long pos;
        bootmem_data_t *bdata;

        pos = start;
        list_for_each_entry(bdata, &bdata_list, list) {
                int err;
                unsigned long max;

                if (pos < bdata->node_min_pfn ||
                    pos >= bdata->node_low_pfn) {
                        BUG_ON(pos != start);
                        continue;
                }

                max = min(bdata->node_low_pfn, end);

                err = mark_bootmem_node(bdata, pos, max, reserve, flags);
                if (reserve && err) {
                        mark_bootmem(start, pos, 0, 0);
                        return err;
                }

                if (max == end)
                        return 0;
                pos = bdata->node_low_pfn;
        }
        BUG();
}
#endif

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
        free_early(physaddr, physaddr + size);
#else
        unsigned long start, end;

        kmemleak_free_part(__va(physaddr), size);

        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
#endif
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
        free_early(addr, addr + size);
#else
        unsigned long start, end;

        kmemleak_free_part(__va(addr), size);

        start = PFN_UP(addr);
        end = PFN_DOWN(addr + size);

        mark_bootmem(start, end, 0, 0);
#endif
}

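/*
 * Example (illustrative, assuming 4 KiB pages): free_bootmem(0x1800, 0x2000)
 * covers physical addresses [0x1800, 0x3800).  PFN_UP(0x1800) = 2 and
 * PFN_DOWN(0x3800) = 3, so only pfn 2 is marked usable; the partially
 * covered pages at either end stay reserved.  The reserve_*() variants
 * below round outwards (PFN_DOWN/PFN_UP) instead, so partial pages end up
 * reserved in both cases.
 */
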
/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                unsigned long size, int flags)
{
#ifdef CONFIG_NO_BOOTMEM
        panic("no bootmem");
        return 0;
#else
        unsigned long start, end;

        start = PFN_DOWN(physaddr);
        end = PFN_UP(physaddr + size);

        return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
#endif
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
                            int flags)
{
#ifdef CONFIG_NO_BOOTMEM
        panic("no bootmem");
        return 0;
#else
        unsigned long start, end;

        start = PFN_DOWN(addr);
        end = PFN_UP(addr + size);

        return mark_bootmem(start, end, 1, flags);
#endif
}

#ifndef CONFIG_NO_BOOTMEM
static unsigned long __init align_idx(struct bootmem_data *bdata,
                                      unsigned long idx, unsigned long step)
{
        unsigned long base = bdata->node_min_pfn;

        /*
         * Align the index with respect to the node start so that the
         * combination of both satisfies the requested alignment.
         */

        return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
                                      unsigned long off, unsigned long align)
{
        unsigned long base = PFN_PHYS(bdata->node_min_pfn);

        /* Same as align_idx for byte offsets */

        return ALIGN(base + off, align) - base;
}

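/*
 * Worked example for align_idx() (illustrative): a node starting at
 * node_min_pfn = 9, with a requested step of 4 pfns, gives
 *
 *      align_idx(bdata, 2, 4) = ALIGN(9 + 2, 4) - 9 = 12 - 9 = 3
 *
 * so the returned index 3 corresponds to pfn 12, which is aligned in
 * absolute terms even though the node itself starts at an odd pfn.
 */
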
static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
{
        unsigned long fallback = 0;
        unsigned long min, max, start, sidx, midx, step;

        bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
                bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
                align, goal, limit);

        BUG_ON(!size);
        BUG_ON(align & (align - 1));
        BUG_ON(limit && goal + size > limit);

        if (!bdata->node_bootmem_map)
                return NULL;

        min = bdata->node_min_pfn;
        max = bdata->node_low_pfn;

        goal >>= PAGE_SHIFT;
        limit >>= PAGE_SHIFT;

        if (limit && max > limit)
                max = limit;
        if (max <= min)
                return NULL;

        step = max(align >> PAGE_SHIFT, 1UL);

        if (goal && min < goal && goal < max)
                start = ALIGN(goal, step);
        else
                start = ALIGN(min, step);

        sidx = start - bdata->node_min_pfn;
        midx = max - bdata->node_min_pfn;

        if (bdata->hint_idx > sidx) {
                /*
                 * Handle the valid case of sidx being zero and still
                 * catch the fallback below.
                 */
                fallback = sidx + 1;
                sidx = align_idx(bdata, bdata->hint_idx, step);
        }

        while (1) {
                int merge;
                void *region;
                unsigned long eidx, i, start_off, end_off;
find_block:
                sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
                sidx = align_idx(bdata, sidx, step);
                eidx = sidx + PFN_UP(size);

                if (sidx >= midx || eidx > midx)
                        break;

                for (i = sidx; i < eidx; i++)
                        if (test_bit(i, bdata->node_bootmem_map)) {
                                sidx = align_idx(bdata, i, step);
                                if (sidx == i)
                                        sidx += step;
                                goto find_block;
                        }

                if (bdata->last_end_off & (PAGE_SIZE - 1) &&
                                PFN_DOWN(bdata->last_end_off) + 1 == sidx)
                        start_off = align_off(bdata, bdata->last_end_off, align);
                else
                        start_off = PFN_PHYS(sidx);

                /*
                 * A page shared with the tail of the previous allocation
                 * is already reserved; skip it when reserving below.
                 */
                merge = PFN_DOWN(start_off) < sidx;
                end_off = start_off + size;

                bdata->last_end_off = end_off;
                bdata->hint_idx = PFN_UP(end_off);

                /*
                 * Reserve the area now:
                 */
                if (__reserve(bdata, PFN_DOWN(start_off) + merge,
                                PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
                        BUG();

                region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
                                start_off);
                memset(region, 0, size);
                /*
                 * The min_count is set to 0 so that bootmem allocated blocks
                 * are never reported as leaks.
                 */
                kmemleak_alloc(region, size, 0, 0);
                return region;
        }

        if (fallback) {
                sidx = align_idx(bdata, fallback - 1, step);
                fallback = 0;
                goto find_block;
        }

        return NULL;
}

static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
        {
                bootmem_data_t *p_bdata;

                p_bdata = bootmem_arch_preferred_node(bdata, size, align,
                                                        goal, limit);
                if (p_bdata)
                        return alloc_bootmem_core(p_bdata, size, align,
                                                        goal, limit);
        }
#endif
        return NULL;
}
#endif

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
#ifdef CONFIG_NO_BOOTMEM
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

restart:

        ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

        if (ptr)
                return ptr;

        if (goal != 0) {
                goal = 0;
                goto restart;
        }

        return NULL;
#else
        bootmem_data_t *bdata;
        void *region;

restart:
        region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
        if (region)
                return region;

        list_for_each_entry(bdata, &bdata_list, list) {
                if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
                        continue;
                if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
                        break;

                region = alloc_bootmem_core(bdata, size, align, goal, limit);
                if (region)
                        return region;
        }

        if (goal) {
                goal = 0;
                goto restart;
        }

        return NULL;
#endif
}

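/*
 * Example (illustrative): a caller that merely prefers memory above 16 MiB
 * could do
 *
 *      buf = __alloc_bootmem_nopanic(size, SMP_CACHE_BYTES, 16UL << 20);
 *
 * If nothing at or above the goal is free, the restart path above retries
 * with goal = 0 and the request is satisfied from the lowest free range
 * instead of failing.
 */
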
/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                        unsigned long goal)
{
        unsigned long limit = 0;

#ifdef CONFIG_NO_BOOTMEM
        limit = -1UL;
#endif

        return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        unsigned long limit = 0;

#ifdef CONFIG_NO_BOOTMEM
        limit = -1UL;
#endif

        return ___alloc_bootmem(size, align, goal, limit);
}

#ifndef CONFIG_NO_BOOTMEM
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
{
        void *ptr;

        ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
        if (ptr)
                return ptr;

        ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
        if (ptr)
                return ptr;

        return ___alloc_bootmem(size, align, goal, limit);
}
#endif

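/*
 * Example (illustrative): the node-affine variants below are what early
 * per-node allocations typically use; the alloc_bootmem_node() wrapper in
 * linux/bootmem.h expands to a call of roughly this shape:
 *
 *      map = __alloc_bootmem_node(pgdat, size, SMP_CACHE_BYTES,
 *                                 __pa(MAX_DMA_ADDRESS));
 *
 * where the goal steers the allocation above the DMA zone without
 * forbidding it as a fallback.
 */
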
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
        return __alloc_memory_core_early(pgdat->node_id, size, align,
                                         goal, -1ULL);
#else
        return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
#endif
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
        unsigned long end_pfn;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        /* update the goal according to MAX_DMA32_PFN */
        end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

        /*
         * Note: 128 >> (20 - PAGE_SHIFT) evaluates to 0 with 4 KiB pages;
         * 128 MiB in pages, i.e. 128 << (20 - PAGE_SHIFT), was presumably
         * intended.
         */
        if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
            (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
                void *ptr;
                unsigned long new_goal;

                new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
#ifdef CONFIG_NO_BOOTMEM
                ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
                                                new_goal, -1ULL);
#else
                ptr = alloc_bootmem_core(pgdat->bdata, size, align,
                                         new_goal, 0);
#endif
                if (ptr)
                        return ptr;
        }
#endif

        return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Returns NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
                                    unsigned long section_nr)
{
#ifdef CONFIG_NO_BOOTMEM
        unsigned long pfn, goal, limit;

        pfn = section_nr_to_pfn(section_nr);
        goal = pfn << PAGE_SHIFT;
        limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

        return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
                                         SMP_CACHE_BYTES, goal, limit);
#else
        bootmem_data_t *bdata;
        unsigned long pfn, goal, limit;

        pfn = section_nr_to_pfn(section_nr);
        goal = pfn << PAGE_SHIFT;
        limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
        bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];

        return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
#endif
}
#endif

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
        ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
                                        goal, -1ULL);
#else
        ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
        if (ptr)
                return ptr;

        ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
#endif
        if (ptr)
                return ptr;

        return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

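/*
 * Example (illustrative): the _low variants below cap the search at
 * ARCH_LOW_ADDRESS_LIMIT (the first 4 GiB unless the architecture overrides
 * it), which suits early users with 32-bit addressing limits, e.g. a
 * hypothetical bounce-buffer pool:
 *
 *      pool = __alloc_bootmem_low(pool_bytes, PAGE_SIZE, 0);
 */
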
/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
        return __alloc_memory_core_early(pgdat->node_id, size, align,
                                         goal, ARCH_LOW_ADDRESS_LIMIT);
#else
        return ___alloc_bootmem_node(pgdat->bdata, size, align,
                                     goal, ARCH_LOW_ADDRESS_LIMIT);
#endif
}
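
/*
 * Typical lifecycle (illustrative sketch of how arch setup code is expected
 * to drive this allocator; every name other than the bootmem calls is made
 * up for the example):
 *
 *      map_pfn = choose_bitmap_location();           (arch-specific)
 *      init_bootmem(map_pfn, max_low_pfn);           (all pages reserved)
 *      free_bootmem(ram_start, ram_size);            (register usable RAM)
 *      reserve_bootmem(kernel_start, kernel_size, BOOTMEM_DEFAULT);
 *      ...
 *      ptr = __alloc_bootmem(size, SMP_CACHE_BYTES, goal);
 *      ...
 *      totalram_pages += free_all_bootmem();         (hand over to buddy)
 */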