/*
 * bootmem - A boot-time physical memory allocator and configurator
 *
 * Copyright (C) 1999 Ingo Molnar
 *                    1999 Kanoj Sarcar, SGI
 *                    2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/io.h>

#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
        .bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
        bootmem_debug = 1;
        return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({                         \
        if (unlikely(bootmem_debug))                    \
                printk(KERN_INFO                        \
                        "bootmem::%s " fmt,             \
                        __func__, ## args);             \
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
        unsigned long bytes = DIV_ROUND_UP(pages, 8);

        return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
        unsigned long bytes = bootmap_bytes(pages);

        return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
        bootmem_data_t *ent;

        list_for_each_entry(ent, &bdata_list, list) {
                if (bdata->node_min_pfn < ent->node_min_pfn) {
                        list_add_tail(&bdata->list, &ent->list);
                        return;
                }
        }

        list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
        unsigned long mapstart, unsigned long start, unsigned long end)
{
        unsigned long mapsize;

        mminit_validate_memmodel_limits(&start, &end);
        bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
        bdata->node_min_pfn = start;
        bdata->node_low_pfn = end;
        link_bootmem(bdata);

        /*
         * Initially all pages are reserved - setup_arch() has to
         * register free RAM areas explicitly.
         */
        mapsize = bootmap_bytes(end - start);
        memset(bdata->node_bootmem_map, 0xff, mapsize);

        bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
                bdata - bootmem_node_data, start, mapstart, end, mapsize);

        return mapsize;
}
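/*
 * Sizing example (illustrative, not part of the original file): with 4 KiB
 * pages and 64-bit longs, a node covering 0x8000 pages needs
 * bootmap_bytes(0x8000) = 0x1000 bytes of bitmap, so
 * bootmem_bootmap_pages(0x8000) returns 1 page to hold it.
 */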
/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
                                unsigned long startpfn, unsigned long endpfn)
{
        return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
        max_low_pfn = pages;
        min_low_pfn = start;
        return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

/*
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
        unsigned long cursor, end;

        kmemleak_free_part(__va(physaddr), size);

        cursor = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), 0);
                totalram_pages++;
        }
}
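/*
 * Release one node's unreserved pages to the page allocator.  The bitmap is
 * scanned a long at a time; a naturally aligned, fully free BITS_PER_LONG
 * run is released as one high-order block, the rest page by page, and the
 * pages holding the bitmap itself are freed last.  Returns the number of
 * pages released.
 */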
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
        struct page *page;
        unsigned long *map, start, end, pages, count = 0;

        if (!bdata->node_bootmem_map)
                return 0;

        map = bdata->node_bootmem_map;
        start = bdata->node_min_pfn;
        end = bdata->node_low_pfn;

        bdebug("nid=%td start=%lx end=%lx\n",
                bdata - bootmem_node_data, start, end);

        while (start < end) {
                unsigned long idx, vec;
                unsigned shift;

                idx = start - bdata->node_min_pfn;
                shift = idx & (BITS_PER_LONG - 1);
                /*
                 * vec holds at most BITS_PER_LONG map bits,
                 * bit 0 corresponds to start.
                 */
                vec = ~map[idx / BITS_PER_LONG];

                if (shift) {
                        vec >>= shift;
                        if (end - start >= BITS_PER_LONG)
                                vec |= ~map[idx / BITS_PER_LONG + 1] <<
                                        (BITS_PER_LONG - shift);
                }
                /*
                 * If we have a properly aligned and fully unreserved
                 * BITS_PER_LONG block of pages in front of us, free
                 * it in one go.
                 */
                if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
                        int order = ilog2(BITS_PER_LONG);

                        __free_pages_bootmem(pfn_to_page(start), order);
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
                        unsigned long cur = start;

                        start = ALIGN(start + 1, BITS_PER_LONG);
                        while (vec && cur != start) {
                                if (vec & 1) {
                                        page = pfn_to_page(cur);
                                        __free_pages_bootmem(page, 0);
                                        count++;
                                }
                                vec >>= 1;
                                ++cur;
                        }
                }
        }

        page = virt_to_page(bdata->node_bootmem_map);
        pages = bdata->node_low_pfn - bdata->node_min_pfn;
        pages = bootmem_bootmap_pages(pages);
        count += pages;
        while (pages--)
                __free_pages_bootmem(page++, 0);

        bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

        return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
        struct zone *z;

        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
        struct pglist_data *pgdat;

        if (reset_managed_pages_done)
                return;

        for_each_online_pgdat(pgdat)
                reset_node_managed_pages(pgdat);

        reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;

        reset_all_zones_managed_pages();

        list_for_each_entry(bdata, &bdata_list, list)
                total_pages += free_all_bootmem_core(bdata);

        totalram_pages += total_pages;

        return total_pages;
}

static void __init __free(bootmem_data_t *bdata,
                        unsigned long sidx, unsigned long eidx)
{
        unsigned long idx;

        bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn);

        if (bdata->hint_idx > sidx)
                bdata->hint_idx = sidx;

        for (idx = sidx; idx < eidx; idx++)
                if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
                        BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
                        unsigned long eidx, int flags)
{
        unsigned long idx;
        int exclusive = flags & BOOTMEM_EXCLUSIVE;

        bdebug("nid=%td start=%lx end=%lx flags=%x\n",
                bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn,
                flags);

        for (idx = sidx; idx < eidx; idx++)
                if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
                        if (exclusive) {
                                __free(bdata, sidx, idx);
                                return -EBUSY;
                        }
                        bdebug("silent double reserve of PFN %lx\n",
                                idx + bdata->node_min_pfn);
                }
        return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
                                unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long sidx, eidx;

        bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
                bdata - bootmem_node_data, start, end, reserve, flags);

        BUG_ON(start < bdata->node_min_pfn);
        BUG_ON(end > bdata->node_low_pfn);

        sidx = start - bdata->node_min_pfn;
        eidx = end - bdata->node_min_pfn;

        if (reserve)
                return __reserve(bdata, sidx, eidx, flags);
        else
                __free(bdata, sidx, eidx);
        return 0;
}
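/*
 * Apply a reserve or free operation to a contiguous pfn range that may span
 * several nodes.  The ordered bdata_list is walked and each intersecting
 * node is handled by mark_bootmem_node(); if an exclusive reservation fails
 * part way through, the portion already reserved is freed again.
 */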
static int __init mark_bootmem(unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long pos;
        bootmem_data_t *bdata;

        pos = start;
        list_for_each_entry(bdata, &bdata_list, list) {
                int err;
                unsigned long max;

                if (pos < bdata->node_min_pfn ||
                    pos >= bdata->node_low_pfn) {
                        BUG_ON(pos != start);
                        continue;
                }

                max = min(bdata->node_low_pfn, end);

                err = mark_bootmem_node(bdata, pos, max, reserve, flags);
                if (reserve && err) {
                        mark_bootmem(start, pos, 0, 0);
                        return err;
                }

                if (max == end)
                        return 0;
                pos = bdata->node_low_pfn;
        }
        BUG();
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part(__va(physaddr), size);

        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part(__va(physaddr), size);

        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        mark_bootmem(start, end, 0, 0);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                unsigned long size, int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(physaddr);
        end = PFN_UP(physaddr + size);

        return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
                           int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(addr);
        end = PFN_UP(addr + size);

        return mark_bootmem(start, end, 1, flags);
}
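/*
 * Rounding example (illustrative, assuming 4 KiB pages): for physaddr 0x1800
 * and size 0x3000 the range ends at 0x4800, so free_bootmem() clears only
 * the fully covered pfns 2-3, while reserve_bootmem() marks pfns 1-4
 * reserved; partially covered pages stay or become reserved, never free.
 */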
static unsigned long __init align_idx(struct bootmem_data *bdata,
                                      unsigned long idx, unsigned long step)
{
        unsigned long base = bdata->node_min_pfn;

        /*
         * Align the index with respect to the node start so that the
         * combination of both satisfies the requested alignment.
         */

        return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
                                      unsigned long off, unsigned long align)
{
        unsigned long base = PFN_PHYS(bdata->node_min_pfn);

        /* Same as align_idx for byte offsets */

        return ALIGN(base + off, align) - base;
}
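/*
 * Per-node allocation core: scan the node's bitmap for @size bytes of free
 * space, preferably at or above @goal and below @limit if one is set,
 * honouring @align.  The hint_idx/last_end_off fields let consecutive small
 * allocations share a partially used page; the chosen range is reserved,
 * zeroed and returned.
 */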
static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
                                        unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        unsigned long fallback = 0;
        unsigned long min, max, start, sidx, midx, step;

        bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
                bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
                align, goal, limit);

        BUG_ON(!size);
        BUG_ON(align & (align - 1));
        BUG_ON(limit && goal + size > limit);

        if (!bdata->node_bootmem_map)
                return NULL;

        min = bdata->node_min_pfn;
        max = bdata->node_low_pfn;

        goal >>= PAGE_SHIFT;
        limit >>= PAGE_SHIFT;

        if (limit && max > limit)
                max = limit;
        if (max <= min)
                return NULL;

        step = max(align >> PAGE_SHIFT, 1UL);

        if (goal && min < goal && goal < max)
                start = ALIGN(goal, step);
        else
                start = ALIGN(min, step);

        sidx = start - bdata->node_min_pfn;
        midx = max - bdata->node_min_pfn;

        if (bdata->hint_idx > sidx) {
                /*
                 * Handle the valid case of sidx being zero and still
                 * catch the fallback below.
                 */
                fallback = sidx + 1;
                sidx = align_idx(bdata, bdata->hint_idx, step);
        }

        while (1) {
                int merge;
                void *region;
                unsigned long eidx, i, start_off, end_off;
find_block:
                sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
                sidx = align_idx(bdata, sidx, step);
                eidx = sidx + PFN_UP(size);

                if (sidx >= midx || eidx > midx)
                        break;

                for (i = sidx; i < eidx; i++)
                        if (test_bit(i, bdata->node_bootmem_map)) {
                                sidx = align_idx(bdata, i, step);
                                if (sidx == i)
                                        sidx += step;
                                goto find_block;
                        }

                if (bdata->last_end_off & (PAGE_SIZE - 1) &&
                                PFN_DOWN(bdata->last_end_off) + 1 == sidx)
                        start_off = align_off(bdata, bdata->last_end_off, align);
                else
                        start_off = PFN_PHYS(sidx);

                merge = PFN_DOWN(start_off) < sidx;
                end_off = start_off + size;

                bdata->last_end_off = end_off;
                bdata->hint_idx = PFN_UP(end_off);

                /*
                 * Reserve the area now:
                 */
                if (__reserve(bdata, PFN_DOWN(start_off) + merge,
                                PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
                        BUG();

                region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
                                start_off);
                memset(region, 0, size);
                /*
                 * The min_count is set to 0 so that bootmem allocated blocks
                 * are never reported as leaks.
                 */
                kmemleak_alloc(region, size, 0, 0);
                return region;
        }

        if (fallback) {
                sidx = align_idx(bdata, fallback - 1, step);
                fallback = 0;
                goto find_block;
        }

        return NULL;
}

static void * __init alloc_bootmem_core(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        bootmem_data_t *bdata;
        void *region;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

        list_for_each_entry(bdata, &bdata_list, list) {
                if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
                        continue;
                if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
                        break;

                region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
                if (region)
                        return region;
        }

        return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        void *ptr;

restart:
        ptr = alloc_bootmem_core(size, align, goal, limit);
        if (ptr)
                return ptr;
        if (goal) {
                goal = 0;
                goto restart;
        }

        return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                        unsigned long goal)
{
        unsigned long limit = 0;

        return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}
/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        unsigned long limit = 0;

        return ___alloc_bootmem(size, align, goal, limit);
}

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
{
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);
again:

        /* do not panic in alloc_bootmem_bdata() */
        if (limit && goal + size > limit)
                limit = 0;

        ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
        if (ptr)
                return ptr;

        ptr = alloc_bootmem_core(size, align, goal, limit);
        if (ptr)
                return ptr;

        if (goal) {
                goal = 0;
                goto again;
        }

        return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                    unsigned long align, unsigned long goal,
                                    unsigned long limit)
{
        void *ptr;

        ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
        if (ptr)
                return ptr;

        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
        unsigned long end_pfn;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        /* update goal according ...MAX_DMA32_PFN */
        end_pfn = pgdat_end_pfn(pgdat);

        if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
            (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
                void *ptr;
                unsigned long new_goal;

                new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
                ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
                                                 new_goal, 0);
                if (ptr)
                        return ptr;
        }
#endif

        return __alloc_bootmem_node(pgdat, size, align, goal);

}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
                                          unsigned long align,
                                          unsigned long goal)
{
        return ___alloc_bootmem_nopanic(size, align, goal,
                                        ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align,
                                     goal, ARCH_LOW_ADDRESS_LIMIT);
}
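/*
 * Illustrative boot-time usage sketch (not part of the original file; the
 * hook name my_arch_setup_memory, the pfn values and the bitmap placement
 * are hypothetical):
 *
 *      void __init my_arch_setup_memory(void)
 *      {
 *              unsigned long start_pfn = 0, end_pfn = 0x20000, bitmap_pfn;
 *
 *              bitmap_pfn = ...;       (a free area found by arch code,
 *                                       sized via bootmem_bootmap_pages())
 *              init_bootmem(bitmap_pfn, end_pfn);
 *              free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
 *              reserve_bootmem(__pa(_stext), _end - _stext, BOOTMEM_DEFAULT);
 *      }
 *
 * Early boot code may then call e.g. __alloc_bootmem(size, SMP_CACHE_BYTES, 0)
 * for zeroed memory, and the architecture's mem_init() eventually hands
 * everything still free over to the buddy allocator via free_all_bootmem().
 */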