/*
 * bootmem - A boot-time physical memory allocator and configurator
 *
 * Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
	.bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = DIV_ROUND_UP(pages, 8);

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_min_pfn < ent->node_min_pfn) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}

	list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}
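/*
 * Illustrative sketch (editor's example, not part of the original
 * file): a typical arch's setup_arch() sizes the bitmap with
 * bootmem_bootmap_pages(), picks a free pfn range for it, and then
 * registers the node.  The pfn values and the helper
 * find_bootmap_pfn() are hypothetical:
 *
 *	map_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
 *	map_pfn = find_bootmap_pfn(map_pages);
 *	init_bootmem_node(NODE_DATA(0), map_pfn, start_pfn, end_pfn);
 *	free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn));
 */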
/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(physaddr), size);

	cursor = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	struct page *page;
	unsigned long *map, start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	map = bdata->node_bootmem_map;
	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	bdebug("nid=%td start=%lx end=%lx\n",
		bdata - bootmem_node_data, start, end);

	while (start < end) {
		unsigned long idx, vec;
		unsigned shift;

		idx = start - bdata->node_min_pfn;
		shift = idx & (BITS_PER_LONG - 1);
		/*
		 * vec holds at most BITS_PER_LONG map bits,
		 * bit 0 corresponds to start.
		 */
		vec = ~map[idx / BITS_PER_LONG];

		if (shift) {
			vec >>= shift;
			if (end - start >= BITS_PER_LONG)
				vec |= ~map[idx / BITS_PER_LONG + 1] <<
					(BITS_PER_LONG - shift);
		}
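		/*
		 * Worked example (editor's illustration, not in the
		 * original): with BITS_PER_LONG == 32 and shift == 5,
		 * the shifted first word supplies bits 0..26 of vec and
		 * the low 5 bits of the following inverted word fill
		 * bits 27..31, so bit 0 of vec keeps corresponding to
		 * the pfn 'start'.
		 */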
		/*
		 * If we have a properly aligned and fully unreserved
		 * BITS_PER_LONG block of pages in front of us, free
		 * it in one go.
		 */
		if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
			start += BITS_PER_LONG;
		} else {
			unsigned long cur = start;

			start = ALIGN(start + 1, BITS_PER_LONG);
			while (vec && cur != start) {
				if (vec & 1) {
					page = pfn_to_page(cur);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				++cur;
			}
		}
	}

	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}

static int reset_managed_pages_done __initdata;

static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	if (reset_managed_pages_done)
		return;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);
	reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	unsigned long total_pages = 0;
	bootmem_data_t *bdata;

	reset_all_zones_managed_pages();

	list_for_each_entry(bdata, &bdata_list, list)
		total_pages += free_all_bootmem_core(bdata);

	totalram_pages += total_pages;

	return total_pages;
}

static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}
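/*
 * Illustrative use of the marking API (editor's sketch, hypothetical
 * addresses): a caller protecting an initrd image that may span node
 * boundaries would do
 *
 *	reserve_bootmem(initrd_start, initrd_size, BOOTMEM_DEFAULT);
 *	...
 *	free_bootmem(initrd_start, initrd_size);
 *
 * mark_bootmem() below walks bdata_list to split such a range at the
 * node boundaries it crosses.
 */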
static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem(start, end, 0, 0);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
}
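/*
 * Worked example for align_idx() below (editor's illustration,
 * hypothetical numbers): with node_min_pfn == 33 and step == 16,
 * idx == 0 names pfn 33; ALIGN(33 + 0, 16) - 33 == 15, so index 15
 * (pfn 48) is the first index whose absolute pfn is step-aligned.
 */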
static unsigned long __init align_idx(struct bootmem_data *bdata,
				      unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
				      unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}

static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);
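		/*
		 * Editor's note (not in the original): start_off now
		 * either begins on pfn sidx or, when the unused tail of
		 * the previous allocation's last page is being reused,
		 * inside the page just before it; 'merge' records the
		 * latter case so that the already-reserved page is
		 * skipped by __reserve() below.
		 */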
		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}

static void * __init alloc_bootmem_core(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	bootmem_data_t *bdata;
	void *region;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

restart:
	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;
	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
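/*
 * Illustrative sketch (editor's example, hypothetical values): early
 * hash tables are typically obtained through the panicking variant
 * below, e.g.
 *
 *	table = __alloc_bootmem(nr_entries * sizeof(*table),
 *				SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
 *
 * where the goal first steers the search above the DMA zone and is
 * dropped automatically if no memory is found there.
 */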
/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem(size, align, goal, limit);
}

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);
again:

	/* do not panic in alloc_bootmem_bdata() */
	if (limit && goal + size > limit)
		limit = 0;

	ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;

	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				    unsigned long align, unsigned long goal,
				    unsigned long limit)
{
	void *ptr;

	/* pass the limit through so low-memory callers are honored */
	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
	if (ptr)
		return ptr;

	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
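/*
 * Illustrative sketch (editor's example, hypothetical size): per-node
 * structures are commonly placed with
 *
 *	map = __alloc_bootmem_node(NODE_DATA(nid), array_size,
 *				   SMP_CACHE_BYTES, 0);
 *
 * which tries node 'nid' first and only then falls back to the other
 * nodes, as documented below.
 */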
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update goal according ...MAX_DMA32_PFN */
	end_pfn = pgdat_end_pfn(pgdat);

	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
						 new_goal, 0);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);

}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
					  unsigned long align,
					  unsigned long goal)
{
	return ___alloc_bootmem_nopanic(size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align,
				     goal, ARCH_LOW_ADDRESS_LIMIT);
}
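/*
 * Illustrative sketch (editor's example, hypothetical size): the low
 * variants above suit buffers that must stay below
 * ARCH_LOW_ADDRESS_LIMIT, e.g. for devices with 32-bit DMA masks:
 *
 *	buf = __alloc_bootmem_low(bounce_size, PAGE_SIZE, 0);
 */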