Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-mm' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into x86/mm

+81 -103
-2
arch/x86/include/asm/page_types.h
··· 54 54 extern unsigned long init_memory_mapping(unsigned long start, 55 55 unsigned long end); 56 56 57 - void init_memory_mapping_high(void); 58 - 59 57 extern void initmem_init(void); 60 58 extern void free_initmem(void); 61 59
+8
arch/x86/kernel/setup.c
··· 963 963 max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT); 964 964 max_pfn_mapped = max_low_pfn_mapped; 965 965 966 + #ifdef CONFIG_X86_64 967 + if (max_pfn > max_low_pfn) { 968 + max_pfn_mapped = init_memory_mapping(1UL<<32, 969 + max_pfn<<PAGE_SHIFT); 970 + /* can we preserve max_low_pfn ? */ 971 + max_low_pfn = max_pfn; 972 + } 973 + #endif 966 974 memblock.current_limit = get_max_mapped(); 967 975 968 976 /*
-54
arch/x86/mm/init_64.c
··· 606 606 void __init initmem_init(void) 607 607 { 608 608 memblock_x86_register_active_regions(0, 0, max_pfn); 609 - init_memory_mapping_high(); 610 609 } 611 610 #endif 612 - 613 - struct mapping_work_data { 614 - unsigned long start; 615 - unsigned long end; 616 - unsigned long pfn_mapped; 617 - }; 618 - 619 - static int __init_refok 620 - mapping_work_fn(unsigned long start_pfn, unsigned long end_pfn, void *datax) 621 - { 622 - struct mapping_work_data *data = datax; 623 - unsigned long pfn_mapped; 624 - unsigned long final_start, final_end; 625 - 626 - final_start = max_t(unsigned long, start_pfn<<PAGE_SHIFT, data->start); 627 - final_end = min_t(unsigned long, end_pfn<<PAGE_SHIFT, data->end); 628 - 629 - if (final_end <= final_start) 630 - return 0; 631 - 632 - pfn_mapped = init_memory_mapping(final_start, final_end); 633 - 634 - if (pfn_mapped > data->pfn_mapped) 635 - data->pfn_mapped = pfn_mapped; 636 - 637 - return 0; 638 - } 639 - 640 - static unsigned long __init_refok 641 - init_memory_mapping_active_regions(unsigned long start, unsigned long end) 642 - { 643 - struct mapping_work_data data; 644 - 645 - data.start = start; 646 - data.end = end; 647 - data.pfn_mapped = 0; 648 - 649 - work_with_active_regions(MAX_NUMNODES, mapping_work_fn, &data); 650 - 651 - return data.pfn_mapped; 652 - } 653 - 654 - void __init_refok init_memory_mapping_high(void) 655 - { 656 - if (max_pfn > max_low_pfn) { 657 - max_pfn_mapped = init_memory_mapping_active_regions(1UL<<32, 658 - max_pfn<<PAGE_SHIFT); 659 - /* can we preserve max_low_pfn ? */ 660 - max_low_pfn = max_pfn; 661 - 662 - memblock.current_limit = get_max_mapped(); 663 - } 664 - } 665 611 666 612 void __init paging_init(void) 667 613 {
+57 -43
arch/x86/mm/numa_64.c
··· 543 543 if (!numa_meminfo_cover_memory(mi)) 544 544 return -EINVAL; 545 545 546 - init_memory_mapping_high(); 547 - 548 546 /* Finally register nodes. */ 549 547 for_each_node_mask(nid, node_possible_map) { 550 548 u64 start = (u64)max_pfn << PAGE_SHIFT; ··· 562 564 return 0; 563 565 } 564 566 567 + /** 568 + * dummy_numa_init - Fallback dummy NUMA init 569 + * 570 + * Used if there's no underlying NUMA architecture, NUMA initialization 571 + * fails, or NUMA is disabled on the command line. 572 + * 573 + * Must online at least one node and add memory blocks that cover all 574 + * allowed memory. This function must not fail. 575 + */ 565 576 static int __init dummy_numa_init(void) 566 577 { 567 578 printk(KERN_INFO "%s\n", ··· 584 577 return 0; 585 578 } 586 579 580 + static int __init numa_init(int (*init_func)(void)) 581 + { 582 + int i; 583 + int ret; 584 + 585 + for (i = 0; i < MAX_LOCAL_APIC; i++) 586 + set_apicid_to_node(i, NUMA_NO_NODE); 587 + 588 + nodes_clear(numa_nodes_parsed); 589 + nodes_clear(node_possible_map); 590 + nodes_clear(node_online_map); 591 + memset(&numa_meminfo, 0, sizeof(numa_meminfo)); 592 + remove_all_active_ranges(); 593 + numa_reset_distance(); 594 + 595 + ret = init_func(); 596 + if (ret < 0) 597 + return ret; 598 + ret = numa_cleanup_meminfo(&numa_meminfo); 599 + if (ret < 0) 600 + return ret; 601 + 602 + numa_emulation(&numa_meminfo, numa_distance_cnt); 603 + 604 + ret = numa_register_memblks(&numa_meminfo); 605 + if (ret < 0) 606 + return ret; 607 + 608 + for (i = 0; i < nr_cpu_ids; i++) { 609 + int nid = early_cpu_to_node(i); 610 + 611 + if (nid == NUMA_NO_NODE) 612 + continue; 613 + if (!node_online(nid)) 614 + numa_clear_node(i); 615 + } 616 + numa_init_array(); 617 + return 0; 618 + } 619 + 587 620 void __init initmem_init(void) 588 621 { 589 - int (*numa_init[])(void) = { [2] = dummy_numa_init }; 590 - int i, j; 622 + int ret; 591 623 592 624 if (!numa_off) { 593 625 #ifdef CONFIG_ACPI_NUMA 594 - numa_init[0] = 
x86_acpi_numa_init; 626 + ret = numa_init(x86_acpi_numa_init); 627 + if (!ret) 628 + return; 595 629 #endif 596 630 #ifdef CONFIG_AMD_NUMA 597 - numa_init[1] = amd_numa_init; 631 + ret = numa_init(amd_numa_init); 632 + if (!ret) 633 + return; 598 634 #endif 599 635 } 600 636 601 - for (i = 0; i < ARRAY_SIZE(numa_init); i++) { 602 - if (!numa_init[i]) 603 - continue; 604 - 605 - for (j = 0; j < MAX_LOCAL_APIC; j++) 606 - set_apicid_to_node(j, NUMA_NO_NODE); 607 - 608 - nodes_clear(numa_nodes_parsed); 609 - nodes_clear(node_possible_map); 610 - nodes_clear(node_online_map); 611 - memset(&numa_meminfo, 0, sizeof(numa_meminfo)); 612 - remove_all_active_ranges(); 613 - numa_reset_distance(); 614 - 615 - if (numa_init[i]() < 0) 616 - continue; 617 - 618 - if (numa_cleanup_meminfo(&numa_meminfo) < 0) 619 - continue; 620 - 621 - numa_emulation(&numa_meminfo, numa_distance_cnt); 622 - 623 - if (numa_register_memblks(&numa_meminfo) < 0) 624 - continue; 625 - 626 - for (j = 0; j < nr_cpu_ids; j++) { 627 - int nid = early_cpu_to_node(j); 628 - 629 - if (nid == NUMA_NO_NODE) 630 - continue; 631 - if (!node_online(nid)) 632 - numa_clear_node(j); 633 - } 634 - numa_init_array(); 635 - return; 636 - } 637 - BUG(); 637 + numa_init(dummy_numa_init); 638 638 } 639 639 640 640 unsigned long __init numa_free_all_bootmem(void)
+16 -4
arch/x86/mm/numa_emulation.c
··· 301 301 const u64 max_addr = max_pfn << PAGE_SHIFT; 302 302 u8 *phys_dist = NULL; 303 303 size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]); 304 + int dfl_phys_nid; 304 305 int i, j, ret; 305 306 306 307 if (!emu_cmdline) ··· 358 357 node_distance(i, j); 359 358 } 360 359 360 + /* determine the default phys nid to use for unmapped nodes */ 361 + dfl_phys_nid = NUMA_NO_NODE; 362 + for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) { 363 + if (emu_nid_to_phys[i] != NUMA_NO_NODE) { 364 + dfl_phys_nid = emu_nid_to_phys[i]; 365 + break; 366 + } 367 + } 368 + if (dfl_phys_nid == NUMA_NO_NODE) { 369 + pr_warning("NUMA: Warning: can't determine default physical node, disabling emulation\n"); 370 + goto no_emu; 371 + } 372 + 361 373 /* commit */ 362 374 *numa_meminfo = ei; 363 375 ··· 391 377 /* make sure all emulated nodes are mapped to a physical node */ 392 378 for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) 393 379 if (emu_nid_to_phys[i] == NUMA_NO_NODE) 394 - emu_nid_to_phys[i] = 0; 380 + emu_nid_to_phys[i] = dfl_phys_nid; 395 381 396 382 /* 397 383 * Transform distance table. numa_set_distance() ignores all ··· 431 417 { 432 418 int physnid, nid; 433 419 434 - nid = numa_cpu_node(cpu); 435 - if (nid == NUMA_NO_NODE) 436 - nid = early_cpu_to_node(cpu); 420 + nid = early_cpu_to_node(cpu); 437 421 BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); 438 422 439 423 physnid = emu_nid_to_phys[nid];