Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] reorganize x86-64 NUMA and DISCONTIGMEM config options

In order to use the alternative sparsemem implementation for NUMA kernels,
we need to reorganize the config options. This patch effectively abstracts
out the CONFIG_DISCONTIGMEM options to CONFIG_NUMA in most cases. Thus,
the discontigmem implementation may be employed as always, but the
sparsemem implementation may be used alternatively.

Signed-off-by: Matt Tolentino <matthew.e.tolentino@intel.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by

Matt Tolentino and committed by
Linus Torvalds
2b97690f 1035faf1

+25 -24
+1 -1
arch/x86_64/kernel/head64.c
··· 94 94 s = strstr(saved_command_line, "earlyprintk="); 95 95 if (s != NULL) 96 96 setup_early_printk(s); 97 - #ifdef CONFIG_DISCONTIGMEM 97 + #ifdef CONFIG_NUMA 98 98 s = strstr(saved_command_line, "numa="); 99 99 if (s != NULL) 100 100 numa_setup(s+5);
+3 -3
arch/x86_64/kernel/setup.c
··· 348 348 if (!memcmp(from, "mem=", 4)) 349 349 parse_memopt(from+4, &from); 350 350 351 - #ifdef CONFIG_DISCONTIGMEM 351 + #ifdef CONFIG_NUMA 352 352 if (!memcmp(from, "numa=", 5)) 353 353 numa_setup(from+5); 354 354 #endif ··· 377 377 *cmdline_p = command_line; 378 378 } 379 379 380 - #ifndef CONFIG_DISCONTIGMEM 380 + #ifndef CONFIG_NUMA 381 381 static void __init contig_initmem_init(void) 382 382 { 383 383 unsigned long bootmap_size, bootmap; ··· 554 554 acpi_numa_init(); 555 555 #endif 556 556 557 - #ifdef CONFIG_DISCONTIGMEM 557 + #ifdef CONFIG_NUMA 558 558 numa_initmem_init(0, end_pfn); 559 559 #else 560 560 contig_initmem_init();
+1 -1
arch/x86_64/mm/Makefile
··· 4 4 5 5 obj-y := init.o fault.o ioremap.o extable.o pageattr.o 6 6 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 7 - obj-$(CONFIG_DISCONTIGMEM) += numa.o 7 + obj-$(CONFIG_NUMA) += numa.o 8 8 obj-$(CONFIG_K8_NUMA) += k8topology.o 9 9 obj-$(CONFIG_ACPI_NUMA) += srat.o 10 10
+6 -3
arch/x86_64/mm/init.c
··· 318 318 flush_tlb_all(); 319 319 } 320 320 321 - #ifndef CONFIG_DISCONTIGMEM 321 + #ifndef CONFIG_NUMA 322 322 void __init paging_init(void) 323 323 { 324 324 { ··· 427 427 reservedpages = 0; 428 428 429 429 /* this will put all low memory onto the freelists */ 430 - #ifdef CONFIG_DISCONTIGMEM 430 + #ifdef CONFIG_NUMA 431 431 totalram_pages += numa_free_all_bootmem(); 432 432 tmp = 0; 433 433 /* should count reserved pages here for all nodes */ 434 434 #else 435 + 436 + #ifdef CONFIG_FLATMEM 435 437 max_mapnr = end_pfn; 436 438 if (!mem_map) BUG(); 439 + #endif 437 440 438 441 totalram_pages += free_all_bootmem(); 439 442 ··· 518 515 void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 519 516 { 520 517 /* Should check here against the e820 map to avoid double free */ 521 - #ifdef CONFIG_DISCONTIGMEM 518 + #ifdef CONFIG_NUMA 522 519 int nid = phys_to_nid(phys); 523 520 reserve_bootmem_node(NODE_DATA(nid), phys, len); 524 521 #else
+1 -1
arch/x86_64/mm/ioremap.c
··· 178 178 if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS) 179 179 return (__force void __iomem *)phys_to_virt(phys_addr); 180 180 181 - #ifndef CONFIG_DISCONTIGMEM 181 + #ifdef CONFIG_FLATMEM 182 182 /* 183 183 * Don't allow anybody to remap normal RAM that we're using.. 184 184 */
-5
include/asm-x86_64/io.h
··· 124 124 /* 125 125 * Change "struct page" to physical address. 126 126 */ 127 - #ifdef CONFIG_DISCONTIGMEM 128 - #include <asm/mmzone.h> 129 127 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) 130 - #else 131 - #define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT) 132 - #endif 133 128 134 129 #include <asm-generic/iomap.h> 135 130
+9 -6
include/asm-x86_64/mmzone.h
··· 6 6 7 7 #include <linux/config.h> 8 8 9 - #ifdef CONFIG_DISCONTIGMEM 9 + #ifdef CONFIG_NUMA 10 10 11 11 #define VIRTUAL_BUG_ON(x) 12 12 ··· 30 30 return nid; 31 31 } 32 32 33 - #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) 34 - 35 - #define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) 36 33 #define NODE_DATA(nid) (node_data[nid]) 37 34 38 35 #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) 39 36 #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ 40 37 NODE_DATA(nid)->node_spanned_pages) 41 38 42 - #define local_mapnr(kvaddr) \ 43 - ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) ) 39 + #ifdef CONFIG_DISCONTIGMEM 40 + 41 + #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) 42 + #define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) 44 43 45 44 /* AK: this currently doesn't deal with invalid addresses. We'll see 46 45 if the 2.5 kernel doesn't pass them ··· 55 56 #define pfn_valid(pfn) ((pfn) >= num_physpages ? 0 : \ 56 57 ({ u8 nid__ = pfn_to_nid(pfn); \ 57 58 nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) <= node_end_pfn(nid__); })) 59 + #endif 60 + 61 + #define local_mapnr(kvaddr) \ 62 + ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) ) 58 63 #endif 59 64 #endif
+3 -1
include/asm-x86_64/page.h
··· 119 119 __pa(v); }) 120 120 121 121 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) 122 - #ifndef CONFIG_DISCONTIGMEM 122 + #define __boot_va(x) __va(x) 123 + #define __boot_pa(x) __pa(x) 124 + #ifdef CONFIG_FLATMEM 123 125 #define pfn_to_page(pfn) (mem_map + (pfn)) 124 126 #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) 125 127 #define pfn_valid(pfn) ((pfn) < max_mapnr)
+1 -3
include/asm-x86_64/topology.h
··· 3 3 4 4 #include <linux/config.h> 5 5 6 - #ifdef CONFIG_DISCONTIGMEM 6 + #ifdef CONFIG_NUMA 7 7 8 8 #include <asm/mpspec.h> 9 9 #include <asm/bitops.h> ··· 37 37 } 38 38 #define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus->number) 39 39 40 - #ifdef CONFIG_NUMA 41 40 /* sched_domains SD_NODE_INIT for x86_64 machines */ 42 41 #define SD_NODE_INIT (struct sched_domain) { \ 43 42 .span = CPU_MASK_NONE, \ ··· 58 59 .balance_interval = 1, \ 59 60 .nr_balance_failed = 0, \ 60 61 } 61 - #endif 62 62 63 63 #endif 64 64