Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
x86: avoid section mismatch involving arch_register_cpu
x86: fixes for lookup_address args
x86: fix sparse warnings in cpu/common.c
x86: make early_console static in early_printk.c
x86: remove unneeded round_up
x86: fix section mismatch warning in kernel/pci-calgary
x86: fix section mismatch warning in acpi/boot.c
x86: fix section mismatch warnings when referencing notifiers
x86: silence section mismatch warning in smpboot_64.c
x86: fix comments in vmlinux_64.lds
x86_64: make bootmap_start page align v6
x86_64: add debug name for early_res
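
The common thread in several of these commits is modpost section mismatch
warnings: a symbol in a discardable init section (__init, __cpuinit) being
referenced from code or data that stays resident. A minimal sketch of the
wrapper idiom applied below in acpi/boot.c, with hypothetical names:

#include <linux/init.h>

/* Init-time worker; discarded after boot when CPU hotplug is disabled. */
static int __cpuinit _demo_map_cpu(int cpu)
{
        /* ... bring-up work ... */
        return 0;
}

/*
 * Resident entry point. __ref marks the reference into init code as
 * deliberate, so modpost does not warn about it.
 */
int __ref demo_map_cpu(int cpu)
{
        return _demo_map_cpu(cpu);
}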

21 files changed, 79 insertions(+), 65 deletions(-)
arch/x86/boot/compressed/vmlinux_64.lds | +1 -1

···
 ENTRY(startup_64)
 SECTIONS
 {
-        /* Be careful parts of head_64.S assume startup_64 is at
+        /* Be careful parts of head_64.S assume startup_32 is at
          * address 0.
          */
         . = 0;
arch/x86/kernel/acpi/boot.c | +7 -1

···
  * ACPI based hotplug support for CPU
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
-int acpi_map_lsapic(acpi_handle handle, int *pcpu)
+
+static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
         struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
         union acpi_object *obj;
···
         return 0;
 }
 
+/* wrapper to silence section mismatch warning */
+int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+{
+        return _acpi_map_lsapic(handle, pcpu);
+}
 EXPORT_SYMBOL(acpi_map_lsapic);
 
 int acpi_unmap_lsapic(int cpu)
arch/x86/kernel/cpu/common.c | +10 -10

···
 void __init cpu_detect(struct cpuinfo_x86 *c)
 {
         /* Get vendor name */
-        cpuid(0x00000000, &c->cpuid_level,
-              (int *)&c->x86_vendor_id[0],
-              (int *)&c->x86_vendor_id[8],
-              (int *)&c->x86_vendor_id[4]);
+        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
+              (unsigned int *)&c->x86_vendor_id[0],
+              (unsigned int *)&c->x86_vendor_id[8],
+              (unsigned int *)&c->x86_vendor_id[4]);
 
         c->x86 = 4;
         if (c->cpuid_level >= 0x00000001) {
···
 static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 {
         u32 tfms, xlvl;
-        int ebx;
+        unsigned int ebx;
 
         memset(&c->x86_capability, 0, sizeof c->x86_capability);
         if (have_cpuid_p()) {
···
 static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 {
         u32 tfms, xlvl;
-        int ebx;
+        unsigned int ebx;
 
         if (have_cpuid_p()) {
                 /* Get vendor name */
-                cpuid(0x00000000, &c->cpuid_level,
-                      (int *)&c->x86_vendor_id[0],
-                      (int *)&c->x86_vendor_id[8],
-                      (int *)&c->x86_vendor_id[4]);
+                cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
+                      (unsigned int *)&c->x86_vendor_id[0],
+                      (unsigned int *)&c->x86_vendor_id[8],
+                      (unsigned int *)&c->x86_vendor_id[4]);
 
                 get_cpu_vendor(c, 0);
                 /* Initialize the standard set of capabilities */
arch/x86/kernel/cpuid.c | +1 -1

···
         return err ? NOTIFY_BAD : NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
+static struct notifier_block __refdata cpuid_class_cpu_notifier =
 {
         .notifier_call = cpuid_class_cpu_callback,
 };
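
This hunk, like the microcode.c and msr.c hunks below, changes only the
annotation on the notifier_block. A minimal standalone sketch of the problem
being silenced (names here are hypothetical): the callback lives in
.cpuinit.text and may be discarded after boot, so a notifier_block in plain
.data that points at it triggers a modpost section mismatch warning; __refdata
moves the structure into .ref.data, which modpost treats as a deliberate,
hand-checked reference.

#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical hotplug callback; placed in .cpuinit.text. */
static int __cpuinit demo_cpu_callback(struct notifier_block *nfb,
                                       unsigned long action, void *hcpu)
{
        return NOTIFY_OK;
}

/* __refdata: this struct may legitimately point into init sections. */
static struct notifier_block __refdata demo_cpu_notifier = {
        .notifier_call = demo_cpu_callback,
};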
arch/x86/kernel/e820_64.c | +15 -8

···
 
 struct early_res {
         unsigned long start, end;
+        char name[16];
 };
 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
-        { 0, PAGE_SIZE },                       /* BIOS data page */
+        { 0, PAGE_SIZE, "BIOS data page" },     /* BIOS data page */
 #ifdef CONFIG_SMP
-        { SMP_TRAMPOLINE_BASE, SMP_TRAMPOLINE_BASE + 2*PAGE_SIZE },
+        { SMP_TRAMPOLINE_BASE, SMP_TRAMPOLINE_BASE + 2*PAGE_SIZE, "SMP_TRAMPOLINE" },
 #endif
         {}
 };
 
-void __init reserve_early(unsigned long start, unsigned long end)
+void __init reserve_early(unsigned long start, unsigned long end, char *name)
 {
         int i;
         struct early_res *r;
         for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
                 r = &early_res[i];
                 if (end > r->start && start < r->end)
-                        panic("Overlapping early reservations %lx-%lx to %lx-%lx\n",
-                              start, end, r->start, r->end);
+                        panic("Overlapping early reservations %lx-%lx %s to %lx-%lx %s\n",
+                              start, end - 1, name?name:"", r->start, r->end - 1, r->name);
         }
         if (i >= MAX_EARLY_RES)
                 panic("Too many early reservations");
         r = &early_res[i];
         r->start = start;
         r->end = end;
+        if (name)
+                strncpy(r->name, name, sizeof(r->name) - 1);
 }
···
         int i;
         for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
                 struct early_res *r = &early_res[i];
+                printk(KERN_INFO "early res: %d [%lx-%lx] %s\n", i,
+                        r->start, r->end - 1, r->name);
                 reserve_bootmem_generic(r->start, r->end - r->start);
         }
 }
···
 }
 
 /*
- * Find a free area in a specific range.
+ * Find a free area with specified alignment in a specific range.
  */
 unsigned long __init find_e820_area(unsigned long start, unsigned long end,
-                                    unsigned size)
+                                    unsigned size, unsigned long align)
 {
         int i;
+        unsigned long mask = ~(align - 1);
 
         for (i = 0; i < e820.nr_map; i++) {
                 struct e820entry *ei = &e820.map[i];
···
                         continue;
                 while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
                         ;
-                last = PAGE_ALIGN(addr) + size;
+                addr = (addr + align - 1) & mask;
+                last = addr + size;
                 if (last > ei->addr + ei->size)
                         continue;
                 if (last > end)
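
The new rounding step in find_e820_area() uses the standard power-of-two
align-up identity: with align a power of two, (addr + align - 1) & ~(align - 1)
is the smallest multiple of align that is >= addr. A small self-contained
demonstration (a userspace sketch, not kernel code):

#include <stdio.h>

/* Round addr up to the next multiple of align (align must be a power of two). */
static unsigned long align_up(unsigned long addr, unsigned long align)
{
        unsigned long mask = ~(align - 1);
        return (addr + align - 1) & mask;
}

int main(void)
{
        printf("%#lx\n", align_up(0x1234, 0x1000)); /* prints 0x2000 */
        printf("%#lx\n", align_up(0x2000, 0x1000)); /* already aligned: 0x2000 */
        return 0;
}

This is what the old code did implicitly via PAGE_ALIGN(), except the
alignment is now caller-specified instead of hard-wired to PAGE_SIZE.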
arch/x86/kernel/early_printk.c | +1 -1

···
 };
 
 /* Direct interface for emergencies */
-struct console *early_console = &early_vga_console;
+static struct console *early_console = &early_vga_console;
 static int early_console_initialized = 0;
 
 void early_printk(const char *fmt, ...)
arch/x86/kernel/efi_64.c | +1 -1

···
                            int executable)
 {
         pte_t *kpte;
-        int level;
+        unsigned int level;
 
         while (start < end) {
                 kpte = lookup_address((unsigned long)__va(start), &level);
arch/x86/kernel/head64.c | +3 -3

···
         if (ebda_size > 64*1024)
                 ebda_size = 64*1024;
 
-        reserve_early(ebda_addr, ebda_addr + ebda_size);
+        reserve_early(ebda_addr, ebda_addr + ebda_size, "EBDA");
 }
 
 void __init x86_64_start_kernel(char * real_mode_data)
···
         pda_init(0);
         copy_bootdata(__va(real_mode_data));
 
-        reserve_early(__pa_symbol(&_text), __pa_symbol(&_end));
+        reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS");
 
         /* Reserve INITRD */
         if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
                 unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
                 unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
                 unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
-                reserve_early(ramdisk_image, ramdisk_end);
+                reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
         }
 
         reserve_ebda();
arch/x86/kernel/microcode.c | +1 -1

···
         return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata mc_cpu_notifier = {
+static struct notifier_block __refdata mc_cpu_notifier = {
         .notifier_call = mc_cpu_callback,
 };
arch/x86/kernel/msr.c | +1 -1

···
         return err ? NOTIFY_BAD : NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata msr_class_cpu_notifier = {
+static struct notifier_block __refdata msr_class_cpu_notifier = {
         .notifier_call = msr_class_cpu_callback,
 };
arch/x86/kernel/pci-calgary_64.c | +2 -2

···
         readq(target); /* flush */
 }
 
-static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
+static void __init calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
 {
         unsigned char busnum = dev->bus->number;
         void __iomem *bbar = tbl->bbar;
···
         writel(cpu_to_be32(val), target);
 }
 
-static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
+static void __init calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
 {
         unsigned char busnum = dev->bus->number;
arch/x86/kernel/setup_64.c | +2 -1

···
         unsigned long bootmap_size, bootmap;
 
         bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
+        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
+                                 PAGE_SIZE);
         if (bootmap == -1L)
                 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
         bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
arch/x86/kernel/smpboot_64.c | +1 -1

···
         cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
-void remove_cpu_from_maps(void)
+static void __ref remove_cpu_from_maps(void)
 {
         int cpu = smp_processor_id();
arch/x86/kernel/topology.c | +1 -2

···
 }
 EXPORT_SYMBOL(arch_unregister_cpu);
 #else
-int arch_register_cpu(int num)
+static int __init arch_register_cpu(int num)
 {
         return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
 }
-EXPORT_SYMBOL(arch_register_cpu);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 static int __init topology_init(void)
arch/x86/mm/fault.c | +1 -1

···
 
 #ifdef CONFIG_X86_PAE
         if (error_code & PF_INSTR) {
-                int level;
+                unsigned int level;
                 pte_t *pte = lookup_address(address, &level);
 
                 if (pte && pte_present(*pte) && !pte_exec(*pte))
arch/x86/mm/init_64.c | +4 -9

···
          * need roughly 0.5KB per GB.
          */
         start = 0x8000;
-        table_start = find_e820_area(start, end, tables);
+        table_start = find_e820_area(start, end, tables, PAGE_SIZE);
         if (table_start == -1UL)
                 panic("Cannot find space for the kernel page tables");
 
-        /*
-         * When you have a lot of RAM like 256GB, early_table will not fit
-         * into 0x8000 range, find_e820_area() will find area after kernel
-         * bss but the table_start is not page aligned, so need to round it
-         * up to avoid overlap with bss:
-         */
-        table_start = round_up(table_start, PAGE_SIZE);
         table_start >>= PAGE_SHIFT;
         table_end = table_start;
···
         mmu_cr4_features = read_cr4();
         __flush_tlb_all();
 
-        reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+        if (!after_bootmem)
+                reserve_early(table_start << PAGE_SHIFT,
+                              table_end << PAGE_SHIFT, "PGTABLE");
 }
 
 #ifndef CONFIG_NUMA
arch/x86/mm/ioremap.c | +2 -1

···
 {
         unsigned long vaddr = (unsigned long)__va(paddr);
         unsigned long nrpages = size >> PAGE_SHIFT;
-        int err, level;
+        unsigned int level;
+        int err;
 
         /* No change for pages after the last mapping */
         if ((paddr + size - 1) >= (max_pfn_mapped << PAGE_SHIFT))
arch/x86/mm/numa_64.c | +19 -15

···
 
 static int __init allocate_cachealigned_memnodemap(void)
 {
-        unsigned long pad, pad_addr;
+        unsigned long addr;
 
         memnodemap = memnode.embedded_map;
         if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
                 return 0;
 
-        pad = L1_CACHE_BYTES - 1;
-        pad_addr = 0x8000;
-        nodemap_size = pad + sizeof(s16) * memnodemapsize;
-        nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
-                                      nodemap_size);
+        addr = 0x8000;
+        nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
+        nodemap_addr = find_e820_area(addr, end_pfn<<PAGE_SHIFT,
+                                      nodemap_size, L1_CACHE_BYTES);
         if (nodemap_addr == -1UL) {
                 printk(KERN_ERR
                        "NUMA: Unable to allocate Memory to Node hash map\n");
                 nodemap_addr = nodemap_size = 0;
                 return -1;
         }
-        pad_addr = (nodemap_addr + pad) & ~pad;
-        memnodemap = phys_to_virt(pad_addr);
-        reserve_early(nodemap_addr, nodemap_addr + nodemap_size);
+        memnodemap = phys_to_virt(nodemap_addr);
+        reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
 
         printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
                nodemap_addr, nodemap_addr + nodemap_size);
···
 }
 
 static void * __init early_node_mem(int nodeid, unsigned long start,
-                                    unsigned long end, unsigned long size)
+                                    unsigned long end, unsigned long size,
+                                    unsigned long align)
 {
-        unsigned long mem = find_e820_area(start, end, size);
+        unsigned long mem = find_e820_area(start, end, size, align);
         void *ptr;
 
         if (mem != -1L)
                 return __va(mem);
-        ptr = __alloc_bootmem_nopanic(size,
-                                      SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
+
+        ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
         if (ptr == NULL) {
                 printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
                        size, nodeid);
···
         start_pfn = start >> PAGE_SHIFT;
         end_pfn = end >> PAGE_SHIFT;
 
-        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
+        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
+                                           SMP_CACHE_BYTES);
         if (node_data[nodeid] == NULL)
                 return;
         nodedata_phys = __pa(node_data[nodeid]);
···
         /* Find a place for the bootmem map */
         bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
         bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
+        /*
+         * SMP_CACHE_BYTES could be enough, but init_bootmem_node likes
+         * to use that to align to PAGE_SIZE
+         */
         bootmap = early_node_mem(nodeid, bootmap_start, end,
-                                 bootmap_pages<<PAGE_SHIFT);
+                                 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
         if (bootmap == NULL) {
                 if (nodedata_phys < start || nodedata_phys >= end)
                         free_bootmem((unsigned long)node_data[nodeid],
arch/x86/mm/pageattr-test.c | +2 -2

···
         s->max_exec = 0;
         for (i = 0; i < max_pfn_mapped; ) {
                 unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
-                int level;
+                unsigned int level;
                 pte_t *pte;
 
                 pte = lookup_address(addr, &level);
···
         unsigned long *bm;
         pte_t *pte, pte0;
         int failed = 0;
-        int level;
+        unsigned int level;
         int i, k;
         int err;
include/asm-x86/cpu.h | +2 -1

···
 struct x86_cpu {
         struct cpu cpu;
 };
-extern int arch_register_cpu(int num);
+
 #ifdef CONFIG_HOTPLUG_CPU
+extern int arch_register_cpu(int num);
 extern void arch_unregister_cpu(int);
 #endif
include/asm-x86/e820_64.h | +2 -2

···
 
 #ifndef __ASSEMBLY__
 extern unsigned long find_e820_area(unsigned long start, unsigned long end,
-                                    unsigned size);
+                                    unsigned size, unsigned long align);
 extern void add_memory_region(unsigned long start, unsigned long size,
                               int type);
 extern void setup_memory_region(void);
···
 extern struct e820map e820;
 extern void update_e820(void);
 
-extern void reserve_early(unsigned long start, unsigned long end);
+extern void reserve_early(unsigned long start, unsigned long end, char *name);
 extern void early_res_to_bootmem(void);
 
 #endif/*!__ASSEMBLY__*/