Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86: Work around compilation warning in arch/x86/kernel/apm_32.c
x86, UV: Complete IRQ interrupt migration in arch_enable_uv_irq()
x86, 32-bit: Fix double accounting in reserve_top_address()
x86: Don't use current_cpu_data in x2apic phys_pkg_id
x86, UV: Fix UV apic mode
x86, UV: Fix macros for accessing large node numbers
x86, UV: Delete mapping of MMR ranges mapped by BIOS
x86, UV: Handle missing blade-local memory correctly
x86: fix assembly constraints in native_save_fl()
x86, msr: execute on the correct CPU subset
x86: Fix assert syntax in vmlinux.lds.S
x86: Make 64-bit efi_ioremap use ioremap on MMIO regions
x86: Add quirk to make Apple MacBook5,2 use reboot=pci
x86: Fix CPA memtype reserving in the set_pages_array*() cases
x86, pat: Fix set_memory_wc related corruption
x86: fix section mismatch for i386 init code

16 files changed, 120 insertions(+), 79 deletions(-)

arch/x86/include/asm/efi.h (+3 -2)
···
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
        efi_call_virt(f, a1, a2, a3, a4, a5, a6)

-#define efi_ioremap(addr, size) ioremap_cache(addr, size)
+#define efi_ioremap(addr, size, type) ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */
···
        efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))

-extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
+                                 u32 type);

#endif /* CONFIG_X86_32 */

arch/x86/include/asm/irqflags.h (+7 -1)
···
{
        unsigned long flags;

+       /*
+        * Note: this needs to be "=r" not "=rm", because we have the
+        * stack offset from what gcc expects at the time the "pop" is
+        * executed, and so a memory reference with respect to the stack
+        * would end up using the wrong address.
+        */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
-                    : "=g" (flags)
+                    : "=r" (flags)
                     : /* no input */
                     : "memory");

arch/x86/include/asm/uv/uv_hub.h (+8 -1)
···
#define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

#define UV_GLOBAL_MMR64_PNODE_BITS(p) \
-       ((unsigned long)(UV_PNODE_TO_GNODE(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+       (((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)

#define UV_APIC_PNODE_SHIFT 6
···
        unsigned short nr_possible_cpus;
        unsigned short nr_online_cpus;
        unsigned short pnode;
+       short memory_nid;
};
extern struct uv_blade_info *uv_blade_info;
extern short *uv_node_to_blade;
···
static inline int uv_blade_to_pnode(int bid)
{
        return uv_blade_info[bid].pnode;
+}
+
+/* Nid of memory node on blade. -1 if no blade-local memory */
+static inline int uv_blade_to_memory_nid(int bid)
+{
+       return uv_blade_info[bid].memory_nid;
}

/* Determine the number of possible cpus on a blade */

arch/x86/kernel/apic/io_apic.c (+3)
···
        mmr_pnode = uv_blade_to_pnode(mmr_blade);
        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

+       if (cfg->move_in_progress)
+               send_cleanup_vector(cfg);
+
        return irq;
}

arch/x86/kernel/apic/x2apic_cluster.c (+1 -1)
···
static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
-       return current_cpu_data.initial_apicid >> index_msb;
+       return initial_apicid >> index_msb;
}

static void x2apic_send_IPI_self(int vector)

arch/x86/kernel/apic/x2apic_phys.c (+1 -1)
···
static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
{
-       return current_cpu_data.initial_apicid >> index_msb;
+       return initial_apicid >> index_msb;
}

static void x2apic_send_IPI_self(int vector)

arch/x86/kernel/apic/x2apic_uv_x.c (+7 -31)
···
        .apic_id_registered = uv_apic_id_registered,

        .irq_delivery_mode = dest_Fixed,
-       .irq_dest_mode = 1, /* logical */
+       .irq_dest_mode = 0, /* physical */

        .target_cpus = uv_target_cpus,
        .disable_esr = 0,
···
        BUG();
}

-static __init void map_low_mmrs(void)
-{
-       init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
-       init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
-}
-
enum map_type {map_wb, map_uc};

static __init void map_high(char *id, unsigned long base, int shift,
···
        gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
        if (gru.s.enable)
                map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
-}
-
-static __init void map_config_high(int max_pnode)
-{
-       union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
-       int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;
-
-       cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
-       if (cfg.s.enable)
-               map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
-}
-
-static __init void map_mmr_high(int max_pnode)
-{
-       union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
-       int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
-
-       mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
-       if (mmr.s.enable)
-               map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
···
        unsigned long mmr_base, present, paddr;
        unsigned short pnode_mask;

-       map_low_mmrs();
-
        m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
        m_val = m_n_config.s.m_skt;
        n_val = m_n_config.s.n_skt;
···
        bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
        uv_blade_info = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_blade_info);
+       for (blade = 0; blade < uv_num_possible_blades(); blade++)
+               uv_blade_info[blade].memory_nid = -1;

        get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
···
                lcpu = uv_blade_info[blade].nr_possible_cpus;
                uv_blade_info[blade].nr_possible_cpus++;

+               /* Any node on the blade, else will contain -1. */
+               uv_blade_info[blade].memory_nid = nid;
+
                uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
                uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
                uv_cpu_hub_info(cpu)->m_val = m_val;
···
                pnode = (paddr >> m_val) & pnode_mask;
                blade = boot_pnode_to_blade(pnode);
                uv_node_to_blade[nid] = blade;
+               max_pnode = max(pnode, max_pnode);
        }

        map_gru_high(max_pnode);
-       map_mmr_high(max_pnode);
-       map_config_high(max_pnode);
        map_mmioh_high(max_pnode);

        uv_cpu_init();

arch/x86/kernel/apm_32.c (+1 -1)
···
        u8 ret = 0;
        int idled = 0;
        int polling;
-       int err;
+       int err = 0;

        polling = !!(current_thread_info()->status & TS_POLLING);
        if (polling) {

arch/x86/kernel/efi.c (+1 -1)
···
                    && end_pfn <= max_pfn_mapped))
                        va = __va(md->phys_addr);
                else
-                       va = efi_ioremap(md->phys_addr, size);
+                       va = efi_ioremap(md->phys_addr, size, md->type);

                md->virt_addr = (u64) (unsigned long) va;

arch/x86/kernel/efi_64.c (+5 -1)
···
        early_runtime_code_mapping_set_exec(0);
}

-void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
+void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
+                                u32 type)
{
        unsigned long last_map_pfn;
+
+       if (type == EFI_MEMORY_MAPPED_IO)
+               return ioremap(phys_addr, size);

        last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
        if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)

arch/x86/kernel/head_32.S (+5 -1)
···
#endif
        iret

-.section .cpuinit.data,"wa"
+#ifndef CONFIG_HOTPLUG_CPU
+__CPUINITDATA
+#else
+__REFDATA
+#endif
        .align 4
ENTRY(initial_code)
        .long i386_start_kernel

arch/x86/kernel/reboot.c (+33 -1)
···
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/efi.h>
+#include <linux/dmi.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
···
#include <asm/cpu.h>

#ifdef CONFIG_X86_32
-# include <linux/dmi.h>
# include <linux/ctype.h>
# include <linux/mc146818rtc.h>
#else
···
#endif

#endif /* CONFIG_X86_32 */
+
+/*
+ * Apple MacBook5,2 (2009 MacBook) needs reboot=p
+ */
+static int __init set_pci_reboot(const struct dmi_system_id *d)
+{
+       if (reboot_type != BOOT_CF9) {
+               reboot_type = BOOT_CF9;
+               printk(KERN_INFO "%s series board detected. "
+                      "Selecting PCI-method for reboots.\n", d->ident);
+       }
+       return 0;
+}
+
+static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
+       {       /* Handle problems with rebooting on Apple MacBook5,2 */
+               .callback = set_pci_reboot,
+               .ident = "Apple MacBook",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
+               },
+       },
+       { }
+};
+
+static int __init pci_reboot_init(void)
+{
+       dmi_check_system(pci_reboot_dmi_table);
+       return 0;
+}
+core_initcall(pci_reboot_init);

static inline void kb_wait(void)
{

arch/x86/kernel/vmlinux.lds.S (+8 -8)
···
#ifdef CONFIG_X86_32
-ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
-       "kernel image bigger than KERNEL_IMAGE_SIZE")
+. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
+          "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
···
/*
 * Build-time check on the image size:
 */
-ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
-       "kernel image bigger than KERNEL_IMAGE_SIZE")
+. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
+          "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
-ASSERT((per_cpu__irq_stack_union == 0),
-       "irq_stack_union is not at start of per-cpu area");
+. = ASSERT((per_cpu__irq_stack_union == 0),
+          "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */
···
#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

-ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
-       "kexec control code size is too big")
+. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
+          "kexec control code size is too big");
#endif

arch/x86/lib/msr.c (+10 -16)
···
        rv.msrs = msrs;
        rv.msr_no = msr_no;

-       preempt_disable();
-       /*
-        * FIXME: handle the CPU we're executing on separately for now until
-        * smp_call_function_many has been fixed to not skip it.
-        */
-       this_cpu = raw_smp_processor_id();
-       smp_call_function_single(this_cpu, __rdmsr_on_cpu, &rv, 1);
+       this_cpu = get_cpu();
+
+       if (cpumask_test_cpu(this_cpu, mask))
+               __rdmsr_on_cpu(&rv);

        smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
-       preempt_enable();
+       put_cpu();
}
EXPORT_SYMBOL(rdmsr_on_cpus);
···
        rv.msrs = msrs;
        rv.msr_no = msr_no;

-       preempt_disable();
-       /*
-        * FIXME: handle the CPU we're executing on separately for now until
-        * smp_call_function_many has been fixed to not skip it.
-        */
-       this_cpu = raw_smp_processor_id();
-       smp_call_function_single(this_cpu, __wrmsr_on_cpu, &rv, 1);
+       this_cpu = get_cpu();
+
+       if (cpumask_test_cpu(this_cpu, mask))
+               __wrmsr_on_cpu(&rv);

        smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
-       preempt_enable();
+       put_cpu();
}
EXPORT_SYMBOL(wrmsr_on_cpus);

arch/x86/mm/pageattr.c (+27 -12)
···
        unsigned int level;
        pte_t *kpte, old_pte;

-       if (cpa->flags & CPA_PAGES_ARRAY)
-               address = (unsigned long)page_address(cpa->pages[cpa->curpage]);
-       else if (cpa->flags & CPA_ARRAY)
+       if (cpa->flags & CPA_PAGES_ARRAY) {
+               struct page *page = cpa->pages[cpa->curpage];
+               if (unlikely(PageHighMem(page)))
+                       return 0;
+               address = (unsigned long)page_address(page);
+       } else if (cpa->flags & CPA_ARRAY)
                address = cpa->vaddr[cpa->curpage];
        else
                address = *cpa->vaddr;
···
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
-       if (cpa->flags & CPA_PAGES_ARRAY)
-               vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]);
-       else if (cpa->flags & CPA_ARRAY)
+       if (cpa->flags & CPA_PAGES_ARRAY) {
+               struct page *page = cpa->pages[cpa->curpage];
+               if (unlikely(PageHighMem(page)))
+                       return 0;
+               vaddr = (unsigned long)page_address(page);
+       } else if (cpa->flags & CPA_ARRAY)
                vaddr = cpa->vaddr[cpa->curpage];
        else
                vaddr = *cpa->vaddr;
···
int _set_memory_wc(unsigned long addr, int numpages)
{
        int ret;
+       unsigned long addr_copy = addr;
+
        ret = change_page_attr_set(&addr, numpages,
                                   __pgprot(_PAGE_CACHE_UC_MINUS), 0);
-
        if (!ret) {
-               ret = change_page_attr_set(&addr, numpages,
-                                          __pgprot(_PAGE_CACHE_WC), 0);
+               ret = change_page_attr_set_clr(&addr_copy, numpages,
+                                              __pgprot(_PAGE_CACHE_WC),
+                                              __pgprot(_PAGE_CACHE_MASK),
+                                              0, 0, NULL);
        }
        return ret;
}
···
        int free_idx;

        for (i = 0; i < addrinarray; i++) {
-               start = (unsigned long)page_address(pages[i]);
+               if (PageHighMem(pages[i]))
+                       continue;
+               start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
                        goto err_out;
···
err_out:
        free_idx = i;
        for (i = 0; i < free_idx; i++) {
-               start = (unsigned long)page_address(pages[i]);
+               if (PageHighMem(pages[i]))
+                       continue;
+               start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }
···
                return retval;

        for (i = 0; i < addrinarray; i++) {
-               start = (unsigned long)page_address(pages[i]);
+               if (PageHighMem(pages[i]))
+                       continue;
+               start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }

arch/x86/mm/pgtable.c (-1)
···
        printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
               (int)-reserve);
        __FIXADDR_TOP = -reserve - PAGE_SIZE;
-       __VMALLOC_RESERVE += reserve;
#endif

}