Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86: fix build warnings in real mode code
x86, calgary: fix section mismatch warning - get_tce_space_from_tar
x86: silence section mismatch warning - get_local_pda
x86, percpu: silence section mismatch warnings related to EARLY_PER_CPU variables
x86: fix i486 suspend to disk CR4 oops
x86: mpparse.c: fix section mismatch warning
x86: mmconf: fix section mismatch warning
x86: fix MP_processor_info section mismatch warning
x86, tsc: fix section mismatch warning
x86: correct register constraints for 64-bit atomic operations

+45 -31
+2
arch/x86/boot/boot.h
··· 30 30 /* Useful macros */ 31 31 #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) 32 32 33 + #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x))) 34 + 33 35 extern struct setup_header hdr; 34 36 extern struct boot_params boot_params; 35 37
-1
arch/x86/boot/memory.c
··· 13 13 */ 14 14 15 15 #include "boot.h" 16 - #include <linux/kernel.h> 17 16 18 17 #define SMAP 0x534d4150 /* ASCII "SMAP" */ 19 18
+1 -1
arch/x86/kernel/acpi/sleep.c
··· 86 86 #endif /* !CONFIG_64BIT */ 87 87 88 88 header->pmode_cr0 = read_cr0(); 89 - header->pmode_cr4 = read_cr4(); 89 + header->pmode_cr4 = read_cr4_safe(); 90 90 header->realmode_flags = acpi_realmode_flags; 91 91 header->real_magic = 0x12345678; 92 92
+2 -2
arch/x86/kernel/efi_32.c
··· 53 53 * directory. If I have PAE, I just need to duplicate one entry in 54 54 * page directory. 55 55 */ 56 - cr4 = read_cr4(); 56 + cr4 = read_cr4_safe(); 57 57 58 58 if (cr4 & X86_CR4_PAE) { 59 59 efi_bak_pg_dir_pointer[0].pgd = ··· 91 91 gdt_descr.size = GDT_SIZE - 1; 92 92 load_gdt(&gdt_descr); 93 93 94 - cr4 = read_cr4(); 94 + cr4 = read_cr4_safe(); 95 95 96 96 if (cr4 & X86_CR4_PAE) { 97 97 swapper_pg_dir[pgd_index(0)].pgd =
+1 -1
arch/x86/kernel/mmconf-fam10h_64.c
··· 238 238 {} 239 239 }; 240 240 241 - void __init check_enable_amd_mmconf_dmi(void) 241 + void __cpuinit check_enable_amd_mmconf_dmi(void) 242 242 { 243 243 dmi_check_system(mmconf_dmi_table); 244 244 }
+3 -3
arch/x86/kernel/mpparse.c
··· 49 49 return sum & 0xFF; 50 50 } 51 51 52 - static void __cpuinit MP_processor_info(struct mpc_config_processor *m) 52 + static void __init MP_processor_info(struct mpc_config_processor *m) 53 53 { 54 54 int apicid; 55 55 char *bootup_cpu = ""; ··· 484 484 } 485 485 486 486 487 - static void construct_ioapic_table(int mpc_default_type) 487 + static void __init construct_ioapic_table(int mpc_default_type) 488 488 { 489 489 struct mpc_config_ioapic ioapic; 490 490 struct mpc_config_bus bus; ··· 529 529 construct_default_ioirq_mptable(mpc_default_type); 530 530 } 531 531 #else 532 - static inline void construct_ioapic_table(int mpc_default_type) { } 532 + static inline void __init construct_ioapic_table(int mpc_default_type) { } 533 533 #endif 534 534 535 535 static inline void __init construct_default_ISA_mptable(int mpc_default_type)
+1 -1
arch/x86/kernel/pci-calgary_64.c
··· 1350 1350 * Function for kdump case. Get the tce tables from first kernel 1351 1351 * by reading the contents of the base address register of calgary iommu 1352 1352 */ 1353 - static void get_tce_space_from_tar(void) 1353 + static void __init get_tce_space_from_tar(void) 1354 1354 { 1355 1355 int bus; 1356 1356 void __iomem *target;
+9 -2
arch/x86/kernel/smpboot.c
··· 756 756 } 757 757 758 758 #ifdef CONFIG_X86_64 759 + 760 + /* __ref because it's safe to call free_bootmem when after_bootmem == 0. */ 761 + static void __ref free_bootmem_pda(struct x8664_pda *oldpda) 762 + { 763 + if (!after_bootmem) 764 + free_bootmem((unsigned long)oldpda, sizeof(*oldpda)); 765 + } 766 + 759 767 /* 760 768 * Allocate node local memory for the AP pda. 761 769 * ··· 792 784 793 785 if (oldpda) { 794 786 memcpy(newpda, oldpda, size); 795 - if (!after_bootmem) 796 - free_bootmem((unsigned long)oldpda, size); 787 + free_bootmem_pda(oldpda); 797 788 } 798 789 799 790 newpda->in_bootmem = 0;
+1 -1
arch/x86/kernel/tsc.c
··· 104 104 /* 105 105 * Read TSC and the reference counters. Take care of SMI disturbance 106 106 */ 107 - static u64 __init tsc_read_refs(u64 *pm, u64 *hpet) 107 + static u64 tsc_read_refs(u64 *pm, u64 *hpet) 108 108 { 109 109 u64 t1, t2; 110 110 int i;
+4 -2
arch/x86/power/cpu_32.c
··· 45 45 ctxt->cr0 = read_cr0(); 46 46 ctxt->cr2 = read_cr2(); 47 47 ctxt->cr3 = read_cr3(); 48 - ctxt->cr4 = read_cr4(); 48 + ctxt->cr4 = read_cr4_safe(); 49 49 } 50 50 51 51 /* Needed by apm.c */ ··· 98 98 /* 99 99 * control registers 100 100 */ 101 - write_cr4(ctxt->cr4); 101 + /* cr4 was introduced in the Pentium CPU */ 102 + if (ctxt->cr4) 103 + write_cr4(ctxt->cr4); 102 104 write_cr3(ctxt->cr3); 103 105 write_cr2(ctxt->cr2); 104 106 write_cr0(ctxt->cr0);
+15 -11
arch/x86/power/hibernate_asm_32.S
··· 28 28 ret 29 29 30 30 ENTRY(restore_image) 31 - movl resume_pg_dir, %ecx 32 - subl $__PAGE_OFFSET, %ecx 33 - movl %ecx, %cr3 31 + movl resume_pg_dir, %eax 32 + subl $__PAGE_OFFSET, %eax 33 + movl %eax, %cr3 34 34 35 35 movl restore_pblist, %edx 36 36 .p2align 4,,7 ··· 52 52 53 53 done: 54 54 /* go back to the original page tables */ 55 - movl $swapper_pg_dir, %ecx 56 - subl $__PAGE_OFFSET, %ecx 57 - movl %ecx, %cr3 55 + movl $swapper_pg_dir, %eax 56 + subl $__PAGE_OFFSET, %eax 57 + movl %eax, %cr3 58 58 /* Flush TLB, including "global" things (vmalloc) */ 59 - movl mmu_cr4_features, %eax 60 - movl %eax, %edx 59 + movl mmu_cr4_features, %ecx 60 + jecxz 1f # cr4 Pentium and higher, skip if zero 61 + movl %ecx, %edx 61 62 andl $~(1<<7), %edx; # PGE 62 63 movl %edx, %cr4; # turn off PGE 63 - movl %cr3, %ecx; # flush TLB 64 - movl %ecx, %cr3 65 - movl %eax, %cr4; # turn PGE back on 64 + 1: 65 + movl %cr3, %eax; # flush TLB 66 + movl %eax, %cr3 67 + jecxz 1f # cr4 Pentium and higher, skip if zero 68 + movl %ecx, %cr4; # turn PGE back on 69 + 1: 66 70 67 71 movl saved_context_esp, %esp 68 72 movl saved_context_ebp, %ebp
+4 -4
include/asm-x86/atomic_64.h
··· 228 228 { 229 229 asm volatile(LOCK_PREFIX "addq %1,%0" 230 230 : "=m" (v->counter) 231 - : "ir" (i), "m" (v->counter)); 231 + : "er" (i), "m" (v->counter)); 232 232 } 233 233 234 234 /** ··· 242 242 { 243 243 asm volatile(LOCK_PREFIX "subq %1,%0" 244 244 : "=m" (v->counter) 245 - : "ir" (i), "m" (v->counter)); 245 + : "er" (i), "m" (v->counter)); 246 246 } 247 247 248 248 /** ··· 260 260 261 261 asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" 262 262 : "=m" (v->counter), "=qm" (c) 263 - : "ir" (i), "m" (v->counter) : "memory"); 263 + : "er" (i), "m" (v->counter) : "memory"); 264 264 return c; 265 265 } 266 266 ··· 341 341 342 342 asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" 343 343 : "=m" (v->counter), "=qm" (c) 344 - : "ir" (i), "m" (v->counter) : "memory"); 344 + : "er" (i), "m" (v->counter) : "memory"); 345 345 return c; 346 346 } 347 347
+1 -1
include/asm-x86/mmconfig.h
··· 3 3 4 4 #ifdef CONFIG_PCI_MMCONFIG 5 5 extern void __cpuinit fam10h_check_enable_mmcfg(void); 6 - extern void __init check_enable_amd_mmconf_dmi(void); 6 + extern void __cpuinit check_enable_amd_mmconf_dmi(void); 7 7 #else 8 8 static inline void fam10h_check_enable_mmcfg(void) { } 9 9 static inline void check_enable_amd_mmconf_dmi(void) { }
+1 -1
include/asm-x86/percpu.h
··· 182 182 DEFINE_PER_CPU(_type, _name) = _initvalue; \ 183 183 __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ 184 184 { [0 ... NR_CPUS-1] = _initvalue }; \ 185 - __typeof__(_type) *_name##_early_ptr = _name##_early_map 185 + __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map 186 186 187 187 #define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ 188 188 EXPORT_PER_CPU_SYMBOL(_name)