Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, geode: add a VSA2 ID for General Software
x86: use BOOTMEM_EXCLUSIVE on 32-bit
x86, 32-bit: fix boot failure on TSC-less processors
x86: fix NULL pointer deref in __switch_to
x86: set PAE PHYSICAL_MASK_SHIFT to 44 bits.

+26 -16
+4 -1
arch/x86/kernel/geode_32.c
···
 	static int has_vsa2 = -1;
 
 	if (has_vsa2 == -1) {
+		u16 val;
+
 		/*
 		 * The VSA has virtual registers that we can query for a
 		 * signature.
···
 		outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
 		outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
 
-		has_vsa2 = (inw(VSA_VRC_DATA) == VSA_SIG);
+		val = inw(VSA_VRC_DATA);
+		has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
 	}
 
 	return has_vsa2;
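
The probe sequence above, end to end: unlock the VSA virtual register set, select the signature register, then accept either vendor ID. Below is a minimal userspace sketch of that flow, not the kernel code; fake_inw()/fake_outw() stand in for real port I/O (which would need ioperm() and Geode hardware), and the VSA_VRC_* port numbers here are illustrative placeholders, not the real values from geode.h.

#include <stdio.h>
#include <stdint.h>

#define VSA_VRC_INDEX    0x1000   /* illustrative; real port numbers live in geode.h */
#define VSA_VRC_DATA     0x1002   /* illustrative */
#define VSA_VR_UNLOCK    0xFC53
#define VSA_VR_SIGNATURE 0x0003
#define AMD_VSA_SIG      0x4132   /* ascii 'VSA2' */
#define GSW_VSA_SIG      0x534d   /* General Software signature */

static uint16_t fake_vrc_data = GSW_VSA_SIG;   /* pretend a General Software BIOS answered */

static void fake_outw(uint16_t val, uint16_t port) { (void)val; (void)port; }
static uint16_t fake_inw(uint16_t port) { (void)port; return fake_vrc_data; }

static int geode_has_vsa2(void)
{
        static int has_vsa2 = -1;

        if (has_vsa2 == -1) {
                uint16_t val;

                /* unlock the virtual register set, then select the signature register */
                fake_outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
                fake_outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);

                /* accept either the AMD or the General Software signature */
                val = fake_inw(VSA_VRC_DATA);
                has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
        }

        return has_vsa2;
}

int main(void)
{
        printf("VSA2 present: %d\n", geode_has_vsa2());
        return 0;
}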
+1
arch/x86/kernel/process_32.c
···
 	/*
 	 * Forget coprocessor state..
 	 */
+	tsk->fpu_counter = 0;
 	clear_fpu(tsk);
 	clear_used_math();
 }
+1
arch/x86/kernel/process_64.c
···
 	/*
 	 * Forget coprocessor state..
 	 */
+	tsk->fpu_counter = 0;
 	clear_fpu(tsk);
 	clear_used_math();
 }
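
Why the single added line matters in both files: __switch_to() uses fpu_counter as a "this task touched the FPU in recent timeslices" hint to preload FPU state eagerly, so flush_thread() must also forget that hint when it discards the state. The following is a toy model of the interaction, not kernel code; the struct, the NULL pointer standing in for the discarded FPU state, and the threshold of 5 are all illustrative.

#include <stdio.h>
#include <stddef.h>

struct task {
        int    fpu_counter;   /* "used the FPU recently" heuristic */
        float *fpu_state;     /* NULL once the state is discarded */
};

static void flush_thread(struct task *tsk)
{
        tsk->fpu_counter = 0;   /* the fix: drop the preload hint too */
        tsk->fpu_state = NULL;  /* stands in for clear_fpu()/clear_used_math() */
}

static void switch_to(struct task *next)
{
        /* eager preload path, guarded only by the heuristic counter */
        if (next->fpu_counter > 5)
                printf("preloading %f\n", *next->fpu_state);  /* would oops on NULL */
        else
                printf("lazy FPU restore\n");
}

int main(void)
{
        float regs = 1.0f;
        struct task t = { .fpu_counter = 20, .fpu_state = &regs };

        flush_thread(&t);   /* without zeroing fpu_counter, switch_to() would deref NULL */
        switch_to(&t);
        return 0;
}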
+8 -2
arch/x86/kernel/setup_32.c
···
 			(unsigned long)(crash_size >> 20),
 			(unsigned long)(crash_base >> 20),
 			(unsigned long)(total_mem >> 20));
+
+		if (reserve_bootmem(crash_base, crash_size,
+					BOOTMEM_EXCLUSIVE) < 0) {
+			printk(KERN_INFO "crashkernel reservation "
+				"failed - memory is in use\n");
+			return;
+		}
+
 		crashk_res.start = crash_base;
 		crashk_res.end   = crash_base + crash_size - 1;
-		reserve_bootmem(crash_base, crash_size,
-				BOOTMEM_DEFAULT);
 	} else
 		printk(KERN_INFO "crashkernel reservation failed - "
				"you have to specify a base address\n");
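
The behavioural difference being relied on above: a BOOTMEM_DEFAULT reservation succeeds even when the range is already reserved, while BOOTMEM_EXCLUSIVE reports the overlap, which is what lets the crashkernel code bail out instead of silently double-reserving memory. A toy model of that distinction follows; it is not the bootmem allocator, and the single hard-coded reserved range and toy_reserve() helper are invented for illustration.

#include <stdio.h>

#define BOOTMEM_DEFAULT   0
#define BOOTMEM_EXCLUSIVE 1

static unsigned long reserved_start = 0x01000000, reserved_end = 0x02000000;

static int toy_reserve(unsigned long base, unsigned long size, int flags)
{
        int overlaps = base < reserved_end && base + size > reserved_start;

        if (overlaps && flags == BOOTMEM_EXCLUSIVE)
                return -1;              /* refuse to double-reserve */
        return 0;                       /* BOOTMEM_DEFAULT: silently "succeeds" */
}

int main(void)
{
        unsigned long crash_base = 0x01800000, crash_size = 0x00800000;

        if (toy_reserve(crash_base, crash_size, BOOTMEM_EXCLUSIVE) < 0) {
                printf("crashkernel reservation failed - memory is in use\n");
                return 0;
        }
        printf("crashkernel reserved\n");
        return 0;
}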
+8 -10
arch/x86/kernel/tsc_32.c
···
 
 #include "mach_timer.h"
 
-static int tsc_disabled;
+/* native_sched_clock() is called before tsc_init(), so
+   we must start with the TSC soft disabled to prevent
+   erroneous rdtsc usage on !cpu_has_tsc processors */
+static int tsc_disabled = -1;
 
 /*
  * On some systems the TSC frequency does not
···
 {
 	int cpu;
 
-	if (!cpu_has_tsc || tsc_disabled) {
-		/* Disable the TSC in case of !cpu_has_tsc */
-		tsc_disabled = 1;
+	if (!cpu_has_tsc || tsc_disabled > 0)
 		return;
-	}
 
 	cpu_khz = calculate_cpu_khz();
 	tsc_khz = cpu_khz;
 
 	if (!cpu_khz) {
 		mark_tsc_unstable("could not calculate TSC khz");
-		/*
-		 * We need to disable the TSC completely in this case
-		 * to prevent sched_clock() from using it.
-		 */
-		tsc_disabled = 1;
 		return;
 	}
+
+	/* now allow native_sched_clock() to use rdtsc */
+	tsc_disabled = 0;
 
 	printk("Detected %lu.%03lu MHz processor.\n",
 		(unsigned long)cpu_khz / 1000,
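
The fix above works because of the three-way meaning the flag now carries: -1 means "too early, rdtsc not yet trusted", 0 means "calibrated, use it", and a positive value means "explicitly disabled". Here is a small standalone sketch of that tri-state pattern with assumed semantics, not the kernel source; read_tsc(), jiffies_clock() and cpu_has_tsc_flag are stand-ins.

#include <stdio.h>

static int tsc_disabled = -1;   /* soft-disabled until tsc_init() proves the TSC works */

static unsigned long long read_tsc(void)      { return 123456ULL; }  /* stand-in for rdtsc */
static unsigned long long jiffies_clock(void) { return 42ULL; }      /* stand-in fallback  */
static int cpu_has_tsc_flag = 1;

static unsigned long long sched_clock(void)
{
        if (tsc_disabled)              /* covers both -1 (too early) and 1 (disabled) */
                return jiffies_clock();
        return read_tsc();
}

static void tsc_init(void)
{
        if (!cpu_has_tsc_flag || tsc_disabled > 0)
                return;                /* leave the TSC soft/hard disabled */
        /* ... calibration would happen here ... */
        tsc_disabled = 0;              /* only now allow sched_clock() to use rdtsc */
}

int main(void)
{
        printf("before init: %llu\n", sched_clock());  /* safe: falls back */
        tsc_init();
        printf("after init:  %llu\n", sched_clock());  /* now uses the (fake) TSC */
        return 0;
}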
+2 -2
include/asm-x86/geode.h
···
 #define VSA_VR_UNLOCK		0xFC53	/* unlock virtual register */
 #define VSA_VR_SIGNATURE	0x0003
 #define VSA_VR_MEM_SIZE		0x0200
-#define VSA_SIG			0x4132	/* signature is ascii 'VSA2' */
-
+#define AMD_VSA_SIG		0x4132	/* signature is ascii 'VSA2' */
+#define GSW_VSA_SIG		0x534d	/* General Software signature */
 /* GPIO */
 
 #define GPIO_OUTPUT_VAL		0x00
+2 -1
include/asm-x86/page_32.h
···
 #define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
 
 #ifdef CONFIG_X86_PAE
-#define __PHYSICAL_MASK_SHIFT	36
+/* 44=32+12, the limit we can fit into an unsigned long pfn */
+#define __PHYSICAL_MASK_SHIFT	44
 #define __VIRTUAL_MASK_SHIFT	32
 #define PAGETABLE_LEVELS	3
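
The arithmetic behind the new value, spelled out: on 32-bit x86 a page frame number fits in a 32-bit unsigned long, and with 4 KiB pages (PAGE_SHIFT = 12) such a pfn can describe physical addresses up to 2^(32+12) = 2^44 bytes, i.e. 16 TiB. A small standalone check of that reasoning in plain C, not the kernel headers:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT             12                       /* 4 KiB pages */
#define PFN_BITS               32                       /* unsigned long on x86-32 */
#define __PHYSICAL_MASK_SHIFT  (PFN_BITS + PAGE_SHIFT)  /* = 44 */

int main(void)
{
        uint64_t mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;

        printf("__PHYSICAL_MASK_SHIFT = %d\n", __PHYSICAL_MASK_SHIFT);
        printf("physical mask         = 0x%llx\n", (unsigned long long)mask);
        printf("max addressable       = %llu TiB\n",
               (unsigned long long)((1ULL << __PHYSICAL_MASK_SHIFT) >> 40));  /* 16 TiB */
        return 0;
}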