Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"The last regression fixes for 4.8 final:

- Two patches addressing the fallout of the CR4 optimizations which
caused CR4-less machines to fail.

- Fix the VDSO build on big endian machines

- Take care of FPU initialization if no CPUID is available otherwise
task struct size ends up being zero

- Fix up context tracking in case load_gs_index fails"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/entry/64: Fix context tracking state warning when load_gs_index fails
x86/boot: Initialize FPU and X86_FEATURE_ALWAYS even if we don't have CPUID
x86/vdso: Fix building on big endian host
x86/boot: Fix another __read_cr4() case on 486
x86/init: Fix cr4_init_shadow() on CR4-less machines

Changed files
+16 -19
arch
x86
entry
include
kernel
+2 -2
arch/x86/entry/entry_64.S
@@ -1002,7 +1002,6 @@
 	testb	$3, CS+8(%rsp)
 	jz	.Lerror_kernelspace
 
-.Lerror_entry_from_usermode_swapgs:
 	/*
 	 * We entered from user mode or we're pretending to have entered
 	 * from user mode due to an IRET fault.
@@ -1044,7 +1045,8 @@
 	 * gsbase and proceed. We'll fix up the exception and land in
 	 * .Lgs_change's error handler with kernel gsbase.
 	 */
-	jmp	.Lerror_entry_from_usermode_swapgs
+	SWAPGS
+	jmp	.Lerror_entry_done
 
 .Lbstep_iret:
 	/* Fix truncated RIP */
+1 -1
arch/x86/entry/vdso/vdso2c.h
@@ -22,7 +22,7 @@
 
 	ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));
 
-	if (hdr->e_type != ET_DYN)
+	if (GET_LE(&hdr->e_type) != ET_DYN)
 		fail("input is not a shared object\n");
 
 	/* Walk the segment table. */
+1 -1
arch/x86/include/asm/tlbflush.h
@@ -81,7 +81,7 @@
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
-	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
+	this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
 }
 
 /* Set in this cpu's CR4. */
+11 -12
arch/x86/kernel/cpu/common.c
@@ -804,21 +804,20 @@
 	identify_cpu_without_cpuid(c);
 
 	/* cyrix could have cpuid enabled via c_identify()*/
-	if (!have_cpuid_p())
-		return;
+	if (have_cpuid_p()) {
+		cpu_detect(c);
+		get_cpu_vendor(c);
+		get_cpu_cap(c);
 
-	cpu_detect(c);
-	get_cpu_vendor(c);
-	get_cpu_cap(c);
+		if (this_cpu->c_early_init)
+			this_cpu->c_early_init(c);
 
-	if (this_cpu->c_early_init)
-		this_cpu->c_early_init(c);
+		c->cpu_index = 0;
+		filter_cpuid_features(c, false);
 
-	c->cpu_index = 0;
-	filter_cpuid_features(c, false);
-
-	if (this_cpu->c_bsp_init)
-		this_cpu->c_bsp_init(c);
+		if (this_cpu->c_bsp_init)
+			this_cpu->c_bsp_init(c);
+	}
 
 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 	fpu__init_system(c);
+1 -3
arch/x86/kernel/setup.c
@@ -1137,9 +1137,7 @@
 	 * auditing all the early-boot CR4 manipulation would be needed to
 	 * rule it out.
 	 */
-	if (boot_cpu_data.cpuid_level >= 0)
-		/* A CPU has %cr4 if and only if it has CPUID. */
-		mmu_cr4_features = __read_cr4();
+	mmu_cr4_features = __read_cr4_safe();
 
 	memblock_set_current_limit(get_max_mapped());
 