Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-cpufeature-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpufeature updates from Ingo Molnar:
"The main changes in this cycle were:

- Continued cleanups of CPU bugs mis-marked as 'missing features', by
Borislav Petkov.

- Detect the xsaves/xrstors feature and related cleanup, by Fenghua
Yu"

* 'x86-cpufeature-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, cpu: Kill cpu_has_mp
x86, amd: Cleanup init_amd
x86/cpufeature: Add bug flags to /proc/cpuinfo
x86, cpufeature: Convert more "features" to bugs
x86/xsaves: Detect xsaves/xrstors feature
x86/cpufeature.h: Reformat x86 feature macros

+438 -403
+1 -1
arch/x86/include/asm/apic.h
··· 99 99 { 100 100 volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); 101 101 102 - alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP, 102 + alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP, 103 103 ASM_OUTPUT2("=r" (v), "=m" (*addr)), 104 104 ASM_OUTPUT2("0" (v), "m" (*addr))); 105 105 }
+210 -199
arch/x86/include/asm/cpufeature.h
··· 8 8 #include <asm/required-features.h> 9 9 #endif 10 10 11 - #define NCAPINTS 10 /* N 32-bit words worth of info */ 11 + #define NCAPINTS 11 /* N 32-bit words worth of info */ 12 12 #define NBUGINTS 1 /* N 32-bit bug flags */ 13 13 14 14 /* ··· 18 18 */ 19 19 20 20 /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ 21 - #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ 22 - #define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ 23 - #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ 24 - #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ 25 - #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ 26 - #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */ 27 - #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ 28 - #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */ 29 - #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ 30 - #define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ 31 - #define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ 32 - #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ 33 - #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ 34 - #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ 35 - #define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */ 21 + #define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ 22 + #define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ 23 + #define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ 24 + #define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ 25 + #define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ 26 + #define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ 27 + #define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ 28 + #define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ 29 + #define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ 30 + #define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC 
*/ 31 + #define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ 32 + #define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ 33 + #define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ 34 + #define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ 35 + #define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ 36 36 /* (plus FCMOVcc, FCOMI with FPU) */ 37 - #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ 38 - #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ 39 - #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ 40 - #define X86_FEATURE_CLFLUSH (0*32+19) /* CLFLUSH instruction */ 41 - #define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */ 42 - #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ 43 - #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ 44 - #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ 45 - #define X86_FEATURE_XMM (0*32+25) /* "sse" */ 46 - #define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */ 47 - #define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */ 48 - #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ 49 - #define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */ 50 - #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ 51 - #define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */ 37 + #define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ 38 + #define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ 39 + #define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ 40 + #define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ 41 + #define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ 42 + #define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ 43 + #define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ 44 + #define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ 45 + #define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ 46 + #define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ 47 + #define 
X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ 48 + #define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ 49 + #define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ 50 + #define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ 51 + #define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ 52 52 53 53 /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ 54 54 /* Don't duplicate feature flags which are redundant with Intel! */ 55 - #define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ 56 - #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ 57 - #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ 58 - #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ 59 - #define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */ 60 - #define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */ 61 - #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ 62 - #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ 63 - #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ 64 - #define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ 55 + #define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ 56 + #define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ 57 + #define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ 58 + #define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ 59 + #define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ 60 + #define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ 61 + #define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ 62 + #define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ 63 + #define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ 64 + #define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! 
*/ 65 65 66 66 /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ 67 - #define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ 68 - #define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ 69 - #define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ 67 + #define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ 68 + #define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ 69 + #define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ 70 70 71 71 /* Other features, Linux-defined mapping, word 3 */ 72 72 /* This range is used for feature bits which conflict or are synthesized */ 73 - #define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ 74 - #define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ 75 - #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ 76 - #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ 73 + #define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ 74 + #define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ 75 + #define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ 76 + #define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ 77 77 /* cpu types for specific tunings: */ 78 - #define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */ 79 - #define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */ 80 - #define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */ 81 - #define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */ 82 - #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ 83 - #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ 84 - #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */ 85 - #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ 86 - #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ 87 - #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ 88 - #define X86_FEATURE_SYSCALL32 
(3*32+14) /* "" syscall in ia32 userspace */ 89 - #define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */ 90 - #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */ 91 - #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */ 92 - #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */ 93 - #define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */ 94 - #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ 95 - #define X86_FEATURE_ALWAYS (3*32+21) /* "" Always-present feature */ 96 - #define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */ 97 - #define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */ 98 - #define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */ 99 - #define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */ 100 - #define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */ 101 - #define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ 102 - #define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */ 103 - #define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */ 104 - #define X86_FEATURE_NONSTOP_TSC_S3 (3*32+30) /* TSC doesn't stop in S3 state */ 78 + #define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ 79 + #define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ 80 + #define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ 81 + #define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ 82 + #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ 83 + #define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ 84 + /* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */ 85 + #define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ 86 + #define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ 87 + #define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace 
Store */ 88 + #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ 89 + #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ 90 + #define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ 91 + #define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ 92 + #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ 93 + /* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */ 94 + #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ 95 + #define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ 96 + #define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ 97 + #define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ 98 + #define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ 99 + /* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */ 100 + #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ 101 + #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ 102 + #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ 103 + #define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */ 104 + #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ 105 105 106 106 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 107 - #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ 108 - #define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */ 109 - #define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */ 110 - #define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */ 111 - #define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. 
Debug Store */ 112 - #define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */ 113 - #define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */ 114 - #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ 115 - #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ 116 - #define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */ 117 - #define X86_FEATURE_CID (4*32+10) /* Context ID */ 118 - #define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */ 119 - #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ 120 - #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ 121 - #define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */ 122 - #define X86_FEATURE_PCID (4*32+17) /* Process Context Identifiers */ 123 - #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ 124 - #define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ 125 - #define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ 126 - #define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ 127 - #define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */ 128 - #define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */ 129 - #define X86_FEATURE_TSC_DEADLINE_TIMER (4*32+24) /* Tsc deadline timer */ 130 - #define X86_FEATURE_AES (4*32+25) /* AES instructions */ 131 - #define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ 132 - #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ 133 - #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ 134 - #define X86_FEATURE_F16C (4*32+29) /* 16-bit fp conversions */ 135 - #define X86_FEATURE_RDRAND (4*32+30) /* The RDRAND instruction */ 136 - #define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */ 107 + #define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ 108 + #define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ 109 + #define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ 110 + #define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ 
111 + #define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ 112 + #define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ 113 + #define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ 114 + #define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ 115 + #define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ 116 + #define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ 117 + #define X86_FEATURE_CID ( 4*32+10) /* Context ID */ 118 + #define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ 119 + #define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ 120 + #define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ 121 + #define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ 122 + #define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ 123 + #define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ 124 + #define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ 125 + #define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ 126 + #define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ 127 + #define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ 128 + #define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ 129 + #define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ 130 + #define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ 131 + #define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ 132 + #define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ 133 + #define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ 134 + #define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ 135 + #define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ 136 + #define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ 137 137 138 138 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ 139 - #define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */ 140 - #define 
X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */ 141 - #define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ 142 - #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */ 143 - #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ 144 - #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ 145 - #define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */ 146 - #define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */ 147 - #define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */ 148 - #define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */ 139 + #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ 140 + #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ 141 + #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ 142 + #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ 143 + #define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ 144 + #define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ 145 + #define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ 146 + #define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ 147 + #define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ 148 + #define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ 149 149 150 150 /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ 151 - #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ 152 - #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ 153 - #define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */ 154 - #define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */ 155 - #define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */ 156 - #define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */ 157 - #define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */ 158 - #define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned 
SSE mode */ 159 - #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ 160 - #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ 161 - #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ 162 - #define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */ 163 - #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ 164 - #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ 165 - #define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */ 166 - #define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */ 167 - #define X86_FEATURE_TCE (6*32+17) /* translation cache extension */ 168 - #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ 169 - #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ 170 - #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ 171 - #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ 172 - #define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */ 173 - #define X86_FEATURE_PERFCTR_L2 (6*32+28) /* L2 performance counter extensions */ 151 + #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ 152 + #define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ 153 + #define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ 154 + #define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ 155 + #define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ 156 + #define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ 157 + #define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ 158 + #define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ 159 + #define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ 160 + #define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ 161 + #define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ 162 + #define X86_FEATURE_XOP ( 6*32+11) /* 
extended AVX instructions */ 163 + #define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ 164 + #define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ 165 + #define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ 166 + #define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ 167 + #define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ 168 + #define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ 169 + #define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ 170 + #define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ 171 + #define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ 172 + #define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ 173 + #define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ 174 174 175 175 /* 176 176 * Auxiliary flags: Linux defined - For features scattered in various 177 177 * CPUID levels like 0x6, 0xA etc, word 7 178 178 */ 179 - #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ 180 - #define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */ 181 - #define X86_FEATURE_CPB (7*32+ 2) /* AMD Core Performance Boost */ 182 - #define X86_FEATURE_EPB (7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ 183 - #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ 184 - #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ 185 - #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ 186 - #define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */ 187 - #define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */ 188 - #define X86_FEATURE_PROC_FEEDBACK (7*32+ 9) /* AMD ProcFeedbackInterface */ 179 + #define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */ 180 + #define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */ 181 + #define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ 182 + 
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ 183 + #define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */ 184 + #define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */ 185 + #define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ 186 + #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 187 + #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 189 188 190 189 /* Virtualization flags: Linux defined, word 8 */ 191 - #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ 192 - #define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */ 193 - #define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ 194 - #define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ 195 - #define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ 196 - #define X86_FEATURE_NPT (8*32+ 5) /* AMD Nested Page Table support */ 197 - #define X86_FEATURE_LBRV (8*32+ 6) /* AMD LBR Virtualization support */ 198 - #define X86_FEATURE_SVML (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */ 199 - #define X86_FEATURE_NRIPS (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */ 200 - #define X86_FEATURE_TSCRATEMSR (8*32+ 9) /* "tsc_scale" AMD TSC scaling support */ 201 - #define X86_FEATURE_VMCBCLEAN (8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */ 202 - #define X86_FEATURE_FLUSHBYASID (8*32+11) /* AMD flush-by-ASID support */ 203 - #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */ 204 - #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */ 205 - #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */ 190 + #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 191 + #define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ 192 + #define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ 193 + #define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ 194 + #define 
X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ 195 + #define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */ 196 + #define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */ 197 + #define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */ 198 + #define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */ 199 + #define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */ 200 + #define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */ 201 + #define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */ 202 + #define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */ 203 + #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */ 204 + #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */ 206 205 207 206 208 207 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ 209 - #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ 210 - #define X86_FEATURE_TSC_ADJUST (9*32+ 1) /* TSC adjustment MSR 0x3b */ 211 - #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */ 212 - #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */ 213 - #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */ 214 - #define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */ 215 - #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */ 216 - #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ 217 - #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ 218 - #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ 219 - #define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */ 220 - #define X86_FEATURE_AVX512F (9*32+16) /* AVX-512 Foundation */ 221 - #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ 222 - #define 
X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ 223 - #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ 224 - #define X86_FEATURE_CLFLUSHOPT (9*32+23) /* CLFLUSHOPT instruction */ 225 - #define X86_FEATURE_AVX512PF (9*32+26) /* AVX-512 Prefetch */ 226 - #define X86_FEATURE_AVX512ER (9*32+27) /* AVX-512 Exponential and Reciprocal */ 227 - #define X86_FEATURE_AVX512CD (9*32+28) /* AVX-512 Conflict Detection */ 208 + #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ 209 + #define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ 210 + #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ 211 + #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ 212 + #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ 213 + #define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ 214 + #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ 215 + #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ 216 + #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ 217 + #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ 218 + #define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ 219 + #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ 220 + #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ 221 + #define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ 222 + #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ 223 + #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ 224 + #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ 225 + #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ 226 + #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ 227 + 228 + /* Extended state features, CPUID level 0x0000000d:1 
(eax), word 10 */ 229 + #define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ 230 + #define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ 231 + #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ 232 + #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ 228 233 229 234 /* 230 235 * BUG word(s) ··· 239 234 #define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ 240 235 #define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ 241 236 #define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ 242 - #define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* AMD Erratum 383 */ 243 - #define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* AMD Erratum 400 */ 237 + #define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ 238 + #define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ 239 + #define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ 240 + #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ 241 + #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ 244 242 245 243 #if defined(__KERNEL__) && !defined(__ASSEMBLY__) 246 244 ··· 252 244 253 245 extern const char * const x86_cap_flags[NCAPINTS*32]; 254 246 extern const char * const x86_power_flags[32]; 247 + 248 + /* 249 + * In order to save room, we index into this array by doing 250 + * X86_BUG_<name> - NCAPINTS*32. 
251 + */ 252 + extern const char * const x86_bug_flags[NBUGINTS*32]; 255 253 256 254 #define test_cpu_cap(c, bit) \ 257 255 test_bit(bit, (unsigned long *)((c)->x86_capability)) ··· 315 301 #define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) 316 302 #define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2) 317 303 #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) 318 - #define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) 319 304 #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) 320 305 #define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) 321 306 #define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) ··· 341 328 #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) 342 329 #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) 343 330 #define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT) 331 + #define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES) 344 332 #define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) 345 333 #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) 346 334 #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) ··· 360 346 361 347 #undef cpu_has_pae 362 348 #define cpu_has_pae ___BUG___ 363 - 364 - #undef cpu_has_mp 365 - #define cpu_has_mp 1 366 349 367 350 #undef cpu_has_k6_mtrr 368 351 #define cpu_has_k6_mtrr 0 ··· 550 539 #define static_cpu_has_safe(bit) boot_cpu_has(bit) 551 540 #endif 552 541 553 - #define cpu_has_bug(c, bit) cpu_has(c, (bit)) 554 - #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) 555 - #define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit)); 542 + #define cpu_has_bug(c, bit) cpu_has(c, (bit)) 543 + #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) 544 + #define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit)) 556 545 557 - #define static_cpu_has_bug(bit) static_cpu_has((bit)) 558 - #define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit)) 546 + #define static_cpu_has_bug(bit) static_cpu_has((bit)) 547 + #define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit)) 548 + #define 
boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit)) 559 549 560 - #define MAX_CPU_FEATURES (NCAPINTS * 32) 561 - #define cpu_have_feature boot_cpu_has 550 + #define MAX_CPU_FEATURES (NCAPINTS * 32) 551 + #define cpu_have_feature boot_cpu_has 562 552 563 - #define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X" 564 - #define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \ 565 - boot_cpu_data.x86_model 553 + #define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X" 554 + #define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \ 555 + boot_cpu_data.x86_model 566 556 567 557 #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ 568 - 569 558 #endif /* _ASM_X86_CPUFEATURE_H */
+1 -1
arch/x86/include/asm/fpu-internal.h
··· 293 293 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception 294 294 is pending. Clear the x87 state here by setting it to fixed 295 295 values. "m" is a random variable that should be in L1 */ 296 - if (unlikely(static_cpu_has_safe(X86_FEATURE_FXSAVE_LEAK))) { 296 + if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) { 297 297 asm volatile( 298 298 "fnclex\n\t" 299 299 "emms\n\t"
+1 -1
arch/x86/include/asm/mwait.h
··· 43 43 static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) 44 44 { 45 45 if (!current_set_polling_and_test()) { 46 - if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) { 46 + if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) { 47 47 mb(); 48 48 clflush((void *)&current_thread_info()->flags); 49 49 mb();
+2
arch/x86/include/uapi/asm/msr-index.h
··· 297 297 #define MSR_IA32_TSC_ADJUST 0x0000003b 298 298 #define MSR_IA32_BNDCFGS 0x00000d90 299 299 300 + #define MSR_IA32_XSS 0x00000da0 301 + 300 302 #define FEATURE_CONTROL_LOCKED (1<<0) 301 303 #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1) 302 304 #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
+167 -184
arch/x86/kernel/cpu/amd.c
··· 8 8 #include <asm/processor.h> 9 9 #include <asm/apic.h> 10 10 #include <asm/cpu.h> 11 + #include <asm/smp.h> 11 12 #include <asm/pci-direct.h> 12 13 13 14 #ifdef CONFIG_X86_64 ··· 51 50 return wrmsr_safe_regs(gprs); 52 51 } 53 52 54 - #ifdef CONFIG_X86_32 55 53 /* 56 54 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause 57 55 * misexecution of code under Linux. Owners of such processors should ··· 70 70 71 71 static void init_amd_k5(struct cpuinfo_x86 *c) 72 72 { 73 + #ifdef CONFIG_X86_32 73 74 /* 74 75 * General Systems BIOSen alias the cpu frequency registers 75 76 * of the Elan at 0x000df000. Unfortuantly, one of the Linux ··· 84 83 if (inl(CBAR) & CBAR_ENB) 85 84 outl(0 | CBAR_KEY, CBAR); 86 85 } 86 + #endif 87 87 } 88 - 89 88 90 89 static void init_amd_k6(struct cpuinfo_x86 *c) 91 90 { 91 + #ifdef CONFIG_X86_32 92 92 u32 l, h; 93 93 int mbytes = get_num_physpages() >> (20-PAGE_SHIFT); 94 94 ··· 178 176 /* placeholder for any needed mods */ 179 177 return; 180 178 } 181 - } 182 - 183 - static void amd_k7_smp_check(struct cpuinfo_x86 *c) 184 - { 185 - /* calling is from identify_secondary_cpu() ? */ 186 - if (!c->cpu_index) 187 - return; 188 - 189 - /* 190 - * Certain Athlons might work (for various values of 'work') in SMP 191 - * but they are not certified as MP capable. 192 - */ 193 - /* Athlon 660/661 is valid. */ 194 - if ((c->x86_model == 6) && ((c->x86_mask == 0) || 195 - (c->x86_mask == 1))) 196 - return; 197 - 198 - /* Duron 670 is valid */ 199 - if ((c->x86_model == 7) && (c->x86_mask == 0)) 200 - return; 201 - 202 - /* 203 - * Athlon 662, Duron 671, and Athlon >model 7 have capability 204 - * bit. It's worth noting that the A5 stepping (662) of some 205 - * Athlon XP's have the MP bit set. 206 - * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for 207 - * more. 
208 - */ 209 - if (((c->x86_model == 6) && (c->x86_mask >= 2)) || 210 - ((c->x86_model == 7) && (c->x86_mask >= 1)) || 211 - (c->x86_model > 7)) 212 - if (cpu_has_mp) 213 - return; 214 - 215 - /* If we get here, not a certified SMP capable AMD system. */ 216 - 217 - /* 218 - * Don't taint if we are running SMP kernel on a single non-MP 219 - * approved Athlon 220 - */ 221 - WARN_ONCE(1, "WARNING: This combination of AMD" 222 - " processors is not suitable for SMP.\n"); 223 - add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE); 179 + #endif 224 180 } 225 181 226 182 static void init_amd_k7(struct cpuinfo_x86 *c) 227 183 { 184 + #ifdef CONFIG_X86_32 228 185 u32 l, h; 229 186 230 187 /* ··· 216 255 217 256 set_cpu_cap(c, X86_FEATURE_K7); 218 257 219 - amd_k7_smp_check(c); 220 - } 258 + /* calling is from identify_secondary_cpu() ? */ 259 + if (!c->cpu_index) 260 + return; 261 + 262 + /* 263 + * Certain Athlons might work (for various values of 'work') in SMP 264 + * but they are not certified as MP capable. 265 + */ 266 + /* Athlon 660/661 is valid. */ 267 + if ((c->x86_model == 6) && ((c->x86_mask == 0) || 268 + (c->x86_mask == 1))) 269 + return; 270 + 271 + /* Duron 670 is valid */ 272 + if ((c->x86_model == 7) && (c->x86_mask == 0)) 273 + return; 274 + 275 + /* 276 + * Athlon 662, Duron 671, and Athlon >model 7 have capability 277 + * bit. It's worth noting that the A5 stepping (662) of some 278 + * Athlon XP's have the MP bit set. 279 + * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for 280 + * more. 281 + */ 282 + if (((c->x86_model == 6) && (c->x86_mask >= 2)) || 283 + ((c->x86_model == 7) && (c->x86_mask >= 1)) || 284 + (c->x86_model > 7)) 285 + if (cpu_has(c, X86_FEATURE_MP)) 286 + return; 287 + 288 + /* If we get here, not a certified SMP capable AMD system. 
*/ 289 + 290 + /* 291 + * Don't taint if we are running SMP kernel on a single non-MP 292 + * approved Athlon 293 + */ 294 + WARN_ONCE(1, "WARNING: This combination of AMD" 295 + " processors is not suitable for SMP.\n"); 296 + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE); 221 297 #endif 298 + } 222 299 223 300 #ifdef CONFIG_NUMA 224 301 /* ··· 445 446 446 447 static void bsp_init_amd(struct cpuinfo_x86 *c) 447 448 { 449 + 450 + #ifdef CONFIG_X86_64 451 + if (c->x86 >= 0xf) { 452 + unsigned long long tseg; 453 + 454 + /* 455 + * Split up direct mapping around the TSEG SMM area. 456 + * Don't do it for gbpages because there seems very little 457 + * benefit in doing so. 458 + */ 459 + if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { 460 + unsigned long pfn = tseg >> PAGE_SHIFT; 461 + 462 + printk(KERN_DEBUG "tseg: %010llx\n", tseg); 463 + if (pfn_range_is_mapped(pfn, pfn + 1)) 464 + set_memory_4k((unsigned long)__va(tseg), 1); 465 + } 466 + } 467 + #endif 468 + 448 469 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { 449 470 450 471 if (c->x86 > 0x10 || ··· 534 515 static const int amd_erratum_400[]; 535 516 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); 536 517 518 + static void init_amd_k8(struct cpuinfo_x86 *c) 519 + { 520 + u32 level; 521 + u64 value; 522 + 523 + /* On C+ stepping K8 rep microcode works well for copy/memset */ 524 + level = cpuid_eax(1); 525 + if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) 526 + set_cpu_cap(c, X86_FEATURE_REP_GOOD); 527 + 528 + /* 529 + * Some BIOSes incorrectly force this feature, but only K8 revision D 530 + * (model = 0x14) and later actually support it. 531 + * (AMD Erratum #110, docId: 25759). 
532 + */ 533 + if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { 534 + clear_cpu_cap(c, X86_FEATURE_LAHF_LM); 535 + if (!rdmsrl_amd_safe(0xc001100d, &value)) { 536 + value &= ~BIT_64(32); 537 + wrmsrl_amd_safe(0xc001100d, value); 538 + } 539 + } 540 + 541 + if (!c->x86_model_id[0]) 542 + strcpy(c->x86_model_id, "Hammer"); 543 + } 544 + 545 + static void init_amd_gh(struct cpuinfo_x86 *c) 546 + { 547 + #ifdef CONFIG_X86_64 548 + /* do this for boot cpu */ 549 + if (c == &boot_cpu_data) 550 + check_enable_amd_mmconf_dmi(); 551 + 552 + fam10h_check_enable_mmcfg(); 553 + #endif 554 + 555 + /* 556 + * Disable GART TLB Walk Errors on Fam10h. We do this here because this 557 + * is always needed when GART is enabled, even in a kernel which has no 558 + * MCE support built in. BIOS should disable GartTlbWlk Errors already. 559 + * If it doesn't, we do it here as suggested by the BKDG. 560 + * 561 + * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 562 + */ 563 + msr_set_bit(MSR_AMD64_MCx_MASK(4), 10); 564 + 565 + /* 566 + * On family 10h BIOS may not have properly enabled WC+ support, causing 567 + * it to be converted to CD memtype. This may result in performance 568 + * degradation for certain nested-paging guests. Prevent this conversion 569 + * by clearing bit 24 in MSR_AMD64_BU_CFG2. 570 + * 571 + * NOTE: we want to use the _safe accessors so as not to #GP kvm 572 + * guests on older kvm hosts. 
573 + */ 574 + msr_clear_bit(MSR_AMD64_BU_CFG2, 24); 575 + 576 + if (cpu_has_amd_erratum(c, amd_erratum_383)) 577 + set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); 578 + } 579 + 580 + static void init_amd_bd(struct cpuinfo_x86 *c) 581 + { 582 + u64 value; 583 + 584 + /* re-enable TopologyExtensions if switched off by BIOS */ 585 + if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && 586 + !cpu_has(c, X86_FEATURE_TOPOEXT)) { 587 + 588 + if (msr_set_bit(0xc0011005, 54) > 0) { 589 + rdmsrl(0xc0011005, value); 590 + if (value & BIT_64(54)) { 591 + set_cpu_cap(c, X86_FEATURE_TOPOEXT); 592 + pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); 593 + } 594 + } 595 + } 596 + 597 + /* 598 + * The way access filter has a performance penalty on some workloads. 599 + * Disable it on the affected CPUs. 600 + */ 601 + if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) { 602 + if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) { 603 + value |= 0x1E; 604 + wrmsrl_safe(0xc0011021, value); 605 + } 606 + } 607 + } 608 + 537 609 static void init_amd(struct cpuinfo_x86 *c) 538 610 { 539 611 u32 dummy; 540 - unsigned long long value; 541 612 542 613 #ifdef CONFIG_SMP 543 614 /* ··· 649 540 */ 650 541 clear_cpu_cap(c, 0*32+31); 651 542 652 - #ifdef CONFIG_X86_64 653 - /* On C+ stepping K8 rep microcode works well for copy/memset */ 654 - if (c->x86 == 0xf) { 655 - u32 level; 656 - 657 - level = cpuid_eax(1); 658 - if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) 659 - set_cpu_cap(c, X86_FEATURE_REP_GOOD); 660 - 661 - /* 662 - * Some BIOSes incorrectly force this feature, but only K8 663 - * revision D (model = 0x14) and later actually support it. 664 - * (AMD Erratum #110, docId: 25759). 
665 - */ 666 - if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { 667 - clear_cpu_cap(c, X86_FEATURE_LAHF_LM); 668 - if (!rdmsrl_amd_safe(0xc001100d, &value)) { 669 - value &= ~(1ULL << 32); 670 - wrmsrl_amd_safe(0xc001100d, value); 671 - } 672 - } 673 - 674 - } 675 543 if (c->x86 >= 0x10) 676 544 set_cpu_cap(c, X86_FEATURE_REP_GOOD); 677 545 678 546 /* get apicid instead of initial apic id from cpuid */ 679 547 c->apicid = hard_smp_processor_id(); 680 - #else 681 - 682 - /* 683 - * FIXME: We should handle the K5 here. Set up the write 684 - * range and also turn on MSR 83 bits 4 and 31 (write alloc, 685 - * no bus pipeline) 686 - */ 687 - 688 - switch (c->x86) { 689 - case 4: 690 - init_amd_k5(c); 691 - break; 692 - case 5: 693 - init_amd_k6(c); 694 - break; 695 - case 6: /* An Athlon/Duron */ 696 - init_amd_k7(c); 697 - break; 698 - } 699 548 700 549 /* K6s reports MCEs but don't actually have all the MSRs */ 701 550 if (c->x86 < 6) 702 551 clear_cpu_cap(c, X86_FEATURE_MCE); 703 - #endif 552 + 553 + switch (c->x86) { 554 + case 4: init_amd_k5(c); break; 555 + case 5: init_amd_k6(c); break; 556 + case 6: init_amd_k7(c); break; 557 + case 0xf: init_amd_k8(c); break; 558 + case 0x10: init_amd_gh(c); break; 559 + case 0x15: init_amd_bd(c); break; 560 + } 704 561 705 562 /* Enable workaround for FXSAVE leak */ 706 563 if (c->x86 >= 6) 707 - set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); 708 - 709 - if (!c->x86_model_id[0]) { 710 - switch (c->x86) { 711 - case 0xf: 712 - /* Should distinguish Models here, but this is only 713 - a fallback anyways. 
*/ 714 - strcpy(c->x86_model_id, "Hammer"); 715 - break; 716 - } 717 - } 718 - 719 - /* re-enable TopologyExtensions if switched off by BIOS */ 720 - if ((c->x86 == 0x15) && 721 - (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && 722 - !cpu_has(c, X86_FEATURE_TOPOEXT)) { 723 - 724 - if (msr_set_bit(0xc0011005, 54) > 0) { 725 - rdmsrl(0xc0011005, value); 726 - if (value & BIT_64(54)) { 727 - set_cpu_cap(c, X86_FEATURE_TOPOEXT); 728 - pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); 729 - } 730 - } 731 - } 732 - 733 - /* 734 - * The way access filter has a performance penalty on some workloads. 735 - * Disable it on the affected CPUs. 736 - */ 737 - if ((c->x86 == 0x15) && 738 - (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { 739 - 740 - if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) { 741 - value |= 0x1E; 742 - wrmsrl_safe(0xc0011021, value); 743 - } 744 - } 564 + set_cpu_bug(c, X86_BUG_FXSAVE_LEAK); 745 565 746 566 cpu_detect_cache_sizes(c); 747 567 ··· 694 656 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); 695 657 } 696 658 697 - #ifdef CONFIG_X86_64 698 - if (c->x86 == 0x10) { 699 - /* do this for boot cpu */ 700 - if (c == &boot_cpu_data) 701 - check_enable_amd_mmconf_dmi(); 702 - 703 - fam10h_check_enable_mmcfg(); 704 - } 705 - 706 - if (c == &boot_cpu_data && c->x86 >= 0xf) { 707 - unsigned long long tseg; 708 - 709 - /* 710 - * Split up direct mapping around the TSEG SMM area. 711 - * Don't do it for gbpages because there seems very little 712 - * benefit in doing so. 713 - */ 714 - if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { 715 - unsigned long pfn = tseg >> PAGE_SHIFT; 716 - 717 - printk(KERN_DEBUG "tseg: %010llx\n", tseg); 718 - if (pfn_range_is_mapped(pfn, pfn + 1)) 719 - set_memory_4k((unsigned long)__va(tseg), 1); 720 - } 721 - } 722 - #endif 723 - 724 659 /* 725 660 * Family 0x12 and above processors have APIC timer 726 661 * running in deep C states. 
727 662 */ 728 663 if (c->x86 > 0x11) 729 664 set_cpu_cap(c, X86_FEATURE_ARAT); 730 - 731 - if (c->x86 == 0x10) { 732 - /* 733 - * Disable GART TLB Walk Errors on Fam10h. We do this here 734 - * because this is always needed when GART is enabled, even in a 735 - * kernel which has no MCE support built in. 736 - * BIOS should disable GartTlbWlk Errors already. If 737 - * it doesn't, do it here as suggested by the BKDG. 738 - * 739 - * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 740 - */ 741 - msr_set_bit(MSR_AMD64_MCx_MASK(4), 10); 742 - 743 - /* 744 - * On family 10h BIOS may not have properly enabled WC+ support, 745 - * causing it to be converted to CD memtype. This may result in 746 - * performance degradation for certain nested-paging guests. 747 - * Prevent this conversion by clearing bit 24 in 748 - * MSR_AMD64_BU_CFG2. 749 - * 750 - * NOTE: we want to use the _safe accessors so as not to #GP kvm 751 - * guests on older kvm hosts. 752 - */ 753 - msr_clear_bit(MSR_AMD64_BU_CFG2, 24); 754 - 755 - if (cpu_has_amd_erratum(c, amd_erratum_383)) 756 - set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); 757 - } 758 665 759 666 if (cpu_has_amd_erratum(c, amd_erratum_400)) 760 667 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
+9
arch/x86/kernel/cpu/common.c
··· 634 634 c->x86_capability[9] = ebx; 635 635 } 636 636 637 + /* Extended state features: level 0x0000000d */ 638 + if (c->cpuid_level >= 0x0000000d) { 639 + u32 eax, ebx, ecx, edx; 640 + 641 + cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); 642 + 643 + c->x86_capability[10] = eax; 644 + } 645 + 637 646 /* AMD-defined flags: level 0x80000001 */ 638 647 xlvl = cpuid_eax(0x80000000); 639 648 c->extended_cpuid_level = xlvl;
+2 -2
arch/x86/kernel/cpu/intel.c
··· 253 253 */ 254 254 if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && 255 255 (c->x86_mask < 0x6 || c->x86_mask == 0xb)) 256 - set_cpu_cap(c, X86_FEATURE_11AP); 256 + set_cpu_bug(c, X86_BUG_11AP); 257 257 258 258 259 259 #ifdef CONFIG_X86_INTEL_USERCOPY ··· 402 402 403 403 if (c->x86 == 6 && cpu_has_clflush && 404 404 (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) 405 - set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); 405 + set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR); 406 406 407 407 #ifdef CONFIG_X86_64 408 408 if (c->x86 == 15)
+37 -14
arch/x86/kernel/cpu/mkcapflags.sh
··· 1 1 #!/bin/sh 2 2 # 3 - # Generate the x86_cap_flags[] array from include/asm/cpufeature.h 3 + # Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeature.h 4 4 # 5 5 6 6 IN=$1 7 7 OUT=$2 8 8 9 - TABS="$(printf '\t\t\t\t\t')" 10 - trap 'rm "$OUT"' EXIT 9 + function dump_array() 10 + { 11 + ARRAY=$1 12 + SIZE=$2 13 + PFX=$3 14 + POSTFIX=$4 11 15 12 - ( 13 - echo "#ifndef _ASM_X86_CPUFEATURE_H" 14 - echo "#include <asm/cpufeature.h>" 15 - echo "#endif" 16 - echo "" 17 - echo "const char * const x86_cap_flags[NCAPINTS*32] = {" 16 + PFX_SZ=$(echo $PFX | wc -c) 17 + TABS="$(printf '\t\t\t\t\t')" 18 18 19 - # Iterate through any input lines starting with #define X86_FEATURE_ 20 - sed -n -e 's/\t/ /g' -e 's/^ *# *define *X86_FEATURE_//p' $IN | 19 + echo "const char * const $ARRAY[$SIZE] = {" 20 + 21 + # Iterate through any input lines starting with #define $PFX 22 + sed -n -e 's/\t/ /g' -e "s/^ *# *define *$PFX//p" $IN | 21 23 while read i 22 24 do 23 25 # Name is everything up to the first whitespace ··· 33 31 # Name is uppercase, VALUE is all lowercase 34 32 VALUE="$(echo "$VALUE" | tr A-Z a-z)" 35 33 36 - TABCOUNT=$(( ( 5*8 - 14 - $(echo "$NAME" | wc -c) ) / 8 )) 37 - printf "\t[%s]%.*s = %s,\n" \ 38 - "X86_FEATURE_$NAME" "$TABCOUNT" "$TABS" "$VALUE" 34 + if [ -n "$POSTFIX" ]; then 35 + T=$(( $PFX_SZ + $(echo $POSTFIX | wc -c) + 2 )) 36 + TABS="$(printf '\t\t\t\t\t\t')" 37 + TABCOUNT=$(( ( 6*8 - ($T + 1) - $(echo "$NAME" | wc -c) ) / 8 )) 38 + printf "\t[%s - %s]%.*s = %s,\n" "$PFX$NAME" "$POSTFIX" "$TABCOUNT" "$TABS" "$VALUE" 39 + else 40 + TABCOUNT=$(( ( 5*8 - ($PFX_SZ + 1) - $(echo "$NAME" | wc -c) ) / 8 )) 41 + printf "\t[%s]%.*s = %s,\n" "$PFX$NAME" "$TABCOUNT" "$TABS" "$VALUE" 42 + fi 39 43 done 40 44 echo "};" 45 + } 46 + 47 + trap 'rm "$OUT"' EXIT 48 + 49 + ( 50 + echo "#ifndef _ASM_X86_CPUFEATURE_H" 51 + echo "#include <asm/cpufeature.h>" 52 + echo "#endif" 53 + echo "" 54 + 55 + dump_array "x86_cap_flags" "NCAPINTS*32" "X86_FEATURE_" "" 56 + 
echo "" 57 + 58 + dump_array "x86_bug_flags" "NBUGINTS*32" "X86_BUG_" "NCAPINTS*32" 59 + 41 60 ) > $OUT 42 61 43 62 trap - EXIT
+8
arch/x86/kernel/cpu/proc.c
··· 97 97 if (cpu_has(c, i) && x86_cap_flags[i] != NULL) 98 98 seq_printf(m, " %s", x86_cap_flags[i]); 99 99 100 + seq_printf(m, "\nbugs\t\t:"); 101 + for (i = 0; i < 32*NBUGINTS; i++) { 102 + unsigned int bug_bit = 32*NCAPINTS + i; 103 + 104 + if (cpu_has_bug(c, bug_bit) && x86_bug_flags[i]) 105 + seq_printf(m, " %s", x86_bug_flags[i]); 106 + } 107 + 100 108 seq_printf(m, "\nbogomips\t: %lu.%02lu\n", 101 109 c->loops_per_jiffy/(500000/HZ), 102 110 (c->loops_per_jiffy/(5000/HZ)) % 100);
-1
arch/x86/kernel/cpu/scattered.c
··· 38 38 { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, 39 39 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, 40 40 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, 41 - { X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 }, 42 41 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, 43 42 { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, 44 43 { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 },