Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86: cpu_init(): fix memory leak when using CPU hotplug
x86: pda_init(): fix memory leak when using CPU hotplug
x86, xen: Use native_pte_flags instead of native_pte_val for .pte_flags
x86: move mtrr cpu cap setting early in early_init_xxxx
x86: delay early cpu initialization until cpuid is done
x86: use X86_FEATURE_NOPL in alternatives
x86: add NOPL as a synthetic CPU feature bit
x86: boot: stub out unimplemented CPU feature words

+167 -61
+4 -4
arch/x86/boot/cpucheck.c
···
 {
 	REQUIRED_MASK0,
 	REQUIRED_MASK1,
-	REQUIRED_MASK2,
-	REQUIRED_MASK3,
+	0, /* REQUIRED_MASK2 not implemented in this file */
+	0, /* REQUIRED_MASK3 not implemented in this file */
 	REQUIRED_MASK4,
-	REQUIRED_MASK5,
+	0, /* REQUIRED_MASK5 not implemented in this file */
 	REQUIRED_MASK6,
-	REQUIRED_MASK7,
+	0, /* REQUIRED_MASK7 not implemented in this file */
 };
 
 #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
+13 -23
arch/x86/kernel/alternative.c
···
 extern char __vsyscall_0;
 const unsigned char *const *find_nop_table(void)
 {
-	return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-	       boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_has(X86_FEATURE_NOPL))
+		return p6_nops;
+	else
+		return k8_nops;
 }
 
 #else /* CONFIG_X86_64 */
 
-static const struct nop {
-	int cpuid;
-	const unsigned char *const *noptable;
-} noptypes[] = {
-	{ X86_FEATURE_K8, k8_nops },
-	{ X86_FEATURE_K7, k7_nops },
-	{ X86_FEATURE_P4, p6_nops },
-	{ X86_FEATURE_P3, p6_nops },
-	{ -1, NULL }
-};
-
 const unsigned char *const *find_nop_table(void)
 {
-	const unsigned char *const *noptable = intel_nops;
-	int i;
-
-	for (i = 0; noptypes[i].cpuid >= 0; i++) {
-		if (boot_cpu_has(noptypes[i].cpuid)) {
-			noptable = noptypes[i].noptable;
-			break;
-		}
-	}
-	return noptable;
+	if (boot_cpu_has(X86_FEATURE_K8))
+		return k8_nops;
+	else if (boot_cpu_has(X86_FEATURE_K7))
+		return k7_nops;
+	else if (boot_cpu_has(X86_FEATURE_NOPL))
+		return p6_nops;
+	else
+		return intel_nops;
 }
 
 #endif /* CONFIG_X86_64 */
+5 -4
arch/x86/kernel/cpu/amd.c
···
 		if (c->x86_power & (1<<8))
 			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
+
+	/* Set MTRR capability flag if appropriate */
+	if (c->x86_model == 13 || c->x86_model == 9 ||
+	    (c->x86_model == 8 && c->x86_mask >= 8))
+		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
···
 			mbytes);
 		}
 
-		/* Set MTRR capability flag if appropriate */
-		if (c->x86_model == 13 || c->x86_model == 9 ||
-		    (c->x86_model == 8 && c->x86_mask >= 8))
-			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 		break;
 	}
 
+11
arch/x86/kernel/cpu/centaur.c
···
 	EAMD3D = 1<<20,
 };
 
+static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+{
+	switch (c->x86) {
+	case 5:
+		/* Emulate MTRRs using Centaur's MCR. */
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
+		break;
+	}
+}
+
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
 
···
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor = "Centaur",
 	.c_ident = { "CentaurHauls" },
+	.c_early_init = early_init_centaur,
 	.c_init = init_centaur,
 	.c_size_cache = centaur_size_cache,
 };
+32 -2
arch/x86/kernel/cpu/common.c
···
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
···
 
 	get_cpu_vendor(c, 1);
 
+	early_get_cap(c);
+
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
+}
 
-	early_get_cap(c);
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6, unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect. Hence, probe for it based on first
+ * principles.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     " .section .fixup,\"ax\"\n"
+			     "3: xor %0,%0\n"
+			     " jmp 2b\n"
+			     " .previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
···
 		}
 
 		init_scattered_cpuid_features(c);
+		detect_nopl(c);
 	}
-
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
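For illustration only (not part of the patches above): the same first-principles probe can be sketched in user space, with a SIGILL handler standing in for the kernel's exception-table fixup. The file name and symbols below are hypothetical.

	/* nopl-probe.c: hypothetical user-space sketch of the NOPL probe */
	#include <setjmp.h>
	#include <signal.h>
	#include <stdio.h>

	static sigjmp_buf nopl_jmp;

	static void sigill_handler(int sig)
	{
		(void)sig;
		siglongjmp(nopl_jmp, 1);	/* NOPL raised #UD -> SIGILL */
	}

	int main(void)
	{
		signal(SIGILL, sigill_handler);

		if (sigsetjmp(nopl_jmp, 1) == 0) {
			/* Execute "nopl %eax" (0f 1f c0); faults on CPUs without NOPL. */
			asm volatile(".byte 0x0f, 0x1f, 0xc0");
			printf("nopl supported\n");
		} else {
			printf("nopl not supported\n");
		}
		return 0;
	}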
+58 -16
arch/x86/kernel/cpu/common_64.c
···
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #include <asm/numa.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
···
 	}
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6, unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect. Hence, probe for it based on first
+ * principles.
+ *
+ * Note: no 64-bit chip is known to lack these, but put the code here
+ * for consistency with 32 bits, and to make it utterly trivial to
+ * diagnose the problem should it ever surface.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     " .section .fixup,\"ax\"\n"
+			     "3: xor %0,%0\n"
+			     " jmp 2b\n"
+			     " .previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
+}
+
 static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
 
 void __init early_cpu_init(void)
···
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
 	}
+
+	detect_nopl(c);
 
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
···
 		/* others are initialized in smpboot.c */
 		pda->pcurrent = &init_task;
 		pda->irqstackptr = boot_cpu_stack;
+		pda->irqstackptr += IRQSTACKSIZE - 64;
 	} else {
-		pda->irqstackptr = (char *)
-			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-		if (!pda->irqstackptr)
-			panic("cannot allocate irqstack for cpu %d", cpu);
+		if (!pda->irqstackptr) {
+			pda->irqstackptr = (char *)
+				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
+			if (!pda->irqstackptr)
+				panic("cannot allocate irqstack for cpu %d",
+				      cpu);
+			pda->irqstackptr += IRQSTACKSIZE - 64;
+		}
 
 		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
 			pda->nodenumber = cpu_to_node(cpu);
 	}
-
-	pda->irqstackptr += IRQSTACKSIZE-64;
 }
···
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+	if (!orig_ist->ist[0]) {
 		static const unsigned int order[N_EXCEPTION_STACKS] = {
-		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
+			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
 		};
-		if (cpu) {
-			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-			if (!estacks)
-				panic("Cannot allocate exception stack %ld %d\n",
-					v, cpu);
+		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+			if (cpu) {
+				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
+				if (!estacks)
+					panic("Cannot allocate exception "
+					      "stack %ld %d\n", v, cpu);
+			}
+			estacks += PAGE_SIZE << order[v];
+			orig_ist->ist[v] = t->x86_tss.ist[v] =
+					(unsigned long)estacks;
 		}
-		estacks += PAGE_SIZE << order[v];
-		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
 	}
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+28 -4
arch/x86/kernel/cpu/cyrix.c
···
 /*
  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned char ccr2, ccr3;
-	unsigned long flags;
 
 	/* we test for DEVID by checking whether CCR3 is writable */
-	local_irq_save(flags);
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, ccr3 ^ 0x80);
 	getCx86(0xc0); /* dummy to change bus */
···
 		*dir0 = getCx86(CX86_DIR0);
 		*dir1 = getCx86(CX86_DIR1);
 	}
-	local_irq_restore(flags);
 }
 
+static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__do_cyrix_devid(dir0, dir1);
+	local_irq_restore(flags);
+}
 /*
  * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
  * order to identify the Cyrix CPU model after we're out of setup.c
···
 	local_irq_restore(flags);
 }
 
+static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+{
+	unsigned char dir0, dir0_msn, dir1 = 0;
+
+	__do_cyrix_devid(&dir0, &dir1);
+	dir0_msn = dir0 >> 4; /* identifies CPU "family" */
+
+	switch (dir0_msn) {
+	case 3: /* 6x86/6x86L */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	case 5: /* 6x86MX/M II */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	}
+}
 
 static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 {
···
 static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 	.c_vendor = "Cyrix",
 	.c_ident = { "CyrixInstead" },
+	.c_early_init = early_init_cyrix,
 	.c_init = init_cyrix,
 	.c_identify = cyrix_identify,
 };
+2 -1
arch/x86/kernel/cpu/feature_names.c
···
 	NULL, NULL, NULL, NULL,
 	"constant_tsc", "up", NULL, "arch_perfmon",
 	"pebs", "bts", NULL, NULL,
-	"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	"rep_good", NULL, NULL, NULL,
+	"nopl", NULL, NULL, NULL,
 	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
 	/* Intel-defined (#2) */
+1 -1
arch/x86/xen/enlighten.c
···
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
 	.pte_val = xen_pte_val,
-	.pte_flags = native_pte_val,
+	.pte_flags = native_pte_flags,
 	.pgd_val = xen_pgd_val,
 
 	.make_pte = xen_make_pte,
+6 -5
include/asm-x86/cpufeature.h
···
 #define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS	(3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS		(3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32	(3*32+14) /* syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32	(3*32+15) /* sysenter in ia32 userspace */
+#define X86_FEATURE_PEBS	(3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS		(3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32	(3*32+14) /* syscall in ia32 userspace */
+#define X86_FEATURE_SYSENTER32	(3*32+15) /* sysenter in ia32 userspace */
 #define X86_FEATURE_REP_GOOD	(3*32+16) /* rep microcode works well on this CPU */
 #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
-#define X86_FEATURE_11AP	(3*32+19) /* Bad local APIC aka 11AP */
+#define X86_FEATURE_11AP	(3*32+19) /* Bad local APIC aka 11AP */
+#define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
+7 -1
include/asm-x86/required-features.h
···
 # define NEED_3DNOW	0
 #endif
 
+#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
+# define NEED_NOPL	(1<<(X86_FEATURE_NOPL & 31))
+#else
+# define NEED_NOPL	0
+#endif
+
 #ifdef CONFIG_X86_64
 #define NEED_PSE	0
 #define NEED_MSR	(1<<(X86_FEATURE_MSR & 31))
···
 #define REQUIRED_MASK1	(NEED_LM|NEED_3DNOW)
 
 #define REQUIRED_MASK2	0
-#define REQUIRED_MASK3	0
+#define REQUIRED_MASK3	(NEED_NOPL)
 #define REQUIRED_MASK4	0
 #define REQUIRED_MASK5	0
 #define REQUIRED_MASK6	0