Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/cpufeature: Remove unused and seldomly used cpu_has_xx macros

Those are stupid and code should use static_cpu_has_safe() or
boot_cpu_has() instead. Kill the least used and unused ones.

The remaining ones need more careful inspection before a conversion can
happen. On the TODO.

Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/1449481182-27541-4-git-send-email-bp@alien8.de
Cc: David Sterba <dsterba@suse.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <jbacik@fb.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Authored by Borislav Petkov and committed by Thomas Gleixner (commit 362f924b, parent 39c06df4).

+48 -68
+1 -1
arch/x86/crypto/chacha20_glue.c
··· 125 125 126 126 static int __init chacha20_simd_mod_init(void) 127 127 { 128 - if (!cpu_has_ssse3) 128 + if (!boot_cpu_has(X86_FEATURE_SSSE3)) 129 129 return -ENODEV; 130 130 131 131 #ifdef CONFIG_AS_AVX2
+1 -1
arch/x86/crypto/crc32c-intel_glue.c
··· 257 257 if (!x86_match_cpu(crc32c_cpu_id)) 258 258 return -ENODEV; 259 259 #ifdef CONFIG_X86_64 260 - if (cpu_has_pclmulqdq) { 260 + if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) { 261 261 alg.update = crc32c_pcl_intel_update; 262 262 alg.finup = crc32c_pcl_intel_finup; 263 263 alg.digest = crc32c_pcl_intel_digest;
+1 -1
arch/x86/include/asm/cmpxchg_32.h
··· 109 109 110 110 #endif 111 111 112 - #define system_has_cmpxchg_double() cpu_has_cx8 112 + #define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8) 113 113 114 114 #endif /* _ASM_X86_CMPXCHG_32_H */
+1 -1
arch/x86/include/asm/cmpxchg_64.h
··· 18 18 cmpxchg_local((ptr), (o), (n)); \ 19 19 }) 20 20 21 - #define system_has_cmpxchg_double() cpu_has_cx16 21 + #define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16) 22 22 23 23 #endif /* _ASM_X86_CMPXCHG_64_H */
+4 -33
arch/x86/include/asm/cpufeature.h
··· 385 385 } while (0) 386 386 387 387 #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) 388 - #define cpu_has_de boot_cpu_has(X86_FEATURE_DE) 389 388 #define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) 390 389 #define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) 391 390 #define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) 392 391 #define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) 393 - #define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) 394 - #define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) 395 - #define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) 396 392 #define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) 397 393 #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) 398 394 #define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) 399 - #define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) 400 - #define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3) 401 395 #define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) 402 396 #define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) 403 397 #define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2) 404 - #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) 405 - #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) 406 - #define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) 407 - #define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) 408 - #define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) 409 - #define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) 410 - #define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) 411 - #define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) 412 - #define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) 413 - #define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) 414 - #define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) 415 - #define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) 416 - #define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) 417 - #define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) 418 398 #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH) 419 - #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) 420 399 #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) 421 400 #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) 422 401 #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) 423 - #define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1) 424 - #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) 425 402 #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) 426 403 #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) 427 - #define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT) 428 404 #define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES) 429 405 #define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) 430 406 #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) 431 - #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) 432 - #define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) 433 - #define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB) 434 - #define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2) 435 - #define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) 436 - #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) 437 - #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) 438 - #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT) 439 - #define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT) 407 + /* 408 + * Do not add any more of those clumsy macros - use static_cpu_has_safe() for 409 + * fast paths and boot_cpu_has() otherwise! 410 + */ 440 411 441 412 #if __GNUC__ >= 4 442 413 extern void warn_pre_alternatives(void);
+1 -1
arch/x86/include/asm/xor_32.h
··· 553 553 if (cpu_has_xmm) { \ 554 554 xor_speed(&xor_block_pIII_sse); \ 555 555 xor_speed(&xor_block_sse_pf64); \ 556 - } else if (cpu_has_mmx) { \ 556 + } else if (boot_cpu_has(X86_FEATURE_MMX)) { \ 557 557 xor_speed(&xor_block_pII_mmx); \ 558 558 xor_speed(&xor_block_p5_mmx); \ 559 559 } else { \
+2 -2
arch/x86/kernel/cpu/amd.c
··· 304 304 int cpu = smp_processor_id(); 305 305 306 306 /* get information required for multi-node processors */ 307 - if (cpu_has_topoext) { 307 + if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { 308 308 u32 eax, ebx, ecx, edx; 309 309 310 310 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); ··· 922 922 923 923 void set_dr_addr_mask(unsigned long mask, int dr) 924 924 { 925 - if (!cpu_has_bpext) 925 + if (!boot_cpu_has(X86_FEATURE_BPEXT)) 926 926 return; 927 927 928 928 switch (dr) {
+3 -1
arch/x86/kernel/cpu/common.c
··· 1445 1445 1446 1446 printk(KERN_INFO "Initializing CPU#%d\n", cpu); 1447 1447 1448 - if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de) 1448 + if (cpu_feature_enabled(X86_FEATURE_VME) || 1449 + cpu_has_tsc || 1450 + boot_cpu_has(X86_FEATURE_DE)) 1449 1451 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1450 1452 1451 1453 load_current_idt();
+2 -1
arch/x86/kernel/cpu/intel.c
··· 445 445 446 446 if (cpu_has_xmm2) 447 447 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); 448 - if (cpu_has_ds) { 448 + 449 + if (boot_cpu_has(X86_FEATURE_DS)) { 449 450 unsigned int l1; 450 451 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); 451 452 if (!(l1 & (1<<11)))
+3 -3
arch/x86/kernel/cpu/intel_cacheinfo.c
··· 591 591 unsigned edx; 592 592 593 593 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { 594 - if (cpu_has_topoext) 594 + if (boot_cpu_has(X86_FEATURE_TOPOEXT)) 595 595 cpuid_count(0x8000001d, index, &eax.full, 596 596 &ebx.full, &ecx.full, &edx); 597 597 else ··· 637 637 void init_amd_cacheinfo(struct cpuinfo_x86 *c) 638 638 { 639 639 640 - if (cpu_has_topoext) { 640 + if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { 641 641 num_cache_leaves = find_num_cache_leaves(c); 642 642 } else if (c->extended_cpuid_level >= 0x80000006) { 643 643 if (cpuid_edx(0x80000006) & 0xf000) ··· 809 809 struct cacheinfo *this_leaf; 810 810 int i, sibling; 811 811 812 - if (cpu_has_topoext) { 812 + if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { 813 813 unsigned int apicid, nshared, first, last; 814 814 815 815 this_leaf = this_cpu_ci->info_list + index;
+1 -1
arch/x86/kernel/cpu/mtrr/generic.c
··· 349 349 350 350 void mtrr_save_fixed_ranges(void *info) 351 351 { 352 - if (cpu_has_mtrr) 352 + if (boot_cpu_has(X86_FEATURE_MTRR)) 353 353 get_fixed_ranges(mtrr_state.fixed_ranges); 354 354 } 355 355
+1 -1
arch/x86/kernel/cpu/mtrr/main.c
··· 682 682 683 683 phys_addr = 32; 684 684 685 - if (cpu_has_mtrr) { 685 + if (boot_cpu_has(X86_FEATURE_MTRR)) { 686 686 mtrr_if = &generic_mtrr_ops; 687 687 size_or_mask = SIZE_OR_MASK_BITS(36); 688 688 size_and_mask = 0x00f00000;
+2 -2
arch/x86/kernel/cpu/perf_event_amd.c
··· 160 160 if (offset) 161 161 return offset; 162 162 163 - if (!cpu_has_perfctr_core) 163 + if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) 164 164 offset = index; 165 165 else 166 166 offset = index << 1; ··· 652 652 653 653 static int __init amd_core_pmu_init(void) 654 654 { 655 - if (!cpu_has_perfctr_core) 655 + if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) 656 656 return 0; 657 657 658 658 switch (boot_cpu_data.x86) {
+6 -5
arch/x86/kernel/cpu/perf_event_amd_uncore.c
··· 523 523 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) 524 524 goto fail_nodev; 525 525 526 - if (!cpu_has_topoext) 526 + if (!boot_cpu_has(X86_FEATURE_TOPOEXT)) 527 527 goto fail_nodev; 528 528 529 - if (cpu_has_perfctr_nb) { 529 + if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) { 530 530 amd_uncore_nb = alloc_percpu(struct amd_uncore *); 531 531 if (!amd_uncore_nb) { 532 532 ret = -ENOMEM; ··· 540 540 ret = 0; 541 541 } 542 542 543 - if (cpu_has_perfctr_l2) { 543 + if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) { 544 544 amd_uncore_l2 = alloc_percpu(struct amd_uncore *); 545 545 if (!amd_uncore_l2) { 546 546 ret = -ENOMEM; ··· 583 583 584 584 /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */ 585 585 amd_uncore_nb = amd_uncore_l2 = NULL; 586 - if (cpu_has_perfctr_l2) 586 + 587 + if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) 587 588 perf_pmu_unregister(&amd_l2_pmu); 588 589 fail_l2: 589 - if (cpu_has_perfctr_nb) 590 + if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) 590 591 perf_pmu_unregister(&amd_nb_pmu); 591 592 if (amd_uncore_l2) 592 593 free_percpu(amd_uncore_l2);
+2 -2
arch/x86/kernel/fpu/init.c
··· 12 12 */ 13 13 static void fpu__init_cpu_ctx_switch(void) 14 14 { 15 - if (!cpu_has_eager_fpu) 15 + if (!boot_cpu_has(X86_FEATURE_EAGER_FPU)) 16 16 stts(); 17 17 else 18 18 clts(); ··· 287 287 current_thread_info()->status = 0; 288 288 289 289 /* Auto enable eagerfpu for xsaveopt */ 290 - if (cpu_has_xsaveopt && eagerfpu != DISABLE) 290 + if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE) 291 291 eagerfpu = ENABLE; 292 292 293 293 if (xfeatures_mask & XFEATURE_MASK_EAGER) {
+4 -2
arch/x86/kernel/hw_breakpoint.c
··· 300 300 return -EINVAL; 301 301 if (bp->attr.bp_addr & (bp->attr.bp_len - 1)) 302 302 return -EINVAL; 303 + 304 + if (!boot_cpu_has(X86_FEATURE_BPEXT)) 305 + return -EOPNOTSUPP; 306 + 303 307 /* 304 308 * It's impossible to use a range breakpoint to fake out 305 309 * user vs kernel detection because bp_len - 1 can't ··· 311 307 * breakpoints, then we'll have to check for kprobe-blacklisted 312 308 * addresses anywhere in the range. 313 309 */ 314 - if (!cpu_has_bpext) 315 - return -EOPNOTSUPP; 316 310 info->mask = bp->attr.bp_len - 1; 317 311 info->len = X86_BREAKPOINT_LEN_1; 318 312 }
+1 -1
arch/x86/kernel/smpboot.c
··· 304 304 305 305 static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 306 306 { 307 - if (cpu_has_topoext) { 307 + if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { 308 308 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 309 309 310 310 if (c->phys_proc_id == o->phys_proc_id &&
+3 -1
arch/x86/kernel/vm86_32.c
··· 357 357 tss = &per_cpu(cpu_tss, get_cpu()); 358 358 /* make room for real-mode segments */ 359 359 tsk->thread.sp0 += 16; 360 - if (cpu_has_sep) 360 + 361 + if (static_cpu_has_safe(X86_FEATURE_SEP)) 361 362 tsk->thread.sysenter_cs = 0; 363 + 362 364 load_sp0(tss, &tsk->thread); 363 365 put_cpu(); 364 366
+2 -2
arch/x86/mm/setup_nx.c
··· 31 31 32 32 void x86_configure_nx(void) 33 33 { 34 - if (cpu_has_nx && !disable_nx) 34 + if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx) 35 35 __supported_pte_mask |= _PAGE_NX; 36 36 else 37 37 __supported_pte_mask &= ~_PAGE_NX; ··· 39 39 40 40 void __init x86_report_nx(void) 41 41 { 42 - if (!cpu_has_nx) { 42 + if (!boot_cpu_has(X86_FEATURE_NX)) { 43 43 printk(KERN_NOTICE "Notice: NX (Execute Disable) protection " 44 44 "missing in CPU!\n"); 45 45 } else {
+3 -2
drivers/char/hw_random/via-rng.c
··· 140 140 * RNG configuration like it used to be the case in this 141 141 * register */ 142 142 if ((c->x86 == 6) && (c->x86_model >= 0x0f)) { 143 - if (!cpu_has_xstore_enabled) { 143 + if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { 144 144 pr_err(PFX "can't enable hardware RNG " 145 145 "if XSTORE is not enabled\n"); 146 146 return -ENODEV; ··· 200 200 { 201 201 int err; 202 202 203 - if (!cpu_has_xstore) 203 + if (!boot_cpu_has(X86_FEATURE_XSTORE)) 204 204 return -ENODEV; 205 + 205 206 pr_info("VIA RNG detected\n"); 206 207 err = hwrng_register(&via_rng); 207 208 if (err) {
+1 -1
drivers/crypto/padlock-aes.c
··· 515 515 if (!x86_match_cpu(padlock_cpu_id)) 516 516 return -ENODEV; 517 517 518 - if (!cpu_has_xcrypt_enabled) { 518 + if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) { 519 519 printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); 520 520 return -ENODEV; 521 521 }
+1 -1
drivers/crypto/padlock-sha.c
··· 540 540 struct shash_alg *sha1; 541 541 struct shash_alg *sha256; 542 542 543 - if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled) 543 + if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN)) 544 544 return -ENODEV; 545 545 546 546 /* Register the newly added algorithm module if on *
+1 -1
drivers/iommu/intel_irq_remapping.c
··· 753 753 * should have X86_FEATURE_CX16 support, this has been confirmed 754 754 * with Intel hardware guys. 755 755 */ 756 - if ( cpu_has_cx16 ) 756 + if (boot_cpu_has(X86_FEATURE_CX16)) 757 757 intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP; 758 758 759 759 for_each_iommu(iommu, drhd)
+1 -1
fs/btrfs/disk-io.c
··· 923 923 if (bio_flags & EXTENT_BIO_TREE_LOG) 924 924 return 0; 925 925 #ifdef CONFIG_X86 926 - if (cpu_has_xmm4_2) 926 + if (static_cpu_has_safe(X86_FEATURE_XMM4_2)) 927 927 return 0; 928 928 #endif 929 929 return 1;