Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'x86_cpu_for_v6.11_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu model updates from Borislav Petkov:

- Flip the logic for adding feature names to /proc/cpuinfo: a flag now
has to be explicitly specified if there's a valid reason to show it in
/proc/cpuinfo

- Switch a bunch of Intel x86 model checking code to the new CPU model
defines

- Fixes and cleanups

* tag 'x86_cpu_for_v6.11_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/cpu/intel: Drop stray FAM6 check with new Intel CPU model defines
x86/cpufeatures: Flip the /proc/cpuinfo appearance logic
x86/CPU/AMD: Always inline amd_clear_divider()
x86/mce/inject: Add missing MODULE_DESCRIPTION() line
perf/x86/rapl: Switch to new Intel CPU model defines
x86/boot: Switch to new Intel CPU model defines
x86/cpu: Switch to new Intel CPU model defines
perf/x86/intel: Switch to new Intel CPU model defines
x86/virt/tdx: Switch to new Intel CPU model defines
x86/PCI: Switch to new Intel CPU model defines
x86/cpu/intel: Switch to new Intel CPU model defines
x86/platform/intel-mid: Switch to new Intel CPU model defines
x86/pconfig: Remove unused MKTME pconfig code
x86/cpu: Remove useless work in detect_tme_early()

+701 -905
+1 -1
arch/x86/boot/cpucheck.c
··· 203 203 */ 204 204 if (!is_intel() || 205 205 cpu.family != 6 || 206 - cpu.model != INTEL_FAM6_XEON_PHI_KNL) 206 + cpu.model != 0x57 /*INTEL_XEON_PHI_KNL*/) 207 207 return 0; 208 208 209 209 /*
+106 -106
arch/x86/events/intel/core.c
··· 4698 4698 static inline bool intel_pmu_broken_perf_cap(void) 4699 4699 { 4700 4700 /* The Perf Metric (Bit 15) is always cleared */ 4701 - if ((boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE) || 4702 - (boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE_L)) 4701 + if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE || 4702 + boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L) 4703 4703 return true; 4704 4704 4705 4705 return false; ··· 5187 5187 } 5188 5188 5189 5189 static const struct x86_cpu_desc isolation_ucodes[] = { 5190 - INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f), 5191 - INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e), 5192 - INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015), 5193 - INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037), 5194 - INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a), 5195 - INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023), 5196 - INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014), 5197 - INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010), 5198 - INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009), 5199 - INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009), 5200 - INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002), 5201 - INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014), 5202 - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021), 5203 - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000), 5204 - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000), 5205 - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000), 5206 - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000), 5207 - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000), 5208 - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c), 5209 - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c), 5210 - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e), 5211 - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e), 5212 - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e), 5213 - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e), 5214 - 
INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e), 5215 - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e), 5216 - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e), 5217 - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e), 5218 - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e), 5190 + INTEL_CPU_DESC(INTEL_HASWELL, 3, 0x0000001f), 5191 + INTEL_CPU_DESC(INTEL_HASWELL_L, 1, 0x0000001e), 5192 + INTEL_CPU_DESC(INTEL_HASWELL_G, 1, 0x00000015), 5193 + INTEL_CPU_DESC(INTEL_HASWELL_X, 2, 0x00000037), 5194 + INTEL_CPU_DESC(INTEL_HASWELL_X, 4, 0x0000000a), 5195 + INTEL_CPU_DESC(INTEL_BROADWELL, 4, 0x00000023), 5196 + INTEL_CPU_DESC(INTEL_BROADWELL_G, 1, 0x00000014), 5197 + INTEL_CPU_DESC(INTEL_BROADWELL_D, 2, 0x00000010), 5198 + INTEL_CPU_DESC(INTEL_BROADWELL_D, 3, 0x07000009), 5199 + INTEL_CPU_DESC(INTEL_BROADWELL_D, 4, 0x0f000009), 5200 + INTEL_CPU_DESC(INTEL_BROADWELL_D, 5, 0x0e000002), 5201 + INTEL_CPU_DESC(INTEL_BROADWELL_X, 1, 0x0b000014), 5202 + INTEL_CPU_DESC(INTEL_SKYLAKE_X, 3, 0x00000021), 5203 + INTEL_CPU_DESC(INTEL_SKYLAKE_X, 4, 0x00000000), 5204 + INTEL_CPU_DESC(INTEL_SKYLAKE_X, 5, 0x00000000), 5205 + INTEL_CPU_DESC(INTEL_SKYLAKE_X, 6, 0x00000000), 5206 + INTEL_CPU_DESC(INTEL_SKYLAKE_X, 7, 0x00000000), 5207 + INTEL_CPU_DESC(INTEL_SKYLAKE_X, 11, 0x00000000), 5208 + INTEL_CPU_DESC(INTEL_SKYLAKE_L, 3, 0x0000007c), 5209 + INTEL_CPU_DESC(INTEL_SKYLAKE, 3, 0x0000007c), 5210 + INTEL_CPU_DESC(INTEL_KABYLAKE, 9, 0x0000004e), 5211 + INTEL_CPU_DESC(INTEL_KABYLAKE_L, 9, 0x0000004e), 5212 + INTEL_CPU_DESC(INTEL_KABYLAKE_L, 10, 0x0000004e), 5213 + INTEL_CPU_DESC(INTEL_KABYLAKE_L, 11, 0x0000004e), 5214 + INTEL_CPU_DESC(INTEL_KABYLAKE_L, 12, 0x0000004e), 5215 + INTEL_CPU_DESC(INTEL_KABYLAKE, 10, 0x0000004e), 5216 + INTEL_CPU_DESC(INTEL_KABYLAKE, 11, 0x0000004e), 5217 + INTEL_CPU_DESC(INTEL_KABYLAKE, 12, 0x0000004e), 5218 + INTEL_CPU_DESC(INTEL_KABYLAKE, 13, 0x0000004e), 5219 5219 {} 5220 5220 }; 5221 5221 ··· 5232 5232 } 5233 5233 5234 5234 static const struct 
x86_cpu_desc pebs_ucodes[] = { 5235 - INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028), 5236 - INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618), 5237 - INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c), 5235 + INTEL_CPU_DESC(INTEL_SANDYBRIDGE, 7, 0x00000028), 5236 + INTEL_CPU_DESC(INTEL_SANDYBRIDGE_X, 6, 0x00000618), 5237 + INTEL_CPU_DESC(INTEL_SANDYBRIDGE_X, 7, 0x0000070c), 5238 5238 {} 5239 5239 }; 5240 5240 ··· 6238 6238 /* 6239 6239 * Install the hw-cache-events table: 6240 6240 */ 6241 - switch (boot_cpu_data.x86_model) { 6242 - case INTEL_FAM6_CORE_YONAH: 6241 + switch (boot_cpu_data.x86_vfm) { 6242 + case INTEL_CORE_YONAH: 6243 6243 pr_cont("Core events, "); 6244 6244 name = "core"; 6245 6245 break; 6246 6246 6247 - case INTEL_FAM6_CORE2_MEROM: 6247 + case INTEL_CORE2_MEROM: 6248 6248 x86_add_quirk(intel_clovertown_quirk); 6249 6249 fallthrough; 6250 6250 6251 - case INTEL_FAM6_CORE2_MEROM_L: 6252 - case INTEL_FAM6_CORE2_PENRYN: 6253 - case INTEL_FAM6_CORE2_DUNNINGTON: 6251 + case INTEL_CORE2_MEROM_L: 6252 + case INTEL_CORE2_PENRYN: 6253 + case INTEL_CORE2_DUNNINGTON: 6254 6254 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, 6255 6255 sizeof(hw_cache_event_ids)); 6256 6256 ··· 6262 6262 name = "core2"; 6263 6263 break; 6264 6264 6265 - case INTEL_FAM6_NEHALEM: 6266 - case INTEL_FAM6_NEHALEM_EP: 6267 - case INTEL_FAM6_NEHALEM_EX: 6265 + case INTEL_NEHALEM: 6266 + case INTEL_NEHALEM_EP: 6267 + case INTEL_NEHALEM_EX: 6268 6268 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, 6269 6269 sizeof(hw_cache_event_ids)); 6270 6270 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, ··· 6296 6296 name = "nehalem"; 6297 6297 break; 6298 6298 6299 - case INTEL_FAM6_ATOM_BONNELL: 6300 - case INTEL_FAM6_ATOM_BONNELL_MID: 6301 - case INTEL_FAM6_ATOM_SALTWELL: 6302 - case INTEL_FAM6_ATOM_SALTWELL_MID: 6303 - case INTEL_FAM6_ATOM_SALTWELL_TABLET: 6299 + case INTEL_ATOM_BONNELL: 6300 + case INTEL_ATOM_BONNELL_MID: 6301 + case 
INTEL_ATOM_SALTWELL: 6302 + case INTEL_ATOM_SALTWELL_MID: 6303 + case INTEL_ATOM_SALTWELL_TABLET: 6304 6304 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, 6305 6305 sizeof(hw_cache_event_ids)); 6306 6306 ··· 6313 6313 name = "bonnell"; 6314 6314 break; 6315 6315 6316 - case INTEL_FAM6_ATOM_SILVERMONT: 6317 - case INTEL_FAM6_ATOM_SILVERMONT_D: 6318 - case INTEL_FAM6_ATOM_SILVERMONT_MID: 6319 - case INTEL_FAM6_ATOM_AIRMONT: 6320 - case INTEL_FAM6_ATOM_AIRMONT_MID: 6316 + case INTEL_ATOM_SILVERMONT: 6317 + case INTEL_ATOM_SILVERMONT_D: 6318 + case INTEL_ATOM_SILVERMONT_MID: 6319 + case INTEL_ATOM_AIRMONT: 6320 + case INTEL_ATOM_AIRMONT_MID: 6321 6321 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, 6322 6322 sizeof(hw_cache_event_ids)); 6323 6323 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, ··· 6335 6335 name = "silvermont"; 6336 6336 break; 6337 6337 6338 - case INTEL_FAM6_ATOM_GOLDMONT: 6339 - case INTEL_FAM6_ATOM_GOLDMONT_D: 6338 + case INTEL_ATOM_GOLDMONT: 6339 + case INTEL_ATOM_GOLDMONT_D: 6340 6340 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids, 6341 6341 sizeof(hw_cache_event_ids)); 6342 6342 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs, ··· 6362 6362 name = "goldmont"; 6363 6363 break; 6364 6364 6365 - case INTEL_FAM6_ATOM_GOLDMONT_PLUS: 6365 + case INTEL_ATOM_GOLDMONT_PLUS: 6366 6366 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 6367 6367 sizeof(hw_cache_event_ids)); 6368 6368 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs, ··· 6391 6391 name = "goldmont_plus"; 6392 6392 break; 6393 6393 6394 - case INTEL_FAM6_ATOM_TREMONT_D: 6395 - case INTEL_FAM6_ATOM_TREMONT: 6396 - case INTEL_FAM6_ATOM_TREMONT_L: 6394 + case INTEL_ATOM_TREMONT_D: 6395 + case INTEL_ATOM_TREMONT: 6396 + case INTEL_ATOM_TREMONT_L: 6397 6397 x86_pmu.late_ack = true; 6398 6398 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 6399 6399 sizeof(hw_cache_event_ids)); ··· 6420 6420 name = "Tremont"; 6421 6421 break; 6422 6422 6423 - case 
INTEL_FAM6_ATOM_GRACEMONT: 6423 + case INTEL_ATOM_GRACEMONT: 6424 6424 intel_pmu_init_grt(NULL); 6425 6425 intel_pmu_pebs_data_source_grt(); 6426 6426 x86_pmu.pebs_latency_data = adl_latency_data_small; ··· 6432 6432 name = "gracemont"; 6433 6433 break; 6434 6434 6435 - case INTEL_FAM6_ATOM_CRESTMONT: 6436 - case INTEL_FAM6_ATOM_CRESTMONT_X: 6435 + case INTEL_ATOM_CRESTMONT: 6436 + case INTEL_ATOM_CRESTMONT_X: 6437 6437 intel_pmu_init_grt(NULL); 6438 6438 x86_pmu.extra_regs = intel_cmt_extra_regs; 6439 6439 intel_pmu_pebs_data_source_cmt(); ··· 6446 6446 name = "crestmont"; 6447 6447 break; 6448 6448 6449 - case INTEL_FAM6_WESTMERE: 6450 - case INTEL_FAM6_WESTMERE_EP: 6451 - case INTEL_FAM6_WESTMERE_EX: 6449 + case INTEL_WESTMERE: 6450 + case INTEL_WESTMERE_EP: 6451 + case INTEL_WESTMERE_EX: 6452 6452 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, 6453 6453 sizeof(hw_cache_event_ids)); 6454 6454 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, ··· 6477 6477 name = "westmere"; 6478 6478 break; 6479 6479 6480 - case INTEL_FAM6_SANDYBRIDGE: 6481 - case INTEL_FAM6_SANDYBRIDGE_X: 6480 + case INTEL_SANDYBRIDGE: 6481 + case INTEL_SANDYBRIDGE_X: 6482 6482 x86_add_quirk(intel_sandybridge_quirk); 6483 6483 x86_add_quirk(intel_ht_bug); 6484 6484 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, ··· 6491 6491 x86_pmu.event_constraints = intel_snb_event_constraints; 6492 6492 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; 6493 6493 x86_pmu.pebs_aliases = intel_pebs_aliases_snb; 6494 - if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X) 6494 + if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X) 6495 6495 x86_pmu.extra_regs = intel_snbep_extra_regs; 6496 6496 else 6497 6497 x86_pmu.extra_regs = intel_snb_extra_regs; ··· 6517 6517 name = "sandybridge"; 6518 6518 break; 6519 6519 6520 - case INTEL_FAM6_IVYBRIDGE: 6521 - case INTEL_FAM6_IVYBRIDGE_X: 6520 + case INTEL_IVYBRIDGE: 6521 + case INTEL_IVYBRIDGE_X: 6522 6522 x86_add_quirk(intel_ht_bug); 
6523 6523 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 6524 6524 sizeof(hw_cache_event_ids)); ··· 6534 6534 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; 6535 6535 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 6536 6536 x86_pmu.pebs_prec_dist = true; 6537 - if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X) 6537 + if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X) 6538 6538 x86_pmu.extra_regs = intel_snbep_extra_regs; 6539 6539 else 6540 6540 x86_pmu.extra_regs = intel_snb_extra_regs; ··· 6556 6556 break; 6557 6557 6558 6558 6559 - case INTEL_FAM6_HASWELL: 6560 - case INTEL_FAM6_HASWELL_X: 6561 - case INTEL_FAM6_HASWELL_L: 6562 - case INTEL_FAM6_HASWELL_G: 6559 + case INTEL_HASWELL: 6560 + case INTEL_HASWELL_X: 6561 + case INTEL_HASWELL_L: 6562 + case INTEL_HASWELL_G: 6563 6563 x86_add_quirk(intel_ht_bug); 6564 6564 x86_add_quirk(intel_pebs_isolation_quirk); 6565 6565 x86_pmu.late_ack = true; ··· 6589 6589 name = "haswell"; 6590 6590 break; 6591 6591 6592 - case INTEL_FAM6_BROADWELL: 6593 - case INTEL_FAM6_BROADWELL_D: 6594 - case INTEL_FAM6_BROADWELL_G: 6595 - case INTEL_FAM6_BROADWELL_X: 6592 + case INTEL_BROADWELL: 6593 + case INTEL_BROADWELL_D: 6594 + case INTEL_BROADWELL_G: 6595 + case INTEL_BROADWELL_X: 6596 6596 x86_add_quirk(intel_pebs_isolation_quirk); 6597 6597 x86_pmu.late_ack = true; 6598 6598 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); ··· 6631 6631 name = "broadwell"; 6632 6632 break; 6633 6633 6634 - case INTEL_FAM6_XEON_PHI_KNL: 6635 - case INTEL_FAM6_XEON_PHI_KNM: 6634 + case INTEL_XEON_PHI_KNL: 6635 + case INTEL_XEON_PHI_KNM: 6636 6636 memcpy(hw_cache_event_ids, 6637 6637 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6638 6638 memcpy(hw_cache_extra_regs, ··· 6651 6651 name = "knights-landing"; 6652 6652 break; 6653 6653 6654 - case INTEL_FAM6_SKYLAKE_X: 6654 + case INTEL_SKYLAKE_X: 6655 6655 pmem = true; 6656 6656 fallthrough; 6657 - case INTEL_FAM6_SKYLAKE_L: 6658 - case 
INTEL_FAM6_SKYLAKE: 6659 - case INTEL_FAM6_KABYLAKE_L: 6660 - case INTEL_FAM6_KABYLAKE: 6661 - case INTEL_FAM6_COMETLAKE_L: 6662 - case INTEL_FAM6_COMETLAKE: 6657 + case INTEL_SKYLAKE_L: 6658 + case INTEL_SKYLAKE: 6659 + case INTEL_KABYLAKE_L: 6660 + case INTEL_KABYLAKE: 6661 + case INTEL_COMETLAKE_L: 6662 + case INTEL_COMETLAKE: 6663 6663 x86_add_quirk(intel_pebs_isolation_quirk); 6664 6664 x86_pmu.late_ack = true; 6665 6665 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); ··· 6708 6708 name = "skylake"; 6709 6709 break; 6710 6710 6711 - case INTEL_FAM6_ICELAKE_X: 6712 - case INTEL_FAM6_ICELAKE_D: 6711 + case INTEL_ICELAKE_X: 6712 + case INTEL_ICELAKE_D: 6713 6713 x86_pmu.pebs_ept = 1; 6714 6714 pmem = true; 6715 6715 fallthrough; 6716 - case INTEL_FAM6_ICELAKE_L: 6717 - case INTEL_FAM6_ICELAKE: 6718 - case INTEL_FAM6_TIGERLAKE_L: 6719 - case INTEL_FAM6_TIGERLAKE: 6720 - case INTEL_FAM6_ROCKETLAKE: 6716 + case INTEL_ICELAKE_L: 6717 + case INTEL_ICELAKE: 6718 + case INTEL_TIGERLAKE_L: 6719 + case INTEL_TIGERLAKE: 6720 + case INTEL_ROCKETLAKE: 6721 6721 x86_pmu.late_ack = true; 6722 6722 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6723 6723 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); ··· 6752 6752 name = "icelake"; 6753 6753 break; 6754 6754 6755 - case INTEL_FAM6_SAPPHIRERAPIDS_X: 6756 - case INTEL_FAM6_EMERALDRAPIDS_X: 6755 + case INTEL_SAPPHIRERAPIDS_X: 6756 + case INTEL_EMERALDRAPIDS_X: 6757 6757 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; 6758 6758 x86_pmu.extra_regs = intel_glc_extra_regs; 6759 6759 fallthrough; 6760 - case INTEL_FAM6_GRANITERAPIDS_X: 6761 - case INTEL_FAM6_GRANITERAPIDS_D: 6760 + case INTEL_GRANITERAPIDS_X: 6761 + case INTEL_GRANITERAPIDS_D: 6762 6762 intel_pmu_init_glc(NULL); 6763 6763 if (!x86_pmu.extra_regs) 6764 6764 x86_pmu.extra_regs = intel_rwc_extra_regs; ··· 6776 6776 name = "sapphire_rapids"; 6777 6777 break; 6778 6778 6779 - case 
INTEL_FAM6_ALDERLAKE: 6780 - case INTEL_FAM6_ALDERLAKE_L: 6781 - case INTEL_FAM6_RAPTORLAKE: 6782 - case INTEL_FAM6_RAPTORLAKE_P: 6783 - case INTEL_FAM6_RAPTORLAKE_S: 6779 + case INTEL_ALDERLAKE: 6780 + case INTEL_ALDERLAKE_L: 6781 + case INTEL_RAPTORLAKE: 6782 + case INTEL_RAPTORLAKE_P: 6783 + case INTEL_RAPTORLAKE_S: 6784 6784 /* 6785 6785 * Alder Lake has 2 types of CPU, core and atom. 6786 6786 * ··· 6838 6838 name = "alderlake_hybrid"; 6839 6839 break; 6840 6840 6841 - case INTEL_FAM6_METEORLAKE: 6842 - case INTEL_FAM6_METEORLAKE_L: 6841 + case INTEL_METEORLAKE: 6842 + case INTEL_METEORLAKE_L: 6843 6843 intel_pmu_init_hybrid(hybrid_big_small); 6844 6844 6845 6845 x86_pmu.pebs_latency_data = mtl_latency_data_small;
+45 -45
arch/x86/events/rapl.c
··· 765 765 }; 766 766 767 767 static const struct x86_cpu_id rapl_model_match[] __initconst = { 768 - X86_MATCH_FEATURE(X86_FEATURE_RAPL, &model_amd_hygon), 769 - X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &model_snb), 770 - X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &model_snbep), 771 - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &model_snb), 772 - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &model_snbep), 773 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &model_hsw), 774 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &model_hsx), 775 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &model_hsw), 776 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &model_hsw), 777 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &model_hsw), 778 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &model_hsw), 779 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &model_hsx), 780 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &model_hsx), 781 - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &model_knl), 782 - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &model_knl), 783 - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &model_skl), 784 - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &model_skl), 785 - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &model_hsx), 786 - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &model_skl), 787 - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &model_skl), 788 - X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &model_skl), 789 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &model_hsw), 790 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &model_hsw), 791 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &model_hsw), 792 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &model_skl), 793 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &model_skl), 794 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &model_hsx), 795 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &model_hsx), 796 - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &model_skl), 797 - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &model_skl), 798 - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &model_skl), 799 - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &model_skl), 800 - 
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl), 801 - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl), 802 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &model_skl), 803 - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr), 804 - X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &model_spr), 805 - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &model_skl), 806 - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &model_skl), 807 - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &model_skl), 808 - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &model_skl), 809 - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &model_skl), 810 - X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, &model_skl), 811 - X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, &model_skl), 812 - X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, &model_skl), 768 + X86_MATCH_FEATURE(X86_FEATURE_RAPL, &model_amd_hygon), 769 + X86_MATCH_VFM(INTEL_SANDYBRIDGE, &model_snb), 770 + X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &model_snbep), 771 + X86_MATCH_VFM(INTEL_IVYBRIDGE, &model_snb), 772 + X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &model_snbep), 773 + X86_MATCH_VFM(INTEL_HASWELL, &model_hsw), 774 + X86_MATCH_VFM(INTEL_HASWELL_X, &model_hsx), 775 + X86_MATCH_VFM(INTEL_HASWELL_L, &model_hsw), 776 + X86_MATCH_VFM(INTEL_HASWELL_G, &model_hsw), 777 + X86_MATCH_VFM(INTEL_BROADWELL, &model_hsw), 778 + X86_MATCH_VFM(INTEL_BROADWELL_G, &model_hsw), 779 + X86_MATCH_VFM(INTEL_BROADWELL_X, &model_hsx), 780 + X86_MATCH_VFM(INTEL_BROADWELL_D, &model_hsx), 781 + X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &model_knl), 782 + X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &model_knl), 783 + X86_MATCH_VFM(INTEL_SKYLAKE_L, &model_skl), 784 + X86_MATCH_VFM(INTEL_SKYLAKE, &model_skl), 785 + X86_MATCH_VFM(INTEL_SKYLAKE_X, &model_hsx), 786 + X86_MATCH_VFM(INTEL_KABYLAKE_L, &model_skl), 787 + X86_MATCH_VFM(INTEL_KABYLAKE, &model_skl), 788 + X86_MATCH_VFM(INTEL_CANNONLAKE_L, &model_skl), 789 + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &model_hsw), 790 + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &model_hsw), 791 + 
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &model_hsw), 792 + X86_MATCH_VFM(INTEL_ICELAKE_L, &model_skl), 793 + X86_MATCH_VFM(INTEL_ICELAKE, &model_skl), 794 + X86_MATCH_VFM(INTEL_ICELAKE_D, &model_hsx), 795 + X86_MATCH_VFM(INTEL_ICELAKE_X, &model_hsx), 796 + X86_MATCH_VFM(INTEL_COMETLAKE_L, &model_skl), 797 + X86_MATCH_VFM(INTEL_COMETLAKE, &model_skl), 798 + X86_MATCH_VFM(INTEL_TIGERLAKE_L, &model_skl), 799 + X86_MATCH_VFM(INTEL_TIGERLAKE, &model_skl), 800 + X86_MATCH_VFM(INTEL_ALDERLAKE, &model_skl), 801 + X86_MATCH_VFM(INTEL_ALDERLAKE_L, &model_skl), 802 + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &model_skl), 803 + X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &model_spr), 804 + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &model_spr), 805 + X86_MATCH_VFM(INTEL_RAPTORLAKE, &model_skl), 806 + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &model_skl), 807 + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &model_skl), 808 + X86_MATCH_VFM(INTEL_METEORLAKE, &model_skl), 809 + X86_MATCH_VFM(INTEL_METEORLAKE_L, &model_skl), 810 + X86_MATCH_VFM(INTEL_ARROWLAKE_H, &model_skl), 811 + X86_MATCH_VFM(INTEL_ARROWLAKE, &model_skl), 812 + X86_MATCH_VFM(INTEL_LUNARLAKE_M, &model_skl), 813 813 {}, 814 814 }; 815 815 MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
+4 -4
arch/x86/include/asm/cpu_device_id.h
··· 280 280 u32 x86_microcode_rev; 281 281 }; 282 282 283 - #define INTEL_CPU_DESC(model, stepping, revision) { \ 284 - .x86_family = 6, \ 285 - .x86_vendor = X86_VENDOR_INTEL, \ 286 - .x86_model = (model), \ 283 + #define INTEL_CPU_DESC(vfm, stepping, revision) { \ 284 + .x86_family = VFM_FAMILY(vfm), \ 285 + .x86_vendor = VFM_VENDOR(vfm), \ 286 + .x86_model = VFM_MODEL(vfm), \ 287 287 .x86_stepping = (stepping), \ 288 288 .x86_microcode_rev = (revision), \ 289 289 }
+400 -400
arch/x86/include/asm/cpufeatures.h
··· 18 18 19 19 /* 20 20 * Note: If the comment begins with a quoted string, that string is used 21 - * in /proc/cpuinfo instead of the macro name. If the string is "", 22 - * this feature bit is not displayed in /proc/cpuinfo at all. 21 + * in /proc/cpuinfo instead of the macro name. Otherwise, this feature 22 + * bit is not displayed in /proc/cpuinfo at all. 23 23 * 24 24 * When adding new features here that depend on other features, 25 25 * please update the table in kernel/cpu/cpuid-deps.c as well. 26 26 */ 27 27 28 28 /* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */ 29 - #define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ 30 - #define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ 31 - #define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ 32 - #define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ 33 - #define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ 34 - #define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ 35 - #define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ 36 - #define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ 37 - #define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ 38 - #define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ 39 - #define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ 40 - #define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ 41 - #define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ 42 - #define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ 43 - #define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */ 44 - #define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ 45 - #define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ 46 - #define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ 47 - #define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ 29 + #define X86_FEATURE_FPU ( 0*32+ 0) /* "fpu" Onboard FPU */ 30 + #define X86_FEATURE_VME 
( 0*32+ 1) /* "vme" Virtual Mode Extensions */ 31 + #define X86_FEATURE_DE ( 0*32+ 2) /* "de" Debugging Extensions */ 32 + #define X86_FEATURE_PSE ( 0*32+ 3) /* "pse" Page Size Extensions */ 33 + #define X86_FEATURE_TSC ( 0*32+ 4) /* "tsc" Time Stamp Counter */ 34 + #define X86_FEATURE_MSR ( 0*32+ 5) /* "msr" Model-Specific Registers */ 35 + #define X86_FEATURE_PAE ( 0*32+ 6) /* "pae" Physical Address Extensions */ 36 + #define X86_FEATURE_MCE ( 0*32+ 7) /* "mce" Machine Check Exception */ 37 + #define X86_FEATURE_CX8 ( 0*32+ 8) /* "cx8" CMPXCHG8 instruction */ 38 + #define X86_FEATURE_APIC ( 0*32+ 9) /* "apic" Onboard APIC */ 39 + #define X86_FEATURE_SEP ( 0*32+11) /* "sep" SYSENTER/SYSEXIT */ 40 + #define X86_FEATURE_MTRR ( 0*32+12) /* "mtrr" Memory Type Range Registers */ 41 + #define X86_FEATURE_PGE ( 0*32+13) /* "pge" Page Global Enable */ 42 + #define X86_FEATURE_MCA ( 0*32+14) /* "mca" Machine Check Architecture */ 43 + #define X86_FEATURE_CMOV ( 0*32+15) /* "cmov" CMOV instructions (plus FCMOVcc, FCOMI with FPU) */ 44 + #define X86_FEATURE_PAT ( 0*32+16) /* "pat" Page Attribute Table */ 45 + #define X86_FEATURE_PSE36 ( 0*32+17) /* "pse36" 36-bit PSEs */ 46 + #define X86_FEATURE_PN ( 0*32+18) /* "pn" Processor serial number */ 47 + #define X86_FEATURE_CLFLUSH ( 0*32+19) /* "clflush" CLFLUSH instruction */ 48 48 #define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ 49 - #define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ 50 - #define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ 51 - #define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ 49 + #define X86_FEATURE_ACPI ( 0*32+22) /* "acpi" ACPI via MSR */ 50 + #define X86_FEATURE_MMX ( 0*32+23) /* "mmx" Multimedia Extensions */ 51 + #define X86_FEATURE_FXSR ( 0*32+24) /* "fxsr" FXSAVE/FXRSTOR, CR4.OSFXSR */ 52 52 #define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ 53 53 #define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ 54 54 #define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop 
*/ 55 - #define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ 55 + #define X86_FEATURE_HT ( 0*32+28) /* "ht" Hyper-Threading */ 56 56 #define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ 57 - #define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ 58 - #define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ 57 + #define X86_FEATURE_IA64 ( 0*32+30) /* "ia64" IA-64 processor */ 58 + #define X86_FEATURE_PBE ( 0*32+31) /* "pbe" Pending Break Enable */ 59 59 60 60 /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ 61 61 /* Don't duplicate feature flags which are redundant with Intel! */ 62 - #define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ 63 - #define X86_FEATURE_MP ( 1*32+19) /* MP Capable */ 64 - #define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ 65 - #define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ 66 - #define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ 62 + #define X86_FEATURE_SYSCALL ( 1*32+11) /* "syscall" SYSCALL/SYSRET */ 63 + #define X86_FEATURE_MP ( 1*32+19) /* "mp" MP Capable */ 64 + #define X86_FEATURE_NX ( 1*32+20) /* "nx" Execute Disable */ 65 + #define X86_FEATURE_MMXEXT ( 1*32+22) /* "mmxext" AMD MMX extensions */ 66 + #define X86_FEATURE_FXSR_OPT ( 1*32+25) /* "fxsr_opt" FXSAVE/FXRSTOR optimizations */ 67 67 #define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ 68 - #define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ 69 - #define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */ 70 - #define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */ 71 - #define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */ 68 + #define X86_FEATURE_RDTSCP ( 1*32+27) /* "rdtscp" RDTSCP */ 69 + #define X86_FEATURE_LM ( 1*32+29) /* "lm" Long Mode (x86-64, 64-bit support) */ 70 + #define X86_FEATURE_3DNOWEXT ( 1*32+30) /* "3dnowext" AMD 3DNow extensions */ 71 + #define X86_FEATURE_3DNOW ( 1*32+31) /* "3dnow" 3DNow */ 72 72 73 73 /* Transmeta-defined CPU 
features, CPUID level 0x80860001, word 2 */ 74 - #define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ 75 - #define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ 76 - #define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ 74 + #define X86_FEATURE_RECOVERY ( 2*32+ 0) /* "recovery" CPU in recovery mode */ 75 + #define X86_FEATURE_LONGRUN ( 2*32+ 1) /* "longrun" Longrun power control */ 76 + #define X86_FEATURE_LRTI ( 2*32+ 3) /* "lrti" LongRun table interface */ 77 77 78 78 /* Other features, Linux-defined mapping, word 3 */ 79 79 /* This range is used for feature bits which conflict or are synthesized */ 80 - #define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ 81 - #define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ 82 - #define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ 83 - #define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ 84 - #define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ 85 - #define X86_FEATURE_ZEN5 ( 3*32+ 5) /* "" CPU based on Zen5 microarchitecture */ 86 - #define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ 87 - #define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ 88 - #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ 89 - #define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */ 90 - #define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */ 91 - #define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ 92 - #define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ 93 - #define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ 94 - #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */ 95 - #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */ 96 - #define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */ 97 - #define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */ 98 - #define 
X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* "" Clear CPU buffers using VERW */ 99 - #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ 100 - #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ 101 - #define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ 102 - #define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */ 103 - #define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ 104 - #define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ 105 - #define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ 106 - #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */ 107 - #define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */ 108 - #define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */ 109 - #define X86_FEATURE_RAPL ( 3*32+29) /* AMD/Hygon RAPL interface */ 110 - #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ 111 - #define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ 80 + #define X86_FEATURE_CXMMX ( 3*32+ 0) /* "cxmmx" Cyrix MMX extensions */ 81 + #define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* "k6_mtrr" AMD K6 nonstandard MTRRs */ 82 + #define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* "cyrix_arr" Cyrix ARRs (= MTRRs) */ 83 + #define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* "centaur_mcr" Centaur MCRs (= MTRRs) */ 84 + #define X86_FEATURE_K8 ( 3*32+ 4) /* Opteron, Athlon64 */ 85 + #define X86_FEATURE_ZEN5 ( 3*32+ 5) /* CPU based on Zen5 microarchitecture */ 86 + #define X86_FEATURE_P3 ( 3*32+ 6) /* P3 */ 87 + #define X86_FEATURE_P4 ( 3*32+ 7) /* P4 */ 88 + #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */ 89 + #define X86_FEATURE_UP ( 3*32+ 9) /* "up" SMP kernel running on UP */ 90 + #define X86_FEATURE_ART ( 3*32+10) /* "art" Always running timer (ART) */ 
91 + #define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* "arch_perfmon" Intel Architectural PerfMon */ 92 + #define X86_FEATURE_PEBS ( 3*32+12) /* "pebs" Precise-Event Based Sampling */ 93 + #define X86_FEATURE_BTS ( 3*32+13) /* "bts" Branch Trace Store */ 94 + #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* syscall in IA32 userspace */ 95 + #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* sysenter in IA32 userspace */ 96 + #define X86_FEATURE_REP_GOOD ( 3*32+16) /* "rep_good" REP microcode works well */ 97 + #define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */ 98 + #define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* Clear CPU buffers using VERW */ 99 + #define X86_FEATURE_ACC_POWER ( 3*32+19) /* "acc_power" AMD Accumulated Power Mechanism */ 100 + #define X86_FEATURE_NOPL ( 3*32+20) /* "nopl" The NOPL (0F 1F) instructions */ 101 + #define X86_FEATURE_ALWAYS ( 3*32+21) /* Always-present feature */ 102 + #define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* "xtopology" CPU topology enum extensions */ 103 + #define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* "tsc_reliable" TSC is known to be reliable */ 104 + #define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* "nonstop_tsc" TSC does not stop in C states */ 105 + #define X86_FEATURE_CPUID ( 3*32+25) /* "cpuid" CPU has CPUID instruction itself */ 106 + #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* "extd_apicid" Extended APICID (8 bits) */ 107 + #define X86_FEATURE_AMD_DCM ( 3*32+27) /* "amd_dcm" AMD multi-node processor */ 108 + #define X86_FEATURE_APERFMPERF ( 3*32+28) /* "aperfmperf" P-State hardware coordination feedback capability (APERF/MPERF MSRs) */ 109 + #define X86_FEATURE_RAPL ( 3*32+29) /* "rapl" AMD/Hygon RAPL interface */ 110 + #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* "nonstop_tsc_s3" TSC doesn't stop in S3 state */ 111 + #define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* "tsc_known_freq" TSC has known frequency */ 112 112 113 113 /* Intel-defined CPU features, CPUID level 0x00000001 
(ECX), word 4 */ 114 114 #define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ 115 - #define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ 116 - #define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ 115 + #define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* "pclmulqdq" PCLMULQDQ instruction */ 116 + #define X86_FEATURE_DTES64 ( 4*32+ 2) /* "dtes64" 64-bit Debug Store */ 117 117 #define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */ 118 118 #define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */ 119 - #define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ 120 - #define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */ 121 - #define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ 122 - #define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ 123 - #define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ 124 - #define X86_FEATURE_CID ( 4*32+10) /* Context ID */ 125 - #define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ 126 - #define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ 127 - #define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */ 128 - #define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ 129 - #define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */ 130 - #define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ 131 - #define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ 119 + #define X86_FEATURE_VMX ( 4*32+ 5) /* "vmx" Hardware virtualization */ 120 + #define X86_FEATURE_SMX ( 4*32+ 6) /* "smx" Safer Mode eXtensions */ 121 + #define X86_FEATURE_EST ( 4*32+ 7) /* "est" Enhanced SpeedStep */ 122 + #define X86_FEATURE_TM2 ( 4*32+ 8) /* "tm2" Thermal Monitor 2 */ 123 + #define X86_FEATURE_SSSE3 ( 4*32+ 9) /* "ssse3" Supplemental SSE-3 */ 124 + #define X86_FEATURE_CID ( 4*32+10) /* "cid" Context ID */ 125 + #define X86_FEATURE_SDBG ( 4*32+11) /* "sdbg" Silicon Debug */ 126 + #define X86_FEATURE_FMA ( 
4*32+12) /* "fma" Fused multiply-add */ 127 + #define X86_FEATURE_CX16 ( 4*32+13) /* "cx16" CMPXCHG16B instruction */ 128 + #define X86_FEATURE_XTPR ( 4*32+14) /* "xtpr" Send Task Priority Messages */ 129 + #define X86_FEATURE_PDCM ( 4*32+15) /* "pdcm" Perf/Debug Capabilities MSR */ 130 + #define X86_FEATURE_PCID ( 4*32+17) /* "pcid" Process Context Identifiers */ 131 + #define X86_FEATURE_DCA ( 4*32+18) /* "dca" Direct Cache Access */ 132 132 #define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ 133 133 #define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ 134 - #define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */ 135 - #define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ 136 - #define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ 137 - #define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */ 138 - #define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ 139 - #define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */ 140 - #define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */ 141 - #define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ 142 - #define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */ 143 - #define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */ 144 - #define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ 134 + #define X86_FEATURE_X2APIC ( 4*32+21) /* "x2apic" X2APIC */ 135 + #define X86_FEATURE_MOVBE ( 4*32+22) /* "movbe" MOVBE instruction */ 136 + #define X86_FEATURE_POPCNT ( 4*32+23) /* "popcnt" POPCNT instruction */ 137 + #define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* "tsc_deadline_timer" TSC deadline timer */ 138 + #define X86_FEATURE_AES ( 4*32+25) /* "aes" AES instructions */ 139 + #define X86_FEATURE_XSAVE ( 4*32+26) /* "xsave" XSAVE/XRSTOR/XSETBV/XGETBV instructions */ 140 + #define X86_FEATURE_OSXSAVE ( 4*32+27) /* XSAVE instruction enabled in the OS */ 141 + #define X86_FEATURE_AVX ( 
4*32+28) /* "avx" Advanced Vector Extensions */ 142 + #define X86_FEATURE_F16C ( 4*32+29) /* "f16c" 16-bit FP conversions */ 143 + #define X86_FEATURE_RDRAND ( 4*32+30) /* "rdrand" RDRAND instruction */ 144 + #define X86_FEATURE_HYPERVISOR ( 4*32+31) /* "hypervisor" Running on a hypervisor */ 145 145 146 146 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ 147 147 #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ 148 148 #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ 149 149 #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ 150 150 #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ 151 - #define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ 152 - #define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ 153 - #define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ 154 - #define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ 155 - #define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ 156 - #define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ 151 + #define X86_FEATURE_ACE2 ( 5*32+ 8) /* "ace2" Advanced Cryptography Engine v2 */ 152 + #define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* "ace2_en" ACE v2 enabled */ 153 + #define X86_FEATURE_PHE ( 5*32+10) /* "phe" PadLock Hash Engine */ 154 + #define X86_FEATURE_PHE_EN ( 5*32+11) /* "phe_en" PHE enabled */ 155 + #define X86_FEATURE_PMM ( 5*32+12) /* "pmm" PadLock Montgomery Multiplier */ 156 + #define X86_FEATURE_PMM_EN ( 5*32+13) /* "pmm_en" PMM enabled */ 157 157 158 158 /* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ 159 - #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ 160 - #define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ 161 - #define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */ 162 - #define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ 163 - #define 
X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ 164 - #define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ 165 - #define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ 166 - #define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ 167 - #define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ 168 - #define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ 169 - #define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ 170 - #define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ 171 - #define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ 172 - #define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ 173 - #define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ 174 - #define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ 175 - #define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */ 176 - #define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ 177 - #define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */ 178 - #define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */ 179 - #define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */ 180 - #define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ 181 - #define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */ 182 - #define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */ 183 - #define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ 184 - #define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */ 159 + #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* "lahf_lm" LAHF/SAHF in long mode */ 160 + #define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* "cmp_legacy" If yes HyperThreading not valid */ 161 + #define X86_FEATURE_SVM ( 6*32+ 2) /* "svm" Secure Virtual Machine */ 162 + #define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* "extapic" 
Extended APIC space */ 163 + #define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* "cr8_legacy" CR8 in 32-bit mode */ 164 + #define X86_FEATURE_ABM ( 6*32+ 5) /* "abm" Advanced bit manipulation */ 165 + #define X86_FEATURE_SSE4A ( 6*32+ 6) /* "sse4a" SSE-4A */ 166 + #define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* "misalignsse" Misaligned SSE mode */ 167 + #define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* "3dnowprefetch" 3DNow prefetch instructions */ 168 + #define X86_FEATURE_OSVW ( 6*32+ 9) /* "osvw" OS Visible Workaround */ 169 + #define X86_FEATURE_IBS ( 6*32+10) /* "ibs" Instruction Based Sampling */ 170 + #define X86_FEATURE_XOP ( 6*32+11) /* "xop" Extended AVX instructions */ 171 + #define X86_FEATURE_SKINIT ( 6*32+12) /* "skinit" SKINIT/STGI instructions */ 172 + #define X86_FEATURE_WDT ( 6*32+13) /* "wdt" Watchdog timer */ 173 + #define X86_FEATURE_LWP ( 6*32+15) /* "lwp" Light Weight Profiling */ 174 + #define X86_FEATURE_FMA4 ( 6*32+16) /* "fma4" 4 operands MAC instructions */ 175 + #define X86_FEATURE_TCE ( 6*32+17) /* "tce" Translation Cache Extension */ 176 + #define X86_FEATURE_NODEID_MSR ( 6*32+19) /* "nodeid_msr" NodeId MSR */ 177 + #define X86_FEATURE_TBM ( 6*32+21) /* "tbm" Trailing Bit Manipulations */ 178 + #define X86_FEATURE_TOPOEXT ( 6*32+22) /* "topoext" Topology extensions CPUID leafs */ 179 + #define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* "perfctr_core" Core performance counter extensions */ 180 + #define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* "perfctr_nb" NB performance counter extensions */ 181 + #define X86_FEATURE_BPEXT ( 6*32+26) /* "bpext" Data breakpoint extension */ 182 + #define X86_FEATURE_PTSC ( 6*32+27) /* "ptsc" Performance time-stamp counter */ 183 + #define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* "perfctr_llc" Last Level Cache performance counter extensions */ 184 + #define X86_FEATURE_MWAITX ( 6*32+29) /* "mwaitx" MWAIT extension (MONITORX/MWAITX instructions) */ 185 185 186 186 /* 187 187 * Auxiliary flags: Linux defined - For features 
scattered in various ··· 189 189 * 190 190 * Reuse free bits when adding new feature flags! 191 191 */ 192 - #define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */ 193 - #define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ 194 - #define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ 195 - #define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ 196 - #define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ 197 - #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ 198 - #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ 199 - #define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* Platform supports being a TDX host */ 200 - #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 201 - #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 202 - #define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */ 203 - #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ 204 - #define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */ 205 - #define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */ 206 - #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ 207 - #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ 208 - #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ 209 - #define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ 210 - #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ 211 - #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ 212 - #define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* AMD Performance Monitoring Version 2 */ 213 - #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ 214 - #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* 
"" Use IBRS during runtime firmware calls */ 215 - #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */ 216 - #define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */ 217 - #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */ 218 - #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ 219 - #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ 220 - #define X86_FEATURE_ZEN ( 7*32+28) /* "" Generic flag for all Zen and newer */ 221 - #define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */ 222 - #define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */ 223 - #define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */ 192 + #define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* "ring3mwait" Ring 3 MONITOR/MWAIT instructions */ 193 + #define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* "cpuid_fault" Intel CPUID faulting */ 194 + #define X86_FEATURE_CPB ( 7*32+ 2) /* "cpb" AMD Core Performance Boost */ 195 + #define X86_FEATURE_EPB ( 7*32+ 3) /* "epb" IA32_ENERGY_PERF_BIAS support */ 196 + #define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* "cat_l3" Cache Allocation Technology L3 */ 197 + #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* "cat_l2" Cache Allocation Technology L2 */ 198 + #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* "cdp_l3" Code and Data Prioritization L3 */ 199 + #define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* "tdx_host_platform" Platform supports being a TDX host */ 200 + #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* "hw_pstate" AMD HW-PState */ 201 + #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* "proc_feedback" AMD ProcFeedbackInterface */ 202 + #define X86_FEATURE_XCOMPACTED ( 7*32+10) /* Use compacted XSTATE (XSAVES or XSAVEC) */ 203 + #define X86_FEATURE_PTI ( 7*32+11) /* "pti" Kernel Page Table Isolation enabled */ 204 + #define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* 
Set/clear IBRS on kernel entry/exit */ 205 + #define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* Fill RSB on VM-Exit */ 206 + #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* "intel_ppin" Intel Processor Inventory Number */ 207 + #define X86_FEATURE_CDP_L2 ( 7*32+15) /* "cdp_l2" Code and Data Prioritization L2 */ 208 + #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* MSR SPEC_CTRL is implemented */ 209 + #define X86_FEATURE_SSBD ( 7*32+17) /* "ssbd" Speculative Store Bypass Disable */ 210 + #define X86_FEATURE_MBA ( 7*32+18) /* "mba" Memory Bandwidth Allocation */ 211 + #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ 212 + #define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* "perfmon_v2" AMD Performance Monitoring Version 2 */ 213 + #define X86_FEATURE_USE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled */ 214 + #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* Use IBRS during runtime firmware calls */ 215 + #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. 
*/ 216 + #define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */ 217 + #define X86_FEATURE_IBRS ( 7*32+25) /* "ibrs" Indirect Branch Restricted Speculation */ 218 + #define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier */ 219 + #define X86_FEATURE_STIBP ( 7*32+27) /* "stibp" Single Thread Indirect Branch Predictors */ 220 + #define X86_FEATURE_ZEN ( 7*32+28) /* Generic flag for all Zen and newer */ 221 + #define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* L1TF workaround PTE inversion */ 222 + #define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* "ibrs_enhanced" Enhanced IBRS */ 223 + #define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* MSR IA32_FEAT_CTL configured */ 224 224 225 225 /* Virtualization flags: Linux defined, word 8 */ 226 - #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 227 - #define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* Intel FlexPriority */ 228 - #define X86_FEATURE_EPT ( 8*32+ 2) /* Intel Extended Page Table */ 229 - #define X86_FEATURE_VPID ( 8*32+ 3) /* Intel Virtual Processor ID */ 226 + #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* "tpr_shadow" Intel TPR Shadow */ 227 + #define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* "flexpriority" Intel FlexPriority */ 228 + #define X86_FEATURE_EPT ( 8*32+ 2) /* "ept" Intel Extended Page Table */ 229 + #define X86_FEATURE_VPID ( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */ 230 230 231 - #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */ 232 - #define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ 233 - #define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */ 234 - #define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */ 235 - #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */ 236 - #define X86_FEATURE_PVUNLOCK ( 8*32+20) /* "" PV unlock function */ 237 - #define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* "" PV 
vcpu_is_preempted function */ 238 - #define X86_FEATURE_TDX_GUEST ( 8*32+22) /* Intel Trust Domain Extensions Guest */ 231 + #define X86_FEATURE_VMMCALL ( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */ 232 + #define X86_FEATURE_XENPV ( 8*32+16) /* Xen paravirtual guest */ 233 + #define X86_FEATURE_EPT_AD ( 8*32+17) /* "ept_ad" Intel Extended Page Table access-dirty bit */ 234 + #define X86_FEATURE_VMCALL ( 8*32+18) /* Hypervisor supports the VMCALL instruction */ 235 + #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* VMware prefers VMMCALL hypercall instruction */ 236 + #define X86_FEATURE_PVUNLOCK ( 8*32+20) /* PV unlock function */ 237 + #define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* PV vcpu_is_preempted function */ 238 + #define X86_FEATURE_TDX_GUEST ( 8*32+22) /* "tdx_guest" Intel Trust Domain Extensions Guest */ 239 239 240 240 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ 241 - #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ 242 - #define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */ 243 - #define X86_FEATURE_SGX ( 9*32+ 2) /* Software Guard Extensions */ 244 - #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ 245 - #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ 246 - #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ 247 - #define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* "" FPU data pointer updated only on x87 exceptions */ 248 - #define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ 249 - #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ 250 - #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */ 251 - #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ 252 - #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ 253 - #define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ 254 - 
#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* "" Zero out FPU CS and FPU DS */ 255 - #define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ 256 - #define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ 257 - #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ 258 - #define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ 259 - #define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */ 260 - #define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */ 261 - #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ 262 - #define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ 263 - #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ 264 - #define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ 265 - #define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */ 266 - #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ 267 - #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ 268 - #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ 269 - #define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ 270 - #define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ 271 - #define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ 241 + #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* "fsgsbase" RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ 242 + #define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* "tsc_adjust" TSC adjustment MSR 0x3B */ 243 + #define X86_FEATURE_SGX ( 9*32+ 2) /* "sgx" Software Guard Extensions */ 244 + #define X86_FEATURE_BMI1 ( 9*32+ 3) /* "bmi1" 1st group bit manipulation extensions */ 245 + #define X86_FEATURE_HLE ( 9*32+ 4) /* "hle" Hardware Lock Elision */ 246 + #define X86_FEATURE_AVX2 ( 9*32+ 5) /* "avx2" AVX2 instructions */ 247 
+ #define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* FPU data pointer updated only on x87 exceptions */ 248 + #define X86_FEATURE_SMEP ( 9*32+ 7) /* "smep" Supervisor Mode Execution Protection */ 249 + #define X86_FEATURE_BMI2 ( 9*32+ 8) /* "bmi2" 2nd group bit manipulation extensions */ 250 + #define X86_FEATURE_ERMS ( 9*32+ 9) /* "erms" Enhanced REP MOVSB/STOSB instructions */ 251 + #define X86_FEATURE_INVPCID ( 9*32+10) /* "invpcid" Invalidate Processor Context ID */ 252 + #define X86_FEATURE_RTM ( 9*32+11) /* "rtm" Restricted Transactional Memory */ 253 + #define X86_FEATURE_CQM ( 9*32+12) /* "cqm" Cache QoS Monitoring */ 254 + #define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* Zero out FPU CS and FPU DS */ 255 + #define X86_FEATURE_MPX ( 9*32+14) /* "mpx" Memory Protection Extension */ 256 + #define X86_FEATURE_RDT_A ( 9*32+15) /* "rdt_a" Resource Director Technology Allocation */ 257 + #define X86_FEATURE_AVX512F ( 9*32+16) /* "avx512f" AVX-512 Foundation */ 258 + #define X86_FEATURE_AVX512DQ ( 9*32+17) /* "avx512dq" AVX-512 DQ (Double/Quad granular) Instructions */ 259 + #define X86_FEATURE_RDSEED ( 9*32+18) /* "rdseed" RDSEED instruction */ 260 + #define X86_FEATURE_ADX ( 9*32+19) /* "adx" ADCX and ADOX instructions */ 261 + #define X86_FEATURE_SMAP ( 9*32+20) /* "smap" Supervisor Mode Access Prevention */ 262 + #define X86_FEATURE_AVX512IFMA ( 9*32+21) /* "avx512ifma" AVX-512 Integer Fused Multiply-Add instructions */ 263 + #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* "clflushopt" CLFLUSHOPT instruction */ 264 + #define X86_FEATURE_CLWB ( 9*32+24) /* "clwb" CLWB instruction */ 265 + #define X86_FEATURE_INTEL_PT ( 9*32+25) /* "intel_pt" Intel Processor Trace */ 266 + #define X86_FEATURE_AVX512PF ( 9*32+26) /* "avx512pf" AVX-512 Prefetch */ 267 + #define X86_FEATURE_AVX512ER ( 9*32+27) /* "avx512er" AVX-512 Exponential and Reciprocal */ 268 + #define X86_FEATURE_AVX512CD ( 9*32+28) /* "avx512cd" AVX-512 Conflict Detection */ 269 + #define X86_FEATURE_SHA_NI ( 
9*32+29) /* "sha_ni" SHA1/SHA256 Instruction Extensions */ 270 + #define X86_FEATURE_AVX512BW ( 9*32+30) /* "avx512bw" AVX-512 BW (Byte/Word granular) Instructions */ 271 + #define X86_FEATURE_AVX512VL ( 9*32+31) /* "avx512vl" AVX-512 VL (128/256 Vector Length) Extensions */ 272 272 273 273 /* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */ 274 - #define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */ 275 - #define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */ 276 - #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */ 277 - #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */ 278 - #define X86_FEATURE_XFD (10*32+ 4) /* "" eXtended Feature Disabling */ 274 + #define X86_FEATURE_XSAVEOPT (10*32+ 0) /* "xsaveopt" XSAVEOPT instruction */ 275 + #define X86_FEATURE_XSAVEC (10*32+ 1) /* "xsavec" XSAVEC instruction */ 276 + #define X86_FEATURE_XGETBV1 (10*32+ 2) /* "xgetbv1" XGETBV with ECX = 1 instruction */ 277 + #define X86_FEATURE_XSAVES (10*32+ 3) /* "xsaves" XSAVES/XRSTORS instructions */ 278 + #define X86_FEATURE_XFD (10*32+ 4) /* eXtended Feature Disabling */ 279 279 280 280 /* 281 281 * Extended auxiliary flags: Linux defined - for features scattered in various ··· 283 283 * 284 284 * Reuse free bits when adding new feature flags! 
285 285 */ 286 - #define X86_FEATURE_CQM_LLC (11*32+ 0) /* LLC QoS if 1 */ 287 - #define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */ 288 - #define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */ 289 - #define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */ 290 - #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */ 291 - #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ 292 - #define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */ 293 - #define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */ 294 - #define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */ 295 - #define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */ 296 - #define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */ 297 - #define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */ 298 - #define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ 299 - #define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */ 300 - #define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */ 301 - #define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */ 302 - #define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */ 303 - #define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */ 304 - #define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* "" SGX EDECCSSA user leaf function */ 305 - #define X86_FEATURE_CALL_DEPTH (11*32+19) /* "" Call depth tracking for RSB stuffing */ 306 - #define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */ 307 - #define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */ 308 - #define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth 
Monitoring Event Configuration */ 309 - #define X86_FEATURE_USER_SHSTK (11*32+23) /* Shadow stack support for user mode applications */ 310 - #define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */ 311 - #define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ 312 - #define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */ 313 - #define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */ 314 - #define X86_FEATURE_ZEN2 (11*32+28) /* "" CPU based on Zen2 microarchitecture */ 315 - #define X86_FEATURE_ZEN3 (11*32+29) /* "" CPU based on Zen3 microarchitecture */ 316 - #define X86_FEATURE_ZEN4 (11*32+30) /* "" CPU based on Zen4 microarchitecture */ 317 - #define X86_FEATURE_ZEN1 (11*32+31) /* "" CPU based on Zen1 microarchitecture */ 286 + #define X86_FEATURE_CQM_LLC (11*32+ 0) /* "cqm_llc" LLC QoS if 1 */ 287 + #define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* "cqm_occup_llc" LLC occupancy monitoring */ 288 + #define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* "cqm_mbm_total" LLC Total MBM monitoring */ 289 + #define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* "cqm_mbm_local" LLC Local MBM monitoring */ 290 + #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* LFENCE in user entry SWAPGS path */ 291 + #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* LFENCE in kernel entry SWAPGS path */ 292 + #define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* "split_lock_detect" #AC for split lock */ 293 + #define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* Per-thread Memory Bandwidth Allocation */ 294 + #define X86_FEATURE_SGX1 (11*32+ 8) /* Basic SGX */ 295 + #define X86_FEATURE_SGX2 (11*32+ 9) /* SGX Enclave Dynamic Memory Management (EDMM) */ 296 + #define X86_FEATURE_ENTRY_IBPB (11*32+10) /* Issue an IBPB on kernel entry */ 297 + #define X86_FEATURE_RRSBA_CTRL (11*32+11) /* RET prediction control */ 298 + #define X86_FEATURE_RETPOLINE (11*32+12) /* Generic Retpoline mitigation 
for Spectre variant 2 */ 299 + #define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* Use LFENCE for Spectre variant 2 */ 300 + #define X86_FEATURE_RETHUNK (11*32+14) /* Use REturn THUNK */ 301 + #define X86_FEATURE_UNRET (11*32+15) /* AMD BTB untrain return */ 302 + #define X86_FEATURE_USE_IBPB_FW (11*32+16) /* Use IBPB during runtime firmware calls */ 303 + #define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* Fill RSB on VM exit when EIBRS is enabled */ 304 + #define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* SGX EDECCSSA user leaf function */ 305 + #define X86_FEATURE_CALL_DEPTH (11*32+19) /* Call depth tracking for RSB stuffing */ 306 + #define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* MSR IA32_TSX_CTRL (Intel) implemented */ 307 + #define X86_FEATURE_SMBA (11*32+21) /* Slow Memory Bandwidth Allocation */ 308 + #define X86_FEATURE_BMEC (11*32+22) /* Bandwidth Monitoring Event Configuration */ 309 + #define X86_FEATURE_USER_SHSTK (11*32+23) /* "user_shstk" Shadow stack support for user mode applications */ 310 + #define X86_FEATURE_SRSO (11*32+24) /* AMD BTB untrain RETs */ 311 + #define X86_FEATURE_SRSO_ALIAS (11*32+25) /* AMD BTB untrain RETs through aliasing */ 312 + #define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* Issue an IBPB only on VMEXIT */ 313 + #define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* IA32_TSC_DEADLINE and X2APIC MSRs need fencing */ 314 + #define X86_FEATURE_ZEN2 (11*32+28) /* CPU based on Zen2 microarchitecture */ 315 + #define X86_FEATURE_ZEN3 (11*32+29) /* CPU based on Zen3 microarchitecture */ 316 + #define X86_FEATURE_ZEN4 (11*32+30) /* CPU based on Zen4 microarchitecture */ 317 + #define X86_FEATURE_ZEN1 (11*32+31) /* CPU based on Zen1 microarchitecture */ 318 318 319 319 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ 320 - #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ 321 - #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ 322 - #define X86_FEATURE_CMPCCXADD (12*32+ 7) /* 
"" CMPccXADD instructions */ 323 - #define X86_FEATURE_ARCH_PERFMON_EXT (12*32+ 8) /* "" Intel Architectural PerfMon Extension */ 324 - #define X86_FEATURE_FZRM (12*32+10) /* "" Fast zero-length REP MOVSB */ 325 - #define X86_FEATURE_FSRS (12*32+11) /* "" Fast short REP STOSB */ 326 - #define X86_FEATURE_FSRC (12*32+12) /* "" Fast short REP {CMPSB,SCASB} */ 327 - #define X86_FEATURE_FRED (12*32+17) /* Flexible Return and Event Delivery */ 328 - #define X86_FEATURE_LKGS (12*32+18) /* "" Load "kernel" (userspace) GS */ 329 - #define X86_FEATURE_WRMSRNS (12*32+19) /* "" Non-serializing WRMSR */ 330 - #define X86_FEATURE_AMX_FP16 (12*32+21) /* "" AMX fp16 Support */ 331 - #define X86_FEATURE_AVX_IFMA (12*32+23) /* "" Support for VPMADD52[H,L]UQ */ 332 - #define X86_FEATURE_LAM (12*32+26) /* Linear Address Masking */ 320 + #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* "avx_vnni" AVX VNNI instructions */ 321 + #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* "avx512_bf16" AVX512 BFLOAT16 instructions */ 322 + #define X86_FEATURE_CMPCCXADD (12*32+ 7) /* CMPccXADD instructions */ 323 + #define X86_FEATURE_ARCH_PERFMON_EXT (12*32+ 8) /* Intel Architectural PerfMon Extension */ 324 + #define X86_FEATURE_FZRM (12*32+10) /* Fast zero-length REP MOVSB */ 325 + #define X86_FEATURE_FSRS (12*32+11) /* Fast short REP STOSB */ 326 + #define X86_FEATURE_FSRC (12*32+12) /* Fast short REP {CMPSB,SCASB} */ 327 + #define X86_FEATURE_FRED (12*32+17) /* "fred" Flexible Return and Event Delivery */ 328 + #define X86_FEATURE_LKGS (12*32+18) /* Load "kernel" (userspace) GS */ 329 + #define X86_FEATURE_WRMSRNS (12*32+19) /* Non-serializing WRMSR */ 330 + #define X86_FEATURE_AMX_FP16 (12*32+21) /* AMX fp16 Support */ 331 + #define X86_FEATURE_AVX_IFMA (12*32+23) /* Support for VPMADD52[H,L]UQ */ 332 + #define X86_FEATURE_LAM (12*32+26) /* "lam" Linear Address Masking */ 333 333 334 334 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ 335 - #define X86_FEATURE_CLZERO (13*32+ 0) /* 
CLZERO instruction */ 336 - #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ 337 - #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ 338 - #define X86_FEATURE_RDPRU (13*32+ 4) /* Read processor register at user level */ 339 - #define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */ 340 - #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ 341 - #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ 342 - #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ 343 - #define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */ 344 - #define X86_FEATURE_AMD_PPIN (13*32+23) /* Protected Processor Inventory Number */ 345 - #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ 346 - #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ 347 - #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. 
*/ 348 - #define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */ 349 - #define X86_FEATURE_AMD_PSFD (13*32+28) /* "" Predictive Store Forwarding Disable */ 350 - #define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */ 351 - #define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */ 335 + #define X86_FEATURE_CLZERO (13*32+ 0) /* "clzero" CLZERO instruction */ 336 + #define X86_FEATURE_IRPERF (13*32+ 1) /* "irperf" Instructions Retired Count */ 337 + #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* "xsaveerptr" Always save/restore FP error pointers */ 338 + #define X86_FEATURE_RDPRU (13*32+ 4) /* "rdpru" Read processor register at user level */ 339 + #define X86_FEATURE_WBNOINVD (13*32+ 9) /* "wbnoinvd" WBNOINVD instruction */ 340 + #define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ 341 + #define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ 342 + #define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ 343 + #define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* Single Thread Indirect Branch Predictors always-on preferred */ 344 + #define X86_FEATURE_AMD_PPIN (13*32+23) /* "amd_ppin" Protected Processor Inventory Number */ 345 + #define X86_FEATURE_AMD_SSBD (13*32+24) /* Speculative Store Bypass Disable */ 346 + #define X86_FEATURE_VIRT_SSBD (13*32+25) /* "virt_ssbd" Virtualized Speculative Store Bypass Disable */ 347 + #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* Speculative Store Bypass is fixed in hardware. 
*/ 348 + #define X86_FEATURE_CPPC (13*32+27) /* "cppc" Collaborative Processor Performance Control */ 349 + #define X86_FEATURE_AMD_PSFD (13*32+28) /* Predictive Store Forwarding Disable */ 350 + #define X86_FEATURE_BTC_NO (13*32+29) /* Not vulnerable to Branch Type Confusion */ 351 + #define X86_FEATURE_BRS (13*32+31) /* "brs" Branch Sampling available */ 352 352 353 353 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ 354 - #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ 355 - #define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ 356 - #define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ 357 - #define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ 358 - #define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ 359 - #define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ 360 - #define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ 361 - #define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ 362 - #define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. 
Preference */ 363 - #define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ 364 - #define X86_FEATURE_HFI (14*32+19) /* Hardware Feedback Interface */ 354 + #define X86_FEATURE_DTHERM (14*32+ 0) /* "dtherm" Digital Thermal Sensor */ 355 + #define X86_FEATURE_IDA (14*32+ 1) /* "ida" Intel Dynamic Acceleration */ 356 + #define X86_FEATURE_ARAT (14*32+ 2) /* "arat" Always Running APIC Timer */ 357 + #define X86_FEATURE_PLN (14*32+ 4) /* "pln" Intel Power Limit Notification */ 358 + #define X86_FEATURE_PTS (14*32+ 6) /* "pts" Intel Package Thermal Status */ 359 + #define X86_FEATURE_HWP (14*32+ 7) /* "hwp" Intel Hardware P-states */ 360 + #define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* "hwp_notify" HWP Notification */ 361 + #define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* "hwp_act_window" HWP Activity Window */ 362 + #define X86_FEATURE_HWP_EPP (14*32+10) /* "hwp_epp" HWP Energy Perf. Preference */ 363 + #define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* "hwp_pkg_req" HWP Package Level Request */ 364 + #define X86_FEATURE_HFI (14*32+19) /* "hfi" Hardware Feedback Interface */ 365 365 366 366 /* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */ 367 - #define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ 368 - #define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ 367 + #define X86_FEATURE_NPT (15*32+ 0) /* "npt" Nested Page Table support */ 368 + #define X86_FEATURE_LBRV (15*32+ 1) /* "lbrv" LBR Virtualization support */ 369 369 #define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ 370 370 #define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ 371 371 #define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ 372 372 #define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ 373 - #define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ 374 - #define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ 375 
- #define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ 376 - #define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ 377 - #define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ 378 - #define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ 379 - #define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ 380 - #define X86_FEATURE_X2AVIC (15*32+18) /* Virtual x2apic */ 381 - #define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* Virtual SPEC_CTRL */ 382 - #define X86_FEATURE_VNMI (15*32+25) /* Virtual NMI */ 383 - #define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */ 373 + #define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* "flushbyasid" Flush-by-ASID support */ 374 + #define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* "decodeassists" Decode Assists support */ 375 + #define X86_FEATURE_PAUSEFILTER (15*32+10) /* "pausefilter" Filtered pause intercept */ 376 + #define X86_FEATURE_PFTHRESHOLD (15*32+12) /* "pfthreshold" Pause filter threshold */ 377 + #define X86_FEATURE_AVIC (15*32+13) /* "avic" Virtual Interrupt Controller */ 378 + #define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* "v_vmsave_vmload" Virtual VMSAVE VMLOAD */ 379 + #define X86_FEATURE_VGIF (15*32+16) /* "vgif" Virtual GIF */ 380 + #define X86_FEATURE_X2AVIC (15*32+18) /* "x2avic" Virtual x2apic */ 381 + #define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* "v_spec_ctrl" Virtual SPEC_CTRL */ 382 + #define X86_FEATURE_VNMI (15*32+25) /* "vnmi" Virtual NMI */ 383 + #define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* SVME addr check */ 384 384 385 385 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ 386 - #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ 387 - #define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */ 388 - #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ 389 - #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ 390 - 
#define X86_FEATURE_WAITPKG (16*32+ 5) /* UMONITOR/UMWAIT/TPAUSE Instructions */ 391 - #define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ 392 - #define X86_FEATURE_SHSTK (16*32+ 7) /* "" Shadow stack */ 393 - #define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */ 394 - #define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */ 395 - #define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */ 396 - #define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */ 397 - #define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */ 398 - #define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */ 399 - #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ 400 - #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ 401 - #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ 402 - #define X86_FEATURE_BUS_LOCK_DETECT (16*32+24) /* Bus Lock detect */ 403 - #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ 404 - #define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */ 405 - #define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */ 406 - #define X86_FEATURE_ENQCMD (16*32+29) /* ENQCMD and ENQCMDS instructions */ 407 - #define X86_FEATURE_SGX_LC (16*32+30) /* Software Guard Extensions Launch Control */ 386 + #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* "avx512vbmi" AVX512 Vector Bit Manipulation instructions*/ 387 + #define X86_FEATURE_UMIP (16*32+ 2) /* "umip" User Mode Instruction Protection */ 388 + #define X86_FEATURE_PKU (16*32+ 3) /* "pku" Protection Keys for Userspace */ 389 + #define X86_FEATURE_OSPKE (16*32+ 4) /* "ospke" OS Protection Keys Enable */ 390 + #define X86_FEATURE_WAITPKG (16*32+ 5) /* "waitpkg" UMONITOR/UMWAIT/TPAUSE Instructions */ 391 + #define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* "avx512_vbmi2" Additional 
AVX512 Vector Bit Manipulation Instructions */ 392 + #define X86_FEATURE_SHSTK (16*32+ 7) /* Shadow stack */ 393 + #define X86_FEATURE_GFNI (16*32+ 8) /* "gfni" Galois Field New Instructions */ 394 + #define X86_FEATURE_VAES (16*32+ 9) /* "vaes" Vector AES */ 395 + #define X86_FEATURE_VPCLMULQDQ (16*32+10) /* "vpclmulqdq" Carry-Less Multiplication Double Quadword */ 396 + #define X86_FEATURE_AVX512_VNNI (16*32+11) /* "avx512_vnni" Vector Neural Network Instructions */ 397 + #define X86_FEATURE_AVX512_BITALG (16*32+12) /* "avx512_bitalg" Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */ 398 + #define X86_FEATURE_TME (16*32+13) /* "tme" Intel Total Memory Encryption */ 399 + #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* "avx512_vpopcntdq" POPCNT for vectors of DW/QW */ 400 + #define X86_FEATURE_LA57 (16*32+16) /* "la57" 5-level page tables */ 401 + #define X86_FEATURE_RDPID (16*32+22) /* "rdpid" RDPID instruction */ 402 + #define X86_FEATURE_BUS_LOCK_DETECT (16*32+24) /* "bus_lock_detect" Bus Lock detect */ 403 + #define X86_FEATURE_CLDEMOTE (16*32+25) /* "cldemote" CLDEMOTE instruction */ 404 + #define X86_FEATURE_MOVDIRI (16*32+27) /* "movdiri" MOVDIRI instruction */ 405 + #define X86_FEATURE_MOVDIR64B (16*32+28) /* "movdir64b" MOVDIR64B instruction */ 406 + #define X86_FEATURE_ENQCMD (16*32+29) /* "enqcmd" ENQCMD and ENQCMDS instructions */ 407 + #define X86_FEATURE_SGX_LC (16*32+30) /* "sgx_lc" Software Guard Extensions Launch Control */ 408 408 409 409 /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ 410 - #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ 411 - #define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */ 412 - #define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */ 410 + #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* "overflow_recov" MCA overflow recovery support */ 411 + #define X86_FEATURE_SUCCOR (17*32+ 1) /* "succor" Uncorrectable error 
containment and recovery */ 412 + #define X86_FEATURE_SMCA (17*32+ 3) /* "smca" Scalable MCA */ 413 413 414 414 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ 415 - #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ 416 - #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ 417 - #define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */ 418 - #define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */ 419 - #define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */ 420 - #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ 421 - #define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* "" RTM transaction always aborts */ 422 - #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ 423 - #define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */ 424 - #define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */ 425 - #define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */ 426 - #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ 427 - #define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */ 428 - #define X86_FEATURE_IBT (18*32+20) /* Indirect Branch Tracking */ 429 - #define X86_FEATURE_AMX_BF16 (18*32+22) /* AMX bf16 Support */ 430 - #define X86_FEATURE_AVX512_FP16 (18*32+23) /* AVX512 FP16 */ 431 - #define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */ 432 - #define X86_FEATURE_AMX_INT8 (18*32+25) /* AMX int8 Support */ 433 - #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ 434 - #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ 435 - #define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */ 436 - #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ 437 - #define 
X86_FEATURE_CORE_CAPABILITIES (18*32+30) /* "" IA32_CORE_CAPABILITIES MSR */ 438 - #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ 415 + #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* "avx512_4vnniw" AVX-512 Neural Network Instructions */ 416 + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* "avx512_4fmaps" AVX-512 Multiply Accumulation Single precision */ 417 + #define X86_FEATURE_FSRM (18*32+ 4) /* "fsrm" Fast Short Rep Mov */ 418 + #define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* "avx512_vp2intersect" AVX-512 Intersect for D/Q */ 419 + #define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* SRBDS mitigation MSR available */ 420 + #define X86_FEATURE_MD_CLEAR (18*32+10) /* "md_clear" VERW clears CPU buffers */ 421 + #define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* RTM transaction always aborts */ 422 + #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* TSX_FORCE_ABORT */ 423 + #define X86_FEATURE_SERIALIZE (18*32+14) /* "serialize" SERIALIZE instruction */ 424 + #define X86_FEATURE_HYBRID_CPU (18*32+15) /* This part has CPUs of more than one type */ 425 + #define X86_FEATURE_TSXLDTRK (18*32+16) /* "tsxldtrk" TSX Suspend Load Address Tracking */ 426 + #define X86_FEATURE_PCONFIG (18*32+18) /* "pconfig" Intel PCONFIG */ 427 + #define X86_FEATURE_ARCH_LBR (18*32+19) /* "arch_lbr" Intel ARCH LBR */ 428 + #define X86_FEATURE_IBT (18*32+20) /* "ibt" Indirect Branch Tracking */ 429 + #define X86_FEATURE_AMX_BF16 (18*32+22) /* "amx_bf16" AMX bf16 Support */ 430 + #define X86_FEATURE_AVX512_FP16 (18*32+23) /* "avx512_fp16" AVX512 FP16 */ 431 + #define X86_FEATURE_AMX_TILE (18*32+24) /* "amx_tile" AMX tile Support */ 432 + #define X86_FEATURE_AMX_INT8 (18*32+25) /* "amx_int8" AMX int8 Support */ 433 + #define X86_FEATURE_SPEC_CTRL (18*32+26) /* Speculation Control (IBRS + IBPB) */ 434 + #define X86_FEATURE_INTEL_STIBP (18*32+27) /* Single Thread Indirect Branch Predictors */ 435 + #define X86_FEATURE_FLUSH_L1D (18*32+28) /* "flush_l1d" 
Flush L1D cache */ 436 + #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* "arch_capabilities" IA32_ARCH_CAPABILITIES MSR (Intel) */ 437 + #define X86_FEATURE_CORE_CAPABILITIES (18*32+30) /* IA32_CORE_CAPABILITIES MSR */ 438 + #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* Speculative Store Bypass Disable */ 439 439 440 440 /* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */ 441 - #define X86_FEATURE_SME (19*32+ 0) /* AMD Secure Memory Encryption */ 442 - #define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */ 443 - #define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */ 444 - #define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */ 445 - #define X86_FEATURE_SEV_SNP (19*32+ 4) /* AMD Secure Encrypted Virtualization - Secure Nested Paging */ 446 - #define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */ 447 - #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ 448 - #define X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */ 441 + #define X86_FEATURE_SME (19*32+ 0) /* "sme" AMD Secure Memory Encryption */ 442 + #define X86_FEATURE_SEV (19*32+ 1) /* "sev" AMD Secure Encrypted Virtualization */ 443 + #define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* VM Page Flush MSR is supported */ 444 + #define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" AMD Secure Encrypted Virtualization - Encrypted State */ 445 + #define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" AMD Secure Encrypted Virtualization - Secure Nested Paging */ 446 + #define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* Virtual TSC_AUX */ 447 + #define X86_FEATURE_SME_COHERENT (19*32+10) /* AMD hardware-enforced cache coherency */ 448 + #define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */ 449 449 450 450 /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 
*/ 451 - #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */ 452 - #define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* "" WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */ 453 - #define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */ 454 - #define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */ 455 - #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */ 456 - #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */ 451 + #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */ 452 + #define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */ 453 + #define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */ 454 + #define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */ 455 + #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */ 456 + #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */ 457 457 458 - #define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */ 459 - #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ 460 - #define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */ 458 + #define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */ 459 + #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */ 460 + #define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */ 461 461 462 462 /* 463 463 * Extended auxiliary flags: Linux defined - for features scattered in various ··· 465 465 * 466 466 * Reuse free bits when adding new feature flags! 
467 467 */ 468 - #define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */ 469 - #define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* "" Clear branch history at syscall entry using SW loop */ 470 - #define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */ 471 - #define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */ 472 - #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */ 468 + #define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* "amd_lbr_pmc_freeze" AMD LBR and PMC Freeze */ 469 + #define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* Clear branch history at syscall entry using SW loop */ 470 + #define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */ 471 + #define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */ 472 + #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */ 473 473 474 474 /* 475 475 * BUG word(s) 476 476 */ 477 477 #define X86_BUG(x) (NCAPINTS*32 + (x)) 478 478 479 - #define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ 480 - #define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ 481 - #define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ 479 + #define X86_BUG_F00F X86_BUG(0) /* "f00f" Intel F00F */ 480 + #define X86_BUG_FDIV X86_BUG(1) /* "fdiv" FPU FDIV */ 481 + #define X86_BUG_COMA X86_BUG(2) /* "coma" Cyrix 6x86 coma */ 482 482 #define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ 483 483 #define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ 484 - #define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ 485 - #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ 486 - #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ 487 - #define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ 484 + #define X86_BUG_11AP X86_BUG(5) /* "11ap" 
Bad local APIC aka 11AP */ 485 + #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* "fxsave_leak" FXSAVE leaks FOP/FIP/FOP */ 486 + #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* "clflush_monitor" AAI65, CLFLUSH required before MONITOR */ 487 + #define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* "sysret_ss_attrs" SYSRET doesn't fix up SS attrs */ 488 488 #ifdef CONFIG_X86_32 489 489 /* 490 490 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional 491 491 * to avoid confusion. 492 492 */ 493 - #define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ 493 + #define X86_BUG_ESPFIX X86_BUG(9) /* IRET to 16-bit SS corrupts ESP/RSP high bits */ 494 494 #endif 495 - #define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ 496 - #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ 497 - #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ 498 - #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ 499 - #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ 500 - #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ 501 - #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ 502 - #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ 503 - #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ 504 - #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ 505 - #define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */ 506 - #define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */ 507 - #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ 508 - #define 
X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ 509 - #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ 510 - #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ 511 - #define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */ 512 - #define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */ 513 - #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ 514 - #define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */ 515 - #define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ 516 - #define X86_BUG_TDX_PW_MCE X86_BUG(31) /* CPU may incur #MC if non-TD software does partial write to TDX private memory */ 495 + #define X86_BUG_NULL_SEG X86_BUG(10) /* "null_seg" Nulling a selector preserves the base */ 496 + #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* "swapgs_fence" SWAPGS without input dep on GS */ 497 + #define X86_BUG_MONITOR X86_BUG(12) /* "monitor" IPI required to wake up remote CPU */ 498 + #define X86_BUG_AMD_E400 X86_BUG(13) /* "amd_e400" CPU is among the affected by Erratum 400 */ 499 + #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* "cpu_meltdown" CPU is affected by meltdown attack and needs kernel page table isolation */ 500 + #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* "spectre_v1" CPU is affected by Spectre variant 1 attack with conditional branches */ 501 + #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* "spectre_v2" CPU is affected by Spectre variant 2 attack with indirect branches */ 502 + #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* "spec_store_bypass" CPU is affected by speculative store bypass attack */ 503 + #define X86_BUG_L1TF X86_BUG(18) /* "l1tf" CPU is affected by L1 Terminal Fault */ 504 + #define X86_BUG_MDS X86_BUG(19) /* "mds" CPU is affected by 
Microarchitectural data sampling */ 505 + #define X86_BUG_MSBDS_ONLY X86_BUG(20) /* "msbds_only" CPU is only affected by the MSDBS variant of BUG_MDS */ 506 + #define X86_BUG_SWAPGS X86_BUG(21) /* "swapgs" CPU is affected by speculation through SWAPGS */ 507 + #define X86_BUG_TAA X86_BUG(22) /* "taa" CPU is affected by TSX Async Abort(TAA) */ 508 + #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* "itlb_multihit" CPU may incur MCE during certain page attribute changes */ 509 + #define X86_BUG_SRBDS X86_BUG(24) /* "srbds" CPU may leak RNG bits if not mitigated */ 510 + #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* "mmio_stale_data" CPU is affected by Processor MMIO Stale Data vulnerabilities */ 511 + #define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* "mmio_unknown" CPU is too old and its MMIO Stale Data status is unknown */ 512 + #define X86_BUG_RETBLEED X86_BUG(27) /* "retbleed" CPU is affected by RETBleed */ 513 + #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* "eibrs_pbrsb" EIBRS is vulnerable to Post Barrier RSB Predictions */ 514 + #define X86_BUG_SMT_RSB X86_BUG(29) /* "smt_rsb" CPU is vulnerable to Cross-Thread Return Address Predictions */ 515 + #define X86_BUG_GDS X86_BUG(30) /* "gds" CPU is affected by Gather Data Sampling */ 516 + #define X86_BUG_TDX_PW_MCE X86_BUG(31) /* "tdx_pw_mce" CPU may incur #MC if non-TD software does partial write to TDX private memory */ 517 517 518 518 /* BUG word 2 */ 519 - #define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */ 520 - #define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ 521 - #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */ 522 - #define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */ 519 + #define X86_BUG_SRSO X86_BUG(1*32 + 0) /* "srso" AMD SRSO bug */ 520 + #define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */ 521 + #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data 
Sampling */ 522 + #define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */ 523 523 #endif /* _ASM_X86_CPUFEATURES_H */
-65
arch/x86/include/asm/intel_pconfig.h
··· 1 - #ifndef _ASM_X86_INTEL_PCONFIG_H 2 - #define _ASM_X86_INTEL_PCONFIG_H 3 - 4 - #include <asm/asm.h> 5 - #include <asm/processor.h> 6 - 7 - enum pconfig_target { 8 - INVALID_TARGET = 0, 9 - MKTME_TARGET = 1, 10 - PCONFIG_TARGET_NR 11 - }; 12 - 13 - int pconfig_target_supported(enum pconfig_target target); 14 - 15 - enum pconfig_leaf { 16 - MKTME_KEY_PROGRAM = 0, 17 - PCONFIG_LEAF_INVALID, 18 - }; 19 - 20 - #define PCONFIG ".byte 0x0f, 0x01, 0xc5" 21 - 22 - /* Defines and structure for MKTME_KEY_PROGRAM of PCONFIG instruction */ 23 - 24 - /* mktme_key_program::keyid_ctrl COMMAND, bits [7:0] */ 25 - #define MKTME_KEYID_SET_KEY_DIRECT 0 26 - #define MKTME_KEYID_SET_KEY_RANDOM 1 27 - #define MKTME_KEYID_CLEAR_KEY 2 28 - #define MKTME_KEYID_NO_ENCRYPT 3 29 - 30 - /* mktme_key_program::keyid_ctrl ENC_ALG, bits [23:8] */ 31 - #define MKTME_AES_XTS_128 (1 << 8) 32 - 33 - /* Return codes from the PCONFIG MKTME_KEY_PROGRAM */ 34 - #define MKTME_PROG_SUCCESS 0 35 - #define MKTME_INVALID_PROG_CMD 1 36 - #define MKTME_ENTROPY_ERROR 2 37 - #define MKTME_INVALID_KEYID 3 38 - #define MKTME_INVALID_ENC_ALG 4 39 - #define MKTME_DEVICE_BUSY 5 40 - 41 - /* Hardware requires the structure to be 256 byte aligned. Otherwise #GP(0). */ 42 - struct mktme_key_program { 43 - u16 keyid; 44 - u32 keyid_ctrl; 45 - u8 __rsvd[58]; 46 - u8 key_field_1[64]; 47 - u8 key_field_2[64]; 48 - } __packed __aligned(256); 49 - 50 - static inline int mktme_key_program(struct mktme_key_program *key_program) 51 - { 52 - unsigned long rax = MKTME_KEY_PROGRAM; 53 - 54 - if (!pconfig_target_supported(MKTME_TARGET)) 55 - return -ENXIO; 56 - 57 - asm volatile(PCONFIG 58 - : "=a" (rax), "=b" (key_program) 59 - : "0" (rax), "1" (key_program) 60 - : "memory", "cc"); 61 - 62 - return rax; 63 - } 64 - 65 - #endif /* _ASM_X86_INTEL_PCONFIG_H */
+11 -1
arch/x86/include/asm/processor.h
··· 692 692 693 693 #ifdef CONFIG_CPU_SUP_AMD 694 694 extern u32 amd_get_highest_perf(void); 695 - extern void amd_clear_divider(void); 695 + 696 + /* 697 + * Issue a DIV 0/1 insn to clear any division data from previous DIV 698 + * operations. 699 + */ 700 + static __always_inline void amd_clear_divider(void) 701 + { 702 + asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) 703 + :: "a" (0), "d" (0), "r" (1)); 704 + } 705 + 696 706 extern void amd_check_microcode(void); 697 707 #else 698 708 static inline u32 amd_get_highest_perf(void) { return 0; }
+55 -55
arch/x86/include/asm/vmxfeatures.h
··· 9 9 10 10 /* 11 11 * Note: If the comment begins with a quoted string, that string is used 12 - * in /proc/cpuinfo instead of the macro name. If the string is "", 13 - * this feature bit is not displayed in /proc/cpuinfo at all. 12 + * in /proc/cpuinfo instead of the macro name. Otherwise, this feature bit 13 + * is not displayed in /proc/cpuinfo at all. 14 14 */ 15 15 16 16 /* Pin-Based VM-Execution Controls, EPT/VPID, APIC and VM-Functions, word 0 */ 17 - #define VMX_FEATURE_INTR_EXITING ( 0*32+ 0) /* "" VM-Exit on vectored interrupts */ 18 - #define VMX_FEATURE_NMI_EXITING ( 0*32+ 3) /* "" VM-Exit on NMIs */ 17 + #define VMX_FEATURE_INTR_EXITING ( 0*32+ 0) /* VM-Exit on vectored interrupts */ 18 + #define VMX_FEATURE_NMI_EXITING ( 0*32+ 3) /* VM-Exit on NMIs */ 19 19 #define VMX_FEATURE_VIRTUAL_NMIS ( 0*32+ 5) /* "vnmi" NMI virtualization */ 20 - #define VMX_FEATURE_PREEMPTION_TIMER ( 0*32+ 6) /* VMX Preemption Timer */ 21 - #define VMX_FEATURE_POSTED_INTR ( 0*32+ 7) /* Posted Interrupts */ 20 + #define VMX_FEATURE_PREEMPTION_TIMER ( 0*32+ 6) /* "preemption_timer" VMX Preemption Timer */ 21 + #define VMX_FEATURE_POSTED_INTR ( 0*32+ 7) /* "posted_intr" Posted Interrupts */ 22 22 23 23 /* EPT/VPID features, scattered to bits 16-23 */ 24 - #define VMX_FEATURE_INVVPID ( 0*32+ 16) /* INVVPID is supported */ 24 + #define VMX_FEATURE_INVVPID ( 0*32+ 16) /* "invvpid" INVVPID is supported */ 25 25 #define VMX_FEATURE_EPT_EXECUTE_ONLY ( 0*32+ 17) /* "ept_x_only" EPT entries can be execute only */ 26 - #define VMX_FEATURE_EPT_AD ( 0*32+ 18) /* EPT Accessed/Dirty bits */ 27 - #define VMX_FEATURE_EPT_1GB ( 0*32+ 19) /* 1GB EPT pages */ 28 - #define VMX_FEATURE_EPT_5LEVEL ( 0*32+ 20) /* 5-level EPT paging */ 26 + #define VMX_FEATURE_EPT_AD ( 0*32+ 18) /* "ept_ad" EPT Accessed/Dirty bits */ 27 + #define VMX_FEATURE_EPT_1GB ( 0*32+ 19) /* "ept_1gb" 1GB EPT pages */ 28 + #define VMX_FEATURE_EPT_5LEVEL ( 0*32+ 20) /* "ept_5level" 5-level EPT paging */ 29 29 30 30 /* 
Aggregated APIC features 24-27 */ 31 - #define VMX_FEATURE_FLEXPRIORITY ( 0*32+ 24) /* TPR shadow + virt APIC */ 32 - #define VMX_FEATURE_APICV ( 0*32+ 25) /* TPR shadow + APIC reg virt + virt intr delivery + posted interrupts */ 31 + #define VMX_FEATURE_FLEXPRIORITY ( 0*32+ 24) /* "flexpriority" TPR shadow + virt APIC */ 32 + #define VMX_FEATURE_APICV ( 0*32+ 25) /* "apicv" TPR shadow + APIC reg virt + virt intr delivery + posted interrupts */ 33 33 34 34 /* VM-Functions, shifted to bits 28-31 */ 35 - #define VMX_FEATURE_EPTP_SWITCHING ( 0*32+ 28) /* EPTP switching (in guest) */ 35 + #define VMX_FEATURE_EPTP_SWITCHING ( 0*32+ 28) /* "eptp_switching" EPTP switching (in guest) */ 36 36 37 37 /* Primary Processor-Based VM-Execution Controls, word 1 */ 38 - #define VMX_FEATURE_INTR_WINDOW_EXITING ( 1*32+ 2) /* "" VM-Exit if INTRs are unblocked in guest */ 38 + #define VMX_FEATURE_INTR_WINDOW_EXITING ( 1*32+ 2) /* VM-Exit if INTRs are unblocked in guest */ 39 39 #define VMX_FEATURE_USE_TSC_OFFSETTING ( 1*32+ 3) /* "tsc_offset" Offset hardware TSC when read in guest */ 40 - #define VMX_FEATURE_HLT_EXITING ( 1*32+ 7) /* "" VM-Exit on HLT */ 41 - #define VMX_FEATURE_INVLPG_EXITING ( 1*32+ 9) /* "" VM-Exit on INVLPG */ 42 - #define VMX_FEATURE_MWAIT_EXITING ( 1*32+ 10) /* "" VM-Exit on MWAIT */ 43 - #define VMX_FEATURE_RDPMC_EXITING ( 1*32+ 11) /* "" VM-Exit on RDPMC */ 44 - #define VMX_FEATURE_RDTSC_EXITING ( 1*32+ 12) /* "" VM-Exit on RDTSC */ 45 - #define VMX_FEATURE_CR3_LOAD_EXITING ( 1*32+ 15) /* "" VM-Exit on writes to CR3 */ 46 - #define VMX_FEATURE_CR3_STORE_EXITING ( 1*32+ 16) /* "" VM-Exit on reads from CR3 */ 47 - #define VMX_FEATURE_TERTIARY_CONTROLS ( 1*32+ 17) /* "" Enable Tertiary VM-Execution Controls */ 48 - #define VMX_FEATURE_CR8_LOAD_EXITING ( 1*32+ 19) /* "" VM-Exit on writes to CR8 */ 49 - #define VMX_FEATURE_CR8_STORE_EXITING ( 1*32+ 20) /* "" VM-Exit on reads from CR8 */ 40 + #define VMX_FEATURE_HLT_EXITING ( 1*32+ 7) /* VM-Exit on HLT */ 41 + 
#define VMX_FEATURE_INVLPG_EXITING ( 1*32+ 9) /* VM-Exit on INVLPG */ 42 + #define VMX_FEATURE_MWAIT_EXITING ( 1*32+ 10) /* VM-Exit on MWAIT */ 43 + #define VMX_FEATURE_RDPMC_EXITING ( 1*32+ 11) /* VM-Exit on RDPMC */ 44 + #define VMX_FEATURE_RDTSC_EXITING ( 1*32+ 12) /* VM-Exit on RDTSC */ 45 + #define VMX_FEATURE_CR3_LOAD_EXITING ( 1*32+ 15) /* VM-Exit on writes to CR3 */ 46 + #define VMX_FEATURE_CR3_STORE_EXITING ( 1*32+ 16) /* VM-Exit on reads from CR3 */ 47 + #define VMX_FEATURE_TERTIARY_CONTROLS ( 1*32+ 17) /* Enable Tertiary VM-Execution Controls */ 48 + #define VMX_FEATURE_CR8_LOAD_EXITING ( 1*32+ 19) /* VM-Exit on writes to CR8 */ 49 + #define VMX_FEATURE_CR8_STORE_EXITING ( 1*32+ 20) /* VM-Exit on reads from CR8 */ 50 50 #define VMX_FEATURE_VIRTUAL_TPR ( 1*32+ 21) /* "vtpr" TPR virtualization, a.k.a. TPR shadow */ 51 - #define VMX_FEATURE_NMI_WINDOW_EXITING ( 1*32+ 22) /* "" VM-Exit if NMIs are unblocked in guest */ 52 - #define VMX_FEATURE_MOV_DR_EXITING ( 1*32+ 23) /* "" VM-Exit on accesses to debug registers */ 53 - #define VMX_FEATURE_UNCOND_IO_EXITING ( 1*32+ 24) /* "" VM-Exit on *all* IN{S} and OUT{S}*/ 54 - #define VMX_FEATURE_USE_IO_BITMAPS ( 1*32+ 25) /* "" VM-Exit based on I/O port */ 51 + #define VMX_FEATURE_NMI_WINDOW_EXITING ( 1*32+ 22) /* VM-Exit if NMIs are unblocked in guest */ 52 + #define VMX_FEATURE_MOV_DR_EXITING ( 1*32+ 23) /* VM-Exit on accesses to debug registers */ 53 + #define VMX_FEATURE_UNCOND_IO_EXITING ( 1*32+ 24) /* VM-Exit on *all* IN{S} and OUT{S}*/ 54 + #define VMX_FEATURE_USE_IO_BITMAPS ( 1*32+ 25) /* VM-Exit based on I/O port */ 55 55 #define VMX_FEATURE_MONITOR_TRAP_FLAG ( 1*32+ 27) /* "mtf" VMX single-step VM-Exits */ 56 - #define VMX_FEATURE_USE_MSR_BITMAPS ( 1*32+ 28) /* "" VM-Exit based on MSR index */ 57 - #define VMX_FEATURE_MONITOR_EXITING ( 1*32+ 29) /* "" VM-Exit on MONITOR (MWAIT's accomplice) */ 58 - #define VMX_FEATURE_PAUSE_EXITING ( 1*32+ 30) /* "" VM-Exit on PAUSE (unconditionally) */ 59 - #define 
VMX_FEATURE_SEC_CONTROLS ( 1*32+ 31) /* "" Enable Secondary VM-Execution Controls */ 56 + #define VMX_FEATURE_USE_MSR_BITMAPS ( 1*32+ 28) /* VM-Exit based on MSR index */ 57 + #define VMX_FEATURE_MONITOR_EXITING ( 1*32+ 29) /* VM-Exit on MONITOR (MWAIT's accomplice) */ 58 + #define VMX_FEATURE_PAUSE_EXITING ( 1*32+ 30) /* VM-Exit on PAUSE (unconditionally) */ 59 + #define VMX_FEATURE_SEC_CONTROLS ( 1*32+ 31) /* Enable Secondary VM-Execution Controls */ 60 60 61 61 /* Secondary Processor-Based VM-Execution Controls, word 2 */ 62 62 #define VMX_FEATURE_VIRT_APIC_ACCESSES ( 2*32+ 0) /* "vapic" Virtualize memory mapped APIC accesses */ 63 - #define VMX_FEATURE_EPT ( 2*32+ 1) /* Extended Page Tables, a.k.a. Two-Dimensional Paging */ 64 - #define VMX_FEATURE_DESC_EXITING ( 2*32+ 2) /* "" VM-Exit on {S,L}*DT instructions */ 65 - #define VMX_FEATURE_RDTSCP ( 2*32+ 3) /* "" Enable RDTSCP in guest */ 66 - #define VMX_FEATURE_VIRTUAL_X2APIC ( 2*32+ 4) /* "" Virtualize X2APIC for the guest */ 67 - #define VMX_FEATURE_VPID ( 2*32+ 5) /* Virtual Processor ID (TLB ASID modifier) */ 68 - #define VMX_FEATURE_WBINVD_EXITING ( 2*32+ 6) /* "" VM-Exit on WBINVD */ 69 - #define VMX_FEATURE_UNRESTRICTED_GUEST ( 2*32+ 7) /* Allow Big Real Mode and other "invalid" states */ 63 + #define VMX_FEATURE_EPT ( 2*32+ 1) /* "ept" Extended Page Tables, a.k.a. 
Two-Dimensional Paging */ 64 + #define VMX_FEATURE_DESC_EXITING ( 2*32+ 2) /* VM-Exit on {S,L}*DT instructions */ 65 + #define VMX_FEATURE_RDTSCP ( 2*32+ 3) /* Enable RDTSCP in guest */ 66 + #define VMX_FEATURE_VIRTUAL_X2APIC ( 2*32+ 4) /* Virtualize X2APIC for the guest */ 67 + #define VMX_FEATURE_VPID ( 2*32+ 5) /* "vpid" Virtual Processor ID (TLB ASID modifier) */ 68 + #define VMX_FEATURE_WBINVD_EXITING ( 2*32+ 6) /* VM-Exit on WBINVD */ 69 + #define VMX_FEATURE_UNRESTRICTED_GUEST ( 2*32+ 7) /* "unrestricted_guest" Allow Big Real Mode and other "invalid" states */ 70 70 #define VMX_FEATURE_APIC_REGISTER_VIRT ( 2*32+ 8) /* "vapic_reg" Hardware emulation of reads to the virtual-APIC */ 71 71 #define VMX_FEATURE_VIRT_INTR_DELIVERY ( 2*32+ 9) /* "vid" Evaluation and delivery of pending virtual interrupts */ 72 72 #define VMX_FEATURE_PAUSE_LOOP_EXITING ( 2*32+ 10) /* "ple" Conditionally VM-Exit on PAUSE at CPL0 */ 73 - #define VMX_FEATURE_RDRAND_EXITING ( 2*32+ 11) /* "" VM-Exit on RDRAND*/ 74 - #define VMX_FEATURE_INVPCID ( 2*32+ 12) /* "" Enable INVPCID in guest */ 75 - #define VMX_FEATURE_VMFUNC ( 2*32+ 13) /* "" Enable VM-Functions (leaf dependent) */ 76 - #define VMX_FEATURE_SHADOW_VMCS ( 2*32+ 14) /* VMREAD/VMWRITE in guest can access shadow VMCS */ 77 - #define VMX_FEATURE_ENCLS_EXITING ( 2*32+ 15) /* "" VM-Exit on ENCLS (leaf dependent) */ 78 - #define VMX_FEATURE_RDSEED_EXITING ( 2*32+ 16) /* "" VM-Exit on RDSEED */ 73 + #define VMX_FEATURE_RDRAND_EXITING ( 2*32+ 11) /* VM-Exit on RDRAND*/ 74 + #define VMX_FEATURE_INVPCID ( 2*32+ 12) /* Enable INVPCID in guest */ 75 + #define VMX_FEATURE_VMFUNC ( 2*32+ 13) /* Enable VM-Functions (leaf dependent) */ 76 + #define VMX_FEATURE_SHADOW_VMCS ( 2*32+ 14) /* "shadow_vmcs" VMREAD/VMWRITE in guest can access shadow VMCS */ 77 + #define VMX_FEATURE_ENCLS_EXITING ( 2*32+ 15) /* VM-Exit on ENCLS (leaf dependent) */ 78 + #define VMX_FEATURE_RDSEED_EXITING ( 2*32+ 16) /* VM-Exit on RDSEED */ 79 79 #define 
VMX_FEATURE_PAGE_MOD_LOGGING ( 2*32+ 17) /* "pml" Log dirty pages into buffer */ 80 - #define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* Conditionally reflect EPT violations as #VE exceptions */ 81 - #define VMX_FEATURE_PT_CONCEAL_VMX ( 2*32+ 19) /* "" Suppress VMX indicators in Processor Trace */ 82 - #define VMX_FEATURE_XSAVES ( 2*32+ 20) /* "" Enable XSAVES and XRSTORS in guest */ 80 + #define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* "ept_violation_ve" Conditionally reflect EPT violations as #VE exceptions */ 81 + #define VMX_FEATURE_PT_CONCEAL_VMX ( 2*32+ 19) /* Suppress VMX indicators in Processor Trace */ 82 + #define VMX_FEATURE_XSAVES ( 2*32+ 20) /* Enable XSAVES and XRSTORS in guest */ 83 83 #define VMX_FEATURE_MODE_BASED_EPT_EXEC ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */ 84 - #define VMX_FEATURE_PT_USE_GPA ( 2*32+ 24) /* "" Processor Trace logs GPAs */ 85 - #define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* Scale hardware TSC when read in guest */ 86 - #define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* Enable TPAUSE, UMONITOR, UMWAIT in guest */ 87 - #define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */ 88 - #define VMX_FEATURE_BUS_LOCK_DETECTION ( 2*32+ 30) /* "" VM-Exit when bus lock caused */ 89 - #define VMX_FEATURE_NOTIFY_VM_EXITING ( 2*32+ 31) /* VM-Exit when no event windows after notify window */ 84 + #define VMX_FEATURE_PT_USE_GPA ( 2*32+ 24) /* Processor Trace logs GPAs */ 85 + #define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* "tsc_scaling" Scale hardware TSC when read in guest */ 86 + #define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* "usr_wait_pause" Enable TPAUSE, UMONITOR, UMWAIT in guest */ 87 + #define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* VM-Exit on ENCLV (leaf dependent) */ 88 + #define VMX_FEATURE_BUS_LOCK_DETECTION ( 2*32+ 30) /* VM-Exit when bus lock caused */ 89 + #define VMX_FEATURE_NOTIFY_VM_EXITING ( 2*32+ 31) /* "notify_vm_exiting" VM-Exit when 
no event windows after notify window */ 90 90 91 91 /* Tertiary Processor-Based VM-Execution Controls, word 3 */ 92 - #define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* Enable IPI virtualization */ 92 + #define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* "ipi_virt" Enable IPI virtualization */ 93 93 #endif /* _ASM_X86_VMXFEATURES_H */
+1 -1
arch/x86/kernel/cpu/Makefile
··· 34 34 35 35 obj-$(CONFIG_IA32_FEAT_CTL) += feat_ctl.o 36 36 ifdef CONFIG_CPU_SUP_INTEL 37 - obj-y += intel.o intel_pconfig.o tsx.o 37 + obj-y += intel.o tsx.o 38 38 obj-$(CONFIG_PM) += intel_epb.o 39 39 endif 40 40 obj-$(CONFIG_CPU_SUP_AMD) += amd.o
-11
arch/x86/kernel/cpu/amd.c
··· 1220 1220 1221 1221 on_each_cpu(zenbleed_check_cpu, NULL, 1); 1222 1222 } 1223 - 1224 - /* 1225 - * Issue a DIV 0/1 insn to clear any division data from previous DIV 1226 - * operations. 1227 - */ 1228 - void noinstr amd_clear_divider(void) 1229 - { 1230 - asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) 1231 - :: "a" (0), "d" (0), "r" (1)); 1232 - } 1233 - EXPORT_SYMBOL_GPL(amd_clear_divider);
+67 -121
arch/x86/kernel/cpu/intel.c
··· 72 72 */ 73 73 static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c) 74 74 { 75 - switch (c->x86_model) { 76 - case INTEL_FAM6_CORE_YONAH: 77 - case INTEL_FAM6_CORE2_MEROM: 78 - case INTEL_FAM6_CORE2_MEROM_L: 79 - case INTEL_FAM6_CORE2_PENRYN: 80 - case INTEL_FAM6_CORE2_DUNNINGTON: 81 - case INTEL_FAM6_NEHALEM: 82 - case INTEL_FAM6_NEHALEM_G: 83 - case INTEL_FAM6_NEHALEM_EP: 84 - case INTEL_FAM6_NEHALEM_EX: 85 - case INTEL_FAM6_WESTMERE: 86 - case INTEL_FAM6_WESTMERE_EP: 87 - case INTEL_FAM6_SANDYBRIDGE: 75 + switch (c->x86_vfm) { 76 + case INTEL_CORE_YONAH: 77 + case INTEL_CORE2_MEROM: 78 + case INTEL_CORE2_MEROM_L: 79 + case INTEL_CORE2_PENRYN: 80 + case INTEL_CORE2_DUNNINGTON: 81 + case INTEL_NEHALEM: 82 + case INTEL_NEHALEM_G: 83 + case INTEL_NEHALEM_EP: 84 + case INTEL_NEHALEM_EX: 85 + case INTEL_WESTMERE: 86 + case INTEL_WESTMERE_EP: 87 + case INTEL_SANDYBRIDGE: 88 88 setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP); 89 89 } 90 90 } ··· 106 106 */ 107 107 if (c->x86 != 6) 108 108 return; 109 - switch (c->x86_model) { 110 - case INTEL_FAM6_XEON_PHI_KNL: 111 - case INTEL_FAM6_XEON_PHI_KNM: 109 + switch (c->x86_vfm) { 110 + case INTEL_XEON_PHI_KNL: 111 + case INTEL_XEON_PHI_KNM: 112 112 break; 113 113 default: 114 114 return; ··· 134 134 * - Release note from 20180108 microcode release 135 135 */ 136 136 struct sku_microcode { 137 - u8 model; 137 + u32 vfm; 138 138 u8 stepping; 139 139 u32 microcode; 140 140 }; 141 141 static const struct sku_microcode spectre_bad_microcodes[] = { 142 - { INTEL_FAM6_KABYLAKE, 0x0B, 0x80 }, 143 - { INTEL_FAM6_KABYLAKE, 0x0A, 0x80 }, 144 - { INTEL_FAM6_KABYLAKE, 0x09, 0x80 }, 145 - { INTEL_FAM6_KABYLAKE_L, 0x0A, 0x80 }, 146 - { INTEL_FAM6_KABYLAKE_L, 0x09, 0x80 }, 147 - { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, 148 - { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, 149 - { INTEL_FAM6_BROADWELL, 0x04, 0x28 }, 150 - { INTEL_FAM6_BROADWELL_G, 0x01, 0x1b }, 151 - { INTEL_FAM6_BROADWELL_D, 0x02, 0x14 }, 152 - { 
INTEL_FAM6_BROADWELL_D, 0x03, 0x07000011 }, 153 - { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, 154 - { INTEL_FAM6_HASWELL_L, 0x01, 0x21 }, 155 - { INTEL_FAM6_HASWELL_G, 0x01, 0x18 }, 156 - { INTEL_FAM6_HASWELL, 0x03, 0x23 }, 157 - { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, 158 - { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, 159 - { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, 142 + { INTEL_KABYLAKE, 0x0B, 0x80 }, 143 + { INTEL_KABYLAKE, 0x0A, 0x80 }, 144 + { INTEL_KABYLAKE, 0x09, 0x80 }, 145 + { INTEL_KABYLAKE_L, 0x0A, 0x80 }, 146 + { INTEL_KABYLAKE_L, 0x09, 0x80 }, 147 + { INTEL_SKYLAKE_X, 0x03, 0x0100013e }, 148 + { INTEL_SKYLAKE_X, 0x04, 0x0200003c }, 149 + { INTEL_BROADWELL, 0x04, 0x28 }, 150 + { INTEL_BROADWELL_G, 0x01, 0x1b }, 151 + { INTEL_BROADWELL_D, 0x02, 0x14 }, 152 + { INTEL_BROADWELL_D, 0x03, 0x07000011 }, 153 + { INTEL_BROADWELL_X, 0x01, 0x0b000025 }, 154 + { INTEL_HASWELL_L, 0x01, 0x21 }, 155 + { INTEL_HASWELL_G, 0x01, 0x18 }, 156 + { INTEL_HASWELL, 0x03, 0x23 }, 157 + { INTEL_HASWELL_X, 0x02, 0x3b }, 158 + { INTEL_HASWELL_X, 0x04, 0x10 }, 159 + { INTEL_IVYBRIDGE_X, 0x04, 0x42a }, 160 160 /* Observed in the wild */ 161 - { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b }, 162 - { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 }, 161 + { INTEL_SANDYBRIDGE_X, 0x06, 0x61b }, 162 + { INTEL_SANDYBRIDGE_X, 0x07, 0x712 }, 163 163 }; 164 164 165 165 static bool bad_spectre_microcode(struct cpuinfo_x86 *c) ··· 173 173 if (cpu_has(c, X86_FEATURE_HYPERVISOR)) 174 174 return false; 175 175 176 - if (c->x86 != 6) 177 - return false; 178 - 179 176 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { 180 - if (c->x86_model == spectre_bad_microcodes[i].model && 177 + if (c->x86_vfm == spectre_bad_microcodes[i].vfm && 181 178 c->x86_stepping == spectre_bad_microcodes[i].stepping) 182 179 return (c->microcode <= spectre_bad_microcodes[i].microcode); 183 180 } ··· 187 190 #define TME_ACTIVATE_LOCKED(x) (x & 0x1) 188 191 #define TME_ACTIVATE_ENABLED(x) (x & 0x2) 189 192 190 - #define 
TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */ 191 - #define TME_ACTIVATE_POLICY_AES_XTS_128 0 192 - 193 193 #define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */ 194 - 195 - #define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */ 196 - #define TME_ACTIVATE_CRYPTO_AES_XTS_128 1 197 - 198 - /* Values for mktme_status (SW only construct) */ 199 - #define MKTME_ENABLED 0 200 - #define MKTME_DISABLED 1 201 - #define MKTME_UNINITIALIZED 2 202 - static int mktme_status = MKTME_UNINITIALIZED; 203 194 204 195 static void detect_tme_early(struct cpuinfo_x86 *c) 205 196 { 206 - u64 tme_activate, tme_policy, tme_crypto_algs; 207 - int keyid_bits = 0, nr_keyids = 0; 208 - static u64 tme_activate_cpu0 = 0; 197 + u64 tme_activate; 198 + int keyid_bits; 209 199 210 200 rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate); 211 201 212 - if (mktme_status != MKTME_UNINITIALIZED) { 213 - if (tme_activate != tme_activate_cpu0) { 214 - /* Broken BIOS? */ 215 - pr_err_once("x86/tme: configuration is inconsistent between CPUs\n"); 216 - pr_err_once("x86/tme: MKTME is not usable\n"); 217 - mktme_status = MKTME_DISABLED; 218 - 219 - /* Proceed. We may need to exclude bits from x86_phys_bits. 
*/ 220 - } 221 - } else { 222 - tme_activate_cpu0 = tme_activate; 223 - } 224 - 225 202 if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { 226 203 pr_info_once("x86/tme: not enabled by BIOS\n"); 227 - mktme_status = MKTME_DISABLED; 228 204 clear_cpu_cap(c, X86_FEATURE_TME); 229 205 return; 230 206 } 231 - 232 - if (mktme_status != MKTME_UNINITIALIZED) 233 - goto detect_keyid_bits; 234 - 235 - pr_info("x86/tme: enabled by BIOS\n"); 236 - 237 - tme_policy = TME_ACTIVATE_POLICY(tme_activate); 238 - if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128) 239 - pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy); 240 - 241 - tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate); 242 - if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) { 243 - pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n", 244 - tme_crypto_algs); 245 - mktme_status = MKTME_DISABLED; 246 - } 247 - detect_keyid_bits: 207 + pr_info_once("x86/tme: enabled by BIOS\n"); 248 208 keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate); 249 - nr_keyids = (1UL << keyid_bits) - 1; 250 - if (nr_keyids) { 251 - pr_info_once("x86/mktme: enabled by BIOS\n"); 252 - pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids); 253 - } else { 254 - pr_info_once("x86/mktme: disabled by BIOS\n"); 255 - } 256 - 257 - if (mktme_status == MKTME_UNINITIALIZED) { 258 - /* MKTME is usable */ 259 - mktme_status = MKTME_ENABLED; 260 - } 209 + if (!keyid_bits) 210 + return; 261 211 262 212 /* 263 - * KeyID bits effectively lower the number of physical address 264 - * bits. Update cpuinfo_x86::x86_phys_bits accordingly. 213 + * KeyID bits are set by BIOS and can be present regardless 214 + * of whether the kernel is using them. They effectively lower 215 + * the number of physical address bits. 216 + * 217 + * Update cpuinfo_x86::x86_phys_bits accordingly. 
265 218 */ 266 219 c->x86_phys_bits -= keyid_bits; 220 + pr_info_once("x86/mktme: BIOS enabled: x86_phys_bits reduced by %d\n", 221 + keyid_bits); 267 222 } 268 223 269 224 void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c) ··· 269 320 * need the microcode to have already been loaded... so if it is 270 321 * not, recommend a BIOS update and disable large pages. 271 322 */ 272 - if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 && 323 + if (c->x86_vfm == INTEL_ATOM_BONNELL && c->x86_stepping <= 2 && 273 324 c->microcode < 0x20e) { 274 325 pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n"); 275 326 clear_cpu_cap(c, X86_FEATURE_PSE); ··· 301 352 } 302 353 303 354 /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ 304 - if (c->x86 == 6) { 305 - switch (c->x86_model) { 306 - case INTEL_FAM6_ATOM_SALTWELL_MID: 307 - case INTEL_FAM6_ATOM_SALTWELL_TABLET: 308 - case INTEL_FAM6_ATOM_SILVERMONT_MID: 309 - case INTEL_FAM6_ATOM_AIRMONT_NP: 310 - set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); 311 - break; 312 - default: 313 - break; 314 - } 355 + switch (c->x86_vfm) { 356 + case INTEL_ATOM_SALTWELL_MID: 357 + case INTEL_ATOM_SALTWELL_TABLET: 358 + case INTEL_ATOM_SILVERMONT_MID: 359 + case INTEL_ATOM_AIRMONT_NP: 360 + set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); 361 + break; 315 362 } 316 363 317 364 /* ··· 346 401 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE 347 402 * to be modified. 
348 403 */ 349 - if (c->x86 == 5 && c->x86_model == 9) { 404 + if (c->x86_vfm == INTEL_QUARK_X1000) { 350 405 pr_info("Disabling PGE capability bit\n"); 351 406 setup_clear_cpu_cap(X86_FEATURE_PGE); 352 407 } ··· 578 633 set_cpu_cap(c, X86_FEATURE_PEBS); 579 634 } 580 635 581 - if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) && 582 - (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) 636 + if (boot_cpu_has(X86_FEATURE_CLFLUSH) && 637 + (c->x86_vfm == INTEL_CORE2_DUNNINGTON || 638 + c->x86_vfm == INTEL_NEHALEM_EX || 639 + c->x86_vfm == INTEL_WESTMERE_EX)) 583 640 set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR); 584 641 585 - if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) && 586 - ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT))) 642 + if (boot_cpu_has(X86_FEATURE_MWAIT) && c->x86_vfm == INTEL_ATOM_GOLDMONT) 587 643 set_cpu_bug(c, X86_BUG_MONITOR); 588 644 589 645 #ifdef CONFIG_X86_64 ··· 1200 1254 * feature even though they do not enumerate IA32_CORE_CAPABILITIES. 1201 1255 */ 1202 1256 static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = { 1203 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0), 1204 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0), 1205 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0), 1257 + X86_MATCH_VFM(INTEL_ICELAKE_X, 0), 1258 + X86_MATCH_VFM(INTEL_ICELAKE_L, 0), 1259 + X86_MATCH_VFM(INTEL_ICELAKE_D, 0), 1206 1260 {} 1207 1261 }; 1208 1262
-84
arch/x86/kernel/cpu/intel_pconfig.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Intel PCONFIG instruction support. 4 - * 5 - * Copyright (C) 2017 Intel Corporation 6 - * 7 - * Author: 8 - * Kirill A. Shutemov <kirill.shutemov@linux.intel.com> 9 - */ 10 - #include <linux/bug.h> 11 - #include <linux/limits.h> 12 - 13 - #include <asm/cpufeature.h> 14 - #include <asm/intel_pconfig.h> 15 - 16 - #define PCONFIG_CPUID 0x1b 17 - 18 - #define PCONFIG_CPUID_SUBLEAF_MASK ((1 << 12) - 1) 19 - 20 - /* Subleaf type (EAX) for PCONFIG CPUID leaf (0x1B) */ 21 - enum { 22 - PCONFIG_CPUID_SUBLEAF_INVALID = 0, 23 - PCONFIG_CPUID_SUBLEAF_TARGETID = 1, 24 - }; 25 - 26 - /* Bitmask of supported targets */ 27 - static u64 targets_supported __read_mostly; 28 - 29 - int pconfig_target_supported(enum pconfig_target target) 30 - { 31 - /* 32 - * We would need to re-think the implementation once we get > 64 33 - * PCONFIG targets. Spec allows up to 2^32 targets. 34 - */ 35 - BUILD_BUG_ON(PCONFIG_TARGET_NR >= 64); 36 - 37 - if (WARN_ON_ONCE(target >= 64)) 38 - return 0; 39 - return targets_supported & (1ULL << target); 40 - } 41 - 42 - static int __init intel_pconfig_init(void) 43 - { 44 - int subleaf; 45 - 46 - if (!boot_cpu_has(X86_FEATURE_PCONFIG)) 47 - return 0; 48 - 49 - /* 50 - * Scan subleafs of PCONFIG CPUID leaf. 51 - * 52 - * Subleafs of the same type need not to be consecutive. 53 - * 54 - * Stop on the first invalid subleaf type. All subleafs after the first 55 - * invalid are invalid too. 
56 - */ 57 - for (subleaf = 0; subleaf < INT_MAX; subleaf++) { 58 - struct cpuid_regs regs; 59 - 60 - cpuid_count(PCONFIG_CPUID, subleaf, 61 - &regs.eax, &regs.ebx, &regs.ecx, &regs.edx); 62 - 63 - switch (regs.eax & PCONFIG_CPUID_SUBLEAF_MASK) { 64 - case PCONFIG_CPUID_SUBLEAF_INVALID: 65 - /* Stop on the first invalid subleaf */ 66 - goto out; 67 - case PCONFIG_CPUID_SUBLEAF_TARGETID: 68 - /* Mark supported PCONFIG targets */ 69 - if (regs.ebx < 64) 70 - targets_supported |= (1ULL << regs.ebx); 71 - if (regs.ecx < 64) 72 - targets_supported |= (1ULL << regs.ecx); 73 - if (regs.edx < 64) 74 - targets_supported |= (1ULL << regs.edx); 75 - break; 76 - default: 77 - /* Unknown CPUID.PCONFIG subleaf: ignore */ 78 - break; 79 - } 80 - } 81 - out: 82 - return 0; 83 - } 84 - arch_initcall(intel_pconfig_init);
+1
arch/x86/kernel/cpu/mce/inject.c
··· 799 799 800 800 module_init(inject_init); 801 801 module_exit(inject_exit); 802 + MODULE_DESCRIPTION("Machine check injection support"); 802 803 MODULE_LICENSE("GPL");
+1 -2
arch/x86/kernel/cpu/mkcapflags.sh
··· 30 30 31 31 # If the /* comment */ starts with a quote string, grab that. 32 32 VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')" 33 - [ -z "$VALUE" ] && VALUE="\"$NAME\"" 34 - [ "$VALUE" = '""' ] && continue 33 + [ ! "$VALUE" ] && continue 35 34 36 35 # Name is uppercase, VALUE is all lowercase 37 36 VALUE="$(echo "$VALUE" | tr A-Z a-z)"
+2 -2
arch/x86/pci/intel_mid_pci.c
··· 216 216 } 217 217 218 218 static const struct x86_cpu_id intel_mid_cpu_ids[] = { 219 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL), 219 + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, NULL), 220 220 {} 221 221 }; 222 222 ··· 243 243 model = id->model; 244 244 245 245 switch (model) { 246 - case INTEL_FAM6_ATOM_SILVERMONT_MID: 246 + case VFM_MODEL(INTEL_ATOM_SILVERMONT_MID): 247 247 polarity_low = false; 248 248 249 249 /* Special treatment for IRQ0 */
+3 -3
arch/x86/platform/intel-mid/intel-mid.c
··· 22 22 #include <asm/mpspec_def.h> 23 23 #include <asm/hw_irq.h> 24 24 #include <asm/apic.h> 25 + #include <asm/cpu_device_id.h> 25 26 #include <asm/io_apic.h> 26 27 #include <asm/intel-mid.h> 27 28 #include <asm/io.h> ··· 56 55 57 56 static void intel_mid_arch_setup(void) 58 57 { 59 - switch (boot_cpu_data.x86_model) { 60 - case 0x3C: 61 - case 0x4A: 58 + switch (boot_cpu_data.x86_vfm) { 59 + case INTEL_ATOM_SILVERMONT_MID: 62 60 x86_platform.legacy.rtc = 1; 63 61 break; 64 62 default:
+4 -4
arch/x86/virt/vmx/tdx/tdx.c
··· 33 33 #include <asm/msr.h> 34 34 #include <asm/cpufeature.h> 35 35 #include <asm/tdx.h> 36 - #include <asm/intel-family.h> 36 + #include <asm/cpu_device_id.h> 37 37 #include <asm/processor.h> 38 38 #include <asm/mce.h> 39 39 #include "tdx.h" ··· 1426 1426 * private memory poisons that memory, and a subsequent read of 1427 1427 * that memory triggers #MC. 1428 1428 */ 1429 - switch (boot_cpu_data.x86_model) { 1430 - case INTEL_FAM6_SAPPHIRERAPIDS_X: 1431 - case INTEL_FAM6_EMERALDRAPIDS_X: 1429 + switch (boot_cpu_data.x86_vfm) { 1430 + case INTEL_SAPPHIRERAPIDS_X: 1431 + case INTEL_EMERALDRAPIDS_X: 1432 1432 setup_force_cpu_bug(X86_BUG_TDX_PW_MCE); 1433 1433 } 1434 1434 }