Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'x86-cpu-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu updates from Ingo Molnar:

- Rework the x86 CPU vendor/family/model code: introduce the 'VFM'
value that is an 8+8+8 bit concatenation of the vendor/family/model
value, and add macros that work on VFM values. This simplifies the
addition of new Intel models & families, and simplifies existing
enumeration & quirk code.

- Add support for the AMD 0x80000026 leaf, to better parse topology
information

- Optimize the NUMA allocation layout of more per-CPU data structures

- Improve the workaround for AMD erratum 1386

- Clear TME from /proc/cpuinfo as well, when disabled by the firmware

- Improve x86 self-tests

- Extend the mce_record tracepoint with the ::ppin and ::microcode fields

- Implement recovery for MCE errors in TDX/SEAM non-root mode

- Misc cleanups and fixes

* tag 'x86-cpu-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
x86/mm: Switch to new Intel CPU model defines
x86/tsc_msr: Switch to new Intel CPU model defines
x86/tsc: Switch to new Intel CPU model defines
x86/cpu: Switch to new Intel CPU model defines
x86/resctrl: Switch to new Intel CPU model defines
x86/microcode/intel: Switch to new Intel CPU model defines
x86/mce: Switch to new Intel CPU model defines
x86/cpu: Switch to new Intel CPU model defines
x86/cpu/intel_epb: Switch to new Intel CPU model defines
x86/aperfmperf: Switch to new Intel CPU model defines
x86/apic: Switch to new Intel CPU model defines
perf/x86/msr: Switch to new Intel CPU model defines
perf/x86/intel/uncore: Switch to new Intel CPU model defines
perf/x86/intel/pt: Switch to new Intel CPU model defines
perf/x86/lbr: Switch to new Intel CPU model defines
perf/x86/intel/cstate: Switch to new Intel CPU model defines
x86/bugs: Switch to new Intel CPU model defines
x86/bugs: Switch to new Intel CPU model defines
x86/cpu/vfm: Update arch/x86/include/asm/intel-family.h
x86/cpu/vfm: Add new macros to work with (vendor/family/model) values
...

+704 -472
+58 -58
arch/x86/events/intel/cstate.c
··· 696 696 697 697 698 698 static const struct x86_cpu_id intel_cstates_match[] __initconst = { 699 - X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_cstates), 700 - X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_cstates), 701 - X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &nhm_cstates), 699 + X86_MATCH_VFM(INTEL_NEHALEM, &nhm_cstates), 700 + X86_MATCH_VFM(INTEL_NEHALEM_EP, &nhm_cstates), 701 + X86_MATCH_VFM(INTEL_NEHALEM_EX, &nhm_cstates), 702 702 703 - X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &nhm_cstates), 704 - X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &nhm_cstates), 705 - X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &nhm_cstates), 703 + X86_MATCH_VFM(INTEL_WESTMERE, &nhm_cstates), 704 + X86_MATCH_VFM(INTEL_WESTMERE_EP, &nhm_cstates), 705 + X86_MATCH_VFM(INTEL_WESTMERE_EX, &nhm_cstates), 706 706 707 - X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &snb_cstates), 708 - X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &snb_cstates), 707 + X86_MATCH_VFM(INTEL_SANDYBRIDGE, &snb_cstates), 708 + X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &snb_cstates), 709 709 710 - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &snb_cstates), 711 - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &snb_cstates), 710 + X86_MATCH_VFM(INTEL_IVYBRIDGE, &snb_cstates), 711 + X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &snb_cstates), 712 712 713 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &snb_cstates), 714 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &snb_cstates), 715 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &snb_cstates), 713 + X86_MATCH_VFM(INTEL_HASWELL, &snb_cstates), 714 + X86_MATCH_VFM(INTEL_HASWELL_X, &snb_cstates), 715 + X86_MATCH_VFM(INTEL_HASWELL_G, &snb_cstates), 716 716 717 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &hswult_cstates), 717 + X86_MATCH_VFM(INTEL_HASWELL_L, &hswult_cstates), 718 718 719 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &slm_cstates), 720 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, &slm_cstates), 721 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &slm_cstates), 719 + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &slm_cstates), 720 + 
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &slm_cstates), 721 + X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &slm_cstates), 722 722 723 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &snb_cstates), 724 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &snb_cstates), 725 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &snb_cstates), 726 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &snb_cstates), 723 + X86_MATCH_VFM(INTEL_BROADWELL, &snb_cstates), 724 + X86_MATCH_VFM(INTEL_BROADWELL_D, &snb_cstates), 725 + X86_MATCH_VFM(INTEL_BROADWELL_G, &snb_cstates), 726 + X86_MATCH_VFM(INTEL_BROADWELL_X, &snb_cstates), 727 727 728 - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &snb_cstates), 729 - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &snb_cstates), 730 - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &snb_cstates), 728 + X86_MATCH_VFM(INTEL_SKYLAKE_L, &snb_cstates), 729 + X86_MATCH_VFM(INTEL_SKYLAKE, &snb_cstates), 730 + X86_MATCH_VFM(INTEL_SKYLAKE_X, &snb_cstates), 731 731 732 - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &hswult_cstates), 733 - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &hswult_cstates), 734 - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &hswult_cstates), 735 - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &hswult_cstates), 732 + X86_MATCH_VFM(INTEL_KABYLAKE_L, &hswult_cstates), 733 + X86_MATCH_VFM(INTEL_KABYLAKE, &hswult_cstates), 734 + X86_MATCH_VFM(INTEL_COMETLAKE_L, &hswult_cstates), 735 + X86_MATCH_VFM(INTEL_COMETLAKE, &hswult_cstates), 736 736 737 - X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &cnl_cstates), 737 + X86_MATCH_VFM(INTEL_CANNONLAKE_L, &cnl_cstates), 738 738 739 - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &knl_cstates), 740 - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &knl_cstates), 739 + X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &knl_cstates), 740 + X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &knl_cstates), 741 741 742 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &glm_cstates), 743 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &glm_cstates), 744 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &glm_cstates), 745 - 
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &glm_cstates), 746 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &glm_cstates), 747 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &glm_cstates), 748 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_cstates), 749 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &srf_cstates), 750 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &grr_cstates), 742 + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &glm_cstates), 743 + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &glm_cstates), 744 + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &glm_cstates), 745 + X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &glm_cstates), 746 + X86_MATCH_VFM(INTEL_ATOM_TREMONT, &glm_cstates), 747 + X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &glm_cstates), 748 + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_cstates), 749 + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &srf_cstates), 750 + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &grr_cstates), 751 751 752 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates), 753 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates), 754 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates), 755 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates), 756 - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &icx_cstates), 757 - X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &icx_cstates), 758 - X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &icx_cstates), 759 - X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &icx_cstates), 752 + X86_MATCH_VFM(INTEL_ICELAKE_L, &icl_cstates), 753 + X86_MATCH_VFM(INTEL_ICELAKE, &icl_cstates), 754 + X86_MATCH_VFM(INTEL_ICELAKE_X, &icx_cstates), 755 + X86_MATCH_VFM(INTEL_ICELAKE_D, &icx_cstates), 756 + X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &icx_cstates), 757 + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &icx_cstates), 758 + X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &icx_cstates), 759 + X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &icx_cstates), 760 760 761 - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates), 762 - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates), 763 - 
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &icl_cstates), 764 - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_cstates), 765 - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_cstates), 766 - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_cstates), 767 - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_cstates), 768 - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_cstates), 769 - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &adl_cstates), 770 - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &adl_cstates), 761 + X86_MATCH_VFM(INTEL_TIGERLAKE_L, &icl_cstates), 762 + X86_MATCH_VFM(INTEL_TIGERLAKE, &icl_cstates), 763 + X86_MATCH_VFM(INTEL_ROCKETLAKE, &icl_cstates), 764 + X86_MATCH_VFM(INTEL_ALDERLAKE, &adl_cstates), 765 + X86_MATCH_VFM(INTEL_ALDERLAKE_L, &adl_cstates), 766 + X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_cstates), 767 + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &adl_cstates), 768 + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_cstates), 769 + X86_MATCH_VFM(INTEL_METEORLAKE, &adl_cstates), 770 + X86_MATCH_VFM(INTEL_METEORLAKE_L, &adl_cstates), 771 771 { }, 772 772 }; 773 773 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
+2 -1
arch/x86/events/intel/lbr.c
··· 2 2 #include <linux/perf_event.h> 3 3 #include <linux/types.h> 4 4 5 + #include <asm/cpu_device_id.h> 5 6 #include <asm/perf_event.h> 6 7 #include <asm/msr.h> 7 8 ··· 1458 1457 * to have an operational LBR which can freeze 1459 1458 * on PMU interrupt 1460 1459 */ 1461 - if (boot_cpu_data.x86_model == 28 1460 + if (boot_cpu_data.x86_vfm == INTEL_ATOM_BONNELL 1462 1461 && boot_cpu_data.x86_stepping < 10) { 1463 1462 pr_cont("LBR disabled due to erratum"); 1464 1463 return;
+6 -6
arch/x86/events/intel/pt.c
··· 22 22 #include <asm/insn.h> 23 23 #include <asm/io.h> 24 24 #include <asm/intel_pt.h> 25 - #include <asm/intel-family.h> 25 + #include <asm/cpu_device_id.h> 26 26 27 27 #include "../perf_event.h" 28 28 #include "pt.h" ··· 211 211 } 212 212 213 213 /* model-specific quirks */ 214 - switch (boot_cpu_data.x86_model) { 215 - case INTEL_FAM6_BROADWELL: 216 - case INTEL_FAM6_BROADWELL_D: 217 - case INTEL_FAM6_BROADWELL_G: 218 - case INTEL_FAM6_BROADWELL_X: 214 + switch (boot_cpu_data.x86_vfm) { 215 + case INTEL_BROADWELL: 216 + case INTEL_BROADWELL_D: 217 + case INTEL_BROADWELL_G: 218 + case INTEL_BROADWELL_X: 219 219 /* not setting BRANCH_EN will #GP, erratum BDM106 */ 220 220 pt_pmu.branch_en_always_on = true; 221 221 break;
+50 -50
arch/x86/events/intel/uncore.c
··· 1829 1829 }; 1830 1830 1831 1831 static const struct x86_cpu_id intel_uncore_match[] __initconst = { 1832 - X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_uncore_init), 1833 - X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_uncore_init), 1834 - X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &nhm_uncore_init), 1835 - X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &nhm_uncore_init), 1836 - X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &snb_uncore_init), 1837 - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &ivb_uncore_init), 1838 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &hsw_uncore_init), 1839 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &hsw_uncore_init), 1840 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &hsw_uncore_init), 1841 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &bdw_uncore_init), 1842 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &bdw_uncore_init), 1843 - X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &snbep_uncore_init), 1844 - X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &nhmex_uncore_init), 1845 - X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &nhmex_uncore_init), 1846 - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ivbep_uncore_init), 1847 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &hswep_uncore_init), 1848 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &bdx_uncore_init), 1849 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &bdx_uncore_init), 1850 - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &knl_uncore_init), 1851 - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &knl_uncore_init), 1852 - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &skl_uncore_init), 1853 - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &skl_uncore_init), 1854 - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skx_uncore_init), 1855 - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &skl_uncore_init), 1856 - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &skl_uncore_init), 1857 - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &skl_uncore_init), 1858 - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &skl_uncore_init), 1859 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_uncore_init), 1860 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_uncore_init), 1861 
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_uncore_init), 1862 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_uncore_init), 1863 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_uncore_init), 1864 - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_l_uncore_init), 1865 - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_uncore_init), 1866 - X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rkl_uncore_init), 1867 - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_uncore_init), 1868 - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_uncore_init), 1869 - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_uncore_init), 1870 - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_uncore_init), 1871 - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_uncore_init), 1872 - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &mtl_uncore_init), 1873 - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &mtl_uncore_init), 1874 - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init), 1875 - X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init), 1876 - X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &gnr_uncore_init), 1877 - X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &gnr_uncore_init), 1878 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init), 1879 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init), 1880 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &gnr_uncore_init), 1881 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &gnr_uncore_init), 1832 + X86_MATCH_VFM(INTEL_NEHALEM_EP, &nhm_uncore_init), 1833 + X86_MATCH_VFM(INTEL_NEHALEM, &nhm_uncore_init), 1834 + X86_MATCH_VFM(INTEL_WESTMERE, &nhm_uncore_init), 1835 + X86_MATCH_VFM(INTEL_WESTMERE_EP, &nhm_uncore_init), 1836 + X86_MATCH_VFM(INTEL_SANDYBRIDGE, &snb_uncore_init), 1837 + X86_MATCH_VFM(INTEL_IVYBRIDGE, &ivb_uncore_init), 1838 + X86_MATCH_VFM(INTEL_HASWELL, &hsw_uncore_init), 1839 + X86_MATCH_VFM(INTEL_HASWELL_L, &hsw_uncore_init), 1840 + X86_MATCH_VFM(INTEL_HASWELL_G, &hsw_uncore_init), 1841 + X86_MATCH_VFM(INTEL_BROADWELL, &bdw_uncore_init), 1842 + 
X86_MATCH_VFM(INTEL_BROADWELL_G, &bdw_uncore_init), 1843 + X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &snbep_uncore_init), 1844 + X86_MATCH_VFM(INTEL_NEHALEM_EX, &nhmex_uncore_init), 1845 + X86_MATCH_VFM(INTEL_WESTMERE_EX, &nhmex_uncore_init), 1846 + X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &ivbep_uncore_init), 1847 + X86_MATCH_VFM(INTEL_HASWELL_X, &hswep_uncore_init), 1848 + X86_MATCH_VFM(INTEL_BROADWELL_X, &bdx_uncore_init), 1849 + X86_MATCH_VFM(INTEL_BROADWELL_D, &bdx_uncore_init), 1850 + X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &knl_uncore_init), 1851 + X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &knl_uncore_init), 1852 + X86_MATCH_VFM(INTEL_SKYLAKE, &skl_uncore_init), 1853 + X86_MATCH_VFM(INTEL_SKYLAKE_L, &skl_uncore_init), 1854 + X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_uncore_init), 1855 + X86_MATCH_VFM(INTEL_KABYLAKE_L, &skl_uncore_init), 1856 + X86_MATCH_VFM(INTEL_KABYLAKE, &skl_uncore_init), 1857 + X86_MATCH_VFM(INTEL_COMETLAKE_L, &skl_uncore_init), 1858 + X86_MATCH_VFM(INTEL_COMETLAKE, &skl_uncore_init), 1859 + X86_MATCH_VFM(INTEL_ICELAKE_L, &icl_uncore_init), 1860 + X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &icl_uncore_init), 1861 + X86_MATCH_VFM(INTEL_ICELAKE, &icl_uncore_init), 1862 + X86_MATCH_VFM(INTEL_ICELAKE_D, &icx_uncore_init), 1863 + X86_MATCH_VFM(INTEL_ICELAKE_X, &icx_uncore_init), 1864 + X86_MATCH_VFM(INTEL_TIGERLAKE_L, &tgl_l_uncore_init), 1865 + X86_MATCH_VFM(INTEL_TIGERLAKE, &tgl_uncore_init), 1866 + X86_MATCH_VFM(INTEL_ROCKETLAKE, &rkl_uncore_init), 1867 + X86_MATCH_VFM(INTEL_ALDERLAKE, &adl_uncore_init), 1868 + X86_MATCH_VFM(INTEL_ALDERLAKE_L, &adl_uncore_init), 1869 + X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_uncore_init), 1870 + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &adl_uncore_init), 1871 + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_uncore_init), 1872 + X86_MATCH_VFM(INTEL_METEORLAKE, &mtl_uncore_init), 1873 + X86_MATCH_VFM(INTEL_METEORLAKE_L, &mtl_uncore_init), 1874 + X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init), 1875 + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init), 1876 
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init), 1877 + X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &gnr_uncore_init), 1878 + X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &snr_uncore_init), 1879 + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_uncore_init), 1880 + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_uncore_init), 1881 + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_uncore_init), 1882 1882 {}, 1883 1883 }; 1884 1884 MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
+2 -1
arch/x86/events/intel/uncore_nhmex.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* Nehalem-EX/Westmere-EX uncore support */ 3 + #include <asm/cpu_device_id.h> 3 4 #include "uncore.h" 4 5 5 6 /* NHM-EX event control */ ··· 1218 1217 1219 1218 void nhmex_uncore_cpu_init(void) 1220 1219 { 1221 - if (boot_cpu_data.x86_model == 46) 1220 + if (boot_cpu_data.x86_vfm == INTEL_NEHALEM_EX) 1222 1221 uncore_nhmex = true; 1223 1222 else 1224 1223 nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
+3 -2
arch/x86/events/intel/uncore_snbep.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* SandyBridge-EP/IvyTown uncore support */ 3 + #include <asm/cpu_device_id.h> 3 4 #include "uncore.h" 4 5 #include "uncore_discovery.h" 5 6 ··· 3286 3285 uncore_msr_uncores = bdx_msr_uncores; 3287 3286 3288 3287 /* Detect systems with no SBOXes */ 3289 - if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID)) 3288 + if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_D || hswep_has_limit_sbox(BDX_PCU_DID)) 3290 3289 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; 3291 3290 3292 3291 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; ··· 5395 5394 static void icx_iio_set_mapping(struct intel_uncore_type *type) 5396 5395 { 5397 5396 /* Detect ICX-D system. This case is not supported */ 5398 - if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) { 5397 + if (boot_cpu_data.x86_vfm == INTEL_ICELAKE_D) { 5399 5398 pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group); 5400 5399 return; 5401 5400 }
+58 -58
arch/x86/events/msr.c
··· 2 2 #include <linux/perf_event.h> 3 3 #include <linux/sysfs.h> 4 4 #include <linux/nospec.h> 5 - #include <asm/intel-family.h> 5 + #include <asm/cpu_device_id.h> 6 6 #include "probe.h" 7 7 8 8 enum perf_msr_id { ··· 43 43 boot_cpu_data.x86 != 6) 44 44 return false; 45 45 46 - switch (boot_cpu_data.x86_model) { 47 - case INTEL_FAM6_NEHALEM: 48 - case INTEL_FAM6_NEHALEM_G: 49 - case INTEL_FAM6_NEHALEM_EP: 50 - case INTEL_FAM6_NEHALEM_EX: 46 + switch (boot_cpu_data.x86_vfm) { 47 + case INTEL_NEHALEM: 48 + case INTEL_NEHALEM_G: 49 + case INTEL_NEHALEM_EP: 50 + case INTEL_NEHALEM_EX: 51 51 52 - case INTEL_FAM6_WESTMERE: 53 - case INTEL_FAM6_WESTMERE_EP: 54 - case INTEL_FAM6_WESTMERE_EX: 52 + case INTEL_WESTMERE: 53 + case INTEL_WESTMERE_EP: 54 + case INTEL_WESTMERE_EX: 55 55 56 - case INTEL_FAM6_SANDYBRIDGE: 57 - case INTEL_FAM6_SANDYBRIDGE_X: 56 + case INTEL_SANDYBRIDGE: 57 + case INTEL_SANDYBRIDGE_X: 58 58 59 - case INTEL_FAM6_IVYBRIDGE: 60 - case INTEL_FAM6_IVYBRIDGE_X: 59 + case INTEL_IVYBRIDGE: 60 + case INTEL_IVYBRIDGE_X: 61 61 62 - case INTEL_FAM6_HASWELL: 63 - case INTEL_FAM6_HASWELL_X: 64 - case INTEL_FAM6_HASWELL_L: 65 - case INTEL_FAM6_HASWELL_G: 62 + case INTEL_HASWELL: 63 + case INTEL_HASWELL_X: 64 + case INTEL_HASWELL_L: 65 + case INTEL_HASWELL_G: 66 66 67 - case INTEL_FAM6_BROADWELL: 68 - case INTEL_FAM6_BROADWELL_D: 69 - case INTEL_FAM6_BROADWELL_G: 70 - case INTEL_FAM6_BROADWELL_X: 71 - case INTEL_FAM6_SAPPHIRERAPIDS_X: 72 - case INTEL_FAM6_EMERALDRAPIDS_X: 73 - case INTEL_FAM6_GRANITERAPIDS_X: 74 - case INTEL_FAM6_GRANITERAPIDS_D: 67 + case INTEL_BROADWELL: 68 + case INTEL_BROADWELL_D: 69 + case INTEL_BROADWELL_G: 70 + case INTEL_BROADWELL_X: 71 + case INTEL_SAPPHIRERAPIDS_X: 72 + case INTEL_EMERALDRAPIDS_X: 73 + case INTEL_GRANITERAPIDS_X: 74 + case INTEL_GRANITERAPIDS_D: 75 75 76 - case INTEL_FAM6_ATOM_SILVERMONT: 77 - case INTEL_FAM6_ATOM_SILVERMONT_D: 78 - case INTEL_FAM6_ATOM_AIRMONT: 76 + case INTEL_ATOM_SILVERMONT: 77 + case 
INTEL_ATOM_SILVERMONT_D: 78 + case INTEL_ATOM_AIRMONT: 79 79 80 - case INTEL_FAM6_ATOM_GOLDMONT: 81 - case INTEL_FAM6_ATOM_GOLDMONT_D: 82 - case INTEL_FAM6_ATOM_GOLDMONT_PLUS: 83 - case INTEL_FAM6_ATOM_TREMONT_D: 84 - case INTEL_FAM6_ATOM_TREMONT: 85 - case INTEL_FAM6_ATOM_TREMONT_L: 80 + case INTEL_ATOM_GOLDMONT: 81 + case INTEL_ATOM_GOLDMONT_D: 82 + case INTEL_ATOM_GOLDMONT_PLUS: 83 + case INTEL_ATOM_TREMONT_D: 84 + case INTEL_ATOM_TREMONT: 85 + case INTEL_ATOM_TREMONT_L: 86 86 87 - case INTEL_FAM6_XEON_PHI_KNL: 88 - case INTEL_FAM6_XEON_PHI_KNM: 87 + case INTEL_XEON_PHI_KNL: 88 + case INTEL_XEON_PHI_KNM: 89 89 if (idx == PERF_MSR_SMI) 90 90 return true; 91 91 break; 92 92 93 - case INTEL_FAM6_SKYLAKE_L: 94 - case INTEL_FAM6_SKYLAKE: 95 - case INTEL_FAM6_SKYLAKE_X: 96 - case INTEL_FAM6_KABYLAKE_L: 97 - case INTEL_FAM6_KABYLAKE: 98 - case INTEL_FAM6_COMETLAKE_L: 99 - case INTEL_FAM6_COMETLAKE: 100 - case INTEL_FAM6_ICELAKE_L: 101 - case INTEL_FAM6_ICELAKE: 102 - case INTEL_FAM6_ICELAKE_X: 103 - case INTEL_FAM6_ICELAKE_D: 104 - case INTEL_FAM6_TIGERLAKE_L: 105 - case INTEL_FAM6_TIGERLAKE: 106 - case INTEL_FAM6_ROCKETLAKE: 107 - case INTEL_FAM6_ALDERLAKE: 108 - case INTEL_FAM6_ALDERLAKE_L: 109 - case INTEL_FAM6_ATOM_GRACEMONT: 110 - case INTEL_FAM6_RAPTORLAKE: 111 - case INTEL_FAM6_RAPTORLAKE_P: 112 - case INTEL_FAM6_RAPTORLAKE_S: 113 - case INTEL_FAM6_METEORLAKE: 114 - case INTEL_FAM6_METEORLAKE_L: 93 + case INTEL_SKYLAKE_L: 94 + case INTEL_SKYLAKE: 95 + case INTEL_SKYLAKE_X: 96 + case INTEL_KABYLAKE_L: 97 + case INTEL_KABYLAKE: 98 + case INTEL_COMETLAKE_L: 99 + case INTEL_COMETLAKE: 100 + case INTEL_ICELAKE_L: 101 + case INTEL_ICELAKE: 102 + case INTEL_ICELAKE_X: 103 + case INTEL_ICELAKE_D: 104 + case INTEL_TIGERLAKE_L: 105 + case INTEL_TIGERLAKE: 106 + case INTEL_ROCKETLAKE: 107 + case INTEL_ALDERLAKE: 108 + case INTEL_ALDERLAKE_L: 109 + case INTEL_ATOM_GRACEMONT: 110 + case INTEL_RAPTORLAKE: 111 + case INTEL_RAPTORLAKE_P: 112 + case INTEL_RAPTORLAKE_S: 113 + 
case INTEL_METEORLAKE: 114 + case INTEL_METEORLAKE_L: 115 115 if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF) 116 116 return true; 117 117 break;
+101
arch/x86/include/asm/cpu_device_id.h
··· 3 3 #define _ASM_X86_CPU_DEVICE_ID 4 4 5 5 /* 6 + * Can't use <linux/bitfield.h> because it generates expressions that 7 + * cannot be used in structure initializers. Bitfield construction 8 + * here must match the union in struct cpuinfo_x86: 9 + * union { 10 + * struct { 11 + * __u8 x86_model; 12 + * __u8 x86; 13 + * __u8 x86_vendor; 14 + * __u8 x86_reserved; 15 + * }; 16 + * __u32 x86_vfm; 17 + * }; 18 + */ 19 + #define VFM_MODEL_BIT 0 20 + #define VFM_FAMILY_BIT 8 21 + #define VFM_VENDOR_BIT 16 22 + #define VFM_RSVD_BIT 24 23 + 24 + #define VFM_MODEL_MASK GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT) 25 + #define VFM_FAMILY_MASK GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT) 26 + #define VFM_VENDOR_MASK GENMASK(VFM_RSVD_BIT - 1, VFM_VENDOR_BIT) 27 + 28 + #define VFM_MODEL(vfm) (((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT) 29 + #define VFM_FAMILY(vfm) (((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT) 30 + #define VFM_VENDOR(vfm) (((vfm) & VFM_VENDOR_MASK) >> VFM_VENDOR_BIT) 31 + 32 + #define VFM_MAKE(_vendor, _family, _model) ( \ 33 + ((_model) << VFM_MODEL_BIT) | \ 34 + ((_family) << VFM_FAMILY_BIT) | \ 35 + ((_vendor) << VFM_VENDOR_BIT) \ 36 + ) 37 + 38 /* 6 39 * Declare drivers belonging to specific x86 CPUs 7 40 * Similar in spirit to pci_device_id and related PCI functions 8 41 * ··· 75 42 #define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \ 76 43 _steppings, _feature, _data) { \ 77 44 .vendor = X86_VENDOR_##_vendor, \ 45 + .family = _family, \ 46 + .model = _model, \ 47 + .steppings = _steppings, \ 48 + .feature = _feature, \ 49 + .driver_data = (unsigned long) _data \ 50 + } 51 + 52 + #define X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \ 53 + _steppings, _feature, _data) { \ 54 + .vendor = _vendor, \ 78 55 .family = _family, \ 79 56 .model = _model, \ 80 57 .steppings = _steppings, \ ··· 207 164 X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 208 165 steppings, X86_FEATURE_ANY, 
data) 209 166 167 + /** 168 + * X86_MATCH_VFM - Match encoded vendor/family/model 169 + * @vfm: Encoded 8-bits each for vendor, family, model 170 + * @data: Driver specific data or NULL. The internal storage 171 + * format is unsigned long. The supplied value, pointer 172 + * etc. is cast to unsigned long internally. 173 + * 174 + * Stepping and feature are set to wildcards 175 + */ 176 + #define X86_MATCH_VFM(vfm, data) \ 177 + X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \ 178 + VFM_VENDOR(vfm), \ 179 + VFM_FAMILY(vfm), \ 180 + VFM_MODEL(vfm), \ 181 + X86_STEPPING_ANY, X86_FEATURE_ANY, data) 182 + 183 + /** 184 + * X86_MATCH_VFM_STEPPINGS - Match encoded vendor/family/model/stepping 185 + * @vfm: Encoded 8-bits each for vendor, family, model 186 + * @steppings: Bitmask of steppings to match 187 + * @data: Driver specific data or NULL. The internal storage 188 + * format is unsigned long. The supplied value, pointer 189 + * etc. is cast to unsigned long internally. 190 + * 191 + * feature is set to wildcard 192 + */ 193 + #define X86_MATCH_VFM_STEPPINGS(vfm, steppings, data) \ 194 + X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \ 195 + VFM_VENDOR(vfm), \ 196 + VFM_FAMILY(vfm), \ 197 + VFM_MODEL(vfm), \ 198 + steppings, X86_FEATURE_ANY, data) 199 + 200 + /** 201 + * X86_MATCH_VFM_FEATURE - Match encoded vendor/family/model/feature 202 + * @vfm: Encoded 8-bits each for vendor, family, model 203 + * @feature: A X86_FEATURE bit 204 + * @data: Driver specific data or NULL. The internal storage 205 + * format is unsigned long. The supplied value, pointer 206 + * etc. is cast to unsigned long internally. 207 + * 208 + * Steppings is set to wildcard 209 + */ 210 + #define X86_MATCH_VFM_FEATURE(vfm, feature, data) \ 211 + X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \ 212 + VFM_VENDOR(vfm), \ 213 + VFM_FAMILY(vfm), \ 214 + VFM_MODEL(vfm), \ 215 + X86_STEPPING_ANY, feature, data) 216 + 210 217 /* 211 218 * Match specific microcode revisions. 
212 219 * ··· 278 185 #define INTEL_CPU_DESC(model, stepping, revision) { \ 279 186 .x86_family = 6, \ 280 187 .x86_vendor = X86_VENDOR_INTEL, \ 188 + .x86_model = (model), \ 189 + .x86_stepping = (stepping), \ 190 + .x86_microcode_rev = (revision), \ 191 + } 192 + 193 + #define AMD_CPU_DESC(fam, model, stepping, revision) { \ 194 + .x86_family = (fam), \ 195 + .x86_vendor = X86_VENDOR_AMD, \ 281 196 .x86_model = (model), \ 282 197 .x86_stepping = (stepping), \ 283 198 .x86_microcode_rev = (revision), \
+84
arch/x86/include/asm/intel-family.h
··· 40 40 * their own names :-( 41 41 */ 42 42 43 + #define IFM(_fam, _model) VFM_MAKE(X86_VENDOR_INTEL, _fam, _model) 44 + 43 45 /* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */ 44 46 #define INTEL_FAM6_ANY X86_MODEL_ANY 47 + /* Wildcard match for FAM6 so X86_MATCH_VFM(ANY) works */ 48 + #define INTEL_ANY IFM(X86_FAMILY_ANY, X86_MODEL_ANY) 45 49 46 50 #define INTEL_FAM6_CORE_YONAH 0x0E 51 + #define INTEL_CORE_YONAH IFM(6, 0x0E) 47 52 48 53 #define INTEL_FAM6_CORE2_MEROM 0x0F 54 + #define INTEL_CORE2_MEROM IFM(6, 0x0F) 49 55 #define INTEL_FAM6_CORE2_MEROM_L 0x16 56 + #define INTEL_CORE2_MEROM_L IFM(6, 0x16) 50 57 #define INTEL_FAM6_CORE2_PENRYN 0x17 58 + #define INTEL_CORE2_PENRYN IFM(6, 0x17) 51 59 #define INTEL_FAM6_CORE2_DUNNINGTON 0x1D 60 + #define INTEL_CORE2_DUNNINGTON IFM(6, 0x1D) 52 61 53 62 #define INTEL_FAM6_NEHALEM 0x1E 63 + #define INTEL_NEHALEM IFM(6, 0x1E) 54 64 #define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */ 65 + #define INTEL_NEHALEM_G IFM(6, 0x1F) /* Auburndale / Havendale */ 55 66 #define INTEL_FAM6_NEHALEM_EP 0x1A 67 + #define INTEL_NEHALEM_EP IFM(6, 0x1A) 56 68 #define INTEL_FAM6_NEHALEM_EX 0x2E 69 + #define INTEL_NEHALEM_EX IFM(6, 0x2E) 57 70 58 71 #define INTEL_FAM6_WESTMERE 0x25 72 + #define INTEL_WESTMERE IFM(6, 0x25) 59 73 #define INTEL_FAM6_WESTMERE_EP 0x2C 74 + #define INTEL_WESTMERE_EP IFM(6, 0x2C) 60 75 #define INTEL_FAM6_WESTMERE_EX 0x2F 76 + #define INTEL_WESTMERE_EX IFM(6, 0x2F) 61 77 62 78 #define INTEL_FAM6_SANDYBRIDGE 0x2A 79 + #define INTEL_SANDYBRIDGE IFM(6, 0x2A) 63 80 #define INTEL_FAM6_SANDYBRIDGE_X 0x2D 81 + #define INTEL_SANDYBRIDGE_X IFM(6, 0x2D) 64 82 #define INTEL_FAM6_IVYBRIDGE 0x3A 83 + #define INTEL_IVYBRIDGE IFM(6, 0x3A) 65 84 #define INTEL_FAM6_IVYBRIDGE_X 0x3E 85 + #define INTEL_IVYBRIDGE_X IFM(6, 0x3E) 66 86 67 87 #define INTEL_FAM6_HASWELL 0x3C 88 + #define INTEL_HASWELL IFM(6, 0x3C) 68 89 #define INTEL_FAM6_HASWELL_X 0x3F 90 + #define INTEL_HASWELL_X IFM(6, 0x3F) 69 91 
#define INTEL_FAM6_HASWELL_L 0x45 92 + #define INTEL_HASWELL_L IFM(6, 0x45) 70 93 #define INTEL_FAM6_HASWELL_G 0x46 94 + #define INTEL_HASWELL_G IFM(6, 0x46) 71 95 72 96 #define INTEL_FAM6_BROADWELL 0x3D 97 + #define INTEL_BROADWELL IFM(6, 0x3D) 73 98 #define INTEL_FAM6_BROADWELL_G 0x47 99 + #define INTEL_BROADWELL_G IFM(6, 0x47) 74 100 #define INTEL_FAM6_BROADWELL_X 0x4F 101 + #define INTEL_BROADWELL_X IFM(6, 0x4F) 75 102 #define INTEL_FAM6_BROADWELL_D 0x56 103 + #define INTEL_BROADWELL_D IFM(6, 0x56) 76 104 77 105 #define INTEL_FAM6_SKYLAKE_L 0x4E /* Sky Lake */ 106 + #define INTEL_SKYLAKE_L IFM(6, 0x4E) /* Sky Lake */ 78 107 #define INTEL_FAM6_SKYLAKE 0x5E /* Sky Lake */ 108 + #define INTEL_SKYLAKE IFM(6, 0x5E) /* Sky Lake */ 79 109 #define INTEL_FAM6_SKYLAKE_X 0x55 /* Sky Lake */ 110 + #define INTEL_SKYLAKE_X IFM(6, 0x55) /* Sky Lake */ 80 111 /* CASCADELAKE_X 0x55 Sky Lake -- s: 7 */ 81 112 /* COOPERLAKE_X 0x55 Sky Lake -- s: 11 */ 82 113 83 114 #define INTEL_FAM6_KABYLAKE_L 0x8E /* Sky Lake */ 115 + #define INTEL_KABYLAKE_L IFM(6, 0x8E) /* Sky Lake */ 84 116 /* AMBERLAKE_L 0x8E Sky Lake -- s: 9 */ 85 117 /* COFFEELAKE_L 0x8E Sky Lake -- s: 10 */ 86 118 /* WHISKEYLAKE_L 0x8E Sky Lake -- s: 11,12 */ 87 119 88 120 #define INTEL_FAM6_KABYLAKE 0x9E /* Sky Lake */ 121 + #define INTEL_KABYLAKE IFM(6, 0x9E) /* Sky Lake */ 89 122 /* COFFEELAKE 0x9E Sky Lake -- s: 10-13 */ 90 123 91 124 #define INTEL_FAM6_COMETLAKE 0xA5 /* Sky Lake */ 125 + #define INTEL_COMETLAKE IFM(6, 0xA5) /* Sky Lake */ 92 126 #define INTEL_FAM6_COMETLAKE_L 0xA6 /* Sky Lake */ 127 + #define INTEL_COMETLAKE_L IFM(6, 0xA6) /* Sky Lake */ 93 128 94 129 #define INTEL_FAM6_CANNONLAKE_L 0x66 /* Palm Cove */ 130 + #define INTEL_CANNONLAKE_L IFM(6, 0x66) /* Palm Cove */ 95 131 96 132 #define INTEL_FAM6_ICELAKE_X 0x6A /* Sunny Cove */ 133 + #define INTEL_ICELAKE_X IFM(6, 0x6A) /* Sunny Cove */ 97 134 #define INTEL_FAM6_ICELAKE_D 0x6C /* Sunny Cove */ 135 + #define INTEL_ICELAKE_D IFM(6, 0x6C) /* Sunny Cove 
*/ 98 136 #define INTEL_FAM6_ICELAKE 0x7D /* Sunny Cove */ 137 + #define INTEL_ICELAKE IFM(6, 0x7D) /* Sunny Cove */ 99 138 #define INTEL_FAM6_ICELAKE_L 0x7E /* Sunny Cove */ 139 + #define INTEL_ICELAKE_L IFM(6, 0x7E) /* Sunny Cove */ 100 140 #define INTEL_FAM6_ICELAKE_NNPI 0x9D /* Sunny Cove */ 141 + #define INTEL_ICELAKE_NNPI IFM(6, 0x9D) /* Sunny Cove */ 101 142 102 143 #define INTEL_FAM6_ROCKETLAKE 0xA7 /* Cypress Cove */ 144 + #define INTEL_ROCKETLAKE IFM(6, 0xA7) /* Cypress Cove */ 103 145 104 146 #define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */ 147 + #define INTEL_TIGERLAKE_L IFM(6, 0x8C) /* Willow Cove */ 105 148 #define INTEL_FAM6_TIGERLAKE 0x8D /* Willow Cove */ 149 + #define INTEL_TIGERLAKE IFM(6, 0x8D) /* Willow Cove */ 106 150 107 151 #define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */ 152 + #define INTEL_SAPPHIRERAPIDS_X IFM(6, 0x8F) /* Golden Cove */ 108 153 109 154 #define INTEL_FAM6_EMERALDRAPIDS_X 0xCF 155 + #define INTEL_EMERALDRAPIDS_X IFM(6, 0xCF) 110 156 111 157 #define INTEL_FAM6_GRANITERAPIDS_X 0xAD 158 + #define INTEL_GRANITERAPIDS_X IFM(6, 0xAD) 112 159 #define INTEL_FAM6_GRANITERAPIDS_D 0xAE 160 + #define INTEL_GRANITERAPIDS_D IFM(6, 0xAE) 113 161 114 162 /* "Hybrid" Processors (P-Core/E-Core) */ 115 163 116 164 #define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */ 165 + #define INTEL_LAKEFIELD IFM(6, 0x8A) /* Sunny Cove / Tremont */ 117 166 118 167 #define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */ 168 + #define INTEL_ALDERLAKE IFM(6, 0x97) /* Golden Cove / Gracemont */ 119 169 #define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */ 170 + #define INTEL_ALDERLAKE_L IFM(6, 0x9A) /* Golden Cove / Gracemont */ 120 171 121 172 #define INTEL_FAM6_RAPTORLAKE 0xB7 /* Raptor Cove / Enhanced Gracemont */ 173 + #define INTEL_RAPTORLAKE IFM(6, 0xB7) /* Raptor Cove / Enhanced Gracemont */ 122 174 #define INTEL_FAM6_RAPTORLAKE_P 0xBA 175 + #define INTEL_RAPTORLAKE_P IFM(6, 0xBA) 123 176 #define 
INTEL_FAM6_RAPTORLAKE_S 0xBF 177 + #define INTEL_RAPTORLAKE_S IFM(6, 0xBF) 124 178 125 179 #define INTEL_FAM6_METEORLAKE 0xAC 180 + #define INTEL_METEORLAKE IFM(6, 0xAC) 126 181 #define INTEL_FAM6_METEORLAKE_L 0xAA 182 + #define INTEL_METEORLAKE_L IFM(6, 0xAA) 127 183 128 184 #define INTEL_FAM6_ARROWLAKE_H 0xC5 185 + #define INTEL_ARROWLAKE_H IFM(6, 0xC5) 129 186 #define INTEL_FAM6_ARROWLAKE 0xC6 187 + #define INTEL_ARROWLAKE IFM(6, 0xC6) 130 188 #define INTEL_FAM6_ARROWLAKE_U 0xB5 189 + #define INTEL_ARROWLAKE_U IFM(6, 0xB5) 131 190 132 191 #define INTEL_FAM6_LUNARLAKE_M 0xBD 192 + #define INTEL_LUNARLAKE_M IFM(6, 0xBD) 133 193 134 194 /* "Small Core" Processors (Atom/E-Core) */ 135 195 136 196 #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ 197 + #define INTEL_ATOM_BONNELL IFM(6, 0x1C) /* Diamondville, Pineview */ 137 198 #define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */ 199 + #define INTEL_ATOM_BONNELL_MID IFM(6, 0x26) /* Silverthorne, Lincroft */ 138 200 139 201 #define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */ 202 + #define INTEL_ATOM_SALTWELL IFM(6, 0x36) /* Cedarview */ 140 203 #define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */ 204 + #define INTEL_ATOM_SALTWELL_MID IFM(6, 0x27) /* Penwell */ 141 205 #define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */ 206 + #define INTEL_ATOM_SALTWELL_TABLET IFM(6, 0x35) /* Cloverview */ 142 207 143 208 #define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */ 209 + #define INTEL_ATOM_SILVERMONT IFM(6, 0x37) /* Bay Trail, Valleyview */ 144 210 #define INTEL_FAM6_ATOM_SILVERMONT_D 0x4D /* Avaton, Rangely */ 211 + #define INTEL_ATOM_SILVERMONT_D IFM(6, 0x4D) /* Avaton, Rangely */ 145 212 #define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merriefield */ 213 + #define INTEL_ATOM_SILVERMONT_MID IFM(6, 0x4A) /* Merriefield */ 146 214 147 215 #define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */ 216 + #define INTEL_ATOM_AIRMONT IFM(6, 0x4C) /* Cherry Trail, 
Braswell */ 148 217 #define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */ 218 + #define INTEL_ATOM_AIRMONT_MID IFM(6, 0x5A) /* Moorefield */ 149 219 #define INTEL_FAM6_ATOM_AIRMONT_NP 0x75 /* Lightning Mountain */ 220 + #define INTEL_ATOM_AIRMONT_NP IFM(6, 0x75) /* Lightning Mountain */ 150 221 151 222 #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ 223 + #define INTEL_ATOM_GOLDMONT IFM(6, 0x5C) /* Apollo Lake */ 152 224 #define INTEL_FAM6_ATOM_GOLDMONT_D 0x5F /* Denverton */ 225 + #define INTEL_ATOM_GOLDMONT_D IFM(6, 0x5F) /* Denverton */ 153 226 154 227 /* Note: the micro-architecture is "Goldmont Plus" */ 155 228 #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ 229 + #define INTEL_ATOM_GOLDMONT_PLUS IFM(6, 0x7A) /* Gemini Lake */ 156 230 157 231 #define INTEL_FAM6_ATOM_TREMONT_D 0x86 /* Jacobsville */ 232 + #define INTEL_ATOM_TREMONT_D IFM(6, 0x86) /* Jacobsville */ 158 233 #define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */ 234 + #define INTEL_ATOM_TREMONT IFM(6, 0x96) /* Elkhart Lake */ 159 235 #define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */ 236 + #define INTEL_ATOM_TREMONT_L IFM(6, 0x9C) /* Jasper Lake */ 160 237 161 238 #define INTEL_FAM6_ATOM_GRACEMONT 0xBE /* Alderlake N */ 239 + #define INTEL_ATOM_GRACEMONT IFM(6, 0xBE) /* Alderlake N */ 162 240 163 241 #define INTEL_FAM6_ATOM_CRESTMONT_X 0xAF /* Sierra Forest */ 242 + #define INTEL_ATOM_CRESTMONT_X IFM(6, 0xAF) /* Sierra Forest */ 164 243 #define INTEL_FAM6_ATOM_CRESTMONT 0xB6 /* Grand Ridge */ 244 + #define INTEL_ATOM_CRESTMONT IFM(6, 0xB6) /* Grand Ridge */ 165 245 166 246 #define INTEL_FAM6_ATOM_DARKMONT_X 0xDD /* Clearwater Forest */ 247 + #define INTEL_ATOM_DARKMONT_X IFM(6, 0xDD) /* Clearwater Forest */ 167 248 168 249 /* Xeon Phi */ 169 250 170 251 #define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ 252 + #define INTEL_XEON_PHI_KNL IFM(6, 0x57) /* Knights Landing */ 171 253 #define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */ 254 + #define 
INTEL_XEON_PHI_KNM IFM(6, 0x85) /* Knights Mill */ 172 255 173 256 /* Family 5 */ 174 257 #define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */ 258 + #define INTEL_QUARK_X1000 IFM(5, 0x09) /* Quark X1000 SoC */ 175 259 176 260 #endif /* _ASM_X86_INTEL_FAMILY_H */
+2
arch/x86/include/asm/mce.h
··· 13 13 #define MCG_CTL_P BIT_ULL(8) /* MCG_CTL register available */ 14 14 #define MCG_EXT_P BIT_ULL(9) /* Extended registers available */ 15 15 #define MCG_CMCI_P BIT_ULL(10) /* CMCI supported */ 16 + #define MCG_SEAM_NR BIT_ULL(12) /* MCG_STATUS_SEAM_NR supported */ 16 17 #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ 17 18 #define MCG_EXT_CNT_SHIFT 16 18 19 #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) ··· 26 25 #define MCG_STATUS_EIPV BIT_ULL(1) /* ip points to correct instruction */ 27 26 #define MCG_STATUS_MCIP BIT_ULL(2) /* machine check in progress */ 28 27 #define MCG_STATUS_LMCES BIT_ULL(3) /* LMCE signaled */ 28 + #define MCG_STATUS_SEAM_NR BIT_ULL(12) /* Machine check inside SEAM non-root mode */ 29 29 30 30 /* MCG_EXT_CTL register defines */ 31 31 #define MCG_EXT_CTL_LMCE_EN BIT_ULL(0) /* Enable LMCE */
+17 -3
arch/x86/include/asm/processor.h
··· 108 108 }; 109 109 110 110 struct cpuinfo_x86 { 111 - __u8 x86; /* CPU family */ 112 - __u8 x86_vendor; /* CPU vendor */ 113 - __u8 x86_model; 111 + union { 112 + /* 113 + * The particular ordering (low-to-high) of (vendor, 114 + * family, model) is done in case range of models, like 115 + * it is usually done on AMD, need to be compared. 116 + */ 117 + struct { 118 + __u8 x86_model; 119 + /* CPU family */ 120 + __u8 x86; 121 + /* CPU vendor */ 122 + __u8 x86_vendor; 123 + __u8 x86_reserved; 124 + }; 125 + /* combined vendor, family, model */ 126 + __u32 x86_vfm; 127 + }; 114 128 __u8 x86_stepping; 115 129 #ifdef CONFIG_X86_64 116 130 /* Number of 4K pages in DTLB/ITLB combined(in pages): */
+1 -1
arch/x86/kernel/Makefile
··· 62 62 obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o 63 63 obj-$(CONFIG_SYSFS) += ksysfs.o 64 64 obj-y += bootflag.o e820.o 65 - obj-y += pci-dma.o quirks.o topology.o kdebugfs.o 65 + obj-y += pci-dma.o quirks.o kdebugfs.o 66 66 obj-y += alternative.o i8253.o hw_breakpoint.o 67 67 obj-y += tsc.o tsc_msr.o io_delay.o rtc.o 68 68 obj-y += resource.o
+19 -19
arch/x86/kernel/apic/apic.c
··· 497 497 static DEFINE_PER_CPU(struct clock_event_device, lapic_events); 498 498 499 499 static const struct x86_cpu_id deadline_match[] __initconst = { 500 - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */ 501 - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */ 500 + X86_MATCH_VFM_STEPPINGS(INTEL_HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */ 501 + X86_MATCH_VFM_STEPPINGS(INTEL_HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */ 502 502 503 - X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X, 0x0b000020), 503 + X86_MATCH_VFM(INTEL_BROADWELL_X, 0x0b000020), 504 504 505 - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011), 506 - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e), 507 - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c), 508 - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003), 505 + X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011), 506 + X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e), 507 + X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c), 508 + X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003), 509 509 510 - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136), 511 - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014), 512 - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0), 510 + X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136), 511 + X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014), 512 + X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0), 513 513 514 - X86_MATCH_INTEL_FAM6_MODEL( HASWELL, 0x22), 515 - X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L, 
0x20), 516 - X86_MATCH_INTEL_FAM6_MODEL( HASWELL_G, 0x17), 514 + X86_MATCH_VFM(INTEL_HASWELL, 0x22), 515 + X86_MATCH_VFM(INTEL_HASWELL_L, 0x20), 516 + X86_MATCH_VFM(INTEL_HASWELL_G, 0x17), 517 517 518 - X86_MATCH_INTEL_FAM6_MODEL( BROADWELL, 0x25), 519 - X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_G, 0x17), 518 + X86_MATCH_VFM(INTEL_BROADWELL, 0x25), 519 + X86_MATCH_VFM(INTEL_BROADWELL_G, 0x17), 520 520 521 - X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_L, 0xb2), 522 - X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE, 0xb2), 521 + X86_MATCH_VFM(INTEL_SKYLAKE_L, 0xb2), 522 + X86_MATCH_VFM(INTEL_SKYLAKE, 0xb2), 523 523 524 - X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE_L, 0x52), 525 - X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE, 0x52), 524 + X86_MATCH_VFM(INTEL_KABYLAKE_L, 0x52), 525 + X86_MATCH_VFM(INTEL_KABYLAKE, 0x52), 526 526 527 527 {}, 528 528 };
+5 -2
arch/x86/kernel/apic/x2apic_cluster.c
··· 178 178 u32 phys_apicid = apic->cpu_present_to_apicid(cpu); 179 179 u32 cluster = apic_cluster(phys_apicid); 180 180 u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf)); 181 + int node = cpu_to_node(cpu); 181 182 182 183 x86_cpu_to_logical_apicid[cpu] = logical_apicid; 183 184 184 - if (alloc_clustermask(cpu, cluster, cpu_to_node(cpu)) < 0) 185 + if (alloc_clustermask(cpu, cluster, node) < 0) 185 186 return -ENOMEM; 186 - if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) 187 + 188 + if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node)) 187 189 return -ENOMEM; 190 + 188 191 return 0; 189 192 } 190 193
+12
arch/x86/kernel/cpu/amd.c
··· 13 13 #include <asm/apic.h> 14 14 #include <asm/cacheinfo.h> 15 15 #include <asm/cpu.h> 16 + #include <asm/cpu_device_id.h> 16 17 #include <asm/spec-ctrl.h> 17 18 #include <asm/smp.h> 18 19 #include <asm/numa.h> ··· 795 794 clear_rdrand_cpuid_bit(c); 796 795 } 797 796 797 + static const struct x86_cpu_desc erratum_1386_microcode[] = { 798 + AMD_CPU_DESC(0x17, 0x1, 0x2, 0x0800126e), 799 + AMD_CPU_DESC(0x17, 0x31, 0x0, 0x08301052), 800 + }; 801 + 798 802 static void fix_erratum_1386(struct cpuinfo_x86 *c) 799 803 { 800 804 /* ··· 809 803 * 810 804 * Affected parts all have no supervisor XSAVE states, meaning that 811 805 * the XSAVEC instruction (which works fine) is equivalent. 806 + * 807 + * Clear the feature flag only on microcode revisions which 808 + * don't have the fix. 812 809 */ 810 + if (x86_cpu_has_min_microcode_rev(erratum_1386_microcode)) 811 + return; 812 + 813 813 clear_cpu_cap(c, X86_FEATURE_XSAVES); 814 814 } 815 815
+8 -9
arch/x86/kernel/cpu/aperfmperf.c
··· 124 124 return true; 125 125 } 126 126 127 - #define X86_MATCH(model) \ 128 - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \ 129 - INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL) 127 + #define X86_MATCH(vfm) \ 128 + X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, NULL) 130 129 131 130 static const struct x86_cpu_id has_knl_turbo_ratio_limits[] __initconst = { 132 - X86_MATCH(XEON_PHI_KNL), 133 - X86_MATCH(XEON_PHI_KNM), 131 + X86_MATCH(INTEL_XEON_PHI_KNL), 132 + X86_MATCH(INTEL_XEON_PHI_KNM), 134 133 {} 135 134 }; 136 135 137 136 static const struct x86_cpu_id has_skx_turbo_ratio_limits[] __initconst = { 138 - X86_MATCH(SKYLAKE_X), 137 + X86_MATCH(INTEL_SKYLAKE_X), 139 138 {} 140 139 }; 141 140 142 141 static const struct x86_cpu_id has_glm_turbo_ratio_limits[] __initconst = { 143 - X86_MATCH(ATOM_GOLDMONT), 144 - X86_MATCH(ATOM_GOLDMONT_D), 145 - X86_MATCH(ATOM_GOLDMONT_PLUS), 142 + X86_MATCH(INTEL_ATOM_GOLDMONT), 143 + X86_MATCH(INTEL_ATOM_GOLDMONT_D), 144 + X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS), 146 145 {} 147 146 }; 148 147
+15 -15
arch/x86/kernel/cpu/bugs.c
··· 26 26 #include <asm/msr.h> 27 27 #include <asm/vmx.h> 28 28 #include <asm/paravirt.h> 29 - #include <asm/intel-family.h> 29 + #include <asm/cpu_device_id.h> 30 30 #include <asm/e820/api.h> 31 31 #include <asm/hypervisor.h> 32 32 #include <asm/tlbflush.h> ··· 2391 2391 if (c->x86 != 6) 2392 2392 return; 2393 2393 2394 - switch (c->x86_model) { 2395 - case INTEL_FAM6_NEHALEM: 2396 - case INTEL_FAM6_WESTMERE: 2397 - case INTEL_FAM6_SANDYBRIDGE: 2398 - case INTEL_FAM6_IVYBRIDGE: 2399 - case INTEL_FAM6_HASWELL: 2400 - case INTEL_FAM6_HASWELL_L: 2401 - case INTEL_FAM6_HASWELL_G: 2402 - case INTEL_FAM6_BROADWELL: 2403 - case INTEL_FAM6_BROADWELL_G: 2404 - case INTEL_FAM6_SKYLAKE_L: 2405 - case INTEL_FAM6_SKYLAKE: 2406 - case INTEL_FAM6_KABYLAKE_L: 2407 - case INTEL_FAM6_KABYLAKE: 2394 + switch (c->x86_vfm) { 2395 + case INTEL_NEHALEM: 2396 + case INTEL_WESTMERE: 2397 + case INTEL_SANDYBRIDGE: 2398 + case INTEL_IVYBRIDGE: 2399 + case INTEL_HASWELL: 2400 + case INTEL_HASWELL_L: 2401 + case INTEL_HASWELL_G: 2402 + case INTEL_BROADWELL: 2403 + case INTEL_BROADWELL_G: 2404 + case INTEL_SKYLAKE_L: 2405 + case INTEL_SKYLAKE: 2406 + case INTEL_KABYLAKE_L: 2407 + case INTEL_KABYLAKE: 2408 2408 if (c->x86_cache_bits < 44) 2409 2409 c->x86_cache_bits = 44; 2410 2410 break;
+83 -88
arch/x86/kernel/cpu/common.c
··· 114 114 X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]), 115 115 116 116 /* Legacy models without CPUID enumeration */ 117 - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]), 118 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &ppin_info[X86_VENDOR_INTEL]), 119 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]), 120 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]), 121 - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]), 122 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]), 123 - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]), 124 - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]), 125 - X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]), 126 - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]), 127 - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]), 117 + X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]), 118 + X86_MATCH_VFM(INTEL_HASWELL_X, &ppin_info[X86_VENDOR_INTEL]), 119 + X86_MATCH_VFM(INTEL_BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]), 120 + X86_MATCH_VFM(INTEL_BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]), 121 + X86_MATCH_VFM(INTEL_SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]), 122 + X86_MATCH_VFM(INTEL_ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]), 123 + X86_MATCH_VFM(INTEL_ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]), 124 + X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]), 125 + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]), 126 + X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]), 127 + X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]), 128 128 129 129 {} 130 130 }; ··· 1053 1053 void get_cpu_address_sizes(struct cpuinfo_x86 *c) 1054 1054 { 1055 1055 u32 eax, ebx, ecx, edx; 1056 - bool vp_bits_from_cpuid = true; 1057 1056 1058 1057 if (!cpu_has(c, 
X86_FEATURE_CPUID) || 1059 - (c->extended_cpuid_level < 0x80000008)) 1060 - vp_bits_from_cpuid = false; 1061 - 1062 - if (vp_bits_from_cpuid) { 1063 - cpuid(0x80000008, &eax, &ebx, &ecx, &edx); 1064 - 1065 - c->x86_virt_bits = (eax >> 8) & 0xff; 1066 - c->x86_phys_bits = eax & 0xff; 1067 - } else { 1058 + (c->extended_cpuid_level < 0x80000008)) { 1068 1059 if (IS_ENABLED(CONFIG_X86_64)) { 1069 1060 c->x86_clflush_size = 64; 1070 1061 c->x86_phys_bits = 36; ··· 1069 1078 cpu_has(c, X86_FEATURE_PSE36)) 1070 1079 c->x86_phys_bits = 36; 1071 1080 } 1081 + } else { 1082 + cpuid(0x80000008, &eax, &ebx, &ecx, &edx); 1083 + 1084 + c->x86_virt_bits = (eax >> 8) & 0xff; 1085 + c->x86_phys_bits = eax & 0xff; 1072 1086 } 1087 + 1073 1088 c->x86_cache_bits = c->x86_phys_bits; 1074 1089 c->x86_cache_alignment = c->x86_clflush_size; 1075 1090 } ··· 1122 1125 #define VULNWL(vendor, family, model, whitelist) \ 1123 1126 X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist) 1124 1127 1125 - #define VULNWL_INTEL(model, whitelist) \ 1126 - VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist) 1128 + #define VULNWL_INTEL(vfm, whitelist) \ 1129 + X86_MATCH_VFM(vfm, whitelist) 1127 1130 1128 1131 #define VULNWL_AMD(family, whitelist) \ 1129 1132 VULNWL(AMD, family, X86_MODEL_ANY, whitelist) ··· 1140 1143 VULNWL(VORTEX, 6, X86_MODEL_ANY, NO_SPECULATION), 1141 1144 1142 1145 /* Intel Family 6 */ 1143 - VULNWL_INTEL(TIGERLAKE, NO_MMIO), 1144 - VULNWL_INTEL(TIGERLAKE_L, NO_MMIO), 1145 - VULNWL_INTEL(ALDERLAKE, NO_MMIO), 1146 - VULNWL_INTEL(ALDERLAKE_L, NO_MMIO), 1146 + VULNWL_INTEL(INTEL_TIGERLAKE, NO_MMIO), 1147 + VULNWL_INTEL(INTEL_TIGERLAKE_L, NO_MMIO), 1148 + VULNWL_INTEL(INTEL_ALDERLAKE, NO_MMIO), 1149 + VULNWL_INTEL(INTEL_ALDERLAKE_L, NO_MMIO), 1147 1150 1148 - VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), 1149 - VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT), 1150 - VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), 1151 - 
VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT), 1152 - VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), 1151 + VULNWL_INTEL(INTEL_ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), 1152 + VULNWL_INTEL(INTEL_ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT), 1153 + VULNWL_INTEL(INTEL_ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), 1154 + VULNWL_INTEL(INTEL_ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT), 1155 + VULNWL_INTEL(INTEL_ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), 1153 1156 1154 - VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1155 - VULNWL_INTEL(ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1156 - VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1157 - VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1158 - VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1159 - VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1157 + VULNWL_INTEL(INTEL_ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1158 + VULNWL_INTEL(INTEL_ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1159 + VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1160 + VULNWL_INTEL(INTEL_ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1161 + VULNWL_INTEL(INTEL_XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1162 + VULNWL_INTEL(INTEL_XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1160 1163 1161 - VULNWL_INTEL(CORE_YONAH, NO_SSB), 1164 + VULNWL_INTEL(INTEL_CORE_YONAH, NO_SSB), 1162 1165 1163 - VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1164 - 
VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), 1166 + VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1167 + VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), 1165 1168 1166 - VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1167 - VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1168 - VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB), 1169 + VULNWL_INTEL(INTEL_ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1170 + VULNWL_INTEL(INTEL_ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1171 + VULNWL_INTEL(INTEL_ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB), 1169 1172 1170 1173 /* 1171 1174 * Technically, swapgs isn't serializing on AMD (despite it previously ··· 1175 1178 * good enough for our purposes. 
1176 1179 */ 1177 1180 1178 - VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB), 1179 - VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB), 1180 - VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), 1181 + VULNWL_INTEL(INTEL_ATOM_TREMONT, NO_EIBRS_PBRSB), 1182 + VULNWL_INTEL(INTEL_ATOM_TREMONT_L, NO_EIBRS_PBRSB), 1183 + VULNWL_INTEL(INTEL_ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), 1181 1184 1182 1185 /* AMD Family 0xf - 0x12 */ 1183 1186 VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI), ··· 1198 1201 #define VULNBL(vendor, family, model, blacklist) \ 1199 1202 X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist) 1200 1203 1201 - #define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ 1202 - X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ 1203 - INTEL_FAM6_##model, steppings, \ 1204 - X86_FEATURE_ANY, issues) 1204 + #define VULNBL_INTEL_STEPPINGS(vfm, steppings, issues) \ 1205 + X86_MATCH_VFM_STEPPINGS(vfm, steppings, issues) 1205 1206 1206 1207 #define VULNBL_AMD(family, blacklist) \ 1207 1208 VULNBL(AMD, family, X86_MODEL_ANY, blacklist) ··· 1224 1229 #define RFDS BIT(7) 1225 1230 1226 1231 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { 1227 - VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), 1228 - VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), 1229 - VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), 1230 - VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), 1231 - VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO), 1232 - VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPING_ANY, MMIO), 1233 - VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), 1234 - VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), 1235 - VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), 1236 - VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), 1237 - VULNBL_INTEL_STEPPINGS(SKYLAKE_L, 
X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), 1238 - VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), 1239 - VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), 1240 - VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), 1241 - VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), 1242 - VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), 1243 - VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS), 1244 - VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), 1245 - VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), 1246 - VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED), 1247 - VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), 1248 - VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), 1249 - VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), 1250 - VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), 1251 - VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), 1252 - VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS), 1253 - VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS), 1254 - VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS), 1255 - VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P, X86_STEPPING_ANY, RFDS), 1256 - VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S, X86_STEPPING_ANY, RFDS), 1257 - VULNBL_INTEL_STEPPINGS(ATOM_GRACEMONT, X86_STEPPING_ANY, RFDS), 1258 - VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS), 1259 - VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO | RFDS), 1260 - VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS), 1261 - VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT, X86_STEPPING_ANY, RFDS), 1262 - VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_D, 
X86_STEPPING_ANY, RFDS), 1263 - VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY, RFDS), 1232 + VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE, X86_STEPPING_ANY, SRBDS), 1233 + VULNBL_INTEL_STEPPINGS(INTEL_HASWELL, X86_STEPPING_ANY, SRBDS), 1234 + VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_L, X86_STEPPING_ANY, SRBDS), 1235 + VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_G, X86_STEPPING_ANY, SRBDS), 1236 + VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_X, X86_STEPPING_ANY, MMIO), 1237 + VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPING_ANY, MMIO), 1238 + VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G, X86_STEPPING_ANY, SRBDS), 1239 + VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X, X86_STEPPING_ANY, MMIO), 1240 + VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL, X86_STEPPING_ANY, SRBDS), 1241 + VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), 1242 + VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), 1243 + VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), 1244 + VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), 1245 + VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), 1246 + VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), 1247 + VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), 1248 + VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS), 1249 + VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), 1250 + VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), 1251 + VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED), 1252 + VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), 1253 + VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L, X86_STEPPING_ANY, GDS), 1254 + VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE, 
X86_STEPPING_ANY, GDS), 1255 + VULNBL_INTEL_STEPPINGS(INTEL_LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), 1256 + VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), 1257 + VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE, X86_STEPPING_ANY, RFDS), 1258 + VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L, X86_STEPPING_ANY, RFDS), 1259 + VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE, X86_STEPPING_ANY, RFDS), 1260 + VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_P, X86_STEPPING_ANY, RFDS), 1261 + VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_S, X86_STEPPING_ANY, RFDS), 1262 + VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GRACEMONT, X86_STEPPING_ANY, RFDS), 1263 + VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS), 1264 + VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO | RFDS), 1265 + VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS), 1266 + VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT, X86_STEPPING_ANY, RFDS), 1267 + VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_D, X86_STEPPING_ANY, RFDS), 1268 + VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY, RFDS), 1264 1269 1265 1270 VULNBL_AMD(0x15, RETBLEED), 1266 1271 VULNBL_AMD(0x16, RETBLEED),
+1
arch/x86/kernel/cpu/intel.c
··· 228 228 if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { 229 229 pr_info_once("x86/tme: not enabled by BIOS\n"); 230 230 mktme_status = MKTME_DISABLED; 231 + clear_cpu_cap(c, X86_FEATURE_TME); 231 232 return; 232 233 } 233 234
+6 -6
arch/x86/kernel/cpu/intel_epb.c
··· 204 204 } 205 205 206 206 static const struct x86_cpu_id intel_epb_normal[] = { 207 - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 208 - ENERGY_PERF_BIAS_NORMAL_POWERSAVE), 209 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, 210 - ENERGY_PERF_BIAS_NORMAL_POWERSAVE), 211 - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, 212 - ENERGY_PERF_BIAS_NORMAL_POWERSAVE), 207 + X86_MATCH_VFM(INTEL_ALDERLAKE_L, 208 + ENERGY_PERF_BIAS_NORMAL_POWERSAVE), 209 + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 210 + ENERGY_PERF_BIAS_NORMAL_POWERSAVE), 211 + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, 212 + ENERGY_PERF_BIAS_NORMAL_POWERSAVE), 213 213 {} 214 214 }; 215 215
+2 -3
arch/x86/kernel/cpu/match.c
··· 17 17 * 18 18 * A typical table entry would be to match a specific CPU 19 19 * 20 - * X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_BROADWELL, 21 - * X86_FEATURE_ANY, NULL); 20 + * X86_MATCH_VFM_FEATURE(INTEL_BROADWELL, X86_FEATURE_ANY, NULL); 22 21 * 23 22 * Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY, 24 23 * %X86_MODEL_ANY, %X86_FEATURE_ANY (except for vendor) ··· 25 26 * asm/cpu_device_id.h contains a set of useful macros which are shortcuts 26 27 * for various common selections. The above can be shortened to: 27 28 * 28 - * X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, NULL); 29 + * X86_MATCH_VFM(INTEL_BROADWELL, NULL); 29 30 * 30 31 * Arrays used to match for this should also be declared using 31 32 * MODULE_DEVICE_TABLE(x86cpu, ...)
+21 -3
arch/x86/kernel/cpu/mce/core.c
··· 47 47 #include <linux/kexec.h> 48 48 49 49 #include <asm/fred.h> 50 - #include <asm/intel-family.h> 50 + #include <asm/cpu_device_id.h> 51 51 #include <asm/processor.h> 52 52 #include <asm/traps.h> 53 53 #include <asm/tlbflush.h> ··· 1593 1593 else 1594 1594 queue_task_work(&m, msg, kill_me_maybe); 1595 1595 1596 + } else if (m.mcgstatus & MCG_STATUS_SEAM_NR) { 1597 + /* 1598 + * Saved RIP on stack makes it look like the machine check 1599 + * was taken in the kernel on the instruction following 1600 + * the entry to SEAM mode. But MCG_STATUS_SEAM_NR indicates 1601 + * that the machine check was taken inside SEAM non-root 1602 + * mode. CPU core has already marked that guest as dead. 1603 + * It is OK for the kernel to resume execution at the 1604 + * apparent point of the machine check as the fault did 1605 + * not occur there. Mark the page as poisoned so it won't 1606 + * be added to free list when the guest is terminated. 1607 + */ 1608 + if (mce_usable_address(&m)) { 1609 + struct page *p = pfn_to_online_page(m.addr >> PAGE_SHIFT); 1610 + 1611 + if (p) 1612 + SetPageHWPoison(p); 1613 + } 1596 1614 } else { 1597 1615 /* 1598 1616 * Handle an MCE which has happened in kernel space but from ··· 1948 1930 if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0) 1949 1931 cfg->bootlog = 0; 1950 1932 1951 - if (c->x86 == 6 && c->x86_model == 45) 1933 + if (c->x86_vfm == INTEL_SANDYBRIDGE_X) 1952 1934 mce_flags.snb_ifu_quirk = 1; 1953 1935 1954 1936 /* 1955 1937 * Skylake, Cascacde Lake and Cooper Lake require a quirk on 1956 1938 * rep movs. 1957 1939 */ 1958 - if (c->x86 == 6 && c->x86_model == INTEL_FAM6_SKYLAKE_X) 1940 + if (c->x86_vfm == INTEL_SKYLAKE_X) 1959 1941 mce_flags.skx_repmov_quirk = 1; 1960 1942 } 1961 1943
+10 -11
arch/x86/kernel/cpu/mce/intel.c
··· 13 13 #include <linux/cpumask.h> 14 14 #include <asm/apic.h> 15 15 #include <asm/cpufeature.h> 16 - #include <asm/intel-family.h> 16 + #include <asm/cpu_device_id.h> 17 17 #include <asm/processor.h> 18 18 #include <asm/msr.h> 19 19 #include <asm/mce.h> ··· 455 455 { 456 456 u64 error_control; 457 457 458 - switch (c->x86_model) { 459 - case INTEL_FAM6_SANDYBRIDGE_X: 460 - case INTEL_FAM6_IVYBRIDGE_X: 461 - case INTEL_FAM6_HASWELL_X: 458 + switch (c->x86_vfm) { 459 + case INTEL_SANDYBRIDGE_X: 460 + case INTEL_IVYBRIDGE_X: 461 + case INTEL_HASWELL_X: 462 462 if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control)) 463 463 return; 464 464 error_control |= 2; ··· 484 484 struct cpuinfo_x86 *c = &boot_cpu_data; 485 485 486 486 /* MCE errata HSD131, HSM142, HSW131, BDM48, HSM142 and SKX37 */ 487 - if ((c->x86 == 6) && 488 - ((c->x86_model == INTEL_FAM6_HASWELL) || 489 - (c->x86_model == INTEL_FAM6_HASWELL_L) || 490 - (c->x86_model == INTEL_FAM6_BROADWELL) || 491 - (c->x86_model == INTEL_FAM6_HASWELL_G) || 492 - (c->x86_model == INTEL_FAM6_SKYLAKE_X)) && 487 + if ((c->x86_vfm == INTEL_HASWELL || 488 + c->x86_vfm == INTEL_HASWELL_L || 489 + c->x86_vfm == INTEL_BROADWELL || 490 + c->x86_vfm == INTEL_HASWELL_G || 491 + c->x86_vfm == INTEL_SKYLAKE_X) && 493 492 (m->bank == 0) && 494 493 ((m->status & 0xa0000000ffffffff) == 0x80000000000f0005)) 495 494 return true;
+19 -7
arch/x86/kernel/cpu/mce/severity.c
··· 12 12 #include <linux/uaccess.h> 13 13 14 14 #include <asm/mce.h> 15 - #include <asm/intel-family.h> 15 + #include <asm/cpu_device_id.h> 16 16 #include <asm/traps.h> 17 17 #include <asm/insn.h> 18 18 #include <asm/insn-eval.h> ··· 39 39 u64 mask; 40 40 u64 result; 41 41 unsigned char sev; 42 - unsigned char mcgmask; 43 - unsigned char mcgres; 42 + unsigned short mcgmask; 43 + unsigned short mcgres; 44 44 unsigned char ser; 45 45 unsigned char context; 46 46 unsigned char excp; 47 47 unsigned char covered; 48 - unsigned char cpu_model; 48 + unsigned int cpu_vfm; 49 49 unsigned char cpu_minstepping; 50 50 unsigned char bank_lo, bank_hi; 51 51 char *msg; 52 52 } severities[] = { 53 53 #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c } 54 54 #define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h 55 - #define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s 55 + #define VFM_STEPPING(m, s) .cpu_vfm = m, .cpu_minstepping = s 56 56 #define KERNEL .context = IN_KERNEL 57 57 #define USER .context = IN_USER 58 58 #define KERNEL_RECOV .context = IN_KERNEL_RECOV ··· 128 128 MCESEV( 129 129 AO, "Uncorrected Patrol Scrub Error", 130 130 SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0), 131 - MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18) 131 + VFM_STEPPING(INTEL_SKYLAKE_X, 4), BANK_RANGE(13, 18) 132 132 ), 133 133 134 134 /* ignore OVER for UCNA */ ··· 172 172 AR, "Action required: instruction fetch error in a user process", 173 173 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), 174 174 USER 175 + ), 176 + MCESEV( 177 + AR, "Data load error in SEAM non-root mode", 178 + SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), 179 + MCGMASK(MCG_STATUS_SEAM_NR, MCG_STATUS_SEAM_NR), 180 + KERNEL 181 + ), 182 + MCESEV( 183 + AR, "Instruction fetch error in SEAM non-root mode", 184 + SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, 
MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), 185 + MCGMASK(MCG_STATUS_SEAM_NR, MCG_STATUS_SEAM_NR), 186 + KERNEL 175 187 ), 176 188 MCESEV( 177 189 PANIC, "Data load in unrecoverable area of kernel", ··· 397 385 continue; 398 386 if (s->excp && excp != s->excp) 399 387 continue; 400 - if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model) 388 + if (s->cpu_vfm && boot_cpu_data.x86_vfm != s->cpu_vfm) 401 389 continue; 402 390 if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping) 403 391 continue;
+2 -3
arch/x86/kernel/cpu/microcode/intel.c
··· 21 21 #include <linux/uio.h> 22 22 #include <linux/mm.h> 23 23 24 - #include <asm/intel-family.h> 24 + #include <asm/cpu_device_id.h> 25 25 #include <asm/processor.h> 26 26 #include <asm/tlbflush.h> 27 27 #include <asm/setup.h> ··· 577 577 * This behavior is documented in item BDF90, #334165 (Intel Xeon 578 578 * Processor E7-8800/4800 v4 Product Family). 579 579 */ 580 - if (c->x86 == 6 && 581 - c->x86_model == INTEL_FAM6_BROADWELL_X && 580 + if (c->x86_vfm == INTEL_BROADWELL_X && 582 581 c->x86_stepping == 0x01 && 583 582 llc_size_per_core > 2621440 && 584 583 c->microcode < 0x0b000021) {
+5 -5
arch/x86/kernel/cpu/resctrl/core.c
··· 22 22 #include <linux/cacheinfo.h> 23 23 #include <linux/cpuhotplug.h> 24 24 25 - #include <asm/intel-family.h> 25 + #include <asm/cpu_device_id.h> 26 26 #include <asm/resctrl.h> 27 27 #include "internal.h" 28 28 ··· 821 821 822 822 static __init void __check_quirks_intel(void) 823 823 { 824 - switch (boot_cpu_data.x86_model) { 825 - case INTEL_FAM6_HASWELL_X: 824 + switch (boot_cpu_data.x86_vfm) { 825 + case INTEL_HASWELL_X: 826 826 if (!rdt_options[RDT_FLAG_L3_CAT].force_off) 827 827 cache_alloc_hsw_probe(); 828 828 break; 829 - case INTEL_FAM6_SKYLAKE_X: 829 + case INTEL_SKYLAKE_X: 830 830 if (boot_cpu_data.x86_stepping <= 4) 831 831 set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat"); 832 832 else 833 833 set_rdt_options("!l3cat"); 834 834 fallthrough; 835 - case INTEL_FAM6_BROADWELL_X: 835 + case INTEL_BROADWELL_X: 836 836 intel_rdt_mbm_apply_quirk(); 837 837 break; 838 838 }
+11 -11
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
··· 23 23 #include <linux/uaccess.h> 24 24 25 25 #include <asm/cacheflush.h> 26 - #include <asm/intel-family.h> 26 + #include <asm/cpu_device_id.h> 27 27 #include <asm/resctrl.h> 28 28 #include <asm/perf_event.h> 29 29 ··· 88 88 boot_cpu_data.x86 != 6) 89 89 return 0; 90 90 91 - switch (boot_cpu_data.x86_model) { 92 - case INTEL_FAM6_BROADWELL_X: 91 + switch (boot_cpu_data.x86_vfm) { 92 + case INTEL_BROADWELL_X: 93 93 /* 94 94 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register 95 95 * as: ··· 100 100 * 63:4 Reserved 101 101 */ 102 102 return 0xF; 103 - case INTEL_FAM6_ATOM_GOLDMONT: 104 - case INTEL_FAM6_ATOM_GOLDMONT_PLUS: 103 + case INTEL_ATOM_GOLDMONT: 104 + case INTEL_ATOM_GOLDMONT_PLUS: 105 105 /* 106 106 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register 107 107 * as: ··· 1084 1084 * L2_HIT 02H 1085 1085 * L2_MISS 10H 1086 1086 */ 1087 - switch (boot_cpu_data.x86_model) { 1088 - case INTEL_FAM6_ATOM_GOLDMONT: 1089 - case INTEL_FAM6_ATOM_GOLDMONT_PLUS: 1087 + switch (boot_cpu_data.x86_vfm) { 1088 + case INTEL_ATOM_GOLDMONT: 1089 + case INTEL_ATOM_GOLDMONT_PLUS: 1090 1090 perf_miss_attr.config = X86_CONFIG(.event = 0xd1, 1091 1091 .umask = 0x10); 1092 1092 perf_hit_attr.config = X86_CONFIG(.event = 0xd1, ··· 1123 1123 * MISS 41H 1124 1124 */ 1125 1125 1126 - switch (boot_cpu_data.x86_model) { 1127 - case INTEL_FAM6_BROADWELL_X: 1126 + switch (boot_cpu_data.x86_vfm) { 1127 + case INTEL_BROADWELL_X: 1128 1128 /* On BDW the hit event counts references, not hits */ 1129 1129 perf_hit_attr.config = X86_CONFIG(.event = 0x2e, 1130 1130 .umask = 0x4f); ··· 1142 1142 */ 1143 1143 1144 1144 counts.miss_after -= counts.miss_before; 1145 - if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X) { 1145 + if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_X) { 1146 1146 /* 1147 1147 * On BDW references and misses are counted, need to adjust. 1148 1148 * Sometimes the "hits" counter is a bit more than the
+11 -8
arch/x86/kernel/cpu/topology_amd.c
··· 58 58 tscan->amd_node_id = node_id; 59 59 } 60 60 61 - static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb) 61 + static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext) 62 62 { 63 63 struct { 64 64 // eax ··· 86 86 * If leaf 0xb is available, then the domain shifts are set 87 87 * already and nothing to do here. 88 88 */ 89 - if (!has_0xb) { 89 + if (!has_topoext) { 90 90 /* 91 91 * Leaf 0x80000008 set the CORE domain shift already. 92 92 * Update the SMT domain, but do not propagate it. ··· 169 169 170 170 static void parse_topology_amd(struct topo_scan *tscan) 171 171 { 172 - bool has_0xb = false; 172 + bool has_topoext = false; 173 173 174 174 /* 175 175 * If the extended topology leaf 0x8000_001e is available 176 - * try to get SMT and CORE shift from leaf 0xb first, then 177 - * try to get the CORE shift from leaf 0x8000_0008. 176 + * try to get SMT, CORE, TILE, and DIE shifts from extended 177 + * CPUID leaf 0x8000_0026 on supported processors first. If 178 + * extended CPUID leaf 0x8000_0026 is not supported, try to 179 + * get SMT and CORE shift from leaf 0xb first, then try to 180 + * get the CORE shift from leaf 0x8000_0008. 178 181 */ 179 182 if (cpu_feature_enabled(X86_FEATURE_TOPOEXT)) 180 - has_0xb = cpu_parse_topology_ext(tscan); 183 + has_topoext = cpu_parse_topology_ext(tscan); 181 184 182 - if (!has_0xb && !parse_8000_0008(tscan)) 185 + if (!has_topoext && !parse_8000_0008(tscan)) 183 186 return; 184 187 185 188 /* Prefer leaf 0x8000001e if available */ 186 - if (parse_8000_001e(tscan, has_0xb)) 189 + if (parse_8000_001e(tscan, has_topoext)) 187 190 return; 188 191 189 192 /* Try the NODEID MSR */
+15
arch/x86/kernel/cpu/topology_ext.c
··· 13 13 CORE_TYPE = 2, 14 14 MAX_TYPE_0B = 3, 15 15 MODULE_TYPE = 3, 16 + AMD_CCD_TYPE = 3, 16 17 TILE_TYPE = 4, 18 + AMD_SOCKET_TYPE = 4, 19 + MAX_TYPE_80000026 = 5, 17 20 DIE_TYPE = 5, 18 21 DIEGRP_TYPE = 6, 19 22 MAX_TYPE_1F = 7, ··· 33 30 [TILE_TYPE] = TOPO_TILE_DOMAIN, 34 31 [DIE_TYPE] = TOPO_DIE_DOMAIN, 35 32 [DIEGRP_TYPE] = TOPO_DIEGRP_DOMAIN, 33 + }; 34 + 35 + static const unsigned int topo_domain_map_80000026[MAX_TYPE_80000026] = { 36 + [SMT_TYPE] = TOPO_SMT_DOMAIN, 37 + [CORE_TYPE] = TOPO_CORE_DOMAIN, 38 + [AMD_CCD_TYPE] = TOPO_TILE_DOMAIN, 39 + [AMD_SOCKET_TYPE] = TOPO_DIE_DOMAIN, 36 40 }; 37 41 38 42 static inline bool topo_subleaf(struct topo_scan *tscan, u32 leaf, u32 subleaf, ··· 66 56 switch (leaf) { 67 57 case 0x0b: maxtype = MAX_TYPE_0B; map = topo_domain_map_0b_1f; break; 68 58 case 0x1f: maxtype = MAX_TYPE_1F; map = topo_domain_map_0b_1f; break; 59 + case 0x80000026: maxtype = MAX_TYPE_80000026; map = topo_domain_map_80000026; break; 69 60 default: return false; 70 61 } 71 62 ··· 134 123 { 135 124 /* Intel: Try leaf 0x1F first. */ 136 125 if (tscan->c->cpuid_level >= 0x1f && parse_topology_leaf(tscan, 0x1f)) 126 + return true; 127 + 128 + /* AMD: Try leaf 0x80000026 first. */ 129 + if (tscan->c->extended_cpuid_level >= 0x80000026 && parse_topology_leaf(tscan, 0x80000026)) 137 130 return true; 138 131 139 132 /* Intel/AMD: Fall back to leaf 0xB if available */
+8
arch/x86/kernel/setup.c
··· 7 7 */ 8 8 #include <linux/acpi.h> 9 9 #include <linux/console.h> 10 + #include <linux/cpu.h> 10 11 #include <linux/crash_dump.h> 11 12 #include <linux/dma-map-ops.h> 12 13 #include <linux/efi.h> ··· 1219 1218 return 0; 1220 1219 } 1221 1220 __initcall(register_kernel_offset_dumper); 1221 + 1222 + #ifdef CONFIG_HOTPLUG_CPU 1223 + bool arch_cpu_is_hotpluggable(int cpu) 1224 + { 1225 + return cpu > 0; 1226 + } 1227 + #endif /* CONFIG_HOTPLUG_CPU */
+4 -4
arch/x86/kernel/sev.c
··· 938 938 #define INIT_LDTR_ATTRIBS (SVM_SELECTOR_P_MASK | 2) 939 939 #define INIT_TR_ATTRIBS (SVM_SELECTOR_P_MASK | 3) 940 940 941 - static void *snp_alloc_vmsa_page(void) 941 + static void *snp_alloc_vmsa_page(int cpu) 942 942 { 943 943 struct page *p; 944 944 ··· 950 950 * 951 951 * Allocate an 8k page which is also 8k-aligned. 952 952 */ 953 - p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1); 953 + p = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1); 954 954 if (!p) 955 955 return NULL; 956 956 ··· 1019 1019 * #VMEXIT of that vCPU would wipe out all of the settings being done 1020 1020 * here. 1021 1021 */ 1022 - vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page(); 1022 + vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page(cpu); 1023 1023 if (!vmsa) 1024 1024 return -ENOMEM; 1025 1025 ··· 1341 1341 { 1342 1342 struct sev_es_runtime_data *data; 1343 1343 1344 - data = memblock_alloc(sizeof(*data), PAGE_SIZE); 1344 + data = memblock_alloc_node(sizeof(*data), PAGE_SIZE, cpu_to_node(cpu)); 1345 1345 if (!data) 1346 1346 panic("Can't allocate SEV-ES runtime data"); 1347 1347
+15 -13
arch/x86/kernel/smpboot.c
··· 438 438 */ 439 439 440 440 static const struct x86_cpu_id intel_cod_cpu[] = { 441 - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0), /* COD */ 442 - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0), /* COD */ 443 - X86_MATCH_INTEL_FAM6_MODEL(ANY, 1), /* SNC */ 441 + X86_MATCH_VFM(INTEL_HASWELL_X, 0), /* COD */ 442 + X86_MATCH_VFM(INTEL_BROADWELL_X, 0), /* COD */ 443 + X86_MATCH_VFM(INTEL_ANY, 1), /* SNC */ 444 444 {} 445 445 }; 446 446 ··· 1033 1033 1034 1034 void __init smp_prepare_cpus_common(void) 1035 1035 { 1036 - unsigned int i; 1036 + unsigned int cpu, node; 1037 1037 1038 1038 /* Mark all except the boot CPU as hotpluggable */ 1039 - for_each_possible_cpu(i) { 1040 - if (i) 1041 - per_cpu(cpu_info.cpu_index, i) = nr_cpu_ids; 1039 + for_each_possible_cpu(cpu) { 1040 + if (cpu) 1041 + per_cpu(cpu_info.cpu_index, cpu) = nr_cpu_ids; 1042 1042 } 1043 1043 1044 - for_each_possible_cpu(i) { 1045 - zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); 1046 - zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); 1047 - zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL); 1048 - zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); 1049 - zalloc_cpumask_var(&per_cpu(cpu_l2c_shared_map, i), GFP_KERNEL); 1044 + for_each_possible_cpu(cpu) { 1045 + node = cpu_to_node(cpu); 1046 + 1047 + zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL, node); 1048 + zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, node); 1049 + zalloc_cpumask_var_node(&per_cpu(cpu_die_map, cpu), GFP_KERNEL, node); 1050 + zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, cpu), GFP_KERNEL, node); 1051 + zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, cpu), GFP_KERNEL, node); 1050 1052 } 1051 1053 1052 1054 set_cpu_sibling_map(0);
-43
arch/x86/kernel/topology.c
··· 1 - /* 2 - * Populate sysfs with topology information 3 - * 4 - * Written by: Matthew Dobson, IBM Corporation 5 - * Original Code: Paul Dorwin, IBM Corporation, Patrick Mochel, OSDL 6 - * 7 - * Copyright (C) 2002, IBM Corp. 8 - * 9 - * All rights reserved. 10 - * 11 - * This program is free software; you can redistribute it and/or modify 12 - * it under the terms of the GNU General Public License as published by 13 - * the Free Software Foundation; either version 2 of the License, or 14 - * (at your option) any later version. 15 - * 16 - * This program is distributed in the hope that it will be useful, but 17 - * WITHOUT ANY WARRANTY; without even the implied warranty of 18 - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 19 - * NON INFRINGEMENT. See the GNU General Public License for more 20 - * details. 21 - * 22 - * You should have received a copy of the GNU General Public License 23 - * along with this program; if not, write to the Free Software 24 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 25 - * 26 - * Send feedback to <colpatch@us.ibm.com> 27 - */ 28 - #include <linux/interrupt.h> 29 - #include <linux/nodemask.h> 30 - #include <linux/export.h> 31 - #include <linux/mmzone.h> 32 - #include <linux/init.h> 33 - #include <linux/smp.h> 34 - #include <linux/irq.h> 35 - #include <asm/io_apic.h> 36 - #include <asm/cpu.h> 37 - 38 - #ifdef CONFIG_HOTPLUG_CPU 39 - bool arch_cpu_is_hotpluggable(int cpu) 40 - { 41 - return cpu > 0; 42 - } 43 - #endif /* CONFIG_HOTPLUG_CPU */
+3 -3
arch/x86/kernel/tsc.c
··· 26 26 #include <asm/x86_init.h> 27 27 #include <asm/geode.h> 28 28 #include <asm/apic.h> 29 - #include <asm/intel-family.h> 29 + #include <asm/cpu_device_id.h> 30 30 #include <asm/i8259.h> 31 31 #include <asm/uv/uv.h> 32 32 ··· 682 682 * clock. 683 683 */ 684 684 if (crystal_khz == 0 && 685 - boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D) 685 + boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT_D) 686 686 crystal_khz = 25000; 687 687 688 688 /* ··· 713 713 * For Atom SoCs TSC is the only reliable clocksource. 714 714 * Mark TSC reliable so no watchdog on it. 715 715 */ 716 - if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT) 716 + if (boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT) 717 717 setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); 718 718 719 719 #ifdef CONFIG_X86_LOCAL_APIC
+7 -7
arch/x86/kernel/tsc_msr.c
··· 147 147 }; 148 148 149 149 static const struct x86_cpu_id tsc_msr_cpu_ids[] = { 150 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, &freq_desc_pnw), 151 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_TABLET,&freq_desc_clv), 152 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &freq_desc_byt), 153 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &freq_desc_tng), 154 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &freq_desc_cht), 155 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID, &freq_desc_ann), 156 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_NP, &freq_desc_lgm), 150 + X86_MATCH_VFM(INTEL_ATOM_SALTWELL_MID, &freq_desc_pnw), 151 + X86_MATCH_VFM(INTEL_ATOM_SALTWELL_TABLET, &freq_desc_clv), 152 + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &freq_desc_byt), 153 + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &freq_desc_tng), 154 + X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &freq_desc_cht), 155 + X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &freq_desc_ann), 156 + X86_MATCH_VFM(INTEL_ATOM_AIRMONT_NP, &freq_desc_lgm), 157 157 {} 158 158 }; 159 159
+6 -10
arch/x86/mm/init.c
··· 261 261 } 262 262 } 263 263 264 - #define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \ 265 - .family = 6, \ 266 - .model = _model, \ 267 - } 268 264 /* 269 265 * INVLPG may not properly flush Global entries 270 266 * on these CPUs when PCIDs are enabled. 271 267 */ 272 268 static const struct x86_cpu_id invlpg_miss_ids[] = { 273 - INTEL_MATCH(INTEL_FAM6_ALDERLAKE ), 274 - INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ), 275 - INTEL_MATCH(INTEL_FAM6_ATOM_GRACEMONT ), 276 - INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ), 277 - INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P), 278 - INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S), 269 + X86_MATCH_VFM(INTEL_ALDERLAKE, 0), 270 + X86_MATCH_VFM(INTEL_ALDERLAKE_L, 0), 271 + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0), 272 + X86_MATCH_VFM(INTEL_RAPTORLAKE, 0), 273 + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, 0), 274 + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, 0), 279 275 {} 280 276 }; 281 277
+21 -4
include/trace/events/mce.h
··· 9 9 #include <linux/tracepoint.h> 10 10 #include <asm/mce.h> 11 11 12 + /* 13 + * MCE Event Record. 14 + * 15 + * Only very relevant and transient information which cannot be 16 + * gathered from a system by any other means or which can only be 17 + * acquired arduously should be added to this record. 18 + */ 19 + 12 20 TRACE_EVENT(mce_record, 13 21 14 22 TP_PROTO(struct mce *m), ··· 33 25 __field( u64, ipid ) 34 26 __field( u64, ip ) 35 27 __field( u64, tsc ) 28 + __field( u64, ppin ) 36 29 __field( u64, walltime ) 37 30 __field( u32, cpu ) 38 31 __field( u32, cpuid ) ··· 42 33 __field( u8, cs ) 43 34 __field( u8, bank ) 44 35 __field( u8, cpuvendor ) 36 + __field( u32, microcode ) 45 37 ), 46 38 47 39 TP_fast_assign( ··· 55 45 __entry->ipid = m->ipid; 56 46 __entry->ip = m->ip; 57 47 __entry->tsc = m->tsc; 48 + __entry->ppin = m->ppin; 58 49 __entry->walltime = m->time; 59 50 __entry->cpu = m->extcpu; 60 51 __entry->cpuid = m->cpuid; ··· 64 53 __entry->cs = m->cs; 65 54 __entry->bank = m->bank; 66 55 __entry->cpuvendor = m->cpuvendor; 56 + __entry->microcode = m->microcode; 67 57 ), 68 58 69 - TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR/MISC/SYND: %016Lx/%016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x", 59 + TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR: %016Lx, MISC: %016Lx, SYND: %016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PPIN: %llx, vendor: %u, CPUID: %x, time: %llu, socket: %u, APIC: %x, microcode: %x", 70 60 __entry->cpu, 71 61 __entry->mcgcap, __entry->mcgstatus, 72 62 __entry->bank, __entry->status, 73 63 __entry->ipid, 74 - __entry->addr, __entry->misc, __entry->synd, 64 + __entry->addr, 65 + __entry->misc, 66 + __entry->synd, 75 67 __entry->cs, __entry->ip, 76 68 __entry->tsc, 77 - __entry->cpuvendor, __entry->cpuid, 69 + __entry->ppin, 70 + __entry->cpuvendor, 71 + __entry->cpuid, 78 72 __entry->walltime, 79 73 __entry->socketid, 80 - __entry->apicid) 
74 + __entry->apicid, 75 + __entry->microcode) 81 76 ); 82 77 83 78 #endif /* _TRACE_MCE_H */
+10 -17
tools/testing/selftests/x86/amx.c
··· 103 103 104 104 #define CPUID_LEAF1_ECX_XSAVE_MASK (1 << 26) 105 105 #define CPUID_LEAF1_ECX_OSXSAVE_MASK (1 << 27) 106 - static inline void check_cpuid_xsave(void) 107 - { 108 - uint32_t eax, ebx, ecx, edx; 109 - 110 - /* 111 - * CPUID.1:ECX.XSAVE[bit 26] enumerates general 112 - * support for the XSAVE feature set, including 113 - * XGETBV. 114 - */ 115 - __cpuid_count(1, 0, eax, ebx, ecx, edx); 116 - if (!(ecx & CPUID_LEAF1_ECX_XSAVE_MASK)) 117 - fatal_error("cpuid: no CPU xsave support"); 118 - if (!(ecx & CPUID_LEAF1_ECX_OSXSAVE_MASK)) 119 - fatal_error("cpuid: no OS xsave support"); 120 - } 121 106 122 107 static uint32_t xbuf_size; 123 108 ··· 335 350 336 351 /* arch_prctl() and sigaltstack() test */ 337 352 353 + #define ARCH_GET_XCOMP_SUPP 0x1021 338 354 #define ARCH_GET_XCOMP_PERM 0x1022 339 355 #define ARCH_REQ_XCOMP_PERM 0x1023 340 356 ··· 914 928 915 929 int main(void) 916 930 { 917 - /* Check hardware availability at first */ 918 - check_cpuid_xsave(); 931 + unsigned long features; 932 + long rc; 933 + 934 + rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &features); 935 + if (rc || (features & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE) { 936 + ksft_print_msg("no AMX support\n"); 937 + return KSFT_SKIP; 938 + } 939 + 919 940 check_cpuid_xtiledata(); 920 941 921 942 init_stashed_xsave();
+1 -1
tools/testing/selftests/x86/lam.c
··· 1183 1183 1184 1184 if (!cpu_has_lam()) { 1185 1185 ksft_print_msg("Unsupported LAM feature!\n"); 1186 - return -1; 1186 + return KSFT_SKIP; 1187 1187 } 1188 1188 1189 1189 while ((c = getopt(argc, argv, "ht:")) != -1) {