Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/its: Enumerate Indirect Target Selection (ITS) bug

ITS bug in some pre-Alderlake Intel CPUs may allow indirect branches in the
first half of a cache line to get predicted to a target of a branch located in
the second half of the cache line.

Set X86_BUG_ITS on affected CPUs. Mitigation to follow in later commits.

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>

authored by

Pawan Gupta and committed by
Dave Hansen
159013a7 1ac116ce

+58 -13
+1
arch/x86/include/asm/cpufeatures.h
··· 533 533 #define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */ 534 534 #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ 535 535 #define X86_BUG_SPECTRE_V2_USER X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */ 536 + #define X86_BUG_ITS X86_BUG(1*32 + 6) /* "its" CPU is affected by Indirect Target Selection */ 536 537 #endif /* _ASM_X86_CPUFEATURES_H */
+8
arch/x86/include/asm/msr-index.h
··· 211 211 * VERW clears CPU Register 212 212 * File. 213 213 */ 214 + #define ARCH_CAP_ITS_NO BIT_ULL(62) /* 215 + * Not susceptible to 216 + * Indirect Target Selection. 217 + * This bit is not set by 218 + * HW, but is synthesized by 219 + * VMMs for guests to know 220 + * their affected status. 221 + */ 214 222 215 223 #define MSR_IA32_FLUSH_CMD 0x0000010b 216 224 #define L1D_FLUSH BIT(0) /*
+46 -12
arch/x86/kernel/cpu/common.c
··· 1227 1227 #define GDS BIT(6) 1228 1228 /* CPU is affected by Register File Data Sampling */ 1229 1229 #define RFDS BIT(7) 1230 + /* CPU is affected by Indirect Target Selection */ 1231 + #define ITS BIT(8) 1230 1232 1231 1233 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { 1232 1234 VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE, X86_STEP_MAX, SRBDS), ··· 1240 1238 VULNBL_INTEL_STEPS(INTEL_BROADWELL_G, X86_STEP_MAX, SRBDS), 1241 1239 VULNBL_INTEL_STEPS(INTEL_BROADWELL_X, X86_STEP_MAX, MMIO), 1242 1240 VULNBL_INTEL_STEPS(INTEL_BROADWELL, X86_STEP_MAX, SRBDS), 1243 - VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, X86_STEP_MAX, MMIO | RETBLEED | GDS), 1241 + VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, 0x5, MMIO | RETBLEED | GDS), 1242 + VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, X86_STEP_MAX, MMIO | RETBLEED | GDS | ITS), 1244 1243 VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS), 1245 1244 VULNBL_INTEL_STEPS(INTEL_SKYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS), 1246 - VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS), 1247 - VULNBL_INTEL_STEPS(INTEL_KABYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS), 1245 + VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, 0xb, MMIO | RETBLEED | GDS | SRBDS), 1246 + VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | ITS), 1247 + VULNBL_INTEL_STEPS(INTEL_KABYLAKE, 0xc, MMIO | RETBLEED | GDS | SRBDS), 1248 + VULNBL_INTEL_STEPS(INTEL_KABYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | ITS), 1248 1249 VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L, X86_STEP_MAX, RETBLEED), 1249 - VULNBL_INTEL_STEPS(INTEL_ICELAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS), 1250 - VULNBL_INTEL_STEPS(INTEL_ICELAKE_D, X86_STEP_MAX, MMIO | GDS), 1251 - VULNBL_INTEL_STEPS(INTEL_ICELAKE_X, X86_STEP_MAX, MMIO | GDS), 1252 - VULNBL_INTEL_STEPS(INTEL_COMETLAKE, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS), 1253 - VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, 0x0, MMIO | RETBLEED), 1254 
- VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS), 1255 - VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L, X86_STEP_MAX, GDS), 1256 - VULNBL_INTEL_STEPS(INTEL_TIGERLAKE, X86_STEP_MAX, GDS), 1250 + VULNBL_INTEL_STEPS(INTEL_ICELAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS), 1251 + VULNBL_INTEL_STEPS(INTEL_ICELAKE_D, X86_STEP_MAX, MMIO | GDS | ITS), 1252 + VULNBL_INTEL_STEPS(INTEL_ICELAKE_X, X86_STEP_MAX, MMIO | GDS | ITS), 1253 + VULNBL_INTEL_STEPS(INTEL_COMETLAKE, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS), 1254 + VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, 0x0, MMIO | RETBLEED | ITS), 1255 + VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS), 1256 + VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L, X86_STEP_MAX, GDS | ITS), 1257 + VULNBL_INTEL_STEPS(INTEL_TIGERLAKE, X86_STEP_MAX, GDS | ITS), 1257 1258 VULNBL_INTEL_STEPS(INTEL_LAKEFIELD, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED), 1258 - VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS), 1259 + VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | ITS), 1259 1260 VULNBL_INTEL_TYPE(INTEL_ALDERLAKE, ATOM, RFDS), 1260 1261 VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L, X86_STEP_MAX, RFDS), 1261 1262 VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE, ATOM, RFDS), ··· 1321 1316 1322 1317 /* Only consult the blacklist when there is no enumeration: */ 1323 1318 return cpu_matches(cpu_vuln_blacklist, RFDS); 1319 + } 1320 + 1321 + static bool __init vulnerable_to_its(u64 x86_arch_cap_msr) 1322 + { 1323 + /* The "immunity" bit trumps everything else: */ 1324 + if (x86_arch_cap_msr & ARCH_CAP_ITS_NO) 1325 + return false; 1326 + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 1327 + return false; 1328 + 1329 + /* None of the affected CPUs have BHI_CTRL */ 1330 + if (boot_cpu_has(X86_FEATURE_BHI_CTRL)) 1331 + return false; 1332 + 1333 + /* 1334 + * If a VMM did not expose ITS_NO, assume that a guest could 1335 + * be running on 
a vulnerable hardware or may migrate to such 1336 + * hardware. 1337 + */ 1338 + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 1339 + return true; 1340 + 1341 + if (cpu_matches(cpu_vuln_blacklist, ITS)) 1342 + return true; 1343 + 1344 + return false; 1324 1345 } 1325 1346 1326 1347 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) ··· 1479 1448 1480 1449 if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET)) 1481 1450 setup_force_cpu_bug(X86_BUG_IBPB_NO_RET); 1451 + 1452 + if (vulnerable_to_its(x86_arch_cap_msr)) 1453 + setup_force_cpu_bug(X86_BUG_ITS); 1482 1454 1483 1455 if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) 1484 1456 return;
+3 -1
arch/x86/kvm/x86.c
··· 1584 1584 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \ 1585 1585 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \ 1586 1586 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \ 1587 - ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO) 1587 + ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO) 1588 1588 1589 1589 static u64 kvm_get_arch_capabilities(void) 1590 1590 { ··· 1618 1618 data |= ARCH_CAP_MDS_NO; 1619 1619 if (!boot_cpu_has_bug(X86_BUG_RFDS)) 1620 1620 data |= ARCH_CAP_RFDS_NO; 1621 + if (!boot_cpu_has_bug(X86_BUG_ITS)) 1622 + data |= ARCH_CAP_ITS_NO; 1621 1623 1622 1624 if (!boot_cpu_has(X86_FEATURE_RTM)) { 1623 1625 /*