Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvm-x86-selftests-6.18' of https://github.com/kvm-x86/linux into HEAD

KVM selftests changes for 6.18

- Add #DE coverage in the fastops test (the only exception that's guest-
triggerable in fastop-emulated instructions).

- Fix PMU selftests errors encountered on Granite Rapids (GNR), Sierra
Forest (SRF) and Clearwater Forest (CWF).

- Minor cleanups and improvements

+303 -109
+11 -6
tools/testing/selftests/kvm/include/kvm_util.h
··· 263 263 int open_path_or_exit(const char *path, int flags); 264 264 int open_kvm_dev_path_or_exit(void); 265 265 266 - bool get_kvm_param_bool(const char *param); 267 - bool get_kvm_intel_param_bool(const char *param); 268 - bool get_kvm_amd_param_bool(const char *param); 266 + int kvm_get_module_param_integer(const char *module_name, const char *param); 267 + bool kvm_get_module_param_bool(const char *module_name, const char *param); 269 268 270 - int get_kvm_param_integer(const char *param); 271 - int get_kvm_intel_param_integer(const char *param); 272 - int get_kvm_amd_param_integer(const char *param); 269 + static inline bool get_kvm_param_bool(const char *param) 270 + { 271 + return kvm_get_module_param_bool("kvm", param); 272 + } 273 + 274 + static inline int get_kvm_param_integer(const char *param) 275 + { 276 + return kvm_get_module_param_integer("kvm", param); 277 + } 273 278 274 279 unsigned int kvm_check_cap(long cap); 275 280
+26
tools/testing/selftests/kvm/include/x86/pmu.h
··· 5 5 #ifndef SELFTEST_KVM_PMU_H 6 6 #define SELFTEST_KVM_PMU_H 7 7 8 + #include <stdbool.h> 8 9 #include <stdint.h> 10 + 11 + #include <linux/bits.h> 9 12 10 13 #define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300 11 14 ··· 64 61 #define INTEL_ARCH_BRANCHES_RETIRED RAW_EVENT(0xc4, 0x00) 65 62 #define INTEL_ARCH_BRANCHES_MISPREDICTED RAW_EVENT(0xc5, 0x00) 66 63 #define INTEL_ARCH_TOPDOWN_SLOTS RAW_EVENT(0xa4, 0x01) 64 + #define INTEL_ARCH_TOPDOWN_BE_BOUND RAW_EVENT(0xa4, 0x02) 65 + #define INTEL_ARCH_TOPDOWN_BAD_SPEC RAW_EVENT(0x73, 0x00) 66 + #define INTEL_ARCH_TOPDOWN_FE_BOUND RAW_EVENT(0x9c, 0x01) 67 + #define INTEL_ARCH_TOPDOWN_RETIRING RAW_EVENT(0xc2, 0x02) 68 + #define INTEL_ARCH_LBR_INSERTS RAW_EVENT(0xe4, 0x01) 67 69 68 70 #define AMD_ZEN_CORE_CYCLES RAW_EVENT(0x76, 0x00) 69 71 #define AMD_ZEN_INSTRUCTIONS_RETIRED RAW_EVENT(0xc0, 0x00) ··· 88 80 INTEL_ARCH_BRANCHES_RETIRED_INDEX, 89 81 INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX, 90 82 INTEL_ARCH_TOPDOWN_SLOTS_INDEX, 83 + INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX, 84 + INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX, 85 + INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX, 86 + INTEL_ARCH_TOPDOWN_RETIRING_INDEX, 87 + INTEL_ARCH_LBR_INSERTS_INDEX, 91 88 NR_INTEL_ARCH_EVENTS, 92 89 }; 93 90 ··· 106 93 107 94 extern const uint64_t intel_pmu_arch_events[]; 108 95 extern const uint64_t amd_pmu_zen_events[]; 96 + 97 + enum pmu_errata { 98 + INSTRUCTIONS_RETIRED_OVERCOUNT, 99 + BRANCHES_RETIRED_OVERCOUNT, 100 + }; 101 + extern uint64_t pmu_errata_mask; 102 + 103 + void kvm_init_pmu_errata(void); 104 + 105 + static inline bool this_pmu_has_errata(enum pmu_errata errata) 106 + { 107 + return pmu_errata_mask & BIT_ULL(errata); 108 + } 109 109 110 110 #endif /* SELFTEST_KVM_PMU_H */
+34 -1
tools/testing/selftests/kvm/include/x86/processor.h
··· 34 34 35 35 #define NMI_VECTOR 0x02 36 36 37 + const char *ex_str(int vector); 38 + 37 39 #define X86_EFLAGS_FIXED (1u << 1) 38 40 39 41 #define X86_CR4_VME (1ul << 0) ··· 267 265 #define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15) 268 266 #define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23) 269 267 #define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31) 270 - #define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7) 268 + #define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 12) 271 269 #define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31) 272 270 #define X86_PROPERTY_PMU_NR_FIXED_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4) 273 271 #define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12) ··· 334 332 #define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(EBX, 5) 335 333 #define X86_PMU_FEATURE_BRANCHES_MISPREDICTED KVM_X86_PMU_FEATURE(EBX, 6) 336 334 #define X86_PMU_FEATURE_TOPDOWN_SLOTS KVM_X86_PMU_FEATURE(EBX, 7) 335 + #define X86_PMU_FEATURE_TOPDOWN_BE_BOUND KVM_X86_PMU_FEATURE(EBX, 8) 336 + #define X86_PMU_FEATURE_TOPDOWN_BAD_SPEC KVM_X86_PMU_FEATURE(EBX, 9) 337 + #define X86_PMU_FEATURE_TOPDOWN_FE_BOUND KVM_X86_PMU_FEATURE(EBX, 10) 338 + #define X86_PMU_FEATURE_TOPDOWN_RETIRING KVM_X86_PMU_FEATURE(EBX, 11) 339 + #define X86_PMU_FEATURE_LBR_INSERTS KVM_X86_PMU_FEATURE(EBX, 12) 337 340 338 341 #define X86_PMU_FEATURE_INSNS_RETIRED_FIXED KVM_X86_PMU_FEATURE(ECX, 0) 339 342 #define X86_PMU_FEATURE_CPU_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 1) ··· 1186 1179 void vm_install_exception_handler(struct kvm_vm *vm, int vector, 1187 1180 void (*handler)(struct ex_regs *)); 1188 1181 1182 + /* 1183 + * Exception fixup morphs #DE to an arbitrary magic vector so that '0' can be 1184 + * used to signal "no exception". 
1185 + */ 1186 + #define KVM_MAGIC_DE_VECTOR 0xff 1187 + 1189 1188 /* If a toddler were to say "abracadabra". */ 1190 1189 #define KVM_EXCEPTION_MAGIC 0xabacadabaULL 1191 1190 ··· 1326 1313 } 1327 1314 1328 1315 bool kvm_is_tdp_enabled(void); 1316 + 1317 + static inline bool get_kvm_intel_param_bool(const char *param) 1318 + { 1319 + return kvm_get_module_param_bool("kvm_intel", param); 1320 + } 1321 + 1322 + static inline bool get_kvm_amd_param_bool(const char *param) 1323 + { 1324 + return kvm_get_module_param_bool("kvm_amd", param); 1325 + } 1326 + 1327 + static inline int get_kvm_intel_param_integer(const char *param) 1328 + { 1329 + return kvm_get_module_param_integer("kvm_intel", param); 1330 + } 1331 + 1332 + static inline int get_kvm_amd_param_integer(const char *param) 1333 + { 1334 + return kvm_get_module_param_integer("kvm_amd", param); 1335 + } 1329 1336 1330 1337 static inline bool kvm_is_pmu_enabled(void) 1331 1338 {
+6 -36
tools/testing/selftests/kvm/lib/kvm_util.c
··· 24 24 struct guest_random_state guest_rng; 25 25 static uint32_t last_guest_seed; 26 26 27 - static int vcpu_mmap_sz(void); 27 + static size_t vcpu_mmap_sz(void); 28 28 29 29 int __open_path_or_exit(const char *path, int flags, const char *enoent_help) 30 30 { ··· 95 95 return bytes_read; 96 96 } 97 97 98 - static int get_module_param_integer(const char *module_name, const char *param) 98 + int kvm_get_module_param_integer(const char *module_name, const char *param) 99 99 { 100 100 /* 101 101 * 16 bytes to hold a 64-bit value (1 byte per char), 1 byte for the ··· 119 119 return atoi_paranoid(value); 120 120 } 121 121 122 - static bool get_module_param_bool(const char *module_name, const char *param) 122 + bool kvm_get_module_param_bool(const char *module_name, const char *param) 123 123 { 124 124 char value; 125 125 ssize_t r; ··· 133 133 return false; 134 134 135 135 TEST_FAIL("Unrecognized value '%c' for boolean module param", value); 136 - } 137 - 138 - bool get_kvm_param_bool(const char *param) 139 - { 140 - return get_module_param_bool("kvm", param); 141 - } 142 - 143 - bool get_kvm_intel_param_bool(const char *param) 144 - { 145 - return get_module_param_bool("kvm_intel", param); 146 - } 147 - 148 - bool get_kvm_amd_param_bool(const char *param) 149 - { 150 - return get_module_param_bool("kvm_amd", param); 151 - } 152 - 153 - int get_kvm_param_integer(const char *param) 154 - { 155 - return get_module_param_integer("kvm", param); 156 - } 157 - 158 - int get_kvm_intel_param_integer(const char *param) 159 - { 160 - return get_module_param_integer("kvm_intel", param); 161 - } 162 - 163 - int get_kvm_amd_param_integer(const char *param) 164 - { 165 - return get_module_param_integer("kvm_amd", param); 166 136 } 167 137 168 138 /* ··· 1294 1324 } 1295 1325 1296 1326 /* Returns the size of a vCPU's kvm_run structure. 
*/ 1297 - static int vcpu_mmap_sz(void) 1327 + static size_t vcpu_mmap_sz(void) 1298 1328 { 1299 1329 int dev_fd, ret; 1300 1330 1301 1331 dev_fd = open_kvm_dev_path_or_exit(); 1302 1332 1303 1333 ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); 1304 - TEST_ASSERT(ret >= sizeof(struct kvm_run), 1334 + TEST_ASSERT(ret >= 0 && ret >= sizeof(struct kvm_run), 1305 1335 KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret)); 1306 1336 1307 1337 close(dev_fd); ··· 1342 1372 TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm); 1343 1373 1344 1374 TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size " 1345 - "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi", 1375 + "smaller than expected, vcpu_mmap_sz: %zi expected_min: %zi", 1346 1376 vcpu_mmap_sz(), sizeof(*vcpu->run)); 1347 1377 vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(), 1348 1378 PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
+49
tools/testing/selftests/kvm/lib/x86/pmu.c
··· 8 8 #include <linux/kernel.h> 9 9 10 10 #include "kvm_util.h" 11 + #include "processor.h" 11 12 #include "pmu.h" 12 13 13 14 const uint64_t intel_pmu_arch_events[] = { ··· 20 19 INTEL_ARCH_BRANCHES_RETIRED, 21 20 INTEL_ARCH_BRANCHES_MISPREDICTED, 22 21 INTEL_ARCH_TOPDOWN_SLOTS, 22 + INTEL_ARCH_TOPDOWN_BE_BOUND, 23 + INTEL_ARCH_TOPDOWN_BAD_SPEC, 24 + INTEL_ARCH_TOPDOWN_FE_BOUND, 25 + INTEL_ARCH_TOPDOWN_RETIRING, 26 + INTEL_ARCH_LBR_INSERTS, 23 27 }; 24 28 kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS); 25 29 ··· 35 29 AMD_ZEN_BRANCHES_MISPREDICTED, 36 30 }; 37 31 kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS); 32 + 33 + /* 34 + * For Intel Atom CPUs, the PMU events "Instruction Retired" or 35 + * "Branch Instruction Retired" may be overcounted for certain 36 + * instructions, like FAR CALL/JMP, RETF, IRET, VMENTRY/VMEXIT/VMPTRLD 37 + * and complex SGX/SMX/CSTATE instructions/flows. 38 + * 39 + * The detailed information can be found in the errata (section SRF7): 40 + * https://edc.intel.com/content/www/us/en/design/products-and-solutions/processors-and-chipsets/sierra-forest/xeon-6700-series-processor-with-e-cores-specification-update/errata-details/ 41 + * 42 + * For the Atom platforms before Sierra Forest (including Sierra Forest), 43 + * both events "Instruction Retired" and "Branch Instruction Retired" would 44 + * be overcounted on these instructions, but for Clearwater Forest 45 + * only "Instruction Retired" event is overcounted on these instructions. 
46 + */ 47 + static uint64_t get_pmu_errata(void) 48 + { 49 + if (!this_cpu_is_intel()) 50 + return 0; 51 + 52 + if (this_cpu_family() != 0x6) 53 + return 0; 54 + 55 + switch (this_cpu_model()) { 56 + case 0xDD: /* Clearwater Forest */ 57 + return BIT_ULL(INSTRUCTIONS_RETIRED_OVERCOUNT); 58 + case 0xAF: /* Sierra Forest */ 59 + case 0x4D: /* Avaton, Rangely */ 60 + case 0x5F: /* Denverton */ 61 + case 0x86: /* Jacobsville */ 62 + return BIT_ULL(INSTRUCTIONS_RETIRED_OVERCOUNT) | 63 + BIT_ULL(BRANCHES_RETIRED_OVERCOUNT); 64 + default: 65 + return 0; 66 + } 67 + } 68 + 69 + uint64_t pmu_errata_mask; 70 + 71 + void kvm_init_pmu_errata(void) 72 + { 73 + pmu_errata_mask = get_pmu_errata(); 74 + }
+38 -1
tools/testing/selftests/kvm/lib/x86/processor.c
··· 6 6 #include "linux/bitmap.h" 7 7 #include "test_util.h" 8 8 #include "kvm_util.h" 9 + #include "pmu.h" 9 10 #include "processor.h" 10 11 #include "sev.h" 11 12 ··· 23 22 bool host_cpu_is_intel; 24 23 bool is_forced_emulation_enabled; 25 24 uint64_t guest_tsc_khz; 25 + 26 + const char *ex_str(int vector) 27 + { 28 + switch (vector) { 29 + #define VEC_STR(v) case v##_VECTOR: return "#" #v 30 + case DE_VECTOR: return "no exception"; 31 + case KVM_MAGIC_DE_VECTOR: return "#DE"; 32 + VEC_STR(DB); 33 + VEC_STR(NMI); 34 + VEC_STR(BP); 35 + VEC_STR(OF); 36 + VEC_STR(BR); 37 + VEC_STR(UD); 38 + VEC_STR(NM); 39 + VEC_STR(DF); 40 + VEC_STR(TS); 41 + VEC_STR(NP); 42 + VEC_STR(SS); 43 + VEC_STR(GP); 44 + VEC_STR(PF); 45 + VEC_STR(MF); 46 + VEC_STR(AC); 47 + VEC_STR(MC); 48 + VEC_STR(XM); 49 + VEC_STR(VE); 50 + VEC_STR(CP); 51 + VEC_STR(HV); 52 + VEC_STR(VC); 53 + VEC_STR(SX); 54 + default: return "#??"; 55 + #undef VEC_STR 56 + } 57 + } 26 58 27 59 static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) 28 60 { ··· 591 557 return false; 592 558 593 559 if (regs->vector == DE_VECTOR) 594 - return false; 560 + regs->vector = KVM_MAGIC_DE_VECTOR; 595 561 596 562 regs->rip = regs->r11; 597 563 regs->r9 = regs->vector; ··· 672 638 sync_global_to_guest(vm, host_cpu_is_intel); 673 639 sync_global_to_guest(vm, host_cpu_is_amd); 674 640 sync_global_to_guest(vm, is_forced_emulation_enabled); 641 + sync_global_to_guest(vm, pmu_errata_mask); 675 642 676 643 if (is_sev_vm(vm)) { 677 644 struct kvm_sev_init init = { 0 }; ··· 1304 1269 host_cpu_is_intel = this_cpu_is_intel(); 1305 1270 host_cpu_is_amd = this_cpu_is_amd(); 1306 1271 is_forced_emulation_enabled = kvm_is_forced_emulation_enabled(); 1272 + 1273 + kvm_init_pmu_errata(); 1307 1274 } 1308 1275 1309 1276 bool sys_clocksource_is_based_on_tsc(void)
+1 -1
tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c
··· 291 291 ksft_test_result_pass("%s\n", testlist[idx].subfunc_name); 292 292 free(array); 293 293 } else { 294 - ksft_test_result_skip("%s feature is not avaialable\n", 294 + ksft_test_result_skip("%s feature is not available\n", 295 295 testlist[idx].subfunc_name); 296 296 } 297 297 }
+63 -19
tools/testing/selftests/kvm/x86/fastops_test.c
··· 8 8 * to set RFLAGS.CF based on whether or not the input is even or odd, so that 9 9 * instructions like ADC and SBB are deterministic. 10 10 */ 11 + #define fastop(__insn) \ 12 + "bt $0, %[bt_val]\n\t" \ 13 + __insn "\n\t" \ 14 + "pushfq\n\t" \ 15 + "pop %[flags]\n\t" 16 + 17 + #define flags_constraint(flags_val) [flags]"=r"(flags_val) 18 + #define bt_constraint(__bt_val) [bt_val]"rm"((uint32_t)__bt_val) 19 + 11 20 #define guest_execute_fastop_1(FEP, insn, __val, __flags) \ 12 21 ({ \ 13 - __asm__ __volatile__("bt $0, %[val]\n\t" \ 14 - FEP insn " %[val]\n\t" \ 15 - "pushfq\n\t" \ 16 - "pop %[flags]\n\t" \ 17 - : [val]"+r"(__val), [flags]"=r"(__flags) \ 18 - : : "cc", "memory"); \ 22 + __asm__ __volatile__(fastop(FEP insn " %[val]") \ 23 + : [val]"+r"(__val), flags_constraint(__flags) \ 24 + : bt_constraint(__val) \ 25 + : "cc", "memory"); \ 19 26 }) 20 27 21 28 #define guest_test_fastop_1(insn, type_t, __val) \ ··· 43 36 44 37 #define guest_execute_fastop_2(FEP, insn, __input, __output, __flags) \ 45 38 ({ \ 46 - __asm__ __volatile__("bt $0, %[output]\n\t" \ 47 - FEP insn " %[input], %[output]\n\t" \ 48 - "pushfq\n\t" \ 49 - "pop %[flags]\n\t" \ 50 - : [output]"+r"(__output), [flags]"=r"(__flags) \ 51 - : [input]"r"(__input) : "cc", "memory"); \ 39 + __asm__ __volatile__(fastop(FEP insn " %[input], %[output]") \ 40 + : [output]"+r"(__output), flags_constraint(__flags) \ 41 + : [input]"r"(__input), bt_constraint(__output) \ 42 + : "cc", "memory"); \ 52 43 }) 53 44 54 45 #define guest_test_fastop_2(insn, type_t, __val1, __val2) \ ··· 68 63 69 64 #define guest_execute_fastop_cl(FEP, insn, __shift, __output, __flags) \ 70 65 ({ \ 71 - __asm__ __volatile__("bt $0, %[output]\n\t" \ 72 - FEP insn " %%cl, %[output]\n\t" \ 73 - "pushfq\n\t" \ 74 - "pop %[flags]\n\t" \ 75 - : [output]"+r"(__output), [flags]"=r"(__flags) \ 76 - : "c"(__shift) : "cc", "memory"); \ 66 + __asm__ __volatile__(fastop(FEP insn " %%cl, %[output]") \ 67 + : [output]"+r"(__output), 
flags_constraint(__flags) \ 68 + : "c"(__shift), bt_constraint(__output) \ 69 + : "cc", "memory"); \ 77 70 }) 78 71 79 72 #define guest_test_fastop_cl(insn, type_t, __val1, __val2) \ ··· 90 87 __GUEST_ASSERT(flags == ex_flags, \ 91 88 "Wanted flags 0x%lx for '%s 0x%x, 0x%lx', got 0x%lx", \ 92 89 ex_flags, insn, shift, (uint64_t)input, flags); \ 90 + }) 91 + 92 + #define guest_execute_fastop_div(__KVM_ASM_SAFE, insn, __a, __d, __rm, __flags) \ 93 + ({ \ 94 + uint64_t ign_error_code; \ 95 + uint8_t vector; \ 96 + \ 97 + __asm__ __volatile__(fastop(__KVM_ASM_SAFE(insn " %[denom]")) \ 98 + : "+a"(__a), "+d"(__d), flags_constraint(__flags), \ 99 + KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \ 100 + : [denom]"rm"(__rm), bt_constraint(__rm) \ 101 + : "cc", "memory", KVM_ASM_SAFE_CLOBBERS); \ 102 + vector; \ 103 + }) 104 + 105 + #define guest_test_fastop_div(insn, type_t, __val1, __val2) \ 106 + ({ \ 107 + type_t _a = __val1, _d = __val1, rm = __val2; \ 108 + type_t a = _a, d = _d, ex_a = _a, ex_d = _d; \ 109 + uint64_t flags, ex_flags; \ 110 + uint8_t v, ex_v; \ 111 + \ 112 + ex_v = guest_execute_fastop_div(KVM_ASM_SAFE, insn, ex_a, ex_d, rm, ex_flags); \ 113 + v = guest_execute_fastop_div(KVM_ASM_SAFE_FEP, insn, a, d, rm, flags); \ 114 + \ 115 + GUEST_ASSERT_EQ(v, ex_v); \ 116 + __GUEST_ASSERT(v == ex_v, \ 117 + "Wanted vector 0x%x for '%s 0x%lx:0x%lx/0x%lx', got 0x%x", \ 118 + ex_v, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, v); \ 119 + __GUEST_ASSERT(a == ex_a && d == ex_d, \ 120 + "Wanted 0x%lx:0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx:0x%lx",\ 121 + (uint64_t)ex_a, (uint64_t)ex_d, insn, (uint64_t)_a, \ 122 + (uint64_t)_d, (uint64_t)rm, (uint64_t)a, (uint64_t)d); \ 123 + __GUEST_ASSERT(v || ex_v || (flags == ex_flags), \ 124 + "Wanted flags 0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx", \ 125 + ex_flags, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, flags);\ 93 126 }) 94 127 95 128 static const uint64_t vals[] = { ··· 154 115 guest_test_fastop_2("add" suffix, 
type_t, vals[i], vals[j]); \ 155 116 guest_test_fastop_2("adc" suffix, type_t, vals[i], vals[j]); \ 156 117 guest_test_fastop_2("and" suffix, type_t, vals[i], vals[j]); \ 118 + if (sizeof(type_t) != 1) { \ 157 119 guest_test_fastop_2("bsf" suffix, type_t, vals[i], vals[j]); \ 158 120 guest_test_fastop_2("bsr" suffix, type_t, vals[i], vals[j]); \ 159 121 guest_test_fastop_2("bt" suffix, type_t, vals[i], vals[j]); \ 160 122 guest_test_fastop_2("btc" suffix, type_t, vals[i], vals[j]); \ 161 123 guest_test_fastop_2("btr" suffix, type_t, vals[i], vals[j]); \ 162 124 guest_test_fastop_2("bts" suffix, type_t, vals[i], vals[j]); \ 163 - guest_test_fastop_2("cmp" suffix, type_t, vals[i], vals[j]); \ 164 125 guest_test_fastop_2("imul" suffix, type_t, vals[i], vals[j]); \ 126 + } \ 127 + guest_test_fastop_2("cmp" suffix, type_t, vals[i], vals[j]); \ 165 128 guest_test_fastop_2("or" suffix, type_t, vals[i], vals[j]); \ 166 129 guest_test_fastop_2("sbb" suffix, type_t, vals[i], vals[j]); \ 167 130 guest_test_fastop_2("sub" suffix, type_t, vals[i], vals[j]); \ ··· 177 136 guest_test_fastop_cl("sar" suffix, type_t, vals[i], vals[j]); \ 178 137 guest_test_fastop_cl("shl" suffix, type_t, vals[i], vals[j]); \ 179 138 guest_test_fastop_cl("shr" suffix, type_t, vals[i], vals[j]); \ 139 + \ 140 + guest_test_fastop_div("div" suffix, type_t, vals[i], vals[j]); \ 180 141 } \ 181 142 } \ 182 143 } while (0) 183 144 184 145 static void guest_code(void) 185 146 { 147 + guest_test_fastops(uint8_t, "b"); 186 148 guest_test_fastops(uint16_t, "w"); 187 149 guest_test_fastops(uint32_t, "l"); 188 150 guest_test_fastops(uint64_t, "q");
+1 -1
tools/testing/selftests/kvm/x86/hyperv_cpuid.c
··· 45 45 46 46 TEST_ASSERT((entry->function >= 0x40000000) && 47 47 (entry->function <= 0x40000082), 48 - "function %x is our of supported range", 48 + "function %x is out of supported range", 49 49 entry->function); 50 50 51 51 TEST_ASSERT(entry->index == 0,
+8 -8
tools/testing/selftests/kvm/x86/hyperv_features.c
··· 54 54 55 55 if (msr->fault_expected) 56 56 __GUEST_ASSERT(vector == GP_VECTOR, 57 - "Expected #GP on %sMSR(0x%x), got vector '0x%x'", 58 - msr->write ? "WR" : "RD", msr->idx, vector); 57 + "Expected #GP on %sMSR(0x%x), got %s", 58 + msr->write ? "WR" : "RD", msr->idx, ex_str(vector)); 59 59 else 60 60 __GUEST_ASSERT(!vector, 61 - "Expected success on %sMSR(0x%x), got vector '0x%x'", 62 - msr->write ? "WR" : "RD", msr->idx, vector); 61 + "Expected success on %sMSR(0x%x), got %s", 62 + msr->write ? "WR" : "RD", msr->idx, ex_str(vector)); 63 63 64 64 if (vector || is_write_only_msr(msr->idx)) 65 65 goto done; ··· 102 102 vector = __hyperv_hypercall(hcall->control, input, output, &res); 103 103 if (hcall->ud_expected) { 104 104 __GUEST_ASSERT(vector == UD_VECTOR, 105 - "Expected #UD for control '%lu', got vector '0x%x'", 106 - hcall->control, vector); 105 + "Expected #UD for control '%lu', got %s", 106 + hcall->control, ex_str(vector)); 107 107 } else { 108 108 __GUEST_ASSERT(!vector, 109 - "Expected no exception for control '%lu', got vector '0x%x'", 110 - hcall->control, vector); 109 + "Expected no exception for control '%lu', got %s", 110 + hcall->control, ex_str(vector)); 111 111 GUEST_ASSERT_EQ(res, hcall->expect); 112 112 } 113 113
+4 -4
tools/testing/selftests/kvm/x86/monitor_mwait_test.c
··· 30 30 \ 31 31 if (fault_wanted) \ 32 32 __GUEST_ASSERT((vector) == UD_VECTOR, \ 33 - "Expected #UD on " insn " for testcase '0x%x', got '0x%x'", \ 34 - testcase, vector); \ 33 + "Expected #UD on " insn " for testcase '0x%x', got %s", \ 34 + testcase, ex_str(vector)); \ 35 35 else \ 36 36 __GUEST_ASSERT(!(vector), \ 37 - "Expected success on " insn " for testcase '0x%x', got '0x%x'", \ 38 - testcase, vector); \ 37 + "Expected success on " insn " for testcase '0x%x', got %s", \ 38 + testcase, ex_str(vector)); \ 39 39 } while (0) 40 40 41 41 static void guest_monitor_wait(void *arg)
+47 -20
tools/testing/selftests/kvm/x86/pmu_counters_test.c
··· 75 75 [INTEL_ARCH_BRANCHES_RETIRED_INDEX] = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL }, 76 76 [INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL }, 77 77 [INTEL_ARCH_TOPDOWN_SLOTS_INDEX] = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED }, 78 + [INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX] = { X86_PMU_FEATURE_TOPDOWN_BE_BOUND, X86_PMU_FEATURE_NULL }, 79 + [INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX] = { X86_PMU_FEATURE_TOPDOWN_BAD_SPEC, X86_PMU_FEATURE_NULL }, 80 + [INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX] = { X86_PMU_FEATURE_TOPDOWN_FE_BOUND, X86_PMU_FEATURE_NULL }, 81 + [INTEL_ARCH_TOPDOWN_RETIRING_INDEX] = { X86_PMU_FEATURE_TOPDOWN_RETIRING, X86_PMU_FEATURE_NULL }, 82 + [INTEL_ARCH_LBR_INSERTS_INDEX] = { X86_PMU_FEATURE_LBR_INSERTS, X86_PMU_FEATURE_NULL }, 78 83 }; 79 84 80 85 kvm_static_assert(ARRAY_SIZE(__intel_event_to_feature) == NR_INTEL_ARCH_EVENTS); ··· 163 158 164 159 switch (idx) { 165 160 case INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX: 166 - GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED); 161 + /* Relax precise count check due to VM-EXIT/VM-ENTRY overcount issue */ 162 + if (this_pmu_has_errata(INSTRUCTIONS_RETIRED_OVERCOUNT)) 163 + GUEST_ASSERT(count >= NUM_INSNS_RETIRED); 164 + else 165 + GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED); 167 166 break; 168 167 case INTEL_ARCH_BRANCHES_RETIRED_INDEX: 169 - GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED); 168 + /* Relax precise count check due to VM-EXIT/VM-ENTRY overcount issue */ 169 + if (this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT)) 170 + GUEST_ASSERT(count >= NUM_BRANCH_INSNS_RETIRED); 171 + else 172 + GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED); 170 173 break; 171 174 case INTEL_ARCH_LLC_REFERENCES_INDEX: 172 175 case INTEL_ARCH_LLC_MISSES_INDEX: ··· 184 171 fallthrough; 185 172 case INTEL_ARCH_CPU_CYCLES_INDEX: 186 173 case INTEL_ARCH_REFERENCE_CYCLES_INDEX: 174 + case INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX: 175 + case 
INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX: 187 176 GUEST_ASSERT_NE(count, 0); 188 177 break; 189 178 case INTEL_ARCH_TOPDOWN_SLOTS_INDEX: 179 + case INTEL_ARCH_TOPDOWN_RETIRING_INDEX: 190 180 __GUEST_ASSERT(count >= NUM_INSNS_RETIRED, 191 181 "Expected top-down slots >= %u, got count = %lu", 192 182 NUM_INSNS_RETIRED, count); ··· 327 311 } 328 312 329 313 static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities, 330 - uint8_t length, uint8_t unavailable_mask) 314 + uint8_t length, uint32_t unavailable_mask) 331 315 { 332 316 struct kvm_vcpu *vcpu; 333 317 struct kvm_vm *vm; ··· 335 319 /* Testing arch events requires a vPMU (there are no negative tests). */ 336 320 if (!pmu_version) 337 321 return; 322 + 323 + unavailable_mask &= GENMASK(X86_PROPERTY_PMU_EVENTS_MASK.hi_bit, 324 + X86_PROPERTY_PMU_EVENTS_MASK.lo_bit); 338 325 339 326 vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events, 340 327 pmu_version, perf_capabilities); ··· 363 344 364 345 #define GUEST_ASSERT_PMC_MSR_ACCESS(insn, msr, expect_gp, vector) \ 365 346 __GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, \ 366 - "Expected %s on " #insn "(0x%x), got vector %u", \ 367 - expect_gp ? "#GP" : "no fault", msr, vector) \ 347 + "Expected %s on " #insn "(0x%x), got %s", \ 348 + expect_gp ? "#GP" : "no fault", msr, ex_str(vector)) \ 368 349 369 350 #define GUEST_ASSERT_PMC_VALUE(insn, msr, val, expected) \ 370 351 __GUEST_ASSERT(val == expected, \ ··· 594 575 }; 595 576 596 577 /* 578 + * To keep the total runtime reasonable, test only a handful of select, 579 + * semi-arbitrary values for the mask of unavailable PMU events. Test 580 + * 0 (all events available) and all ones (no events available) as well 581 + * as alternating bit sequencues, e.g. to detect if KVM is checking the 582 + * wrong bit(s). 
583 + */ 584 + const uint32_t unavailable_masks[] = { 585 + 0x0, 586 + 0xffffffffu, 587 + 0xaaaaaaaau, 588 + 0x55555555u, 589 + 0xf0f0f0f0u, 590 + 0x0f0f0f0fu, 591 + 0xa0a0a0a0u, 592 + 0x0a0a0a0au, 593 + 0x50505050u, 594 + 0x05050505u, 595 + }; 596 + 597 + /* 597 598 * Test up to PMU v5, which is the current maximum version defined by 598 599 * Intel, i.e. is the last version that is guaranteed to be backwards 599 600 * compatible with KVM's existing behavior. ··· 650 611 651 612 pr_info("Testing arch events, PMU version %u, perf_caps = %lx\n", 652 613 v, perf_caps[i]); 653 - /* 654 - * To keep the total runtime reasonable, test every 655 - * possible non-zero, non-reserved bitmap combination 656 - * only with the native PMU version and the full bit 657 - * vector length. 658 - */ 659 - if (v == pmu_version) { 660 - for (k = 1; k < (BIT(NR_INTEL_ARCH_EVENTS) - 1); k++) 661 - test_arch_events(v, perf_caps[i], NR_INTEL_ARCH_EVENTS, k); 662 - } 614 + 663 615 /* 664 616 * Test single bits for all PMU version and lengths up 665 617 * the number of events +1 (to verify KVM doesn't do ··· 659 629 * ones i.e. all events being available and unavailable. 660 630 */ 661 631 for (j = 0; j <= NR_INTEL_ARCH_EVENTS + 1; j++) { 662 - test_arch_events(v, perf_caps[i], j, 0); 663 - test_arch_events(v, perf_caps[i], j, 0xff); 664 - 665 - for (k = 0; k < NR_INTEL_ARCH_EVENTS; k++) 666 - test_arch_events(v, perf_caps[i], j, BIT(k)); 632 + for (k = 1; k < ARRAY_SIZE(unavailable_masks); k++) 633 + test_arch_events(v, perf_caps[i], j, unavailable_masks[k]); 667 634 } 668 635 669 636 pr_info("Testing GP counters, PMU version %u, perf_caps = %lx\n",
+3 -1
tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
··· 214 214 do { \ 215 215 uint64_t br = pmc_results.branches_retired; \ 216 216 uint64_t ir = pmc_results.instructions_retired; \ 217 + bool br_matched = this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT) ? \ 218 + br >= NUM_BRANCHES : br == NUM_BRANCHES; \ 217 219 \ 218 - if (br && br != NUM_BRANCHES) \ 220 + if (br && !br_matched) \ 219 221 pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \ 220 222 __func__, br, NUM_BRANCHES); \ 221 223 TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)", \
+4 -3
tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c
··· 29 29 u64 pebs_baseline:1; 30 30 u64 perf_metrics:1; 31 31 u64 pebs_output_pt_available:1; 32 - u64 anythread_deprecated:1; 32 + u64 pebs_timing_info:1; 33 33 }; 34 34 u64 capabilities; 35 35 } host_cap; ··· 44 44 .pebs_arch_reg = 1, 45 45 .pebs_format = -1, 46 46 .pebs_baseline = 1, 47 + .pebs_timing_info = 1, 47 48 }; 48 49 49 50 static const union perf_capabilities format_caps = { ··· 57 56 uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val); 58 57 59 58 __GUEST_ASSERT(vector == GP_VECTOR, 60 - "Expected #GP for value '0x%lx', got vector '0x%x'", 61 - val, vector); 59 + "Expected #GP for value '0x%lx', got %s", 60 + val, ex_str(vector)); 62 61 } 63 62 64 63 static void guest_code(uint64_t current_val)
+2 -2
tools/testing/selftests/kvm/x86/xapic_state_test.c
··· 120 120 __test_icr(x, icr | i); 121 121 122 122 /* 123 - * Send all flavors of IPIs to non-existent vCPUs. TODO: use number of 124 - * vCPUs, not vcpu.id + 1. Arbitrarily use vector 0xff. 123 + * Send all flavors of IPIs to non-existent vCPUs. Arbitrarily use 124 + * vector 0xff. 125 125 */ 126 126 icr = APIC_INT_ASSERT | 0xff; 127 127 for (i = 0; i < 0xff; i++) {
+6 -6
tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c
··· 81 81 82 82 vector = xsetbv_safe(0, XFEATURE_MASK_FP); 83 83 __GUEST_ASSERT(!vector, 84 - "Expected success on XSETBV(FP), got vector '0x%x'", 85 - vector); 84 + "Expected success on XSETBV(FP), got %s", 85 + ex_str(vector)); 86 86 87 87 vector = xsetbv_safe(0, supported_xcr0); 88 88 __GUEST_ASSERT(!vector, 89 - "Expected success on XSETBV(0x%lx), got vector '0x%x'", 90 - supported_xcr0, vector); 89 + "Expected success on XSETBV(0x%lx), got %s", 90 + supported_xcr0, ex_str(vector)); 91 91 92 92 for (i = 0; i < 64; i++) { 93 93 if (supported_xcr0 & BIT_ULL(i)) ··· 95 95 96 96 vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i)); 97 97 __GUEST_ASSERT(vector == GP_VECTOR, 98 - "Expected #GP on XSETBV(0x%llx), supported XCR0 = %lx, got vector '0x%x'", 99 - BIT_ULL(i), supported_xcr0, vector); 98 + "Expected #GP on XSETBV(0x%llx), supported XCR0 = %lx, got %s", 99 + BIT_ULL(i), supported_xcr0, ex_str(vector)); 100 100 } 101 101 102 102 GUEST_DONE();