Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Add TEST_REQUIRE macros to reduce skipping copy+paste

Add TEST_REQUIRE() and __TEST_REQUIRE() to replace the myriad open coded
instances of selftests exiting with KSFT_SKIP after printing an
informational message. In addition to reducing the amount of boilerplate
code in selftests, the UPPERCASE macro names make it easier to visually
identify a test's requirements.

Convert usage that erroneously uses something other than print_skip()
and/or "exits" with '0' or some other non-KSFT_SKIP value.

Intentionally drop a kvm_vm_free() in aarch64/debug-exceptions.c as part
of the conversion. All memory and file descriptors are freed on process
exit, so the explicit free is superfluous.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Sean Christopherson; committed by Paolo Bonzini.
Commit: 7ed397d1 (parent: 3ea9b809)

+119 -290
+3 -8
tools/testing/selftests/kvm/aarch64/arch_timer.c
··· 375 375 ucall_init(vm, NULL); 376 376 test_init_timer_irq(vm); 377 377 gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA); 378 - if (gic_fd < 0) { 379 - print_skip("Failed to create vgic-v3"); 380 - exit(KSFT_SKIP); 381 - } 378 + __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3"); 382 379 383 380 /* Make all the test's cmdline args visible to the guest */ 384 381 sync_global_to_guest(vm, test_args); ··· 465 468 if (!parse_args(argc, argv)) 466 469 exit(KSFT_SKIP); 467 470 468 - if (test_args.migration_freq_ms && get_nprocs() < 2) { 469 - print_skip("At least two physical CPUs needed for vCPU migration"); 470 - exit(KSFT_SKIP); 471 - } 471 + __TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2, 472 + "At least two physical CPUs needed for vCPU migration"); 472 473 473 474 vm = test_vm_create(); 474 475 test_run(vm);
+2 -5
tools/testing/selftests/kvm/aarch64/debug-exceptions.c
··· 259 259 vm_init_descriptor_tables(vm); 260 260 vcpu_init_descriptor_tables(vcpu); 261 261 262 - if (debug_version(vcpu) < 6) { 263 - print_skip("Armv8 debug architecture not supported."); 264 - kvm_vm_free(vm); 265 - exit(KSFT_SKIP); 266 - } 262 + __TEST_REQUIRE(debug_version(vcpu) >= 6, 263 + "Armv8 debug architecture not supported."); 267 264 268 265 vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, 269 266 ESR_EC_BRK_INS, guest_sw_bp_handler);
+6 -4
tools/testing/selftests/kvm/aarch64/get-reg-list.c
··· 395 395 struct reg_sublist *s; 396 396 397 397 for_each_sublist(c, s) { 398 - if (s->capability && !kvm_has_cap(s->capability)) { 399 - fprintf(stderr, "%s: %s not available, skipping tests\n", config_name(c), s->name); 400 - exit(KSFT_SKIP); 401 - } 398 + if (!s->capability) 399 + continue; 400 + 401 + __TEST_REQUIRE(kvm_has_cap(s->capability), 402 + "%s: %s not available, skipping tests\n", 403 + config_name(c), s->name); 402 404 } 403 405 } 404 406
+1 -4
tools/testing/selftests/kvm/aarch64/psci_test.c
··· 192 192 193 193 int main(void) 194 194 { 195 - if (!kvm_check_cap(KVM_CAP_ARM_SYSTEM_SUSPEND)) { 196 - print_skip("KVM_CAP_ARM_SYSTEM_SUSPEND not supported"); 197 - exit(KSFT_SKIP); 198 - } 195 + TEST_REQUIRE(kvm_check_cap(KVM_CAP_ARM_SYSTEM_SUSPEND)); 199 196 200 197 host_test_cpu_on(); 201 198 host_test_system_suspend();
+1 -4
tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
··· 82 82 struct kvm_vm *vm; 83 83 int ret; 84 84 85 - if (!kvm_has_cap(KVM_CAP_ARM_EL1_32BIT)) { 86 - print_skip("KVM_CAP_ARM_EL1_32BIT is not supported"); 87 - exit(KSFT_SKIP); 88 - } 85 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_EL1_32BIT)); 89 86 90 87 /* Get the preferred target type and copy that to init1 for later use */ 91 88 vm = vm_create_barebones();
+3 -7
tools/testing/selftests/kvm/aarch64/vgic_init.c
··· 703 703 } 704 704 705 705 ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V2); 706 - if (!ret) { 707 - pr_info("Running GIC_v2 tests.\n"); 708 - run_tests(KVM_DEV_TYPE_ARM_VGIC_V2); 709 - return 0; 710 - } 706 + __TEST_REQUIRE(!ret, "No GICv2 nor GICv3 support"); 711 707 712 - print_skip("No GICv2 nor GICv3 support"); 713 - exit(KSFT_SKIP); 708 + pr_info("Running GIC_v2 tests.\n"); 709 + run_tests(KVM_DEV_TYPE_ARM_VGIC_V2); 714 710 return 0; 715 711 }
+1 -4
tools/testing/selftests/kvm/aarch64/vgic_irq.c
··· 768 768 769 769 gic_fd = vgic_v3_setup(vm, 1, nr_irqs, 770 770 GICD_BASE_GPA, GICR_BASE_GPA); 771 - if (gic_fd < 0) { 772 - print_skip("Failed to create vgic-v3, skipping"); 773 - exit(KSFT_SKIP); 774 - } 771 + __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping"); 775 772 776 773 vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, 777 774 guest_irq_handlers[args.eoi_split][args.level_sensitive]);
+3 -8
tools/testing/selftests/kvm/access_tracking_perf_test.c
··· 104 104 return 0; 105 105 106 106 pfn = entry & PAGEMAP_PFN_MASK; 107 - if (!pfn) { 108 - print_skip("Looking up PFNs requires CAP_SYS_ADMIN"); 109 - exit(KSFT_SKIP); 110 - } 107 + __TEST_REQUIRE(pfn, "Looking up PFNs requires CAP_SYS_ADMIN"); 111 108 112 109 return pfn; 113 110 } ··· 377 380 } 378 381 379 382 page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR); 380 - if (page_idle_fd < 0) { 381 - print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled"); 382 - exit(KSFT_SKIP); 383 - } 383 + __TEST_REQUIRE(page_idle_fd >= 0, 384 + "CONFIG_IDLE_PAGE_TRACKING is not enabled"); 384 385 close(page_idle_fd); 385 386 386 387 for_each_guest_mode(run_test, &params);
+9
tools/testing/selftests/kvm/include/test_util.h
··· 34 34 #endif 35 35 36 36 void print_skip(const char *fmt, ...) __attribute__((format(printf, 1, 2))); 37 + #define __TEST_REQUIRE(f, fmt, ...) \ 38 + do { \ 39 + if (!(f)) { \ 40 + print_skip(fmt, ##__VA_ARGS__); \ 41 + exit(KSFT_SKIP); \ 42 + } \ 43 + } while (0) 44 + 45 + #define TEST_REQUIRE(f) __TEST_REQUIRE(f, "Requirement not met: %s", #f) 37 46 38 47 ssize_t test_write(int fd, const void *buf, size_t count); 39 48 ssize_t test_read(int fd, void *buf, size_t count);
+1 -4
tools/testing/selftests/kvm/kvm_binary_stats_test.c
··· 213 213 } 214 214 215 215 /* Check the extension for binary stats */ 216 - if (!kvm_has_cap(KVM_CAP_BINARY_STATS_FD)) { 217 - print_skip("Binary form statistics interface is not supported"); 218 - exit(KSFT_SKIP); 219 - } 216 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD)); 220 217 221 218 /* Create VMs and VCPUs */ 222 219 vms = malloc(sizeof(vms[0]) * max_vm);
+2 -4
tools/testing/selftests/kvm/kvm_create_max_vcpus.c
··· 64 64 rl.rlim_max = nr_fds_wanted; 65 65 66 66 int r = setrlimit(RLIMIT_NOFILE, &rl); 67 - if (r < 0) { 68 - printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n", 67 + __TEST_REQUIRE(r >= 0, 68 + "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n", 69 69 old_rlim_max, nr_fds_wanted); 70 - exit(KSFT_SKIP); 71 - } 72 70 } else { 73 71 TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!"); 74 72 }
+2 -8
tools/testing/selftests/kvm/lib/kvm_util.c
··· 26 26 int fd; 27 27 28 28 fd = open(path, flags); 29 - if (fd < 0) { 30 - print_skip("%s not available (errno: %d)", path, errno); 31 - exit(KSFT_SKIP); 32 - } 29 + __TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno); 33 30 34 31 return fd; 35 32 } ··· 90 93 { 91 94 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); 92 95 93 - if (!kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT)) { 94 - print_skip("immediate_exit not available"); 95 - exit(KSFT_SKIP); 96 - } 96 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT)); 97 97 98 98 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type); 99 99 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
+4 -4
tools/testing/selftests/kvm/lib/x86_64/processor.c
··· 609 609 kvm_fd = open_kvm_dev_path_or_exit(); 610 610 rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr); 611 611 close(kvm_fd); 612 + 612 613 if (rc == -1 && (errno == ENXIO || errno == EINVAL)) 613 614 exit(KSFT_SKIP); 614 615 TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc); 615 - if (!(bitmask & (1ULL << bit))) 616 - exit(KSFT_SKIP); 617 616 618 - if (!is_xfd_supported()) 619 - exit(KSFT_SKIP); 617 + TEST_REQUIRE(bitmask & (1ULL << bit)); 618 + 619 + TEST_REQUIRE(is_xfd_supported()); 620 620 621 621 rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit); 622 622
+1 -4
tools/testing/selftests/kvm/lib/x86_64/svm.c
··· 174 174 175 175 void nested_svm_check_supported(void) 176 176 { 177 - if (!nested_svm_supported()) { 178 - print_skip("nested SVM not enabled"); 179 - exit(KSFT_SKIP); 180 - } 177 + TEST_REQUIRE(nested_svm_supported()); 181 178 } 182 179 183 180 /*
+1 -4
tools/testing/selftests/kvm/lib/x86_64/vmx.c
··· 391 391 392 392 void nested_vmx_check_supported(void) 393 393 { 394 - if (!nested_vmx_supported()) { 395 - print_skip("nested VMX not enabled"); 396 - exit(KSFT_SKIP); 397 - } 394 + TEST_REQUIRE(nested_vmx_supported()); 398 395 } 399 396 400 397 static void nested_create_pte(struct kvm_vm *vm,
+5 -8
tools/testing/selftests/kvm/rseq_test.c
··· 171 171 return NULL; 172 172 } 173 173 174 - static int calc_min_max_cpu(void) 174 + static void calc_min_max_cpu(void) 175 175 { 176 176 int i, cnt, nproc; 177 177 178 - if (CPU_COUNT(&possible_mask) < 2) 179 - return -EINVAL; 178 + TEST_REQUIRE(CPU_COUNT(&possible_mask) >= 2); 180 179 181 180 /* 182 181 * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that ··· 197 198 cnt++; 198 199 } 199 200 200 - return (cnt < 2) ? -EINVAL : 0; 201 + __TEST_REQUIRE(cnt >= 2, 202 + "Only one usable CPU, task migration not possible"); 201 203 } 202 204 203 205 int main(int argc, char *argv[]) ··· 215 215 TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno, 216 216 strerror(errno)); 217 217 218 - if (calc_min_max_cpu()) { 219 - print_skip("Only one usable CPU, task migration not possible"); 220 - exit(KSFT_SKIP); 221 - } 218 + calc_min_max_cpu(); 222 219 223 220 sys_rseq(0); 224 221
+4 -7
tools/testing/selftests/kvm/s390x/memop.c
··· 756 756 757 757 int main(int argc, char *argv[]) 758 758 { 759 - int memop_cap, extension_cap, idx; 759 + int extension_cap, idx; 760 + 761 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP)); 760 762 761 763 setbuf(stdout, NULL); /* Tell stdout not to buffer its content */ 762 764 763 765 ksft_print_header(); 764 766 765 - memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP); 766 - extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION); 767 - if (!memop_cap) { 768 - ksft_exit_skip("CAP_S390_MEM_OP not supported.\n"); 769 - } 770 - 771 767 ksft_set_plan(ARRAY_SIZE(testlist)); 772 768 769 + extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION); 773 770 for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) { 774 771 if (testlist[idx].extension >= extension_cap) { 775 772 testlist[idx].test();
+2 -3
tools/testing/selftests/kvm/s390x/sync_regs_test.c
··· 229 229 struct kvm_vm *vm; 230 230 int idx; 231 231 232 + TEST_REQUIRE(kvm_check_cap(KVM_CAP_SYNC_REGS)); 233 + 232 234 /* Tell stdout not to buffer its content */ 233 235 setbuf(stdout, NULL); 234 236 235 237 ksft_print_header(); 236 - 237 - if (!kvm_check_cap(KVM_CAP_SYNC_REGS)) 238 - ksft_exit_skip("CAP_SYNC_REGS not supported"); 239 238 240 239 ksft_set_plan(ARRAY_SIZE(testlist)); 241 240
+1 -4
tools/testing/selftests/kvm/steal_time.c
··· 271 271 virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages); 272 272 ucall_init(vm, NULL); 273 273 274 - if (!is_steal_time_supported(vcpus[0])) { 275 - print_skip("steal-time not supported"); 276 - exit(KSFT_SKIP); 277 - } 274 + TEST_REQUIRE(is_steal_time_supported(vcpus[0])); 278 275 279 276 /* Run test on each VCPU */ 280 277 for (i = 0; i < NR_VCPUS; ++i) {
+3 -5
tools/testing/selftests/kvm/system_counter_offset_test.c
··· 28 28 29 29 static void check_preconditions(struct kvm_vcpu *vcpu) 30 30 { 31 - if (!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET)) 32 - return; 33 - 34 - print_skip("KVM_VCPU_TSC_OFFSET not supported; skipping test"); 35 - exit(KSFT_SKIP); 31 + __TEST_REQUIRE(!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL, 32 + KVM_VCPU_TSC_OFFSET), 33 + "KVM_VCPU_TSC_OFFSET not supported; skipping test"); 36 34 } 37 35 38 36 static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test)
+8 -15
tools/testing/selftests/kvm/x86_64/amx_test.c
··· 317 317 { 318 318 struct kvm_cpuid_entry2 *entry; 319 319 struct kvm_regs regs1, regs2; 320 - bool amx_supported = false; 321 320 struct kvm_vcpu *vcpu; 322 321 struct kvm_vm *vm; 323 322 struct kvm_run *run; ··· 333 334 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 334 335 335 336 entry = kvm_get_supported_cpuid_entry(1); 336 - if (!(entry->ecx & X86_FEATURE_XSAVE)) { 337 - print_skip("XSAVE feature not supported"); 338 - exit(KSFT_SKIP); 339 - } 337 + TEST_REQUIRE(entry->ecx & X86_FEATURE_XSAVE); 340 338 341 - if (kvm_get_cpuid_max_basic() >= 0xd) { 342 - entry = kvm_get_supported_cpuid_index(0xd, 0); 343 - amx_supported = entry && !!(entry->eax & XFEATURE_MASK_XTILE); 344 - if (!amx_supported) { 345 - print_skip("AMX is not supported by the vCPU (eax=0x%x)", entry->eax); 346 - exit(KSFT_SKIP); 347 - } 348 - /* Get xsave/restore max size */ 349 - xsave_restore_size = entry->ecx; 350 - } 339 + TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xd); 340 + 341 + entry = kvm_get_supported_cpuid_index(0xd, 0); 342 + TEST_REQUIRE(entry->eax & XFEATURE_MASK_XTILE); 343 + 344 + /* Get xsave/restore max size */ 345 + xsave_restore_size = entry->ecx; 351 346 352 347 run = vcpu->run; 353 348 vcpu_regs_get(vcpu, &regs1);
+1 -4
tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
··· 70 70 struct ucall uc; 71 71 72 72 entry = kvm_get_supported_cpuid_entry(1); 73 - if (!(entry->ecx & X86_FEATURE_XSAVE)) { 74 - print_skip("XSAVE feature not supported"); 75 - return 0; 76 - } 73 + TEST_REQUIRE(entry->ecx & X86_FEATURE_XSAVE); 77 74 78 75 /* Tell stdout not to buffer its content */ 79 76 setbuf(stdout, NULL);
+1 -4
tools/testing/selftests/kvm/x86_64/debug_regs.c
··· 95 95 1, /* cli */ 96 96 }; 97 97 98 - if (!kvm_has_cap(KVM_CAP_SET_GUEST_DEBUG)) { 99 - print_skip("KVM_CAP_SET_GUEST_DEBUG not supported"); 100 - return 0; 101 - } 98 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_GUEST_DEBUG)); 102 99 103 100 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 104 101 run = vcpu->run;
+1 -4
tools/testing/selftests/kvm/x86_64/emulator_error_test.c
··· 162 162 /* Tell stdout not to buffer its content */ 163 163 setbuf(stdout, NULL); 164 164 165 - if (!kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR)) { 166 - printf("module parameter 'allow_smaller_maxphyaddr' is not set. Skipping test.\n"); 167 - return 0; 168 - } 165 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR)); 169 166 170 167 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 171 168
+3 -6
tools/testing/selftests/kvm/x86_64/evmcs_test.c
··· 208 208 209 209 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 210 210 211 - if (!nested_vmx_supported() || 212 - !kvm_has_cap(KVM_CAP_NESTED_STATE) || 213 - !kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) { 214 - print_skip("Enlightened VMCS is unsupported"); 215 - exit(KSFT_SKIP); 216 - } 211 + TEST_REQUIRE(nested_vmx_supported()); 212 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE)); 213 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)); 217 214 218 215 vcpu_set_hv_cpuid(vcpu); 219 216 vcpu_enable_evmcs(vcpu);
+1 -4
tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
··· 156 156 157 157 int main(void) 158 158 { 159 - if (!(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { 160 - print_skip("KVM_X86_QUIRK_HYPERCALL_INSN not supported"); 161 - exit(KSFT_SKIP); 162 - } 159 + TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN); 163 160 164 161 test_fix_hypercall(); 165 162 test_fix_hypercall_disabled();
+1 -4
tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
··· 25 25 * will cover the "regular" list of MSRs, the coverage here is purely 26 26 * opportunistic and not interesting on its own. 27 27 */ 28 - if (!kvm_check_cap(KVM_CAP_GET_MSR_FEATURES)) { 29 - print_skip("KVM_CAP_GET_MSR_FEATURES not supported"); 30 - exit(KSFT_SKIP); 31 - } 28 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_GET_MSR_FEATURES)); 32 29 33 30 (void)kvm_get_msr_index_list(); 34 31
+1 -4
tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
··· 137 137 /* Tell stdout not to buffer its content */ 138 138 setbuf(stdout, NULL); 139 139 140 - if (!kvm_has_cap(KVM_CAP_HYPERV_CPUID)) { 141 - print_skip("KVM_CAP_HYPERV_CPUID not supported"); 142 - exit(KSFT_SKIP); 143 - } 140 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID)); 144 141 145 142 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 146 143
+2 -4
tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
··· 127 127 struct ucall uc; 128 128 int stage; 129 129 130 - if (!nested_svm_supported()) { 131 - print_skip("Nested SVM not supported"); 132 - exit(KSFT_SKIP); 133 - } 130 + TEST_REQUIRE(nested_svm_supported()); 131 + 134 132 /* Create VM */ 135 133 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 136 134 vcpu_set_hv_cpuid(vcpu);
+1 -5
tools/testing/selftests/kvm/x86_64/kvm_clock_test.c
··· 181 181 int flags; 182 182 183 183 flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK); 184 - if (!(flags & KVM_CLOCK_REALTIME)) { 185 - print_skip("KVM_CLOCK_REALTIME not supported; flags: %x", 186 - flags); 187 - exit(KSFT_SKIP); 188 - } 184 + TEST_REQUIRE(flags & KVM_CLOCK_REALTIME); 189 185 190 186 check_clocksource(); 191 187
+1 -4
tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
··· 204 204 struct kvm_vcpu *vcpu; 205 205 struct kvm_vm *vm; 206 206 207 - if (!kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID)) { 208 - print_skip("KVM_CAP_ENFORCE_PV_FEATURE_CPUID not supported"); 209 - exit(KSFT_SKIP); 210 - } 207 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID)); 211 208 212 209 vm = vm_create_with_one_vcpu(&vcpu, guest_main); 213 210
+2 -8
tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
··· 93 93 { 94 94 int warnings_before, warnings_after; 95 95 96 - if (!is_intel_cpu()) { 97 - print_skip("Must be run on an Intel CPU"); 98 - exit(KSFT_SKIP); 99 - } 96 + TEST_REQUIRE(is_intel_cpu()); 100 97 101 - if (vm_is_unrestricted_guest(NULL)) { 102 - print_skip("Unrestricted guest must be disabled"); 103 - exit(KSFT_SKIP); 104 - } 98 + TEST_REQUIRE(!vm_is_unrestricted_guest(NULL)); 105 99 106 100 warnings_before = get_warnings_count(); 107 101
+2 -8
tools/testing/selftests/kvm/x86_64/mmu_role_test.c
··· 117 117 } 118 118 } 119 119 120 - if (!do_gbpages && !do_maxphyaddr) { 121 - print_skip("No sub-tests selected"); 122 - return 0; 123 - } 120 + __TEST_REQUIRE(do_gbpages || do_maxphyaddr, "No sub-tests selected"); 124 121 125 122 entry = kvm_get_supported_cpuid_entry(0x80000001); 126 - if (!(entry->edx & CPUID_GBPAGES)) { 127 - print_skip("1gb hugepages not supported"); 128 - return 0; 129 - } 123 + TEST_REQUIRE(entry->edx & CPUID_GBPAGES); 130 124 131 125 if (do_gbpages) { 132 126 pr_info("Test MMIO after toggling CPUID.GBPAGES\n\n");
+1 -6
tools/testing/selftests/kvm/x86_64/platform_info_test.c
··· 70 70 { 71 71 struct kvm_vcpu *vcpu; 72 72 struct kvm_vm *vm; 73 - int rv; 74 73 uint64_t msr_platform_info; 75 74 76 75 /* Tell stdout not to buffer its content */ 77 76 setbuf(stdout, NULL); 78 77 79 - rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO); 80 - if (!rv) { 81 - print_skip("KVM_CAP_MSR_PLATFORM_INFO not supported"); 82 - exit(KSFT_SKIP); 83 - } 78 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO)); 84 79 85 80 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 86 81
+5 -20
tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
··· 443 443 444 444 int main(int argc, char *argv[]) 445 445 { 446 - void (*guest_code)(void) = NULL; 446 + void (*guest_code)(void); 447 447 struct kvm_vcpu *vcpu; 448 448 struct kvm_vm *vm; 449 - int r; 450 449 451 450 /* Tell stdout not to buffer its content */ 452 451 setbuf(stdout, NULL); 453 452 454 - r = kvm_check_cap(KVM_CAP_PMU_EVENT_FILTER); 455 - if (!r) { 456 - print_skip("KVM_CAP_PMU_EVENT_FILTER not supported"); 457 - exit(KSFT_SKIP); 458 - } 453 + TEST_REQUIRE(kvm_check_cap(KVM_CAP_PMU_EVENT_FILTER)); 459 454 460 - if (use_intel_pmu()) 461 - guest_code = intel_guest_code; 462 - else if (use_amd_pmu()) 463 - guest_code = amd_guest_code; 464 - 465 - if (!guest_code) { 466 - print_skip("Don't know how to test this guest PMU"); 467 - exit(KSFT_SKIP); 468 - } 455 + TEST_REQUIRE(use_intel_pmu() || use_amd_pmu()); 456 + guest_code = use_intel_pmu() ? intel_guest_code : amd_guest_code; 469 457 470 458 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 471 459 472 460 vm_init_descriptor_tables(vm); 473 461 vcpu_init_descriptor_tables(vcpu); 474 462 475 - if (!sanity_check_pmu(vcpu)) { 476 - print_skip("Guest PMU is not functional"); 477 - exit(KSFT_SKIP); 478 - } 463 + TEST_REQUIRE(sanity_check_pmu(vcpu)); 479 464 480 465 if (use_amd_pmu()) 481 466 test_amd_deny_list(vcpu);
+1 -4
tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
··· 123 123 124 124 int main(int argc, char *argv[]) 125 125 { 126 - if (!kvm_has_cap(KVM_CAP_SET_BOOT_CPU_ID)) { 127 - print_skip("set_boot_cpu_id not available"); 128 - return 0; 129 - } 126 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_BOOT_CPU_ID)); 130 127 131 128 run_vm_bsp(0); 132 129 run_vm_bsp(1);
+6 -13
tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
··· 400 400 { 401 401 struct kvm_cpuid_entry2 *cpuid; 402 402 403 - if (!kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM) && 404 - !kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) { 405 - print_skip("Capabilities not available"); 406 - exit(KSFT_SKIP); 407 - } 403 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)); 404 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)); 408 405 409 406 cpuid = kvm_get_supported_cpuid_entry(0x80000000); 410 - if (cpuid->eax < 0x8000001f) { 411 - print_skip("AMD memory encryption not available"); 412 - exit(KSFT_SKIP); 413 - } 407 + TEST_REQUIRE(cpuid->eax >= 0x8000001f); 408 + 414 409 cpuid = kvm_get_supported_cpuid_entry(0x8000001f); 415 - if (!(cpuid->eax & X86_FEATURE_SEV)) { 416 - print_skip("AMD SEV not available"); 417 - exit(KSFT_SKIP); 418 - } 410 + TEST_REQUIRE(cpuid->eax & X86_FEATURE_SEV); 411 + 419 412 have_sev_es = !!(cpuid->eax & X86_FEATURE_SEV_ES); 420 413 421 414 if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
+2 -8
tools/testing/selftests/kvm/x86_64/sync_regs_test.c
··· 94 94 setbuf(stdout, NULL); 95 95 96 96 cap = kvm_check_cap(KVM_CAP_SYNC_REGS); 97 - if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) { 98 - print_skip("KVM_CAP_SYNC_REGS not supported"); 99 - exit(KSFT_SKIP); 100 - } 101 - if ((cap & INVALID_SYNC_FIELD) != 0) { 102 - print_skip("The \"invalid\" field is not invalid"); 103 - exit(KSFT_SKIP); 104 - } 97 + TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS); 98 + TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD)); 105 99 106 100 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 107 101
+2 -8
tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c
··· 46 46 vm_vaddr_t vmx_pages_gva; 47 47 struct ucall uc; 48 48 49 - if (!nested_vmx_supported()) { 50 - print_skip("Nested VMX not supported"); 51 - exit(KSFT_SKIP); 52 - } 49 + nested_vmx_check_supported(); 53 50 54 - if (!kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT)) { 55 - print_skip("KVM_CAP_X86_TRIPLE_FAULT_EVENT not supported"); 56 - exit(KSFT_SKIP); 57 - } 51 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT)); 58 52 59 53 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); 60 54 vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
+1 -4
tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
··· 93 93 94 94 int main(int argc, char *argv[]) 95 95 { 96 - if (!kvm_has_cap(KVM_CAP_VM_TSC_CONTROL)) { 97 - print_skip("KVM_CAP_VM_TSC_CONTROL not available"); 98 - exit(KSFT_SKIP); 99 - } 96 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_TSC_CONTROL)); 100 97 101 98 vm = vm_create(NR_TEST_VCPUS); 102 99 vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ);
+2 -4
tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
··· 111 111 struct kvm_vcpu *vcpu; 112 112 struct kvm_vm *vm; 113 113 114 - if (!is_intel_cpu() || vm_is_unrestricted_guest(NULL)) { 115 - print_skip("Must be run with kvm_intel.unrestricted_guest=0"); 116 - exit(KSFT_SKIP); 117 - } 114 + TEST_REQUIRE(is_intel_cpu()); 115 + TEST_REQUIRE(!vm_is_unrestricted_guest(NULL)); 118 116 119 117 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 120 118 get_set_sigalrm_vcpu(vcpu);
+1 -9
tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
··· 116 116 GUEST_DONE(); 117 117 } 118 118 119 - static void tsc_scaling_check_supported(void) 120 - { 121 - if (!kvm_has_cap(KVM_CAP_TSC_CONTROL)) { 122 - print_skip("TSC scaling not supported by the HW"); 123 - exit(KSFT_SKIP); 124 - } 125 - } 126 - 127 119 static void stable_tsc_check_supported(void) 128 120 { 129 121 FILE *fp; ··· 151 159 uint64_t l2_tsc_freq = 0; 152 160 153 161 nested_vmx_check_supported(); 154 - tsc_scaling_check_supported(); 162 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL)); 155 163 stable_tsc_check_supported(); 156 164 157 165 /*
+8 -15
tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
··· 57 57 struct kvm_cpuid2 *cpuid; 58 58 struct kvm_cpuid_entry2 *entry_1_0; 59 59 struct kvm_cpuid_entry2 *entry_a_0; 60 - bool pdcm_supported = false; 61 60 struct kvm_vm *vm; 62 61 struct kvm_vcpu *vcpu; 63 62 int ret; ··· 70 71 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 71 72 cpuid = kvm_get_supported_cpuid(); 72 73 73 - if (kvm_get_cpuid_max_basic() >= 0xa) { 74 - entry_1_0 = kvm_get_supported_cpuid_index(1, 0); 75 - entry_a_0 = kvm_get_supported_cpuid_index(0xa, 0); 76 - pdcm_supported = entry_1_0 && !!(entry_1_0->ecx & X86_FEATURE_PDCM); 77 - eax.full = entry_a_0->eax; 78 - } 79 - if (!pdcm_supported) { 80 - print_skip("MSR_IA32_PERF_CAPABILITIES is not supported by the vCPU"); 81 - exit(KSFT_SKIP); 82 - } 83 - if (!eax.split.version_id) { 84 - print_skip("PMU is not supported by the vCPU"); 85 - exit(KSFT_SKIP); 86 - } 74 + TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xa); 75 + 76 + entry_1_0 = kvm_get_supported_cpuid_index(1, 0); 77 + entry_a_0 = kvm_get_supported_cpuid_index(0xa, 0); 78 + TEST_REQUIRE(entry_1_0->ecx & X86_FEATURE_PDCM); 79 + 80 + eax.full = entry_a_0->eax; 81 + __TEST_REQUIRE(eax.split.version_id, "PMU is not supported by the vCPU"); 87 82 88 83 /* testcase 1, set capabilities when we have PDCM bit */ 89 84 vcpu_set_cpuid(vcpu, cpuid);
+1 -4
tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
··· 169 169 */ 170 170 nested_vmx_check_supported(); 171 171 172 - if (!kvm_has_cap(KVM_CAP_NESTED_STATE)) { 173 - print_skip("KVM_CAP_NESTED_STATE not supported"); 174 - exit(KSFT_SKIP); 175 - } 172 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE)); 176 173 177 174 /* Create VM */ 178 175 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+1 -4
tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
··· 267 267 268 268 have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS); 269 269 270 - if (!kvm_has_cap(KVM_CAP_NESTED_STATE)) { 271 - print_skip("KVM_CAP_NESTED_STATE not available"); 272 - exit(KSFT_SKIP); 273 - } 270 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE)); 274 271 275 272 /* 276 273 * AMD currently does not implement set_nested_state, so for now we
+1 -4
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
··· 362 362 !strncmp(argv[1], "--verbose", 10)); 363 363 364 364 int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM); 365 - if (!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO) ) { 366 - print_skip("KVM_XEN_HVM_CONFIG_SHARED_INFO not available"); 367 - exit(KSFT_SKIP); 368 - } 365 + TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO); 369 366 370 367 bool do_runstate_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE); 371 368 bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
+3 -5
tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
··· 80 80 81 81 int main(int argc, char *argv[]) 82 82 { 83 + unsigned int xen_caps; 83 84 struct kvm_vcpu *vcpu; 84 85 struct kvm_vm *vm; 85 86 86 - if (!(kvm_check_cap(KVM_CAP_XEN_HVM) & 87 - KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) ) { 88 - print_skip("KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL not available"); 89 - exit(KSFT_SKIP); 90 - } 87 + xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM); 88 + TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL); 91 89 92 90 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 93 91 vcpu_set_hv_cpuid(vcpu);
+4 -9
tools/testing/selftests/kvm/x86_64/xss_msr_test.c
··· 19 19 int main(int argc, char *argv[]) 20 20 { 21 21 struct kvm_cpuid_entry2 *entry; 22 - bool xss_supported = false; 23 22 bool xss_in_msr_list; 24 23 struct kvm_vm *vm; 25 24 struct kvm_vcpu *vcpu; ··· 28 29 /* Create VM */ 29 30 vm = vm_create_with_one_vcpu(&vcpu, NULL); 30 31 31 - if (kvm_get_cpuid_max_basic() >= 0xd) { 32 - entry = kvm_get_supported_cpuid_index(0xd, 1); 33 - xss_supported = entry && !!(entry->eax & X86_FEATURE_XSAVES); 34 - } 35 - if (!xss_supported) { 36 - print_skip("IA32_XSS is not supported by the vCPU"); 37 - exit(KSFT_SKIP); 38 - } 32 + TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xd); 33 + 34 + entry = kvm_get_supported_cpuid_index(0xd, 1); 35 + TEST_REQUIRE(entry->eax & X86_FEATURE_XSAVES); 39 36 40 37 xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS); 41 38 TEST_ASSERT(xss_val == 0,