Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: selftests: Split arch_timer test code

Split the arch-neutral test code out of aarch64/arch_timer.c
and put it into a common arch_timer.c. This is a preparation
for sharing the timer test code with riscv.

Suggested-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Haibo Xu <haibo1.xu@intel.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>

authored by

Haibo Xu and committed by
Anup Patel
c20dd9e0 d1dafd06

+311 -280
+2 -1
tools/testing/selftests/kvm/Makefile
··· 143 143 TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test 144 144 145 145 TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs 146 - TEST_GEN_PROGS_aarch64 += aarch64/arch_timer 147 146 TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions 148 147 TEST_GEN_PROGS_aarch64 += aarch64/hypercalls 149 148 TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test ··· 154 155 TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq 155 156 TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access 156 157 TEST_GEN_PROGS_aarch64 += access_tracking_perf_test 158 + TEST_GEN_PROGS_aarch64 += arch_timer 157 159 TEST_GEN_PROGS_aarch64 += demand_paging_test 158 160 TEST_GEN_PROGS_aarch64 += dirty_log_test 159 161 TEST_GEN_PROGS_aarch64 += dirty_log_perf_test ··· 194 194 TEST_GEN_PROGS_riscv += set_memory_region_test 195 195 TEST_GEN_PROGS_riscv += steal_time 196 196 197 + SPLIT_TESTS += arch_timer 197 198 SPLIT_TESTS += get-reg-list 198 199 199 200 TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR))
+6 -279
tools/testing/selftests/kvm/aarch64/arch_timer.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * arch_timer.c - Tests the aarch64 timer IRQ functionality 4 - * 5 3 * The test validates both the virtual and physical timer IRQs using 6 - * CVAL and TVAL registers. This consitutes the four stages in the test. 7 - * The guest's main thread configures the timer interrupt for a stage 8 - * and waits for it to fire, with a timeout equal to the timer period. 9 - * It asserts that the timeout doesn't exceed the timer period plus 10 - * a user configurable error margin(default to 100us). 11 - * 12 - * On the other hand, upon receipt of an interrupt, the guest's interrupt 13 - * handler validates the interrupt by checking if the architectural state 14 - * is in compliance with the specifications. 15 - * 16 - * The test provides command-line options to configure the timer's 17 - * period (-p), number of vCPUs (-n), iterations per stage (-i) and timer 18 - * interrupt arrival error margin (-e). To stress-test the timer stack 19 - * even more, an option to migrate the vCPUs across pCPUs (-m), at a 20 - * particular rate, is also provided. 4 + * CVAL and TVAL registers. 21 5 * 22 6 * Copyright (c) 2021, Google LLC. 
23 7 */ 24 8 #define _GNU_SOURCE 25 9 26 - #include <stdlib.h> 27 - #include <pthread.h> 28 - #include <linux/kvm.h> 29 - #include <linux/sizes.h> 30 - #include <linux/bitmap.h> 31 - #include <sys/sysinfo.h> 32 - 33 - #include "kvm_util.h" 34 - #include "processor.h" 35 - #include "delay.h" 36 10 #include "arch_timer.h" 11 + #include "delay.h" 37 12 #include "gic.h" 13 + #include "processor.h" 14 + #include "timer_test.h" 38 15 #include "vgic.h" 39 - 40 - #define NR_VCPUS_DEF 4 41 - #define NR_TEST_ITERS_DEF 5 42 - #define TIMER_TEST_PERIOD_MS_DEF 10 43 - #define TIMER_TEST_ERR_MARGIN_US 100 44 - #define TIMER_TEST_MIGRATION_FREQ_MS 2 45 - 46 - struct test_args { 47 - uint32_t nr_vcpus; 48 - uint32_t nr_iter; 49 - uint32_t timer_period_ms; 50 - uint32_t migration_freq_ms; 51 - uint32_t timer_err_margin_us; 52 - struct kvm_arm_counter_offset offset; 53 - }; 54 - 55 - static struct test_args test_args = { 56 - .nr_vcpus = NR_VCPUS_DEF, 57 - .nr_iter = NR_TEST_ITERS_DEF, 58 - .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF, 59 - .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS, 60 - .timer_err_margin_us = TIMER_TEST_ERR_MARGIN_US, 61 - .offset = { .reserved = 1 }, 62 - }; 63 - 64 - #define msecs_to_usecs(msec) ((msec) * 1000ULL) 65 16 66 17 #define GICD_BASE_GPA 0x8000000ULL 67 18 #define GICR_BASE_GPA 0x80A0000ULL ··· 25 74 GUEST_STAGE_MAX, 26 75 }; 27 76 28 - /* Shared variables between host and guest */ 29 - struct test_vcpu_shared_data { 30 - uint32_t nr_iter; 31 - enum guest_stage guest_stage; 32 - uint64_t xcnt; 33 - }; 34 - 35 - static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; 36 - static pthread_t pt_vcpu_run[KVM_MAX_VCPUS]; 37 - static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS]; 38 - 39 77 static int vtimer_irq, ptimer_irq; 40 - 41 - static unsigned long *vcpu_done_map; 42 - static pthread_mutex_t vcpu_done_map_lock; 43 78 44 79 static void 45 80 guest_configure_timer_action(struct test_vcpu_shared_data *shared_data) ··· 167 230 GUEST_DONE(); 168 
231 } 169 232 170 - static void *test_vcpu_run(void *arg) 171 - { 172 - unsigned int vcpu_idx = (unsigned long)arg; 173 - struct ucall uc; 174 - struct kvm_vcpu *vcpu = vcpus[vcpu_idx]; 175 - struct kvm_vm *vm = vcpu->vm; 176 - struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx]; 177 - 178 - vcpu_run(vcpu); 179 - 180 - /* Currently, any exit from guest is an indication of completion */ 181 - pthread_mutex_lock(&vcpu_done_map_lock); 182 - __set_bit(vcpu_idx, vcpu_done_map); 183 - pthread_mutex_unlock(&vcpu_done_map_lock); 184 - 185 - switch (get_ucall(vcpu, &uc)) { 186 - case UCALL_SYNC: 187 - case UCALL_DONE: 188 - break; 189 - case UCALL_ABORT: 190 - sync_global_from_guest(vm, *shared_data); 191 - fprintf(stderr, "Guest assert failed, vcpu %u; stage; %u; iter: %u\n", 192 - vcpu_idx, shared_data->guest_stage, shared_data->nr_iter); 193 - REPORT_GUEST_ASSERT(uc); 194 - break; 195 - default: 196 - TEST_FAIL("Unexpected guest exit"); 197 - } 198 - 199 - return NULL; 200 - } 201 - 202 - static uint32_t test_get_pcpu(void) 203 - { 204 - uint32_t pcpu; 205 - unsigned int nproc_conf; 206 - cpu_set_t online_cpuset; 207 - 208 - nproc_conf = get_nprocs_conf(); 209 - sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset); 210 - 211 - /* Randomly find an available pCPU to place a vCPU on */ 212 - do { 213 - pcpu = rand() % nproc_conf; 214 - } while (!CPU_ISSET(pcpu, &online_cpuset)); 215 - 216 - return pcpu; 217 - } 218 - 219 - static int test_migrate_vcpu(unsigned int vcpu_idx) 220 - { 221 - int ret; 222 - cpu_set_t cpuset; 223 - uint32_t new_pcpu = test_get_pcpu(); 224 - 225 - CPU_ZERO(&cpuset); 226 - CPU_SET(new_pcpu, &cpuset); 227 - 228 - pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu); 229 - 230 - ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx], 231 - sizeof(cpuset), &cpuset); 232 - 233 - /* Allow the error where the vCPU thread is already finished */ 234 - TEST_ASSERT(ret == 0 || ret == ESRCH, 235 - "Failed to migrate the vCPU:%u 
to pCPU: %u; ret: %d", 236 - vcpu_idx, new_pcpu, ret); 237 - 238 - return ret; 239 - } 240 - 241 - static void *test_vcpu_migration(void *arg) 242 - { 243 - unsigned int i, n_done; 244 - bool vcpu_done; 245 - 246 - do { 247 - usleep(msecs_to_usecs(test_args.migration_freq_ms)); 248 - 249 - for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) { 250 - pthread_mutex_lock(&vcpu_done_map_lock); 251 - vcpu_done = test_bit(i, vcpu_done_map); 252 - pthread_mutex_unlock(&vcpu_done_map_lock); 253 - 254 - if (vcpu_done) { 255 - n_done++; 256 - continue; 257 - } 258 - 259 - test_migrate_vcpu(i); 260 - } 261 - } while (test_args.nr_vcpus != n_done); 262 - 263 - return NULL; 264 - } 265 - 266 - static void test_run(struct kvm_vm *vm) 267 - { 268 - pthread_t pt_vcpu_migration; 269 - unsigned int i; 270 - int ret; 271 - 272 - pthread_mutex_init(&vcpu_done_map_lock, NULL); 273 - vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus); 274 - TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap"); 275 - 276 - for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) { 277 - ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run, 278 - (void *)(unsigned long)i); 279 - TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i); 280 - } 281 - 282 - /* Spawn a thread to control the vCPU migrations */ 283 - if (test_args.migration_freq_ms) { 284 - srand(time(NULL)); 285 - 286 - ret = pthread_create(&pt_vcpu_migration, NULL, 287 - test_vcpu_migration, NULL); 288 - TEST_ASSERT(!ret, "Failed to create the migration pthread"); 289 - } 290 - 291 - 292 - for (i = 0; i < test_args.nr_vcpus; i++) 293 - pthread_join(pt_vcpu_run[i], NULL); 294 - 295 - if (test_args.migration_freq_ms) 296 - pthread_join(pt_vcpu_migration, NULL); 297 - 298 - bitmap_free(vcpu_done_map); 299 - } 300 - 301 233 static void test_init_timer_irq(struct kvm_vm *vm) 302 234 { 303 235 /* Timer initid should be same for all the vCPUs, so query only vCPU-0 */ ··· 183 377 184 378 static int gic_fd; 185 379 186 - static struct 
kvm_vm *test_vm_create(void) 380 + struct kvm_vm *test_vm_create(void) 187 381 { 188 382 struct kvm_vm *vm; 189 383 unsigned int i; ··· 214 408 return vm; 215 409 } 216 410 217 - static void test_vm_cleanup(struct kvm_vm *vm) 411 + void test_vm_cleanup(struct kvm_vm *vm) 218 412 { 219 413 close(gic_fd); 220 414 kvm_vm_free(vm); 221 - } 222 - 223 - static void test_print_help(char *name) 224 - { 225 - pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n" 226 - "\t\t [-m migration_freq_ms] [-o counter_offset]\n" 227 - "\t\t [-e timer_err_margin_us]\n", name); 228 - pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n", 229 - NR_VCPUS_DEF, KVM_MAX_VCPUS); 230 - pr_info("\t-i: Number of iterations per stage (default: %u)\n", 231 - NR_TEST_ITERS_DEF); 232 - pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n", 233 - TIMER_TEST_PERIOD_MS_DEF); 234 - pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n", 235 - TIMER_TEST_MIGRATION_FREQ_MS); 236 - pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n"); 237 - pr_info("\t-e: Interrupt arrival error margin (in us) of the guest timer (default: %u)\n", 238 - TIMER_TEST_ERR_MARGIN_US); 239 - pr_info("\t-h: print this help screen\n"); 240 - } 241 - 242 - static bool parse_args(int argc, char *argv[]) 243 - { 244 - int opt; 245 - 246 - while ((opt = getopt(argc, argv, "hn:i:p:m:o:e:")) != -1) { 247 - switch (opt) { 248 - case 'n': 249 - test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg); 250 - if (test_args.nr_vcpus > KVM_MAX_VCPUS) { 251 - pr_info("Max allowed vCPUs: %u\n", 252 - KVM_MAX_VCPUS); 253 - goto err; 254 - } 255 - break; 256 - case 'i': 257 - test_args.nr_iter = atoi_positive("Number of iterations", optarg); 258 - break; 259 - case 'p': 260 - test_args.timer_period_ms = atoi_positive("Periodicity", optarg); 261 - break; 262 - case 'm': 263 - test_args.migration_freq_ms = 
atoi_non_negative("Frequency", optarg); 264 - break; 265 - case 'e': 266 - test_args.timer_err_margin_us = atoi_non_negative("Error Margin", optarg); 267 - break; 268 - case 'o': 269 - test_args.offset.counter_offset = strtol(optarg, NULL, 0); 270 - test_args.offset.reserved = 0; 271 - break; 272 - case 'h': 273 - default: 274 - goto err; 275 - } 276 - } 277 - 278 - return true; 279 - 280 - err: 281 - test_print_help(argv[0]); 282 - return false; 283 - } 284 - 285 - int main(int argc, char *argv[]) 286 - { 287 - struct kvm_vm *vm; 288 - 289 - if (!parse_args(argc, argv)) 290 - exit(KSFT_SKIP); 291 - 292 - __TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2, 293 - "At least two physical CPUs needed for vCPU migration"); 294 - 295 - vm = test_vm_create(); 296 - test_run(vm); 297 - test_vm_cleanup(vm); 298 - 299 - return 0; 300 415 }
+257
tools/testing/selftests/kvm/arch_timer.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * arch_timer.c - Tests the arch timer IRQ functionality 4 + * 5 + * The guest's main thread configures the timer interrupt and waits 6 + * for it to fire, with a timeout equal to the timer period. 7 + * It asserts that the timeout doesn't exceed the timer period plus 8 + * a user configurable error margin(default to 100us) 9 + * 10 + * On the other hand, upon receipt of an interrupt, the guest's interrupt 11 + * handler validates the interrupt by checking if the architectural state 12 + * is in compliance with the specifications. 13 + * 14 + * The test provides command-line options to configure the timer's 15 + * period (-p), number of vCPUs (-n), iterations per stage (-i) and timer 16 + * interrupt arrival error margin (-e). To stress-test the timer stack 17 + * even more, an option to migrate the vCPUs across pCPUs (-m), at a 18 + * particular rate, is also provided. 19 + * 20 + * Copyright (c) 2021, Google LLC. 21 + */ 22 + 23 + #define _GNU_SOURCE 24 + 25 + #include <stdlib.h> 26 + #include <pthread.h> 27 + #include <linux/sizes.h> 28 + #include <linux/bitmap.h> 29 + #include <sys/sysinfo.h> 30 + 31 + #include "timer_test.h" 32 + 33 + struct test_args test_args = { 34 + .nr_vcpus = NR_VCPUS_DEF, 35 + .nr_iter = NR_TEST_ITERS_DEF, 36 + .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF, 37 + .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS, 38 + .timer_err_margin_us = TIMER_TEST_ERR_MARGIN_US, 39 + .offset = { .reserved = 1 }, 40 + }; 41 + 42 + struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; 43 + struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS]; 44 + 45 + static pthread_t pt_vcpu_run[KVM_MAX_VCPUS]; 46 + static unsigned long *vcpu_done_map; 47 + static pthread_mutex_t vcpu_done_map_lock; 48 + 49 + static void *test_vcpu_run(void *arg) 50 + { 51 + unsigned int vcpu_idx = (unsigned long)arg; 52 + struct ucall uc; 53 + struct kvm_vcpu *vcpu = vcpus[vcpu_idx]; 54 + struct kvm_vm *vm = vcpu->vm; 55 + struct 
test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx]; 56 + 57 + vcpu_run(vcpu); 58 + 59 + /* Currently, any exit from guest is an indication of completion */ 60 + pthread_mutex_lock(&vcpu_done_map_lock); 61 + __set_bit(vcpu_idx, vcpu_done_map); 62 + pthread_mutex_unlock(&vcpu_done_map_lock); 63 + 64 + switch (get_ucall(vcpu, &uc)) { 65 + case UCALL_SYNC: 66 + case UCALL_DONE: 67 + break; 68 + case UCALL_ABORT: 69 + sync_global_from_guest(vm, *shared_data); 70 + fprintf(stderr, "Guest assert failed, vcpu %u; stage; %u; iter: %u\n", 71 + vcpu_idx, shared_data->guest_stage, shared_data->nr_iter); 72 + REPORT_GUEST_ASSERT(uc); 73 + break; 74 + default: 75 + TEST_FAIL("Unexpected guest exit"); 76 + } 77 + 78 + return NULL; 79 + } 80 + 81 + static uint32_t test_get_pcpu(void) 82 + { 83 + uint32_t pcpu; 84 + unsigned int nproc_conf; 85 + cpu_set_t online_cpuset; 86 + 87 + nproc_conf = get_nprocs_conf(); 88 + sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset); 89 + 90 + /* Randomly find an available pCPU to place a vCPU on */ 91 + do { 92 + pcpu = rand() % nproc_conf; 93 + } while (!CPU_ISSET(pcpu, &online_cpuset)); 94 + 95 + return pcpu; 96 + } 97 + 98 + static int test_migrate_vcpu(unsigned int vcpu_idx) 99 + { 100 + int ret; 101 + cpu_set_t cpuset; 102 + uint32_t new_pcpu = test_get_pcpu(); 103 + 104 + CPU_ZERO(&cpuset); 105 + CPU_SET(new_pcpu, &cpuset); 106 + 107 + pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu); 108 + 109 + ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx], 110 + sizeof(cpuset), &cpuset); 111 + 112 + /* Allow the error where the vCPU thread is already finished */ 113 + TEST_ASSERT(ret == 0 || ret == ESRCH, 114 + "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d", 115 + vcpu_idx, new_pcpu, ret); 116 + 117 + return ret; 118 + } 119 + 120 + static void *test_vcpu_migration(void *arg) 121 + { 122 + unsigned int i, n_done; 123 + bool vcpu_done; 124 + 125 + do { 126 + usleep(msecs_to_usecs(test_args.migration_freq_ms)); 
127 + 128 + for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) { 129 + pthread_mutex_lock(&vcpu_done_map_lock); 130 + vcpu_done = test_bit(i, vcpu_done_map); 131 + pthread_mutex_unlock(&vcpu_done_map_lock); 132 + 133 + if (vcpu_done) { 134 + n_done++; 135 + continue; 136 + } 137 + 138 + test_migrate_vcpu(i); 139 + } 140 + } while (test_args.nr_vcpus != n_done); 141 + 142 + return NULL; 143 + } 144 + 145 + static void test_run(struct kvm_vm *vm) 146 + { 147 + pthread_t pt_vcpu_migration; 148 + unsigned int i; 149 + int ret; 150 + 151 + pthread_mutex_init(&vcpu_done_map_lock, NULL); 152 + vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus); 153 + TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap"); 154 + 155 + for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) { 156 + ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run, 157 + (void *)(unsigned long)i); 158 + TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i); 159 + } 160 + 161 + /* Spawn a thread to control the vCPU migrations */ 162 + if (test_args.migration_freq_ms) { 163 + srand(time(NULL)); 164 + 165 + ret = pthread_create(&pt_vcpu_migration, NULL, 166 + test_vcpu_migration, NULL); 167 + TEST_ASSERT(!ret, "Failed to create the migration pthread"); 168 + } 169 + 170 + 171 + for (i = 0; i < test_args.nr_vcpus; i++) 172 + pthread_join(pt_vcpu_run[i], NULL); 173 + 174 + if (test_args.migration_freq_ms) 175 + pthread_join(pt_vcpu_migration, NULL); 176 + 177 + bitmap_free(vcpu_done_map); 178 + } 179 + 180 + static void test_print_help(char *name) 181 + { 182 + pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n" 183 + "\t\t [-m migration_freq_ms] [-o counter_offset]\n" 184 + "\t\t [-e timer_err_margin_us]\n", name); 185 + pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n", 186 + NR_VCPUS_DEF, KVM_MAX_VCPUS); 187 + pr_info("\t-i: Number of iterations per stage (default: %u)\n", 188 + NR_TEST_ITERS_DEF); 189 + pr_info("\t-p: Periodicity (in ms) of 
the guest timer (default: %u)\n", 190 + TIMER_TEST_PERIOD_MS_DEF); 191 + pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n", 192 + TIMER_TEST_MIGRATION_FREQ_MS); 193 + pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n"); 194 + pr_info("\t-e: Interrupt arrival error margin (in us) of the guest timer (default: %u)\n", 195 + TIMER_TEST_ERR_MARGIN_US); 196 + pr_info("\t-h: print this help screen\n"); 197 + } 198 + 199 + static bool parse_args(int argc, char *argv[]) 200 + { 201 + int opt; 202 + 203 + while ((opt = getopt(argc, argv, "hn:i:p:m:o:e:")) != -1) { 204 + switch (opt) { 205 + case 'n': 206 + test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg); 207 + if (test_args.nr_vcpus > KVM_MAX_VCPUS) { 208 + pr_info("Max allowed vCPUs: %u\n", 209 + KVM_MAX_VCPUS); 210 + goto err; 211 + } 212 + break; 213 + case 'i': 214 + test_args.nr_iter = atoi_positive("Number of iterations", optarg); 215 + break; 216 + case 'p': 217 + test_args.timer_period_ms = atoi_positive("Periodicity", optarg); 218 + break; 219 + case 'm': 220 + test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg); 221 + break; 222 + case 'e': 223 + test_args.timer_err_margin_us = atoi_non_negative("Error Margin", optarg); 224 + break; 225 + case 'o': 226 + test_args.offset.counter_offset = strtol(optarg, NULL, 0); 227 + test_args.offset.reserved = 0; 228 + break; 229 + case 'h': 230 + default: 231 + goto err; 232 + } 233 + } 234 + 235 + return true; 236 + 237 + err: 238 + test_print_help(argv[0]); 239 + return false; 240 + } 241 + 242 + int main(int argc, char *argv[]) 243 + { 244 + struct kvm_vm *vm; 245 + 246 + if (!parse_args(argc, argv)) 247 + exit(KSFT_SKIP); 248 + 249 + __TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2, 250 + "At least two physical CPUs needed for vCPU migration"); 251 + 252 + vm = test_vm_create(); 253 + test_run(vm); 254 + test_vm_cleanup(vm); 255 + 256 + return 0; 257 + }
+2
tools/testing/selftests/kvm/include/test_util.h
··· 20 20 #include <sys/mman.h> 21 21 #include "kselftest.h" 22 22 23 + #define msecs_to_usecs(msec) ((msec) * 1000ULL) 24 + 23 25 static inline int _no_printf(const char *format, ...) { return 0; } 24 26 25 27 #ifdef DEBUG
+44
tools/testing/selftests/kvm/include/timer_test.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * timer test specific header 4 + * 5 + * Copyright (C) 2018, Google LLC 6 + */ 7 + 8 + #ifndef SELFTEST_KVM_TIMER_TEST_H 9 + #define SELFTEST_KVM_TIMER_TEST_H 10 + 11 + #include "kvm_util.h" 12 + 13 + #define NR_VCPUS_DEF 4 14 + #define NR_TEST_ITERS_DEF 5 15 + #define TIMER_TEST_PERIOD_MS_DEF 10 16 + #define TIMER_TEST_ERR_MARGIN_US 100 17 + #define TIMER_TEST_MIGRATION_FREQ_MS 2 18 + 19 + /* Timer test cmdline parameters */ 20 + struct test_args { 21 + uint32_t nr_vcpus; 22 + uint32_t nr_iter; 23 + uint32_t timer_period_ms; 24 + uint32_t migration_freq_ms; 25 + uint32_t timer_err_margin_us; 26 + /* TODO: Change arm specific type to a common one */ 27 + struct kvm_arm_counter_offset offset; 28 + }; 29 + 30 + /* Shared variables between host and guest */ 31 + struct test_vcpu_shared_data { 32 + uint32_t nr_iter; 33 + int guest_stage; 34 + uint64_t xcnt; 35 + }; 36 + 37 + extern struct test_args test_args; 38 + extern struct kvm_vcpu *vcpus[]; 39 + extern struct test_vcpu_shared_data vcpu_shared_data[]; 40 + 41 + struct kvm_vm *test_vm_create(void); 42 + void test_vm_cleanup(struct kvm_vm *vm); 43 + 44 + #endif /* SELFTEST_KVM_TIMER_TEST_H */