Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Move vCPU thread creation and joining to common helpers

Move vCPU thread creation and joining to common helper functions. This
is in preparation for the next commit which ensures that all vCPU
threads are fully created before entering guest mode on any one
vCPU.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Message-Id: <20211111001257.1446428-3-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by David Matlack; committed by Paolo Bonzini.
81bcb261 36c5ad73

+67 -90
+6 -34
tools/testing/selftests/kvm/access_tracking_perf_test.c
··· 215 215 return true; 216 216 } 217 217 218 - static void *vcpu_thread_main(void *arg) 218 + static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args) 219 219 { 220 - struct perf_test_vcpu_args *vcpu_args = arg; 221 220 struct kvm_vm *vm = perf_test_args.vm; 222 221 int vcpu_id = vcpu_args->vcpu_id; 223 222 int current_iteration = 0; ··· 234 235 235 236 vcpu_last_completed_iteration[vcpu_id] = current_iteration; 236 237 } 237 - 238 - return NULL; 239 238 } 240 239 241 240 static void spin_wait_for_vcpu(int vcpu_id, int target_iteration) ··· 292 295 run_iteration(vm, vcpus, "Mark memory idle"); 293 296 } 294 297 295 - static pthread_t *create_vcpu_threads(int vcpus) 296 - { 297 - pthread_t *vcpu_threads; 298 - int i; 299 - 300 - vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0])); 301 - TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads."); 302 - 303 - for (i = 0; i < vcpus; i++) 304 - pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main, 305 - &perf_test_args.vcpu_args[i]); 306 - 307 - return vcpu_threads; 308 - } 309 - 310 - static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus) 311 - { 312 - int i; 313 - 314 - /* Set done to signal the vCPU threads to exit */ 315 - done = true; 316 - 317 - for (i = 0; i < vcpus; i++) 318 - pthread_join(vcpu_threads[i], NULL); 319 - } 320 - 321 298 static void run_test(enum vm_guest_mode mode, void *arg) 322 299 { 323 300 struct test_params *params = arg; 324 301 struct kvm_vm *vm; 325 - pthread_t *vcpu_threads; 326 302 int vcpus = params->vcpus; 327 303 328 304 vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes, 1, 329 305 params->backing_src, !overlap_memory_access); 330 306 331 - vcpu_threads = create_vcpu_threads(vcpus); 307 + perf_test_start_vcpu_threads(vcpus, vcpu_thread_main); 332 308 333 309 pr_info("\n"); 334 310 access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory"); ··· 316 346 mark_memory_idle(vm, vcpus); 317 347 access_memory(vm, vcpus, ACCESS_READ, 
"Reading from idle memory"); 318 348 319 - terminate_vcpu_threads(vcpu_threads, vcpus); 320 - free(vcpu_threads); 349 + /* Set done to signal the vCPU threads to exit */ 350 + done = true; 351 + 352 + perf_test_join_vcpu_threads(vcpus); 321 353 perf_test_destroy_vm(vm); 322 354 } 323 355
+3 -22
tools/testing/selftests/kvm/demand_paging_test.c
··· 42 42 static size_t demand_paging_size; 43 43 static char *guest_data_prototype; 44 44 45 - static void *vcpu_worker(void *data) 45 + static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) 46 46 { 47 47 int ret; 48 - struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data; 49 48 int vcpu_id = vcpu_args->vcpu_id; 50 49 struct kvm_vm *vm = perf_test_args.vm; 51 50 struct kvm_run *run; ··· 67 68 ts_diff = timespec_elapsed(start); 68 69 PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id, 69 70 ts_diff.tv_sec, ts_diff.tv_nsec); 70 - 71 - return NULL; 72 71 } 73 72 74 73 static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr) ··· 279 282 static void run_test(enum vm_guest_mode mode, void *arg) 280 283 { 281 284 struct test_params *p = arg; 282 - pthread_t *vcpu_threads; 283 285 pthread_t *uffd_handler_threads = NULL; 284 286 struct uffd_handler_args *uffd_args = NULL; 285 287 struct timespec start; ··· 297 301 TEST_ASSERT(guest_data_prototype, 298 302 "Failed to allocate buffer for guest data pattern"); 299 303 memset(guest_data_prototype, 0xAB, demand_paging_size); 300 - 301 - vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); 302 - TEST_ASSERT(vcpu_threads, "Memory allocation failed"); 303 304 304 305 if (p->uffd_mode) { 305 306 uffd_handler_threads = ··· 339 346 pr_info("Finished creating vCPUs and starting uffd threads\n"); 340 347 341 348 clock_gettime(CLOCK_MONOTONIC, &start); 342 - 343 - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { 344 - pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker, 345 - &perf_test_args.vcpu_args[vcpu_id]); 346 - } 347 - 349 + perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker); 348 350 pr_info("Started all vCPUs\n"); 349 351 350 - /* Wait for the vcpu threads to quit */ 351 - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { 352 - pthread_join(vcpu_threads[vcpu_id], NULL); 353 - PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id); 354 - } 355 - 352 + 
perf_test_join_vcpu_threads(nr_vcpus); 356 353 ts_diff = timespec_elapsed(start); 357 - 358 354 pr_info("All vCPU threads joined\n"); 359 355 360 356 if (p->uffd_mode) { ··· 367 385 perf_test_destroy_vm(vm); 368 386 369 387 free(guest_data_prototype); 370 - free(vcpu_threads); 371 388 if (p->uffd_mode) { 372 389 free(uffd_handler_threads); 373 390 free(uffd_args);
+4 -15
tools/testing/selftests/kvm/dirty_log_perf_test.c
··· 31 31 static int iteration; 32 32 static int vcpu_last_completed_iteration[KVM_MAX_VCPUS]; 33 33 34 - static void *vcpu_worker(void *data) 34 + static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) 35 35 { 36 36 int ret; 37 37 struct kvm_vm *vm = perf_test_args.vm; ··· 41 41 struct timespec ts_diff; 42 42 struct timespec total = (struct timespec){0}; 43 43 struct timespec avg; 44 - struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data; 45 44 int vcpu_id = vcpu_args->vcpu_id; 46 45 47 46 run = vcpu_state(vm, vcpu_id); ··· 82 83 pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n", 83 84 vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id], 84 85 total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec); 85 - 86 - return NULL; 87 86 } 88 87 89 88 struct test_params { ··· 167 170 static void run_test(enum vm_guest_mode mode, void *arg) 168 171 { 169 172 struct test_params *p = arg; 170 - pthread_t *vcpu_threads; 171 173 struct kvm_vm *vm; 172 174 unsigned long **bitmaps; 173 175 uint64_t guest_num_pages; ··· 200 204 vm_enable_cap(vm, &cap); 201 205 } 202 206 203 - vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); 204 - TEST_ASSERT(vcpu_threads, "Memory allocation failed"); 205 - 206 207 /* Start the iterations */ 207 208 iteration = 0; 208 209 host_quit = false; 209 210 210 211 clock_gettime(CLOCK_MONOTONIC, &start); 211 - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { 212 + for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) 212 213 vcpu_last_completed_iteration[vcpu_id] = -1; 213 214 214 - pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker, 215 - &perf_test_args.vcpu_args[vcpu_id]); 216 - } 215 + perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker); 217 216 218 217 /* Allow the vCPUs to populate memory */ 219 218 pr_debug("Starting iteration %d - Populating\n", iteration); ··· 277 286 278 287 /* Tell the vcpu thread to quit */ 279 288 host_quit = true; 280 - for 
(vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) 281 - pthread_join(vcpu_threads[vcpu_id], NULL); 289 + perf_test_join_vcpu_threads(nr_vcpus); 282 290 283 291 avg = timespec_div(get_dirty_log_total, p->iterations); 284 292 pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n", ··· 292 302 } 293 303 294 304 free_bitmaps(bitmaps, p->slots); 295 - free(vcpu_threads); 296 305 perf_test_destroy_vm(vm); 297 306 } 298 307
+5
tools/testing/selftests/kvm/include/perf_test_util.h
··· 8 8 #ifndef SELFTEST_KVM_PERF_TEST_UTIL_H 9 9 #define SELFTEST_KVM_PERF_TEST_UTIL_H 10 10 11 + #include <pthread.h> 12 + 11 13 #include "kvm_util.h" 12 14 13 15 /* Default guest test virtual memory offset */ ··· 46 44 void perf_test_destroy_vm(struct kvm_vm *vm); 47 45 48 46 void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract); 47 + 48 + void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *)); 49 + void perf_test_join_vcpu_threads(int vcpus); 49 50 50 51 #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
+46
tools/testing/selftests/kvm/lib/perf_test_util.c
··· 16 16 */ 17 17 static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; 18 18 19 + struct vcpu_thread { 20 + /* The id of the vCPU. */ 21 + int vcpu_id; 22 + 23 + /* The pthread backing the vCPU. */ 24 + pthread_t thread; 25 + }; 26 + 27 + /* The vCPU threads involved in this test. */ 28 + static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS]; 29 + 30 + /* The function run by each vCPU thread, as provided by the test. */ 31 + static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *); 32 + 19 33 /* 20 34 * Continuously write to the first 8 bytes of each page in the 21 35 * specified region. ··· 190 176 { 191 177 perf_test_args.wr_fract = wr_fract; 192 178 sync_global_to_guest(vm, perf_test_args); 179 + } 180 + 181 + static void *vcpu_thread_main(void *data) 182 + { 183 + struct vcpu_thread *vcpu = data; 184 + 185 + vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_id]); 186 + 187 + return NULL; 188 + } 189 + 190 + void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *)) 191 + { 192 + int vcpu_id; 193 + 194 + vcpu_thread_fn = vcpu_fn; 195 + 196 + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) { 197 + struct vcpu_thread *vcpu = &vcpu_threads[vcpu_id]; 198 + 199 + vcpu->vcpu_id = vcpu_id; 200 + 201 + pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu); 202 + } 203 + } 204 + 205 + void perf_test_join_vcpu_threads(int vcpus) 206 + { 207 + int vcpu_id; 208 + 209 + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) 210 + pthread_join(vcpu_threads[vcpu_id].thread, NULL); 193 211 }
+3 -19
tools/testing/selftests/kvm/memslot_modification_stress_test.c
··· 36 36 37 37 static bool run_vcpus = true; 38 38 39 - static void *vcpu_worker(void *data) 39 + static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) 40 40 { 41 41 int ret; 42 - struct perf_test_vcpu_args *vcpu_args = 43 - (struct perf_test_vcpu_args *)data; 44 42 int vcpu_id = vcpu_args->vcpu_id; 45 43 struct kvm_vm *vm = perf_test_args.vm; 46 44 struct kvm_run *run; ··· 57 59 "Invalid guest sync status: exit_reason=%s\n", 58 60 exit_reason_str(run->exit_reason)); 59 61 } 60 - 61 - return NULL; 62 62 } 63 63 64 64 struct memslot_antagonist_args { ··· 96 100 static void run_test(enum vm_guest_mode mode, void *arg) 97 101 { 98 102 struct test_params *p = arg; 99 - pthread_t *vcpu_threads; 100 103 struct kvm_vm *vm; 101 - int vcpu_id; 102 104 103 105 vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1, 104 106 VM_MEM_SRC_ANONYMOUS, 105 107 p->partition_vcpu_memory_access); 106 108 107 - vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); 108 - TEST_ASSERT(vcpu_threads, "Memory allocation failed"); 109 - 110 109 pr_info("Finished creating vCPUs\n"); 111 110 112 - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) 113 - pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker, 114 - &perf_test_args.vcpu_args[vcpu_id]); 111 + perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker); 115 112 116 113 pr_info("Started all vCPUs\n"); 117 114 ··· 113 124 114 125 run_vcpus = false; 115 126 116 - /* Wait for the vcpu threads to quit */ 117 - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) 118 - pthread_join(vcpu_threads[vcpu_id], NULL); 119 - 127 + perf_test_join_vcpu_threads(nr_vcpus); 120 128 pr_info("All vCPU threads joined\n"); 121 129 122 130 ucall_uninit(vm); 123 131 kvm_vm_free(vm); 124 - 125 - free(vcpu_threads); 126 132 } 127 133 128 134 static void help(char *name)