Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Add option to overlap vCPU memory access

Add an option to overlap the ranges of memory each vCPU accesses instead
of partitioning them. This option will increase the probability of
multiple vCPUs faulting on the same page at the same time, and causing
interesting races, if there are bugs in the page fault handler or
elsewhere in the kernel.

Reviewed-by: Jacob Xu <jacobhxu@google.com>
Reviewed-by: Makarand Sonare <makarandsonare@google.com>

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210112214253.463999-6-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Ben Gardon and committed by Paolo Bonzini
82f91337 86753bd0

+57 -18
+25 -7
tools/testing/selftests/kvm/demand_paging_test.c
··· 250 250 struct test_params { 251 251 bool use_uffd; 252 252 useconds_t uffd_delay; 253 + bool partition_vcpu_memory_access; 253 254 }; 254 255 255 256 static void run_test(enum vm_guest_mode mode, void *arg) ··· 278 277 vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); 279 278 TEST_ASSERT(vcpu_threads, "Memory allocation failed"); 280 279 281 - perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size); 280 + perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size, 281 + p->partition_vcpu_memory_access); 282 282 283 283 if (p->use_uffd) { 284 284 uffd_handler_threads = ··· 295 293 for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { 296 294 vm_paddr_t vcpu_gpa; 297 295 void *vcpu_hva; 296 + uint64_t vcpu_mem_size; 298 297 299 - vcpu_gpa = guest_test_phys_mem + (vcpu_id * guest_percpu_mem_size); 298 + 299 + if (p->partition_vcpu_memory_access) { 300 + vcpu_gpa = guest_test_phys_mem + 301 + (vcpu_id * guest_percpu_mem_size); 302 + vcpu_mem_size = guest_percpu_mem_size; 303 + } else { 304 + vcpu_gpa = guest_test_phys_mem; 305 + vcpu_mem_size = guest_percpu_mem_size * nr_vcpus; 306 + } 300 307 PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n", 301 - vcpu_id, vcpu_gpa, vcpu_gpa + guest_percpu_mem_size); 308 + vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size); 302 309 303 310 /* Cache the HVA pointer of the region */ 304 311 vcpu_hva = addr_gpa2hva(vm, vcpu_gpa); ··· 324 313 &uffd_handler_threads[vcpu_id], 325 314 pipefds[vcpu_id * 2], 326 315 p->uffd_delay, &uffd_args[vcpu_id], 327 - vcpu_hva, guest_percpu_mem_size); 316 + vcpu_hva, vcpu_mem_size); 328 317 if (r < 0) 329 318 exit(-r); 330 319 } ··· 387 376 { 388 377 puts(""); 389 378 printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n" 390 - " [-b memory] [-v vcpus]\n", name); 379 + " [-b memory] [-v vcpus] [-o]\n", name); 391 380 guest_modes_help(); 392 381 printf(" -u: use User Fault FD to handle vCPU page\n" 393 382 " faults.\n"); ··· 398 387 " demand paged by each vCPU. e.g. 
10M or 3G.\n" 399 388 " Default: 1G\n"); 400 389 printf(" -v: specify the number of vCPUs to run.\n"); 390 + printf(" -o: Overlap guest memory accesses instead of partitioning\n" 391 + " them into a separate region of memory for each vCPU.\n"); 401 392 puts(""); 402 393 exit(0); 403 394 } ··· 407 394 int main(int argc, char *argv[]) 408 395 { 409 396 int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); 410 - struct test_params p = {}; 397 + struct test_params p = { 398 + .partition_vcpu_memory_access = true, 399 + }; 411 400 int opt; 412 401 413 402 guest_modes_append_default(); 414 403 415 - while ((opt = getopt(argc, argv, "hm:ud:b:v:")) != -1) { 404 + while ((opt = getopt(argc, argv, "hm:ud:b:v:o")) != -1) { 416 405 switch (opt) { 417 406 case 'm': 418 407 guest_modes_cmdline(optarg); ··· 433 418 nr_vcpus = atoi(optarg); 434 419 TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus, 435 420 "Invalid number of vcpus, must be between 1 and %d", max_vcpus); 421 + break; 422 + case 'o': 423 + p.partition_vcpu_memory_access = false; 436 424 break; 437 425 case 'h': 438 426 default:
+11 -3
tools/testing/selftests/kvm/dirty_log_perf_test.c
··· 92 92 unsigned long iterations; 93 93 uint64_t phys_offset; 94 94 int wr_fract; 95 + bool partition_vcpu_memory_access; 95 96 }; 96 97 97 98 static void run_test(enum vm_guest_mode mode, void *arg) ··· 130 129 vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); 131 130 TEST_ASSERT(vcpu_threads, "Memory allocation failed"); 132 131 133 - perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size); 132 + perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size, 133 + p->partition_vcpu_memory_access); 134 134 135 135 sync_global_to_guest(vm, perf_test_args); 136 136 ··· 242 240 { 243 241 puts(""); 244 242 printf("usage: %s [-h] [-i iterations] [-p offset] " 245 - "[-m mode] [-b vcpu bytes] [-v vcpus]\n", name); 243 + "[-m mode] [-b vcpu bytes] [-v vcpus] [-o]\n", name); 246 244 puts(""); 247 245 printf(" -i: specify iteration counts (default: %"PRIu64")\n", 248 246 TEST_HOST_LOOP_N); ··· 257 255 " 1/<fraction of pages to write>.\n" 258 256 " (default: 1 i.e. all pages are written to.)\n"); 259 257 printf(" -v: specify the number of vCPUs to run.\n"); 258 + printf(" -o: Overlap guest memory accesses instead of partitioning\n" 259 + " them into a separate region of memory for each vCPU.\n"); 260 260 puts(""); 261 261 exit(0); 262 262 } ··· 269 265 struct test_params p = { 270 266 .iterations = TEST_HOST_LOOP_N, 271 267 .wr_fract = 1, 268 + .partition_vcpu_memory_access = true, 272 269 }; 273 270 int opt; 274 271 ··· 280 275 281 276 guest_modes_append_default(); 282 277 283 - while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:")) != -1) { 278 + while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:o")) != -1) { 284 279 switch (opt) { 285 280 case 'i': 286 281 p.iterations = atoi(optarg); ··· 303 298 nr_vcpus = atoi(optarg); 304 299 TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus, 305 300 "Invalid number of vcpus, must be between 1 and %d", max_vcpus); 301 + break; 302 + case 'o': 303 + p.partition_vcpu_memory_access = false; 306 304 break; 307 305 case 'h': 308 306 
default:
+3 -1
tools/testing/selftests/kvm/include/perf_test_util.h
··· 46 46 struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, 47 47 uint64_t vcpu_memory_bytes); 48 48 void perf_test_destroy_vm(struct kvm_vm *vm); 49 - void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes); 49 + void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, 50 + uint64_t vcpu_memory_bytes, 51 + bool partition_vcpu_memory_access); 50 52 51 53 #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
+18 -7
tools/testing/selftests/kvm/lib/perf_test_util.c
··· 112 112 kvm_vm_free(vm); 113 113 } 114 114 115 - void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes) 115 + void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, 116 + uint64_t vcpu_memory_bytes, 117 + bool partition_vcpu_memory_access) 116 118 { 117 119 vm_paddr_t vcpu_gpa; 118 120 struct perf_test_vcpu_args *vcpu_args; ··· 124 122 vcpu_args = &perf_test_args.vcpu_args[vcpu_id]; 125 123 126 124 vcpu_args->vcpu_id = vcpu_id; 127 - vcpu_args->gva = guest_test_virt_mem + 128 - (vcpu_id * vcpu_memory_bytes); 129 - vcpu_args->pages = vcpu_memory_bytes / 130 - perf_test_args.guest_page_size; 125 + if (partition_vcpu_memory_access) { 126 + vcpu_args->gva = guest_test_virt_mem + 127 + (vcpu_id * vcpu_memory_bytes); 128 + vcpu_args->pages = vcpu_memory_bytes / 129 + perf_test_args.guest_page_size; 130 + vcpu_gpa = guest_test_phys_mem + 131 + (vcpu_id * vcpu_memory_bytes); 132 + } else { 133 + vcpu_args->gva = guest_test_virt_mem; 134 + vcpu_args->pages = (vcpus * vcpu_memory_bytes) / 135 + perf_test_args.guest_page_size; 136 + vcpu_gpa = guest_test_phys_mem; 137 + } 131 138 132 - vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes); 133 139 pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n", 134 - vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes); 140 + vcpu_id, vcpu_gpa, vcpu_gpa + 141 + (vcpu_args->pages * perf_test_args.guest_page_size)); 135 142 } 136 143 }