Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests: kvm: Add support for customized slot0 memory size

Until commit 39fe2fc96694 ("selftests: kvm: make allocation of extra
memory take effect", 2021-05-27), parameter extra_mem_pages was used
only to calculate the page table size for all the memory chunks,
because real memory allocation happened with calls of
vm_userspace_mem_region_add() after vm_create_default().

Commit 39fe2fc96694 however changed the meaning of extra_mem_pages to
the size of memory slot 0. This makes the memory allocation more
flexible, but makes it harder to account for the number of
pages needed for the page tables. For example, memslot_perf_test
has a small amount of memory in slot 0 but a lot in other slots,
and adding that memory twice (both in slot 0 and with later
calls to vm_userspace_mem_region_add()) causes an error that
was fixed in commit 000ac4295339 ("selftests: kvm: fix overlapping
addresses in memslot_perf_test", 2021-05-29).

Since both uses are sensible, add a new parameter slot0_mem_pages
to vm_create_with_vcpus() and some comments to clarify the meaning of
slot0_mem_pages and extra_mem_pages. With this change,
memslot_perf_test can go back to passing the number of memory
pages as extra_mem_pages.

Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
Message-Id: <20210608233816.423958-4-zhenzhong.duan@intel.com>
[Squashed in a single patch and rewrote the commit message. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Zhenzhong Duan and committed by Paolo Bonzini
f53b16ad 1bc603af

+45 -15
+4 -3
tools/testing/selftests/kvm/include/kvm_util.h
··· 286 286 uint32_t num_percpu_pages, void *guest_code, 287 287 uint32_t vcpuids[]); 288 288 289 - /* Like vm_create_default_with_vcpus, but accepts mode as a parameter */ 289 + /* Like vm_create_default_with_vcpus, but accepts mode and slot0 memory as a parameter */ 290 290 struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus, 291 - uint64_t extra_mem_pages, uint32_t num_percpu_pages, 292 - void *guest_code, uint32_t vcpuids[]); 291 + uint64_t slot0_mem_pages, uint64_t extra_mem_pages, 292 + uint32_t num_percpu_pages, void *guest_code, 293 + uint32_t vcpuids[]); 293 294 294 295 /* 295 296 * Adds a vCPU with reasonable defaults (e.g. a stack)
+1 -1
tools/testing/selftests/kvm/kvm_page_table_test.c
··· 268 268 269 269 /* Create a VM with enough guest pages */ 270 270 guest_num_pages = test_mem_size / guest_page_size; 271 - vm = vm_create_with_vcpus(mode, nr_vcpus, 271 + vm = vm_create_with_vcpus(mode, nr_vcpus, DEFAULT_GUEST_PHY_PAGES, 272 272 guest_num_pages, 0, guest_code, NULL); 273 273 274 274 /* Align down GPA of the testing memslot */
+38 -9
tools/testing/selftests/kvm/lib/kvm_util.c
··· 313 313 return vm; 314 314 } 315 315 316 + /* 317 + * VM Create with customized parameters 318 + * 319 + * Input Args: 320 + * mode - VM Mode (e.g. VM_MODE_P52V48_4K) 321 + * nr_vcpus - VCPU count 322 + * slot0_mem_pages - Slot0 physical memory size 323 + * extra_mem_pages - Non-slot0 physical memory total size 324 + * num_percpu_pages - Per-cpu physical memory pages 325 + * guest_code - Guest entry point 326 + * vcpuids - VCPU IDs 327 + * 328 + * Output Args: None 329 + * 330 + * Return: 331 + * Pointer to opaque structure that describes the created VM. 332 + * 333 + * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K), 334 + * with customized slot0 memory size, at least 512 pages currently. 335 + * extra_mem_pages is only used to calculate the maximum page table size, 336 + * no real memory allocation for non-slot0 memory in this function. 337 + */ 316 338 struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus, 317 - uint64_t extra_mem_pages, uint32_t num_percpu_pages, 318 - void *guest_code, uint32_t vcpuids[]) 339 + uint64_t slot0_mem_pages, uint64_t extra_mem_pages, 340 + uint32_t num_percpu_pages, void *guest_code, 341 + uint32_t vcpuids[]) 319 342 { 343 + uint64_t vcpu_pages, extra_pg_pages, pages; 344 + struct kvm_vm *vm; 345 + int i; 346 + 347 + /* Force slot0 memory size not small than DEFAULT_GUEST_PHY_PAGES */ 348 + if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES) 349 + slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES; 350 + 320 351 /* The maximum page table size for a memory region will be when the 321 352 * smallest pages are used. Considering each page contains x page 322 353 * table descriptors, the total extra size for page tables (for extra 323 354 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller 324 355 * than N/x*2. 
325 356 */ 326 - uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus; 327 - uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2; 328 - uint64_t pages = DEFAULT_GUEST_PHY_PAGES + extra_mem_pages + vcpu_pages + extra_pg_pages; 329 - struct kvm_vm *vm; 330 - int i; 357 + vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus; 358 + extra_pg_pages = (slot0_mem_pages + extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2; 359 + pages = slot0_mem_pages + vcpu_pages + extra_pg_pages; 331 360 332 361 TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS), 333 362 "nr_vcpus = %d too large for host, max-vcpus = %d", ··· 388 359 uint32_t num_percpu_pages, void *guest_code, 389 360 uint32_t vcpuids[]) 390 361 { 391 - return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages, 392 - num_percpu_pages, guest_code, vcpuids); 362 + return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, DEFAULT_GUEST_PHY_PAGES, 363 + extra_mem_pages, num_percpu_pages, guest_code, vcpuids); 393 364 } 394 365 395 366 struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
+1 -1
tools/testing/selftests/kvm/lib/perf_test_util.c
··· 69 69 TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0, 70 70 "Guest memory size is not guest page size aligned."); 71 71 72 - vm = vm_create_with_vcpus(mode, vcpus, 72 + vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES, 73 73 (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size, 74 74 0, guest_code, NULL); 75 75
+1 -1
tools/testing/selftests/kvm/memslot_perf_test.c
··· 267 267 data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots); 268 268 TEST_ASSERT(data->hva_slots, "malloc() fail"); 269 269 270 - data->vm = vm_create_default(VCPU_ID, 1024, guest_code); 270 + data->vm = vm_create_default(VCPU_ID, mempages, guest_code); 271 271 272 272 pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n", 273 273 max_mem_slots - 1, data->pages_per_slot, rempages);