Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Unconditionally allocate EPT tables in memslot 0

Drop the EPTP memslot param from all EPT helpers and shove the hardcoded
'0' down to the vm_phy_page_alloc() calls.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622200529.3650424-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Sean Christopherson; committed by Paolo Bonzini.
444d084b 4307af73

+17 -22
+4 -6
tools/testing/selftests/kvm/include/x86_64/vmx.h
··· 608 608 void nested_vmx_check_supported(void); 609 609 610 610 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, 611 - uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot); 611 + uint64_t nested_paddr, uint64_t paddr); 612 612 void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, 613 - uint64_t nested_paddr, uint64_t paddr, uint64_t size, 614 - uint32_t eptp_memslot); 613 + uint64_t nested_paddr, uint64_t paddr, uint64_t size); 615 614 void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm, 616 - uint32_t memslot, uint32_t eptp_memslot); 615 + uint32_t memslot); 617 616 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm, 618 617 uint32_t eptp_memslot); 619 - void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm, 620 - uint32_t eptp_memslot); 618 + void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm); 621 619 622 620 #endif /* SELFTEST_KVM_VMX_H */
+9 -12
tools/testing/selftests/kvm/lib/x86_64/vmx.c
··· 393 393 } 394 394 395 395 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, 396 - uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot) 396 + uint64_t nested_paddr, uint64_t paddr) 397 397 { 398 398 uint16_t index[4]; 399 399 struct eptPageTableEntry *pml4e; ··· 427 427 pml4e = vmx->eptp_hva; 428 428 if (!pml4e[index[3]].readable) { 429 429 pml4e[index[3]].address = vm_phy_page_alloc(vm, 430 - KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot) 430 + KVM_EPT_PAGE_TABLE_MIN_PADDR, 0) 431 431 >> vm->page_shift; 432 432 pml4e[index[3]].writable = true; 433 433 pml4e[index[3]].readable = true; ··· 439 439 pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size); 440 440 if (!pdpe[index[2]].readable) { 441 441 pdpe[index[2]].address = vm_phy_page_alloc(vm, 442 - KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot) 442 + KVM_EPT_PAGE_TABLE_MIN_PADDR, 0) 443 443 >> vm->page_shift; 444 444 pdpe[index[2]].writable = true; 445 445 pdpe[index[2]].readable = true; ··· 451 451 pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size); 452 452 if (!pde[index[1]].readable) { 453 453 pde[index[1]].address = vm_phy_page_alloc(vm, 454 - KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot) 454 + KVM_EPT_PAGE_TABLE_MIN_PADDR, 0) 455 455 >> vm->page_shift; 456 456 pde[index[1]].writable = true; 457 457 pde[index[1]].readable = true; ··· 492 492 * page range starting at nested_paddr to the page range starting at paddr. 
493 493 */ 494 494 void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, 495 - uint64_t nested_paddr, uint64_t paddr, uint64_t size, 496 - uint32_t eptp_memslot) 495 + uint64_t nested_paddr, uint64_t paddr, uint64_t size) 497 496 { 498 497 size_t page_size = vm->page_size; 499 498 size_t npages = size / page_size; ··· 501 502 TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); 502 503 503 504 while (npages--) { 504 - nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot); 505 + nested_pg_map(vmx, vm, nested_paddr, paddr); 505 506 nested_paddr += page_size; 506 507 paddr += page_size; 507 508 } ··· 511 512 * physical pages in VM. 512 513 */ 513 514 void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm, 514 - uint32_t memslot, uint32_t eptp_memslot) 515 + uint32_t memslot) 515 516 { 516 517 sparsebit_idx_t i, last; 517 518 struct userspace_mem_region *region = ··· 527 528 nested_map(vmx, vm, 528 529 (uint64_t)i << vm->page_shift, 529 530 (uint64_t)i << vm->page_shift, 530 - 1 << vm->page_shift, 531 - eptp_memslot); 531 + 1 << vm->page_shift); 532 532 } 533 533 } 534 534 ··· 539 541 vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp); 540 542 } 541 543 542 - void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm, 543 - uint32_t eptp_memslot) 544 + void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm) 544 545 { 545 546 vmx->apic_access = (void *)vm_vaddr_alloc_page(vm); 546 547 vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
+1 -1
tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
··· 96 96 } 97 97 98 98 vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva); 99 - prepare_virtualize_apic_accesses(vmx, vm, 0); 99 + prepare_virtualize_apic_accesses(vmx, vm); 100 100 vcpu_args_set(vm, VCPU_ID, 2, vmx_pages_gva, high_gpa); 101 101 102 102 while (!done) {
+3 -3
tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
··· 107 107 * meaning after the last call to virt_map. 108 108 */ 109 109 prepare_eptp(vmx, vm, 0); 110 - nested_map_memslot(vmx, vm, 0, 0); 111 - nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0); 112 - nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0); 110 + nested_map_memslot(vmx, vm, 0); 111 + nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096); 112 + nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096); 113 113 114 114 bmap = bitmap_alloc(TEST_MEM_PAGES); 115 115 host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);