Repository: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

KVM: selftests: Unconditionally use memslot '0' for page table allocations

Drop the memslot param from virt_pg_map() and virt_map() and shove the
hardcoded '0' down to the vm_phy_page_alloc() calls.

No functional change intended.
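
For illustration, a minimal before/after sketch of a call site (the call is
taken from the diffs below; the surrounding test setup is omitted):

	/* Before: every caller passed a pgd_memslot argument, always 0. */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);

	/*
	 * After: the argument is gone; the library hardcodes memslot 0 in
	 * its vm_phy_page_alloc() calls when allocating page table pages.
	 */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);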

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622200529.3650424-13-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Sean Christopherson, committed by Paolo Bonzini (4307af73, a75a895e)

16 files changed, 31 insertions(+), 35 deletions(-)
+1 -1
tools/testing/selftests/kvm/dirty_log_test.c
@@ -760,7 +760,7 @@
 				    KVM_MEM_LOG_DIRTY_PAGES);
 
 	/* Do mapping for the dirty track memory slot */
-	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
 
 	/* Cache the HVA pointer of the region */
 	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
+2 -3
tools/testing/selftests/kvm/include/kvm_util.h
@@ -145,7 +145,7 @@
 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
 
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-	      unsigned int npages, uint32_t pgd_memslot);
+	      unsigned int npages);
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
@@ -256,8 +256,7 @@
  * Within @vm, creates a virtual translation for the page starting
  * at @vaddr to the page starting at @paddr.
  */
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-		 uint32_t memslot);
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
 
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 			     uint32_t memslot);
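Seen from a test, the simplification is just a dropped trailing argument; a
minimal sketch against the new prototype (MEM_REGION_GPA as used in
set_memory_region_test.c below):

	/*
	 * Identity-map two pages (GVA == GPA); the page table pages backing
	 * the translation are now always allocated from memslot 0 inside
	 * the library, rather than from a caller-chosen slot.
	 */
	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);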
+1 -1
tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -303,7 +303,7 @@
 				    TEST_MEM_SLOT_INDEX, guest_num_pages, 0);
 
 	/* Do mapping(GVA->GPA) for the testing memory slot */
-	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
 
 	/* Cache the HVA pointer of the region */
 	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
+7 -8
tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -83,8 +83,8 @@
 	}
 }
 
-void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-		  uint32_t pgd_memslot, uint64_t flags)
+static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+			 uint64_t flags)
 {
 	uint8_t attr_idx = flags & 7;
 	uint64_t *ptep;
@@ -105,6 +105,6 @@
 
 	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
 	if (!*ptep) {
-		*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+		*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 		*ptep |= 3;
 	}
@@ -113,14 +113,14 @@
 	case 4:
 		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
 		if (!*ptep) {
-			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 			*ptep |= 3;
 		}
 		/* fall through */
 	case 3:
 		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
 		if (!*ptep) {
-			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 			*ptep |= 3;
 		}
 		/* fall through */
@@ -135,12 +135,11 @@
 	*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
 }
 
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-		 uint32_t pgd_memslot)
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 {
 	uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */
 
-	_virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
+	_virt_pg_map(vm, vaddr, paddr, attr_idx);
 }
 
 vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+1 -1
tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -14,7 +14,7 @@
 	if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
 		return false;
 
-	virt_pg_map(vm, gpa, gpa, 0);
+	virt_pg_map(vm, gpa, gpa);
 
 	ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
 	sync_global_to_guest(vm, ucall_exit_mmio_addr);
+3 -3
tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1265,7 +1265,7 @@
 	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
 	     pages--, vaddr += vm->page_size, paddr += vm->page_size) {
 
-		virt_pg_map(vm, vaddr, paddr, 0);
+		virt_pg_map(vm, vaddr, paddr);
 
 		sparsebit_set(vm->vpages_mapped,
 			      vaddr >> vm->page_shift);
@@ -1330,7 +1330,7 @@
  * @npages starting at @vaddr to the page range starting at @paddr.
  */
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-	      unsigned int npages, uint32_t pgd_memslot)
+	      unsigned int npages)
 {
 	size_t page_size = vm->page_size;
 	size_t size = npages * page_size;
@@ -1339,7 +1339,7 @@
 	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
 
 	while (npages--) {
-		virt_pg_map(vm, vaddr, paddr, pgd_memslot);
+		virt_pg_map(vm, vaddr, paddr);
 		vaddr += page_size;
 		paddr += page_size;
 	}
+1 -1
tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -101,7 +101,7 @@
 				    guest_num_pages, 0);
 
 	/* Do mapping for the demand paging memory slot */
-	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
 
 	ucall_init(vm, NULL);
 
+4 -5
tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -36,12 +36,12 @@
  * a page table (ri == 4). Returns a suitable region/segment table entry
  * which points to the freshly allocated pages.
  */
-static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
+static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
 {
 	uint64_t taddr;
 
 	taddr = vm_phy_pages_alloc(vm,  ri < 4 ? PAGES_PER_REGION : 1,
-				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
+				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 	memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size);
 
 	return (taddr & REGION_ENTRY_ORIGIN)
@@ -49,8 +49,7 @@
 		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
 }
 
-void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
-		 uint32_t memslot)
+void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
 {
 	int ri, idx;
 	uint64_t *entry;
@@ -76,6 +77,6 @@
 	for (ri = 1; ri <= 4; ri++) {
 		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
 		if (entry[idx] & REGION_ENTRY_INVALID)
-			entry[idx] = virt_alloc_region(vm, ri, memslot);
+			entry[idx] = virt_alloc_region(vm, ri);
 		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
 	}
+4 -5
tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -221,8 +221,7 @@
 	}
 }
 
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-		 uint32_t pgd_memslot)
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 {
 	uint16_t index[4];
 	struct pageMapL4Entry *pml4e;
@@ -255,7 +256,7 @@
 	pml4e = addr_gpa2hva(vm, vm->pgd);
 	if (!pml4e[index[3]].present) {
 		pml4e[index[3]].address = vm_phy_page_alloc(vm,
-			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
+			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
 			>> vm->page_shift;
 		pml4e[index[3]].writable = true;
 		pml4e[index[3]].present = true;
@@ -266,7 +267,7 @@
 	pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
 	if (!pdpe[index[2]].present) {
 		pdpe[index[2]].address = vm_phy_page_alloc(vm,
-			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
+			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
 			>> vm->page_shift;
 		pdpe[index[2]].writable = true;
 		pdpe[index[2]].present = true;
@@ -277,7 +278,7 @@
 	pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
 	if (!pde[index[1]].present) {
 		pde[index[1]].address = vm_phy_page_alloc(vm,
-			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
+			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
 			>> vm->page_shift;
 		pde[index[1]].writable = true;
 		pde[index[1]].present = true;
+1 -1
tools/testing/selftests/kvm/memslot_perf_test.c
@@ -306,7 +306,7 @@
 		guest_addr += npages * 4096;
 	}
 
-	virt_map(data->vm, MEM_GPA, MEM_GPA, mempages, 0);
+	virt_map(data->vm, MEM_GPA, MEM_GPA, mempages);
 
 	sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
 	atomic_init(&sync->start_flag, false);
+1 -1
tools/testing/selftests/kvm/set_memory_region_test.c
@@ -132,7 +132,7 @@
 	gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
 	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
 
-	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
+	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);
 
 	/* Ditto for the host mapping so that both pages can be zeroed. */
 	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
+1 -1
tools/testing/selftests/kvm/steal_time.c
@@ -293,7 +293,7 @@
 	vm = vm_create_default(0, 0, guest_code);
 	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
-	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages, 0);
+	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
 	ucall_init(vm, NULL);
 
 	/* Add the rest of the VCPUs */
+1 -1
tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -97,7 +97,7 @@
 	 * Add an identity map for GVA range [0xc0000000, 0xc0002000). This
 	 * affects both L1 and L2. However...
 	 */
-	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES, 0);
+	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);
 
 	/*
 	 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
+1 -1
tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
@@ -423,7 +423,7 @@
 	vcpu_init_descriptor_tables(vm, HALTER_VCPU_ID);
 	vm_handle_exception(vm, IPI_VECTOR, guest_ipi_handler);
 
-	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA, 0);
+	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
 
 	vm_vcpu_add_default(vm, SENDER_VCPU_ID, sender_guest_code);
 
+1 -1
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -146,7 +146,7 @@
 	/* Map a region for the shared_info page */
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 				    SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 2, 0);
-	virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 2, 0);
+	virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 2);
 
 	struct kvm_xen_hvm_config hvmc = {
 		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
+1 -1
tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -103,7 +103,7 @@
 	/* Map a region for the hypercall pages */
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 				    HCALL_REGION_GPA, HCALL_REGION_SLOT, 2, 0);
-	virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2, 0);
+	virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2);
 
 	for (;;) {
 		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);