Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Open code and drop 'struct kvm_vm' accessors

Drop a variety of 'struct kvm_vm' accessors that wrap a single variable
now that tests can simply reference the variable directly.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Sean Christopherson; committed by Paolo Bonzini.
Commit hashes: 68c1b3e9 96a96e1a

Total diffstat: +14 -47
+1 -1
tools/testing/selftests/kvm/dirty_log_perf_test.c
··· 221 221 222 222 perf_test_set_wr_fract(vm, p->wr_fract); 223 223 224 - guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm); 224 + guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift; 225 225 guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages); 226 226 host_num_pages = vm_num_host_pages(mode, guest_num_pages); 227 227 pages_per_slot = host_num_pages / p->slots;
+4 -5
tools/testing/selftests/kvm/dirty_log_test.c
··· 713 713 vm = create_vm(mode, &vcpu, 714 714 2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code); 715 715 716 - guest_page_size = vm_get_page_size(vm); 716 + guest_page_size = vm->page_size; 717 717 /* 718 718 * A little more than 1G of guest page sized pages. Cover the 719 719 * case where the size is not aligned to 64 pages. 720 720 */ 721 - guest_num_pages = (1ul << (DIRTY_MEM_BITS - 722 - vm_get_page_shift(vm))) + 3; 721 + guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3; 723 722 guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages); 724 723 725 724 host_page_size = getpagesize(); 726 725 host_num_pages = vm_num_host_pages(mode, guest_num_pages); 727 726 728 727 if (!p->phys_offset) { 729 - guest_test_phys_mem = (vm_get_max_gfn(vm) - 730 - guest_num_pages) * guest_page_size; 728 + guest_test_phys_mem = (vm->max_gfn - guest_num_pages) * 729 + guest_page_size; 731 730 guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size); 732 731 } else { 733 732 guest_test_phys_mem = p->phys_offset;
-6
tools/testing/selftests/kvm/include/kvm_util_base.h
··· 592 592 593 593 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm); 594 594 595 - unsigned int vm_get_page_size(struct kvm_vm *vm); 596 - unsigned int vm_get_page_shift(struct kvm_vm *vm); 597 595 unsigned long vm_compute_max_gfn(struct kvm_vm *vm); 598 - uint64_t vm_get_max_gfn(struct kvm_vm *vm); 599 - int vm_get_kvm_fd(struct kvm_vm *vm); 600 - int vm_get_fd(struct kvm_vm *vm); 601 - 602 596 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size); 603 597 unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages); 604 598 unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
+1 -1
tools/testing/selftests/kvm/kvm_page_table_test.c
··· 260 260 261 261 /* Align down GPA of the testing memslot */ 262 262 if (!p->phys_offset) 263 - guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) * 263 + guest_test_phys_mem = (vm->max_gfn - guest_num_pages) * 264 264 guest_page_size; 265 265 else 266 266 guest_test_phys_mem = p->phys_offset;
-25
tools/testing/selftests/kvm/lib/kvm_util.c
··· 1827 1827 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); 1828 1828 } 1829 1829 1830 - unsigned int vm_get_page_size(struct kvm_vm *vm) 1831 - { 1832 - return vm->page_size; 1833 - } 1834 - 1835 - unsigned int vm_get_page_shift(struct kvm_vm *vm) 1836 - { 1837 - return vm->page_shift; 1838 - } 1839 - 1840 1830 unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm) 1841 1831 { 1842 1832 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; 1843 - } 1844 - 1845 - uint64_t vm_get_max_gfn(struct kvm_vm *vm) 1846 - { 1847 - return vm->max_gfn; 1848 - } 1849 - 1850 - int vm_get_kvm_fd(struct kvm_vm *vm) 1851 - { 1852 - return vm->kvm_fd; 1853 - } 1854 - 1855 - int vm_get_fd(struct kvm_vm *vm) 1856 - { 1857 - return vm->fd; 1858 1833 } 1859 1834 1860 1835 static unsigned int vm_calc_num_pages(unsigned int num_pages,
+1 -1
tools/testing/selftests/kvm/lib/perf_test_util.c
··· 159 159 pta->vm = vm; 160 160 161 161 /* Put the test region at the top guest physical memory. */ 162 - region_end_gfn = vm_get_max_gfn(vm) + 1; 162 + region_end_gfn = vm->max_gfn + 1; 163 163 164 164 #ifdef __x86_64__ 165 165 /*
+5 -6
tools/testing/selftests/kvm/max_guest_memory_test.c
··· 65 65 struct kvm_sregs sregs; 66 66 struct kvm_regs regs; 67 67 68 - vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, 69 - vm_get_page_size(vm)); 68 + vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size); 70 69 71 70 /* Snapshot regs before the first run. */ 72 71 vcpu_regs_get(vcpu, &regs); ··· 103 104 TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges"); 104 105 105 106 nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) & 106 - ~((uint64_t)vm_get_page_size(vm) - 1); 107 + ~((uint64_t)vm->page_size - 1); 107 108 TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus); 108 109 109 110 for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) { ··· 219 220 220 221 vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus); 221 222 222 - max_gpa = vm_get_max_gfn(vm) << vm_get_page_shift(vm); 223 + max_gpa = vm->max_gfn << vm->page_shift; 223 224 TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb "); 224 225 225 226 fd = kvm_memfd_alloc(slot_size, hugepages); ··· 229 230 TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed"); 230 231 231 232 /* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */ 232 - for (i = 0; i < slot_size; i += vm_get_page_size(vm)) 233 + for (i = 0; i < slot_size; i += vm->page_size) 233 234 ((uint8_t *)mem)[i] = 0xaa; 234 235 235 236 gpa = 0; ··· 248 249 for (i = 0; i < slot_size; i += size_1gb) 249 250 __virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G); 250 251 #else 251 - for (i = 0; i < slot_size; i += vm_get_page_size(vm)) 252 + for (i = 0; i < slot_size; i += vm->page_size) 252 253 virt_pg_map(vm, gpa + i, gpa + i); 253 254 #endif 254 255 }
+1 -1
tools/testing/selftests/kvm/memslot_modification_stress_test.c
··· 75 75 * Add the dummy memslot just below the perf_test_util memslot, which is 76 76 * at the top of the guest physical address space. 77 77 */ 78 - gpa = perf_test_args.gpa - pages * vm_get_page_size(vm); 78 + gpa = perf_test_args.gpa - pages * vm->page_size; 79 79 80 80 for (i = 0; i < nr_modifications; i++) { 81 81 usleep(delay);
+1 -1
tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
··· 121 121 if (vcpu) 122 122 ret = __vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, &cpuid); 123 123 else 124 - ret = __kvm_ioctl(vm_get_kvm_fd(vm), KVM_GET_SUPPORTED_HV_CPUID, &cpuid); 124 + ret = __kvm_ioctl(vm->kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, &cpuid); 125 125 126 126 TEST_ASSERT(ret == -1 && errno == E2BIG, 127 127 "%s KVM_GET_SUPPORTED_HV_CPUID didn't fail with -E2BIG when"