/* Linux include/linux/kvm_types.h, as of kernel v6.8 */
1/* SPDX-License-Identifier: GPL-2.0-only */ 2 3#ifndef __KVM_TYPES_H__ 4#define __KVM_TYPES_H__ 5 6struct kvm; 7struct kvm_async_pf; 8struct kvm_device_ops; 9struct kvm_gfn_range; 10struct kvm_interrupt; 11struct kvm_irq_routing_table; 12struct kvm_memory_slot; 13struct kvm_one_reg; 14struct kvm_run; 15struct kvm_userspace_memory_region; 16struct kvm_vcpu; 17struct kvm_vcpu_init; 18struct kvm_memslots; 19 20enum kvm_mr_change; 21 22#include <linux/bits.h> 23#include <linux/mutex.h> 24#include <linux/types.h> 25#include <linux/spinlock_types.h> 26 27#include <asm/kvm_types.h> 28 29/* 30 * Address types: 31 * 32 * gva - guest virtual address 33 * gpa - guest physical address 34 * gfn - guest frame number 35 * hva - host virtual address 36 * hpa - host physical address 37 * hfn - host frame number 38 */ 39 40typedef unsigned long gva_t; 41typedef u64 gpa_t; 42typedef u64 gfn_t; 43 44#define INVALID_GPA (~(gpa_t)0) 45 46typedef unsigned long hva_t; 47typedef u64 hpa_t; 48typedef u64 hfn_t; 49 50typedef hfn_t kvm_pfn_t; 51 52enum pfn_cache_usage { 53 KVM_GUEST_USES_PFN = BIT(0), 54 KVM_HOST_USES_PFN = BIT(1), 55 KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN, 56}; 57 58struct gfn_to_hva_cache { 59 u64 generation; 60 gpa_t gpa; 61 unsigned long hva; 62 unsigned long len; 63 struct kvm_memory_slot *memslot; 64}; 65 66struct gfn_to_pfn_cache { 67 u64 generation; 68 gpa_t gpa; 69 unsigned long uhva; 70 struct kvm_memory_slot *memslot; 71 struct kvm *kvm; 72 struct kvm_vcpu *vcpu; 73 struct list_head list; 74 rwlock_t lock; 75 struct mutex refresh_lock; 76 void *khva; 77 kvm_pfn_t pfn; 78 enum pfn_cache_usage usage; 79 bool active; 80 bool valid; 81}; 82 83#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 84/* 85 * Memory caches are used to preallocate memory ahead of various MMU flows, 86 * e.g. page fault handlers. Gracefully handling allocation failures deep in 87 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while 88 * holding MMU locks. 
Note, these caches act more like prefetch buffers than 89 * classical caches, i.e. objects are not returned to the cache on being freed. 90 * 91 * The @capacity field and @objects array are lazily initialized when the cache 92 * is topped up (__kvm_mmu_topup_memory_cache()). 93 */ 94struct kvm_mmu_memory_cache { 95 gfp_t gfp_zero; 96 gfp_t gfp_custom; 97 struct kmem_cache *kmem_cache; 98 int capacity; 99 int nobjs; 100 void **objects; 101}; 102#endif 103 104#define HALT_POLL_HIST_COUNT 32 105 106struct kvm_vm_stat_generic { 107 u64 remote_tlb_flush; 108 u64 remote_tlb_flush_requests; 109}; 110 111struct kvm_vcpu_stat_generic { 112 u64 halt_successful_poll; 113 u64 halt_attempted_poll; 114 u64 halt_poll_invalid; 115 u64 halt_wakeup; 116 u64 halt_poll_success_ns; 117 u64 halt_poll_fail_ns; 118 u64 halt_wait_ns; 119 u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT]; 120 u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT]; 121 u64 halt_wait_hist[HALT_POLL_HIST_COUNT]; 122 u64 blocking; 123}; 124 125#define KVM_STATS_NAME_SIZE 48 126 127#endif /* __KVM_TYPES_H__ */