#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/marker.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5

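/*
 * Illustrative sketch (not part of the original header): the KVM_REQ_*
 * values above are plain bit numbers operated on with the generic bitops
 * against vcpu->requests. A producer queues work for a vcpu and the vcpu
 * loop consumes it roughly like this:
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush the vcpu's TLB before re-entering the guest;
 */
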
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last
 * words :), so until then it will suffice. At least it's abstracted so
 * we can change it in one place (see the illustrative sketch after the
 * declarations below).
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					   gpa_t addr, int len, int is_write);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);

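/*
 * Illustrative sketch (an assumption, not the original implementation):
 * kvm_io_bus_find_dev() is expected to perform the linear search
 * described above, asking each registered device whether it handles the
 * access:
 *
 *	for (i = 0; i < bus->dev_count; i++)
 *		if (devs[i] accepts (addr, len, is_write))
 *			return bus->devs[i];
 *	return NULL;
 */
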
struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int vcpu_id;
	struct mutex mutex;
	int cpu;
	struct kvm_run *run;
	int guest_mode;
	unsigned long requests;
	struct kvm_guest_debug guest_debug;
	int fpu_active;
	int guest_fpu_loaded;
	wait_queue_head_t wq;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
#endif

	struct kvm_vcpu_arch arch;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	struct {
		unsigned long rmap_pde;
		int write_count;
	} *lpage_info;
	unsigned long userspace_addr;
	int user_alloc;
};
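
/*
 * Illustrative note (an assumption drawn from the fields above): a slot
 * maps the guest frame range [base_gfn, base_gfn + npages) onto the
 * userspace mapping that starts at userspace_addr, so for a gfn inside
 * the slot:
 *
 *	hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
 *
 * which is what gfn_to_hva() below can be expected to compute after
 * looking up the slot.
 */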

struct kvm {
	struct mutex lock; /* protects the vcpus array and APIC accesses */
	spinlock_t mmu_lock;
	struct rw_semaphore slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...) \
 do { \
	if (printk_ratelimit()) \
		printk(KERN_ERR "kvm: %i: cpu%i " fmt, \
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

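/*
 * Hypothetical usage example for pr_unimpl() above (the message text is
 * invented for illustration):
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
 *
 * The output is rate-limited and tagged with the owning task's tgid and
 * the vcpu id.
 */
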
#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
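
/*
 * Worked example (illustration only): with a 64-bit hpa_t, HPA_MSB is 63
 * and HPA_ERR_MASK is bit 63, so an "error" host physical address is one
 * with its most significant bit set; is_error_hpa() merely shifts that
 * bit down to yield 0 or 1.
 */
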
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

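/*
 * Illustrative usage sketch (an assumption, not from the original
 * header): a typical caller copies a guest-physical range into a kernel
 * buffer and checks the result, e.g.
 *
 *	struct hypothetical_desc desc;
 *
 *	if (kvm_read_guest(kvm, gpa, &desc, sizeof(desc)))
 *		return -EFAULT;
 *
 * kvm_read_guest_atomic() is the variant intended for contexts that must
 * not sleep (e.g. while holding mmu_lock).
 */
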
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
void kvm_arch_destroy_vm(struct kvm *kvm);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);

int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

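/*
 * Worked example (illustration only): with 4 KiB pages (PAGE_SHIFT == 12),
 * gfn_to_gpa(0x100) == 0x100000, i.e. guest frame number 0x100 starts at
 * guest physical address 1 MiB.
 */
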
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

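/*
 * Illustrative sketch (an assumption): each architecture supplies the
 * debugfs_entries[] table, typically as offsets into its stat structures,
 * along the lines of:
 *
 *	struct kvm_stats_debugfs_item debugfs_entries[] = {
 *		{ "exits", offsetof(struct kvm_vcpu, stat.exits),
 *		  KVM_STAT_VCPU },
 *		{ NULL }
 *	};
 *
 * The entry shown here is hypothetical; the real tables live in the
 * per-architecture code.
 */
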
#ifdef CONFIG_KVM_TRACE
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
void kvm_trace_cleanup(void);
#else
static inline
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
#define kvm_trace_cleanup() ((void)0)
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under the mmu_lock and both values are
	 * modified under mmu_lock, so there's no need for an smp_rmb()
	 * in between; otherwise mmu_notifier_count would have to be
	 * read before mmu_notifier_seq, see the
	 * mmu_notifier_invalidate_range_end write side.
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
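
/*
 * Illustrative caller pattern (an assumption reconstructed from the
 * comment above, not part of the original header):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);	(may sleep, no locks held)
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		drop the pfn and retry the fault;
 *	... install the mapping ...
 *	spin_unlock(&kvm->mmu_lock);
 */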

#endif