at v2.6.33 598 lines 17 kB view raw
1#ifndef __KVM_HOST_H 2#define __KVM_HOST_H 3 4/* 5 * This work is licensed under the terms of the GNU GPL, version 2. See 6 * the COPYING file in the top-level directory. 7 */ 8 9#include <linux/types.h> 10#include <linux/hardirq.h> 11#include <linux/list.h> 12#include <linux/mutex.h> 13#include <linux/spinlock.h> 14#include <linux/signal.h> 15#include <linux/sched.h> 16#include <linux/mm.h> 17#include <linux/preempt.h> 18#include <linux/msi.h> 19#include <asm/signal.h> 20 21#include <linux/kvm.h> 22#include <linux/kvm_para.h> 23 24#include <linux/kvm_types.h> 25 26#include <asm/kvm_host.h> 27 28/* 29 * vcpu->requests bit members 30 */ 31#define KVM_REQ_TLB_FLUSH 0 32#define KVM_REQ_MIGRATE_TIMER 1 33#define KVM_REQ_REPORT_TPR_ACCESS 2 34#define KVM_REQ_MMU_RELOAD 3 35#define KVM_REQ_TRIPLE_FAULT 4 36#define KVM_REQ_PENDING_TIMER 5 37#define KVM_REQ_UNHALT 6 38#define KVM_REQ_MMU_SYNC 7 39#define KVM_REQ_KVMCLOCK_UPDATE 8 40#define KVM_REQ_KICK 9 41 42#define KVM_USERSPACE_IRQ_SOURCE_ID 0 43 44struct kvm; 45struct kvm_vcpu; 46extern struct kmem_cache *kvm_vcpu_cache; 47 48/* 49 * It would be nice to use something smarter than a linear search, TBD... 50 * Thankfully we dont expect many devices to register (famous last words :), 51 * so until then it will suffice. At least its abstracted so we can change 52 * in one place. 53 */ 54struct kvm_io_bus { 55 int dev_count; 56#define NR_IOBUS_DEVS 6 57 struct kvm_io_device *devs[NR_IOBUS_DEVS]; 58}; 59 60void kvm_io_bus_init(struct kvm_io_bus *bus); 61void kvm_io_bus_destroy(struct kvm_io_bus *bus); 62int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len, 63 const void *val); 64int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, 65 void *val); 66int __kvm_io_bus_register_dev(struct kvm_io_bus *bus, 67 struct kvm_io_device *dev); 68int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus, 69 struct kvm_io_device *dev); 70void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus, 71 struct kvm_io_device *dev); 72void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus, 73 struct kvm_io_device *dev); 74 75struct kvm_vcpu { 76 struct kvm *kvm; 77#ifdef CONFIG_PREEMPT_NOTIFIERS 78 struct preempt_notifier preempt_notifier; 79#endif 80 int vcpu_id; 81 struct mutex mutex; 82 int cpu; 83 struct kvm_run *run; 84 unsigned long requests; 85 unsigned long guest_debug; 86 int fpu_active; 87 int guest_fpu_loaded; 88 wait_queue_head_t wq; 89 int sigset_active; 90 sigset_t sigset; 91 struct kvm_vcpu_stat stat; 92 93#ifdef CONFIG_HAS_IOMEM 94 int mmio_needed; 95 int mmio_read_completed; 96 int mmio_is_write; 97 int mmio_size; 98 unsigned char mmio_data[8]; 99 gpa_t mmio_phys_addr; 100#endif 101 102 struct kvm_vcpu_arch arch; 103}; 104 105struct kvm_memory_slot { 106 gfn_t base_gfn; 107 unsigned long npages; 108 unsigned long flags; 109 unsigned long *rmap; 110 unsigned long *dirty_bitmap; 111 struct { 112 unsigned long rmap_pde; 113 int write_count; 114 } *lpage_info[KVM_NR_PAGE_SIZES - 1]; 115 unsigned long userspace_addr; 116 int user_alloc; 117}; 118 119struct kvm_kernel_irq_routing_entry { 120 u32 gsi; 121 u32 type; 122 int (*set)(struct kvm_kernel_irq_routing_entry *e, 123 struct kvm *kvm, int irq_source_id, int level); 124 union { 125 struct { 126 unsigned irqchip; 127 unsigned pin; 128 } irqchip; 129 struct msi_msg msi; 130 }; 131 struct hlist_node link; 132}; 133 134#ifdef __KVM_HAVE_IOAPIC 135 136struct kvm_irq_routing_table { 137 int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS]; 138 struct kvm_kernel_irq_routing_entry *rt_entries; 139 u32 nr_rt_entries; 140 /* 141 * Array indexed by gsi. Each entry contains list of irq chips 142 * the gsi is connected to. 143 */ 144 struct hlist_head map[0]; 145}; 146 147#else 148 149struct kvm_irq_routing_table {}; 150 151#endif 152 153struct kvm { 154 spinlock_t mmu_lock; 155 spinlock_t requests_lock; 156 struct rw_semaphore slots_lock; 157 struct mm_struct *mm; /* userspace tied to this vm */ 158 int nmemslots; 159 struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + 160 KVM_PRIVATE_MEM_SLOTS]; 161#ifdef CONFIG_KVM_APIC_ARCHITECTURE 162 u32 bsp_vcpu_id; 163 struct kvm_vcpu *bsp_vcpu; 164#endif 165 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; 166 atomic_t online_vcpus; 167 struct list_head vm_list; 168 struct mutex lock; 169 struct kvm_io_bus mmio_bus; 170 struct kvm_io_bus pio_bus; 171#ifdef CONFIG_HAVE_KVM_EVENTFD 172 struct { 173 spinlock_t lock; 174 struct list_head items; 175 } irqfds; 176 struct list_head ioeventfds; 177#endif 178 struct kvm_vm_stat stat; 179 struct kvm_arch arch; 180 atomic_t users_count; 181#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 182 struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; 183 struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; 184#endif 185 186 struct mutex irq_lock; 187#ifdef CONFIG_HAVE_KVM_IRQCHIP 188 struct kvm_irq_routing_table *irq_routing; 189 struct hlist_head mask_notifier_list; 190 struct hlist_head irq_ack_notifier_list; 191#endif 192 193#ifdef KVM_ARCH_WANT_MMU_NOTIFIER 194 struct mmu_notifier mmu_notifier; 195 unsigned long mmu_notifier_seq; 196 long mmu_notifier_count; 197#endif 198}; 199 200/* The guest did something we don't support. */ 201#define pr_unimpl(vcpu, fmt, ...) \ 202 do { \ 203 if (printk_ratelimit()) \ 204 printk(KERN_ERR "kvm: %i: cpu%i " fmt, \ 205 current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \ 206 } while (0) 207 208#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) 209#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt) 210 211static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) 212{ 213 smp_rmb(); 214 return kvm->vcpus[i]; 215} 216 217#define kvm_for_each_vcpu(idx, vcpup, kvm) \ 218 for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \ 219 idx < atomic_read(&kvm->online_vcpus) && vcpup; \ 220 vcpup = kvm_get_vcpu(kvm, ++idx)) 221 222int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); 223void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); 224 225void vcpu_load(struct kvm_vcpu *vcpu); 226void vcpu_put(struct kvm_vcpu *vcpu); 227 228int kvm_init(void *opaque, unsigned int vcpu_size, 229 struct module *module); 230void kvm_exit(void); 231 232void kvm_get_kvm(struct kvm *kvm); 233void kvm_put_kvm(struct kvm *kvm); 234 235#define HPA_MSB ((sizeof(hpa_t) * 8) - 1) 236#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) 237static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } 238struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); 239 240extern struct page *bad_page; 241extern pfn_t bad_pfn; 242 243int is_error_page(struct page *page); 244int is_error_pfn(pfn_t pfn); 245int kvm_is_error_hva(unsigned long addr); 246int kvm_set_memory_region(struct kvm *kvm, 247 struct kvm_userspace_memory_region *mem, 248 int user_alloc); 249int __kvm_set_memory_region(struct kvm *kvm, 250 struct kvm_userspace_memory_region *mem, 251 int user_alloc); 252int kvm_arch_set_memory_region(struct kvm *kvm, 253 struct kvm_userspace_memory_region *mem, 254 struct kvm_memory_slot old, 255 int user_alloc); 256void kvm_disable_largepages(void); 257void kvm_arch_flush_shadow(struct kvm *kvm); 258gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); 259struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 260unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 261void kvm_release_page_clean(struct page *page); 262void kvm_release_page_dirty(struct page *page); 263void kvm_set_page_dirty(struct page *page); 264void kvm_set_page_accessed(struct page *page); 265 266pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); 267void kvm_release_pfn_dirty(pfn_t); 268void kvm_release_pfn_clean(pfn_t pfn); 269void kvm_set_pfn_dirty(pfn_t pfn); 270void kvm_set_pfn_accessed(pfn_t pfn); 271void kvm_get_pfn(pfn_t pfn); 272 273int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 274 int len); 275int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, 276 unsigned long len); 277int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); 278int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, 279 int offset, int len); 280int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 281 unsigned long len); 282int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); 283int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); 284struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); 285int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); 286void mark_page_dirty(struct kvm *kvm, gfn_t gfn); 287 288void kvm_vcpu_block(struct kvm_vcpu *vcpu); 289void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); 290void kvm_resched(struct kvm_vcpu *vcpu); 291void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); 292void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); 293void kvm_flush_remote_tlbs(struct kvm *kvm); 294void kvm_reload_remote_mmus(struct kvm *kvm); 295 296long kvm_arch_dev_ioctl(struct file *filp, 297 unsigned int ioctl, unsigned long arg); 298long kvm_arch_vcpu_ioctl(struct file *filp, 299 unsigned int ioctl, unsigned long arg); 300 301int kvm_dev_ioctl_check_extension(long ext); 302 303int kvm_get_dirty_log(struct kvm *kvm, 304 struct kvm_dirty_log *log, int *is_dirty); 305int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 306 struct kvm_dirty_log *log); 307 308int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 309 struct 310 kvm_userspace_memory_region *mem, 311 int user_alloc); 312long kvm_arch_vm_ioctl(struct file *filp, 313 unsigned int ioctl, unsigned long arg); 314 315int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); 316int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); 317 318int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 319 struct kvm_translation *tr); 320 321int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); 322int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); 323int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 324 struct kvm_sregs *sregs); 325int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 326 struct kvm_sregs *sregs); 327int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 328 struct kvm_mp_state *mp_state); 329int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 330 struct kvm_mp_state *mp_state); 331int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 332 struct kvm_guest_debug *dbg); 333int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); 334 335int kvm_arch_init(void *opaque); 336void kvm_arch_exit(void); 337 338int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); 339void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); 340 341void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); 342void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); 343void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); 344struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); 345int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); 346void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); 347 348int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu); 349int kvm_arch_hardware_enable(void *garbage); 350void kvm_arch_hardware_disable(void *garbage); 351int kvm_arch_hardware_setup(void); 352void kvm_arch_hardware_unsetup(void); 353void kvm_arch_check_processor_compat(void *rtn); 354int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); 355 356void kvm_free_physmem(struct kvm *kvm); 357 358struct kvm *kvm_arch_create_vm(void); 359void kvm_arch_destroy_vm(struct kvm *kvm); 360void kvm_free_all_assigned_devices(struct kvm *kvm); 361void kvm_arch_sync_events(struct kvm *kvm); 362 363int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); 364void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 365 366int kvm_is_mmio_pfn(pfn_t pfn); 367 368struct kvm_irq_ack_notifier { 369 struct hlist_node link; 370 unsigned gsi; 371 void (*irq_acked)(struct kvm_irq_ack_notifier *kian); 372}; 373 374#define KVM_ASSIGNED_MSIX_PENDING 0x1 375struct kvm_guest_msix_entry { 376 u32 vector; 377 u16 entry; 378 u16 flags; 379}; 380 381struct kvm_assigned_dev_kernel { 382 struct kvm_irq_ack_notifier ack_notifier; 383 struct work_struct interrupt_work; 384 struct list_head list; 385 int assigned_dev_id; 386 int host_busnr; 387 int host_devfn; 388 unsigned int entries_nr; 389 int host_irq; 390 bool host_irq_disabled; 391 struct msix_entry *host_msix_entries; 392 int guest_irq; 393 struct kvm_guest_msix_entry *guest_msix_entries; 394 unsigned long irq_requested_type; 395 int irq_source_id; 396 int flags; 397 struct pci_dev *dev; 398 struct kvm *kvm; 399 spinlock_t assigned_dev_lock; 400}; 401 402struct kvm_irq_mask_notifier { 403 void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); 404 int irq; 405 struct hlist_node link; 406}; 407 408void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, 409 struct kvm_irq_mask_notifier *kimn); 410void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, 411 struct kvm_irq_mask_notifier *kimn); 412void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask); 413 414#ifdef __KVM_HAVE_IOAPIC 415void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic, 416 union kvm_ioapic_redirect_entry *entry, 417 unsigned long *deliver_bitmask); 418#endif 419int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level); 420void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); 421void kvm_register_irq_ack_notifier(struct kvm *kvm, 422 struct kvm_irq_ack_notifier *kian); 423void kvm_unregister_irq_ack_notifier(struct kvm *kvm, 424 struct kvm_irq_ack_notifier *kian); 425int kvm_request_irq_source_id(struct kvm *kvm); 426void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); 427 428/* For vcpu->arch.iommu_flags */ 429#define KVM_IOMMU_CACHE_COHERENCY 0x1 430 431#ifdef CONFIG_IOMMU_API 432int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, 433 unsigned long npages); 434int kvm_iommu_map_guest(struct kvm *kvm); 435int kvm_iommu_unmap_guest(struct kvm *kvm); 436int kvm_assign_device(struct kvm *kvm, 437 struct kvm_assigned_dev_kernel *assigned_dev); 438int kvm_deassign_device(struct kvm *kvm, 439 struct kvm_assigned_dev_kernel *assigned_dev); 440#else /* CONFIG_IOMMU_API */ 441static inline int kvm_iommu_map_pages(struct kvm *kvm, 442 gfn_t base_gfn, 443 unsigned long npages) 444{ 445 return 0; 446} 447 448static inline int kvm_iommu_map_guest(struct kvm *kvm) 449{ 450 return -ENODEV; 451} 452 453static inline int kvm_iommu_unmap_guest(struct kvm *kvm) 454{ 455 return 0; 456} 457 458static inline int kvm_assign_device(struct kvm *kvm, 459 struct kvm_assigned_dev_kernel *assigned_dev) 460{ 461 return 0; 462} 463 464static inline int kvm_deassign_device(struct kvm *kvm, 465 struct kvm_assigned_dev_kernel *assigned_dev) 466{ 467 return 0; 468} 469#endif /* CONFIG_IOMMU_API */ 470 471static inline void kvm_guest_enter(void) 472{ 473 account_system_vtime(current); 474 current->flags |= PF_VCPU; 475} 476 477static inline void kvm_guest_exit(void) 478{ 479 account_system_vtime(current); 480 current->flags &= ~PF_VCPU; 481} 482 483static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot) 484{ 485 return slot - kvm->memslots; 486} 487 488static inline gpa_t gfn_to_gpa(gfn_t gfn) 489{ 490 return (gpa_t)gfn << PAGE_SHIFT; 491} 492 493static inline hpa_t pfn_to_hpa(pfn_t pfn) 494{ 495 return (hpa_t)pfn << PAGE_SHIFT; 496} 497 498static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) 499{ 500 set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); 501} 502 503enum kvm_stat_kind { 504 KVM_STAT_VM, 505 KVM_STAT_VCPU, 506}; 507 508struct kvm_stats_debugfs_item { 509 const char *name; 510 int offset; 511 enum kvm_stat_kind kind; 512 struct dentry *dentry; 513}; 514extern struct kvm_stats_debugfs_item debugfs_entries[]; 515extern struct dentry *kvm_debugfs_dir; 516 517#ifdef KVM_ARCH_WANT_MMU_NOTIFIER 518static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) 519{ 520 if (unlikely(vcpu->kvm->mmu_notifier_count)) 521 return 1; 522 /* 523 * Both reads happen under the mmu_lock and both values are 524 * modified under mmu_lock, so there's no need of smb_rmb() 525 * here in between, otherwise mmu_notifier_count should be 526 * read before mmu_notifier_seq, see 527 * mmu_notifier_invalidate_range_end write side. 528 */ 529 if (vcpu->kvm->mmu_notifier_seq != mmu_seq) 530 return 1; 531 return 0; 532} 533#endif 534 535#ifdef CONFIG_HAVE_KVM_IRQCHIP 536 537#define KVM_MAX_IRQ_ROUTES 1024 538 539int kvm_setup_default_irq_routing(struct kvm *kvm); 540int kvm_set_irq_routing(struct kvm *kvm, 541 const struct kvm_irq_routing_entry *entries, 542 unsigned nr, 543 unsigned flags); 544void kvm_free_irq_routing(struct kvm *kvm); 545 546#else 547 548static inline void kvm_free_irq_routing(struct kvm *kvm) {} 549 550#endif 551 552#ifdef CONFIG_HAVE_KVM_EVENTFD 553 554void kvm_eventfd_init(struct kvm *kvm); 555int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags); 556void kvm_irqfd_release(struct kvm *kvm); 557int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); 558 559#else 560 561static inline void kvm_eventfd_init(struct kvm *kvm) {} 562static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags) 563{ 564 return -EINVAL; 565} 566 567static inline void kvm_irqfd_release(struct kvm *kvm) {} 568static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) 569{ 570 return -ENOSYS; 571} 572 573#endif /* CONFIG_HAVE_KVM_EVENTFD */ 574 575#ifdef CONFIG_KVM_APIC_ARCHITECTURE 576static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) 577{ 578 return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id; 579} 580#endif 581 582#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT 583 584long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, 585 unsigned long arg); 586 587#else 588 589static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, 590 unsigned long arg) 591{ 592 return -ENOTTY; 593} 594 595#endif 596 597#endif 598