#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/swait.h>
#include <linux/refcount.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by kvm;
 * the remaining bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits are zero, so bits 52-62 can be
 * set to mark an error pfn, and bit 63 to mark the noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the host failed to
 * translate it to a pfn.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn cannot be translated to a pfn:
 * either it is not in any slot, or the translation failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfns indicate that the gfn is not in any slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

/*
 * Architectures that use a KVM_HVA_ERR_BAD other than PAGE_OFFSET
 * (e.g. s390) provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}
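
/*
 * Illustrative sketch (not part of the API): how a caller of the
 * gfn_to_pfn() family is expected to tell the failure classes above
 * apart.  The helper name is hypothetical and the errno mapping is an
 * assumption chosen for the example.
 */
static inline int kvm_demo_classify_pfn(kvm_pfn_t pfn)
{
	if (is_noslot_pfn(pfn))
		return -ENOENT;	/* gfn is not covered by any memslot */
	if (is_error_pfn(pfn))
		return -EFAULT;	/* slot exists, host translation failed */
	return 0;		/* ordinary, usable pfn */
}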

#define KVM_REQUEST_MASK	GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP	BIT(8)
#define KVM_REQUEST_WAIT	BIT(9)
/*
 * Architecture-independent vcpu->requests bit members.
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH	(0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD	(1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER	2
#define KVM_REQ_UNHALT		3
#define KVM_REQUEST_ARCH_BASE	8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= 32 - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			       struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};
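
/*
 * Illustrative sketch (hypothetical caller of the kvm_io_bus helpers
 * declared above): an in-kernel MMIO write is offered to whichever
 * device claims [addr, addr + len) on KVM_MMIO_BUS; a negative return
 * means no device claimed the range and the access has to be bounced
 * to userspace instead.
 */
static inline bool kvm_demo_try_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr,
					   int len, const void *val)
{
	return kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, len, val) == 0;
}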

/*
 * Sometimes a large or cross-page mmio needs to be broken up into
 * separate exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	int guest_fpu_loaded, guest_xcr0_loaded;
	struct swait_queue_head wq;
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu-relax-intercept / pause-loop-exit optimization:
	 * in_spin_loop: set when a vcpu takes a pause-loop exit or a
	 * cpu-relax intercept.
	 * dy_eligible: indicates whether the vcpu is eligible for
	 * directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
	struct dentry *debugfs_dentry;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/*
	 * The memory barrier ensures a previous write to vcpu->requests cannot
	 * be reordered with the read of vcpu->mode.  It pairs with the general
	 * memory barrier following the write of vcpu->mode in VCPU RUN.
	 */
	smp_mb__before_atomic();
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps;
 * this number is chosen so that those limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
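
/*
 * Illustrative sketch (hypothetical helper): the dirty bitmap carries
 * one bit per guest page, indexed relative to the slot's base gfn, so
 * marking a page dirty boils down to a set_bit_le() at that offset
 * (see mark_page_dirty() below for the real entry point).
 */
static inline void kvm_demo_set_dirty_bit(struct kvm_memory_slot *memslot,
					  gfn_t gfn)
{
	unsigned long rel_gfn = gfn - memslot->base_gfn;

	if (memslot->dirty_bitmap && rel_gfn < memslot->npages)
		set_bit_le(rel_gfn, memslot->dirty_bitmap);
}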

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi.  Each entry contains a list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
};
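
/*
 * Illustrative sketch, condensed from kvm_vm_ioctl_create_vcpu() in
 * virt/kvm/kvm_main.c (the helper name is hypothetical and kvm->lock
 * is assumed held): the vcpu pointer must be published before
 * online_vcpus is bumped, pairing with the smp_rmb() in kvm_get_vcpu()
 * below.
 */
static inline void kvm_demo_publish_vcpu(struct kvm *kvm,
					 struct kvm_vcpu *vcpu)
{
	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();	/* store the pointer before the new count */
	atomic_inc(&kvm->online_vcpus);
}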

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,			\
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...)				\
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,		\
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)					\
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
	 * the caller has read kvm->online_vcpus before (as is the case
	 * for kvm_for_each_vcpu, for example).
	 */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *tmp;
	int idx;

	kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
		if (tmp == vcpu)
			return idx;
	BUG();
}

#define kvm_for_each_memslot(memslot, slots)				\
	for (memslot = &slots->memslots[0];				\
	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
	     memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}
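
/*
 * Illustrative sketch (hypothetical helper): memslots are RCU-managed
 * and must be read under kvm->srcu, as the srcu_dereference_check() in
 * __kvm_memslots() above documents.  A typical lookup brackets the
 * access with srcu_read_lock()/srcu_read_unlock().
 */
static inline unsigned long kvm_demo_slot_npages(struct kvm *kvm, int id)
{
	unsigned long npages;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	npages = id_to_memslot(kvm_memslots(kvm), id)->npages;
	srcu_read_unlock(&kvm->srcu, idx);

	return npages;
}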

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
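
/*
 * Illustrative sketch (hypothetical helper): the guest-memory accessors
 * above return 0 on success and a negative errno (typically -EFAULT)
 * when the range is not fully covered by memslots, so a caller checks
 * the return value before trusting the result.
 */
static inline int kvm_demo_read_guest_u32(struct kvm *kvm, gpa_t gpa, u32 *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}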
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, int offset, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
			      struct kvm_dirty_log *log, bool *is_dirty);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);
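
/*
 * Illustrative sketch, loosely modeled on the arch implementations of
 * kvm_vm_ioctl_get_dirty_log() (the locking and the final flush are
 * assumptions for the example, not the canonical flow): fetch-and-clear
 * the dirty bitmap under slots_lock, then drop stale translations if
 * anything was dirty.
 */
#if 0	/* example only */
	mutex_lock(&kvm->slots_lock);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (!r && is_dirty)
		kvm_arch_flush_shadow_all(kvm);
	mutex_unlock(&kvm->slots_lock);
#endif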
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

bool kvm_arch_has_vcpu_debugfs(void);
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif
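
/*
 * Illustrative sketch (hypothetical architecture code): an arch whose
 * struct kvm is too large for kmalloc() can define
 * __KVM_HAVE_ARCH_VM_ALLOC and supply vzalloc()-backed replacements
 * instead (linux/vmalloc.h would be needed; the bodies below are an
 * assumption for the example, not taken from any arch).
 */
#if 0	/* example only */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return vzalloc(sizeof(struct kvm));
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	vfree(kvm);
}
#endif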

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs.  On some architectures the virtual
 * interrupt controller is dynamically instantiated, so this is not
 * always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
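
/*
 * Illustrative sketch (hypothetical device code): an irq ack notifier
 * fires when the guest acks the registered gsi, which is how
 * resampling consumers re-arm a level-triggered source.  Both helper
 * names and the example gsi are assumptions.
 */
#if 0	/* example only */
static void kvm_demo_irq_acked(struct kvm_irq_ack_notifier *kian)
{
	/* The guest EOIed kian->gsi; safe to re-assert if still pending. */
}

static void kvm_demo_register_ack(struct kvm *kvm,
				  struct kvm_irq_ack_notifier *kian)
{
	kian->gsi = 10;	/* arbitrary example gsi */
	kian->irq_acked = kvm_demo_irq_acked;
	kvm_register_irq_ack_notifier(kvm, kian);
}
#endif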

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
						gpa_t gpa)
{
	return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	int offset;
	struct kvm *kvm;
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock rather
	 * than under kvm->mmu_lock, for scalability, so can't rely on
	 * kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
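
/*
 * Illustrative sketch, condensed from the arch page-fault handlers that
 * consume mmu_notifier_retry() (demo_map_pfn and the retry label are
 * hypothetical): sample mmu_notifier_seq before translating the gfn,
 * then recheck under mmu_lock and restart if an invalidation ran in
 * between.
 */
#if 0	/* example only */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(kvm, gfn);

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		spin_unlock(&kvm->mmu_lock);
		goto retry;	/* pfn may already be stale */
	}
	demo_map_pfn(vcpu, gfn, pfn);
	spin_unlock(&kvm->mmu_lock);
#endif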

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
#elif defined(CONFIG_ARM64)
#define KVM_MAX_IRQ_ROUTES 4096
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif
void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(req, vcpu)) {
		kvm_clear_request(req, vcpu);

		/*
		 * Ensure the rest of the request is visible to
		 * kvm_check_request's caller.  Paired with the smp_wmb
		 * in kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}
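
/*
 * Illustrative sketch (hypothetical fragments): requests are raised
 * with kvm_make_request() plus a kick, and consumed in the vcpu run
 * loop before guest entry; the barriers in the helpers above make the
 * producer's writes visible to the consumer.  demo_flush_guest_tlb is
 * a stand-in for an arch hook.
 */
#if 0	/* example only */
	/* producer, any context: */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_vcpu_kick(vcpu);

	/* consumer, vcpu run loop: */
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			demo_flush_guest_tlb(vcpu);
	}
#endif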
1147 */ 1148 smp_mb__after_atomic(); 1149 return true; 1150 } else { 1151 return false; 1152 } 1153} 1154 1155extern bool kvm_rebooting; 1156 1157extern unsigned int halt_poll_ns; 1158extern unsigned int halt_poll_ns_grow; 1159extern unsigned int halt_poll_ns_shrink; 1160 1161struct kvm_device { 1162 struct kvm_device_ops *ops; 1163 struct kvm *kvm; 1164 void *private; 1165 struct list_head vm_node; 1166}; 1167 1168/* create, destroy, and name are mandatory */ 1169struct kvm_device_ops { 1170 const char *name; 1171 1172 /* 1173 * create is called holding kvm->lock and any operations not suitable 1174 * to do while holding the lock should be deferred to init (see 1175 * below). 1176 */ 1177 int (*create)(struct kvm_device *dev, u32 type); 1178 1179 /* 1180 * init is called after create if create is successful and is called 1181 * outside of holding kvm->lock. 1182 */ 1183 void (*init)(struct kvm_device *dev); 1184 1185 /* 1186 * Destroy is responsible for freeing dev. 1187 * 1188 * Destroy may be called before or after destructors are called 1189 * on emulated I/O regions, depending on whether a reference is 1190 * held by a vcpu or other kvm component that gets destroyed 1191 * after the emulated I/O. 1192 */ 1193 void (*destroy)(struct kvm_device *dev); 1194 1195 int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); 1196 int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); 1197 int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); 1198 long (*ioctl)(struct kvm_device *dev, unsigned int ioctl, 1199 unsigned long arg); 1200}; 1201 1202void kvm_device_get(struct kvm_device *dev); 1203void kvm_device_put(struct kvm_device *dev); 1204struct kvm_device *kvm_device_from_filp(struct file *filp); 1205int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); 1206void kvm_unregister_device_ops(u32 type); 1207 1208extern struct kvm_device_ops kvm_mpic_ops; 1209extern struct kvm_device_ops kvm_arm_vgic_v2_ops; 1210extern struct kvm_device_ops kvm_arm_vgic_v3_ops; 1211 1212#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 1213 1214static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) 1215{ 1216 vcpu->spin_loop.in_spin_loop = val; 1217} 1218static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) 1219{ 1220 vcpu->spin_loop.dy_eligible = val; 1221} 1222 1223#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ 1224 1225static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) 1226{ 1227} 1228 1229static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) 1230{ 1231} 1232#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ 1233 1234#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS 1235bool kvm_arch_has_irq_bypass(void); 1236int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, 1237 struct irq_bypass_producer *); 1238void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, 1239 struct irq_bypass_producer *); 1240void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); 1241void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); 1242int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, 1243 uint32_t guest_irq, bool set); 1244#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ 1245 1246#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS 1247/* If we wakeup during the poll time, was it a sucessful poll? 

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
				     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
				      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wake up during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#endif