#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by kvm;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)
#define KVM_MEMSLOT_INCOHERENT	(1UL << 17)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bit 62 ~ bit 52 to indicate an error pfn,
 * and mask bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but translating it
 * to a pfn on the host failed.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn cannot be translated to
 * a pfn - either it is not in a slot or translating it to a pfn
 * failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in a slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
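
/*
 * Illustrative sketch (not part of the original header): typical
 * handling of the pfn error encoding above by a caller that has just
 * obtained a pfn via gfn_to_pfn().  The "example_" name is
 * hypothetical.
 */
static inline bool example_pfn_usable(pfn_t pfn)
{
	/* Rejects both in-slot translation failures and noslot gfns. */
	return !is_error_noslot_pfn(pfn);
}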

/*
 * Architectures where KVM_HVA_ERR_BAD is something other than
 * PAGE_OFFSET (e.g. s390) provide their own defines and
 * kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH	    0
#define KVM_REQ_MIGRATE_TIMER	    1
#define KVM_REQ_REPORT_TPR_ACCESS   2
#define KVM_REQ_MMU_RELOAD	    3
#define KVM_REQ_TRIPLE_FAULT	    4
#define KVM_REQ_PENDING_TIMER	    5
#define KVM_REQ_UNHALT		    6
#define KVM_REQ_MMU_SYNC	    7
#define KVM_REQ_CLOCK_UPDATE	    8
#define KVM_REQ_KICK		    9
#define KVM_REQ_DEACTIVATE_FPU	   10
#define KVM_REQ_EVENT		   11
#define KVM_REQ_APF_HALT	   12
#define KVM_REQ_STEAL_UPDATE	   13
#define KVM_REQ_NMI		   14
#define KVM_REQ_PMU		   15
#define KVM_REQ_PMI		   16
#define KVM_REQ_WATCHDOG	   17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS  19
#define KVM_REQ_EPR_EXIT	   20
#define KVM_REQ_SCAN_IOAPIC	   21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
#define KVM_REQ_ENABLE_IBS	   23
#define KVM_REQ_DISABLE_IBS	   24
#define KVM_REQ_APIC_PAGE_RELOAD   25
#define KVM_REQ_SMI		   26

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
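
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): dispatching a 4-byte guest port-out onto the PIO bus.
 * kvm_io_bus_write() returns 0 on success and a negative errno when no
 * registered device claims the range.
 */
static inline int example_emulate_pio_out(struct kvm_vcpu *vcpu,
					  gpa_t port, u32 val)
{
	return kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, sizeof(val), &val);
}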

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	unsigned char fpu_counter;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be chosen so as not to exceed those limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
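
/*
 * Illustrative only (hypothetical helper): with 4 KiB pages, a 1 GiB
 * slot has npages = 262144, so kvm_dirty_bitmap_bytes() yields
 * ALIGN(262144, BITS_PER_LONG) / 8 = 32 KiB.
 */
static inline unsigned long example_dirty_bytes_for_1g(void)
{
	struct kvm_memory_slot slot = { .npages = (1UL << 30) >> PAGE_SHIFT };

	return kvm_dirty_bitmap_bytes(&slot);
}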

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
		struct kvm_s390_adapter_int adapter;
	};
	struct hlist_node link;
};

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)
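
/*
 * Illustrative usage of the logging helpers above (hypothetical call
 * site).  kvm_pr_unimpl() is rate-limited, so it is safe on
 * guest-triggerable paths:
 */
static inline void example_warn_unimpl(void)
{
	kvm_pr_unimpl("example: unhandled operation\n");
}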

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
	     memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	return rcu_dereference_check(kvm->memslots[as_id],
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}
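
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): counting online vcpus with kvm_for_each_vcpu().
 */
static inline int example_count_online_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i, n = 0;

	kvm_for_each_vcpu(i, vcpu, kvm)
		n++;
	return n;
}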

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
			   bool *async, bool write_fault, bool *writable);

void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
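
/*
 * Illustrative sketch (hypothetical helper): copying a u32 out of guest
 * physical memory with kvm_read_guest(), which returns 0 on success and
 * a negative errno (-EFAULT) when the range is not fully backed by
 * memslots.
 */
static inline int example_read_guest_u32(struct kvm *kvm, gpa_t gpa, u32 *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}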

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
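
/*
 * Illustrative sketch (hypothetical helper): patching a single guest
 * byte through the vcpu-flavoured accessors above, which operate on the
 * vcpu's current address space rather than address space 0.
 */
static inline int example_vcpu_poke_byte(struct kvm_vcpu *vcpu, gpa_t gpa, u8 val)
{
	return kvm_vcpu_write_guest(vcpu, gpa, &val, 1);
}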

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
			      struct kvm_dirty_log *log, bool *is_dirty);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs. On some architectures the virtual interrupt
 * controller is dynamically instantiated, so this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
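
/*
 * Illustrative sketch (hypothetical names): wiring up an irq ack
 * notifier so a callback runs when the guest acks the given gsi.
 */
static inline void example_register_ack_notifier(struct kvm *kvm,
				struct kvm_irq_ack_notifier *kian,
				unsigned gsi,
				void (*acked)(struct kvm_irq_ack_notifier *))
{
	kian->gsi = gsi;
	kian->irq_acked = acked;
	kvm_register_irq_ack_notifier(kvm, kian);
}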

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}
#endif

/* must be called with irqs disabled */
static inline void __kvm_guest_enter(void)
{
	guest_enter();
	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view. In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice). Let's treat guest mode as quiescent state, just
	 * like we do with user-mode execution.
	 */
	if (!context_tracking_cpu_is_enabled())
		rcu_virt_note_context_switch(smp_processor_id());
}

/* must be called with irqs disabled */
static inline void __kvm_guest_exit(void)
{
	guest_exit();
}

static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_enter();
	local_irq_restore(flags);
}

static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_exit();
	local_irq_restore(flags);
}

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}
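
/*
 * Illustrative only (hypothetical helper): gfn_to_gpa()/gpa_to_gfn()
 * are pure shifts, e.g. gfn_to_gpa(0x1234) == 0x1234000 with 4 KiB
 * pages, so a round trip simply strips the in-page offset.
 */
static inline gpa_t example_gpa_page_base(gpa_t gpa)
{
	return gfn_to_gpa(gpa_to_gfn(gpa));	/* the page-aligned base of gpa */
}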

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
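
/*
 * Illustrative sketch of the classic way page-fault handlers use
 * mmu_notifier_retry() (hypothetical pseudo-call-site, assuming
 * KVM_ARCH_WANT_MMU_NOTIFIER):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);		// may sleep
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto retry;			// pfn may already be stale
 *	// ... install the mapping ...
 *	spin_unlock(&kvm->mmu_lock);
 */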

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}

extern bool kvm_rebooting;

struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif