/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H


#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
 * in kvm, other bits are visible for userspace which are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of install_new_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update.  Updating the generation number
 * first would allow a vCPU to cache a spte from the old memslots using the
 * new generation number, and updating the generation number after switching
 * to the new memslots would allow cache hits using the old generation number
 * to reference the defunct memslots.
 *
 * This mechanism is used to prevent getting hits in KVM's caches while a
 * memslot update is in-progress, and to prevent cache hits *after* updating
 * the actual generation number against accesses that were inserted into the
 * cache *before* the memslots were updated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For the normal pfn, the highest 12 bits should be zero,
 * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
 * mask bit 63 to indicate the noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * error pfns indicate that the gfn is in slot but failed to
 * translate it to pfn on host.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be
 * translated to pfn - it is not in slot or failed to
 * translate it to pfn.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfn indicates that the gfn is not in slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

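/*
 * Illustrative sketch (editor's example, not part of the original header):
 * how a caller is typically expected to consume the error-pfn encoding above.
 * 'kvm' and 'gfn' are assumed locals; gfn_to_pfn() and kvm_release_pfn_clean()
 * are declared further down in this header.
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_error_noslot_pfn(pfn)) {
 *		if (is_noslot_pfn(pfn))
 *			;	// gfn is not covered by any memslot
 *		else
 *			;	// gfn is in a slot but could not be translated
 *		return -EFAULT;
 *	}
 *	// ... use the pfn ...
 *	kvm_release_pfn_clean(pfn);
 */
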
/*
 * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

#define KVM_REQUEST_MASK	GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP	BIT(8)
#define KVM_REQUEST_WAIT	BIT(9)
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH	(0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD	(1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER	2
#define KVM_REQ_UNHALT		3
#define KVM_REQUEST_ARCH_BASE	8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gpa_t cr2_or_gpa;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
	bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
struct kvm_gfn_range {
	struct kvm_memory_slot *slot;
	gfn_t start;
	gfn_t end;
	pte_t pte;
	bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE	((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
	/*
	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
	 * a 'struct page' for it. When using the mem= kernel parameter some
	 * memory can be used as guest memory but it is not managed by the
	 * host kernel).
	 * If 'pfn' is not managed by the host kernel, this field is
	 * initialized to KVM_UNMAPPED_PAGE.
	 */
	struct page *page;
	void *hva;
	kvm_pfn_t pfn;
	kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
	return !!map->hva;
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id; /* id given by userspace at creation */
	int vcpu_idx; /* index in kvm->vcpus array */
	int srcu_idx;
	int mode;
	u64 requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	struct rcuwait wait;
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	bool ready;
	struct kvm_vcpu_arch arch;
	struct kvm_dirty_ring dirty_ring;
};

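/*
 * Illustrative sketch (editor's example, not part of the original header):
 * typical use of kvm_host_map with kvm_vcpu_map()/kvm_vcpu_unmap(), both of
 * which are declared further down in this header.  'vcpu' and 'gpa' are
 * assumed locals; callers conventionally pass the gfn of the page to map.
 *
 *	struct kvm_host_map map;
 *
 *	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
 *		return -EFAULT;
 *	// map.hva now points at the mapped guest page; modify it as needed.
 *	kvm_vcpu_unmap(vcpu, &map, true);	// 'true' marks the page dirty
 */
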
/* must be called with irqs disabled */
static __always_inline void guest_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context so it's safe to assume that it's the
	 * stime pending cputime to flush.
	 */
	instrumentation_begin();
	vtime_account_guest_enter();
	instrumentation_end();

	/*
	 * KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view. In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice). Let's treat guest mode as quiescent state, just like
	 * we do with user-mode execution.
	 */
	if (!context_tracking_guest_enter()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}

static __always_inline void guest_exit_irqoff(void)
{
	context_tracking_guest_exit();

	instrumentation_begin();
	/* Flush the guest cputime we spent on the guest */
	vtime_account_guest_exit();
	instrumentation_end();
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/*
	 * The memory barrier ensures a previous write to vcpu->requests cannot
	 * be reordered with the read of vcpu->mode.  It pairs with the general
	 * memory barrier following the write of vcpu->mode in VCPU RUN.
	 */
	smp_mb__before_atomic();
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
	u16 as_id;
};

static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long len = kvm_dirty_bitmap_bytes(memslot);

	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
	struct kvm_memory_slot memslots[];
};

struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
	rwlock_t mmu_lock;
#else
	spinlock_t mmu_lock;
#endif /* KVM_HAVE_MMU_RWLOCK */

	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
	unsigned long mmu_notifier_range_start;
	unsigned long mmu_notifier_range_end;
#endif
	long tlbs_dirty;
	struct list_head devices;
	u64 manual_dirty_log_protect;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
	unsigned int max_halt_poll_ns;
	u32 dirty_ring_size;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,			\
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...)				\
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,		\
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)					\
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
	return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	int num_vcpus = atomic_read(&kvm->online_vcpus);
	i = array_index_nospec(i, num_vcpus);

	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
	return vcpu->vcpu_idx;
}

#define kvm_for_each_memslot(memslot, slots)				\
	for (memslot = &slots->memslots[0];				\
	     memslot < slots->memslots + slots->used_slots; memslot++)	\
		if (WARN_ON_ONCE(!memslot->npages)) {			\
		} else

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

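/*
 * Illustrative sketch (editor's example, not part of the original header):
 * walking every online vCPU with kvm_for_each_vcpu() above, here to kick
 * them out of guest mode.  kvm_vcpu_kick() is declared later in this header;
 * 'kvm' is an assumed local.
 *
 *	struct kvm_vcpu *vcpu;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */
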
static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	if (index < 0)
		return NULL;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable, hva_t *hva);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);

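/*
 * Illustrative sketch (editor's example, not part of the original header):
 * copying a structure out of guest memory with kvm_read_guest() above.  The
 * srcu_read_lock() on kvm->srcu keeps the memslots stable for the duration
 * of the access; 'kvm', 'gpa' and the guest-defined layout are assumed.
 *
 *	struct some_guest_desc desc;	// hypothetical guest-defined layout
 *	int idx, r;
 *
 *	idx = srcu_read_lock(&kvm->srcu);
 *	r = kvm_read_guest(kvm, gpa, &desc, sizeof(desc));
 *	srcu_read_unlock(&kvm->srcu, idx);
 *	if (r)
 *		return -EFAULT;
 */
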
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				 void *data, unsigned int offset,
				 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);

#define __kvm_get_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = get_user(v, __uaddr);				\
	__ret;								\
})

#define kvm_get_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})

#define __kvm_put_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = put_user(v, __uaddr);				\
	if (!__ret)							\
		mark_page_dirty(kvm, gfn);				\
	__ret;								\
})

#define kvm_put_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})

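/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the gfn_to_hva_cache pattern for a guest location that is written
 * repeatedly, using kvm_gfn_to_hva_cache_init() and the *_cached accessors
 * declared above.  'kvm', 'ghc', 'gpa' and 'val' are assumed locals/fields.
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val = 0;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		return -EFAULT;
 *	// fast path; the cache is revalidated against the memslot generation
 *	// internally when the memslots change:
 *	if (kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val)))
 *		return -EFAULT;
 */
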
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
		struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif

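/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the usual kvm_mmu_memory_cache discipline on architectures that define
 * KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE: top the cache up while sleeping is
 * still allowed, then allocate from it under the MMU lock, where
 * kvm_mmu_memory_cache_alloc() does not return NULL after a successful
 * topup.  'mc' and 'kvm' are assumed locals, the minimum of 4 objects is an
 * arbitrary example value, and mmu_lock may be an rwlock on some
 * architectures.
 *
 *	void *pte_storage;
 *
 *	if (kvm_mmu_topup_memory_cache(mc, 4))
 *		return -ENOMEM;
 *
 *	spin_lock(&kvm->mmu_lock);
 *	pte_storage = kvm_mmu_memory_cache_alloc(mc);
 *	spin_unlock(&kvm->mmu_lock);
 */
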
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except);
bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
				unsigned long *vcpu_bitmap);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					struct kvm_memory_slot *slot,
					gfn_t gfn_offset,
					unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot);
#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */

static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.waitp;
#else
	return &vcpu->wait;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQ. On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
 */

bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
 */

static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (unlikely(!slots->used_slots))
		return NULL;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
						gpa_t gpa)
{
	return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	struct kvm *kvm;
	struct kvm_stats_debugfs_item *dbgfs_item;
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	int mode;
};

#define KVM_DBGFS_GET_MODE(dbgfs_item)					\
	((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)

#define VM_STAT(n, x, ...)						\
	{ n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
#define VCPU_STAT(n, x, ...)						\
	{ n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }

extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}

static inline int mmu_notifier_retry_hva(struct kvm *kvm,
					 unsigned long mmu_seq,
					 unsigned long hva)
{
	lockdep_assert_held(&kvm->mmu_lock);
	/*
	 * If mmu_notifier_count is non-zero, then the range maintained by
	 * kvm_mmu_notifier_invalidate_range_start contains all addresses that
	 * might be being invalidated. Note that it may include some false
	 * positives, due to shortcuts when handling concurrent invalidations.
	 */
	if (unlikely(kvm->mmu_notifier_count) &&
	    hva >= kvm->mmu_notifier_range_start &&
	    hva < kvm->mmu_notifier_range_end)
		return 1;
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif

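/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the snapshot/recheck pattern that mmu_notifier_retry() above is meant for,
 * as used by arch page-fault handlers.  'kvm', 'gfn' and 'pfn' are assumed
 * locals, the locked section's real work is elided, and mmu_lock may be an
 * rwlock on some architectures.
 *
 *	unsigned long mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();	// order the seq read before the pfn lookup below
 *
 *	pfn = gfn_to_pfn(kvm, gfn);	// may sleep, may race with invalidation
 *
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;	// an invalidation ran; retry the fault
 *	// ... safe to install the translation for pfn here ...
 * out_unlock:
 *	spin_unlock(&kvm->mmu_lock);
 */
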
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(req, vcpu)) {
		kvm_clear_request(req, vcpu);

		/*
		 * Ensure the rest of the request is visible to kvm_check_request's
		 * caller.  Paired with the smp_wmb in kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}

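/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the producer/consumer pattern behind the request helpers above.  A remote
 * context posts a request and kicks the vCPU; the vCPU's run loop consumes
 * it before re-entering the guest.  KVM_REQ_TLB_FLUSH is one of the
 * arch-independent request bits defined earlier in this header; 'vcpu' is an
 * assumed local.
 *
 *	// producer (any context):
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// consumer (vcpu run loop, before guest entry):
 *	if (kvm_request_pending(vcpu)) {
 *		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *			;	// perform the arch-specific TLB flush here
 *	}
 */
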
extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
	const struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;

	/*
	 * create is called holding kvm->lock and any operations not suitable
	 * to do while holding the lock should be deferred to init (see
	 * below).
	 */
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * init is called after create if create is successful and is called
	 * outside of holding kvm->lock.
	 */
	void (*init)(struct kvm_device *dev);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	/*
	 * Release is an alternative method to free the device. It is
	 * called when the device file descriptor is closed. Once
	 * release is called, the destroy method will not be called
	 * anymore as the device is removed from the device list of
	 * the VM. kvm->lock is held.
	 */
	void (*release)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
	int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
	return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
		!(memslot->flags & KVM_MEMSLOT_INVALID));
}

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
				     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
				      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wakeup during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells if we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
					     unsigned int ioctl,
					     unsigned long arg)
{
	return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */

void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
					    unsigned long start, unsigned long end);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr);

#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_INTR;
	vcpu->stat.signal_exits++;
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */

/*
 * This defines how many reserved entries we want to keep before we
 * kick the vcpu to the userspace to avoid dirty ring full.  This
 * value can be tuned to higher if e.g. PML is enabled on the host.
 */
#define KVM_DIRTY_RING_RSVD_ENTRIES 64

/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES  65536

#endif