1/* SPDX-License-Identifier: GPL-2.0-only */
2#ifndef __KVM_HOST_H
3#define __KVM_HOST_H
4
5
6#include <linux/types.h>
7#include <linux/hardirq.h>
8#include <linux/list.h>
9#include <linux/mutex.h>
10#include <linux/spinlock.h>
11#include <linux/signal.h>
12#include <linux/sched.h>
13#include <linux/sched/stat.h>
14#include <linux/bug.h>
15#include <linux/minmax.h>
16#include <linux/mm.h>
17#include <linux/mmu_notifier.h>
18#include <linux/preempt.h>
19#include <linux/msi.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/rcupdate.h>
23#include <linux/ratelimit.h>
24#include <linux/err.h>
25#include <linux/irqflags.h>
26#include <linux/context_tracking.h>
27#include <linux/irqbypass.h>
28#include <linux/rcuwait.h>
29#include <linux/refcount.h>
30#include <linux/nospec.h>
31#include <linux/notifier.h>
32#include <linux/ftrace.h>
33#include <linux/hashtable.h>
34#include <linux/instrumentation.h>
35#include <linux/interval_tree.h>
36#include <linux/rbtree.h>
37#include <linux/xarray.h>
38#include <asm/signal.h>
39
40#include <linux/kvm.h>
41#include <linux/kvm_para.h>
42
43#include <linux/kvm_types.h>
44
45#include <asm/kvm_host.h>
46#include <linux/kvm_dirty_ring.h>
47
48#ifndef KVM_MAX_VCPU_IDS
49#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
50#endif
51
52/*
53 * Bits 16 ~ 31 of kvm_userspace_memory_region::flags are used internally
54 * by KVM; the other bits are visible to userspace and are defined in
55 * include/uapi/linux/kvm.h.
56 */
57#define KVM_MEMSLOT_INVALID (1UL << 16)
58
59/*
60 * Bit 63 of the memslot generation number is an "update in-progress flag",
61 * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
62 * This flag effectively creates a unique generation number that is used to
63 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
64 * i.e. may (or may not) have come from the previous memslots generation.
65 *
66 * This is necessary because the actual memslots update is not atomic with
67 * respect to the generation number update. Updating the generation number
68 * first would allow a vCPU to cache a spte from the old memslots using the
69 * new generation number, and updating the generation number after switching
70 * to the new memslots would allow cache hits using the old generation number
71 * to reference the defunct memslots.
72 *
73 * This mechanism is used to prevent getting hits in KVM's caches while a
74 * memslot update is in-progress, and to prevent cache hits *after* updating
75 * the actual generation number against accesses that were inserted into the
76 * cache *before* the memslots were updated.
77 */
78#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63)
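
/*
 * Illustrative sketch (not kernel code): a consumer that caches data keyed by
 * the memslot generation would treat any generation with the in-progress flag
 * set as unusable, e.g.:
 *
 *	static bool cached_gen_valid(u64 cached_gen, u64 cur_gen)
 *	{
 *		if (cur_gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)
 *			return false;
 *		return cached_gen == cur_gen;
 *	}
 *
 * The real consumers live in arch code, e.g. x86's cached MMIO SPTEs.
 */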
79
80/* Two fragments for cross MMIO pages. */
81#define KVM_MAX_MMIO_FRAGMENTS 2
82
83#ifndef KVM_MAX_NR_ADDRESS_SPACES
84#define KVM_MAX_NR_ADDRESS_SPACES 1
85#endif
86
87/*
88 * For a normal pfn, the highest 12 bits should be zero, so bits 52 ~ 62
89 * can be used to indicate an error pfn and bit 63 to indicate the noslot
90 * pfn.
91 */
92#define KVM_PFN_ERR_MASK (0x7ffULL << 52)
93#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
94#define KVM_PFN_NOSLOT (0x1ULL << 63)
95
96#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
97#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
98#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
99#define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3)
100#define KVM_PFN_ERR_NEEDS_IO (KVM_PFN_ERR_MASK + 4)
101
102/*
103 * Error pfns indicate that the gfn is in a slot but could not be
104 * translated to a host pfn.
105 */
106static inline bool is_error_pfn(kvm_pfn_t pfn)
107{
108 return !!(pfn & KVM_PFN_ERR_MASK);
109}
110
111/*
112 * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted
113 * by a pending signal. Note, the signal may or may not be fatal.
114 */
115static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
116{
117 return pfn == KVM_PFN_ERR_SIGPENDING;
118}
119
120/*
121 * error_noslot pfns indicate that the gfn cannot be translated to a
122 * pfn: either it is not in any slot or the translation to a host pfn
123 * failed.
124 */
125static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
126{
127 return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
128}
129
130/* A noslot pfn indicates that the gfn is not in any slot. */
131static inline bool is_noslot_pfn(kvm_pfn_t pfn)
132{
133 return pfn == KVM_PFN_NOSLOT;
134}
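
/*
 * Illustrative sketch of how callers typically distinguish the cases above
 * (handle_mmio() is a hypothetical helper, not a KVM API):
 *
 *	if (is_noslot_pfn(pfn))
 *		return handle_mmio(vcpu, gfn);
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;
 *
 * Anything else is a translatable host pfn.
 */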
135
136/*
137 * Architectures whose KVM_HVA_ERR_BAD is something other than PAGE_OFFSET
138 * (e.g. s390) provide their own defines and kvm_is_error_hva().
139 */
140#ifndef KVM_HVA_ERR_BAD
141
142#define KVM_HVA_ERR_BAD (PAGE_OFFSET)
143#define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)
144
145static inline bool kvm_is_error_hva(unsigned long addr)
146{
147 return addr >= PAGE_OFFSET;
148}
149
150#endif
151
152static inline bool kvm_is_error_gpa(gpa_t gpa)
153{
154 return gpa == INVALID_GPA;
155}
156
157#define KVM_REQUEST_MASK GENMASK(7,0)
158#define KVM_REQUEST_NO_WAKEUP BIT(8)
159#define KVM_REQUEST_WAIT BIT(9)
160#define KVM_REQUEST_NO_ACTION BIT(10)
161/*
162 * Architecture-independent vcpu->requests bit members
163 * Bits 4-7 are reserved for more arch-independent bits.
164 */
165#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
166#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
167#define KVM_REQ_UNBLOCK 2
168#define KVM_REQ_DIRTY_RING_SOFT_FULL 3
169#define KVM_REQUEST_ARCH_BASE 8
170
171/*
172 * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
173 * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
174 * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
175 * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
176 * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
177 * guarantee the vCPU received an IPI and has actually exited guest mode.
178 */
179#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
180
181#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
182 BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
183 (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
184})
185#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
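
/*
 * Example (hypothetical request names): an architecture builds its private
 * request numbers on top of KVM_REQUEST_ARCH_BASE, e.g. in asm/kvm_host.h:
 *
 *	#define KVM_REQ_FOO	KVM_ARCH_REQ(0)
 *	#define KVM_REQ_BAR	KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT)
 *
 * and raises them with kvm_make_request(KVM_REQ_FOO, vcpu).
 */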
186
187bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
188 unsigned long *vcpu_bitmap);
189bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
190
191#define KVM_USERSPACE_IRQ_SOURCE_ID 0
192#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
193
194extern struct mutex kvm_lock;
195extern struct list_head vm_list;
196
197struct kvm_io_range {
198 gpa_t addr;
199 int len;
200 struct kvm_io_device *dev;
201};
202
203#define NR_IOBUS_DEVS 1000
204
205struct kvm_io_bus {
206 int dev_count;
207 int ioeventfd_count;
208 struct kvm_io_range range[];
209};
210
211enum kvm_bus {
212 KVM_MMIO_BUS,
213 KVM_PIO_BUS,
214 KVM_VIRTIO_CCW_NOTIFY_BUS,
215 KVM_FAST_MMIO_BUS,
216 KVM_IOCSR_BUS,
217 KVM_NR_BUSES
218};
219
220int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
221 int len, const void *val);
222int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
223 gpa_t addr, int len, const void *val, long cookie);
224int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
225 int len, void *val);
226int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
227 int len, struct kvm_io_device *dev);
228int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
229 struct kvm_io_device *dev);
230struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
231 gpa_t addr);
232
233#ifdef CONFIG_KVM_ASYNC_PF
234struct kvm_async_pf {
235 struct work_struct work;
236 struct list_head link;
237 struct list_head queue;
238 struct kvm_vcpu *vcpu;
239 gpa_t cr2_or_gpa;
240 unsigned long addr;
241 struct kvm_arch_async_pf arch;
242 bool wakeup_all;
243 bool notpresent_injected;
244};
245
246void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
247void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
248bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
249 unsigned long hva, struct kvm_arch_async_pf *arch);
250int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
251#endif
252
253#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
254union kvm_mmu_notifier_arg {
255 unsigned long attributes;
256};
257
258enum kvm_gfn_range_filter {
259 KVM_FILTER_SHARED = BIT(0),
260 KVM_FILTER_PRIVATE = BIT(1),
261};
262
263struct kvm_gfn_range {
264 struct kvm_memory_slot *slot;
265 gfn_t start;
266 gfn_t end;
267 union kvm_mmu_notifier_arg arg;
268 enum kvm_gfn_range_filter attr_filter;
269 bool may_block;
270};
271bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
272bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
273bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
274#endif
275
276enum {
277 OUTSIDE_GUEST_MODE,
278 IN_GUEST_MODE,
279 EXITING_GUEST_MODE,
280 READING_SHADOW_PAGE_TABLES,
281};
282
283struct kvm_host_map {
284 /*
285 * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
286 * a 'struct page' for it). When the mem= kernel parameter is used, some
287 * memory can be used as guest memory without being managed by the host
288 * kernel.
289 */
290 struct page *pinned_page;
291 struct page *page;
292 void *hva;
293 kvm_pfn_t pfn;
294 kvm_pfn_t gfn;
295 bool writable;
296};
297
298/*
299 * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
300 * directly to check for that.
301 */
302static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
303{
304 return !!map->hva;
305}
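
/*
 * Typical usage sketch (error handling elided): map a guest page via
 * kvm_vcpu_map(), access it through the host mapping, then unmap:
 *
 *	struct kvm_host_map map;
 *
 *	if (!kvm_vcpu_map(vcpu, gpa, &map)) {
 *		memcpy(map.hva + offset_in_page(gpa), data, len);
 *		kvm_vcpu_unmap(vcpu, &map);
 *	}
 *
 * kvm_vcpu_map() returns 0 on success; kvm_vcpu_mapped() only reports whether
 * the map currently holds a valid hva.
 */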
306
307static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
308{
309 return single_task_running() && !need_resched() && ktime_before(cur, stop);
310}
311
312/*
313 * Sometimes a large or cross-page mmio needs to be broken up into separate
314 * exits for userspace servicing.
315 */
316struct kvm_mmio_fragment {
317 gpa_t gpa;
318 void *data;
319 unsigned len;
320};
321
322struct kvm_vcpu {
323 struct kvm *kvm;
324#ifdef CONFIG_PREEMPT_NOTIFIERS
325 struct preempt_notifier preempt_notifier;
326#endif
327 int cpu;
328 int vcpu_id; /* id given by userspace at creation */
329 int vcpu_idx; /* index into kvm->vcpu_array */
330 int ____srcu_idx; /* Don't use this directly. You've been warned. */
331#ifdef CONFIG_PROVE_RCU
332 int srcu_depth;
333#endif
334 int mode;
335 u64 requests;
336 unsigned long guest_debug;
337
338 struct mutex mutex;
339 struct kvm_run *run;
340
341#ifndef __KVM_HAVE_ARCH_WQP
342 struct rcuwait wait;
343#endif
344 struct pid *pid;
345 rwlock_t pid_lock;
346 int sigset_active;
347 sigset_t sigset;
348 unsigned int halt_poll_ns;
349 bool valid_wakeup;
350
351#ifdef CONFIG_HAS_IOMEM
352 int mmio_needed;
353 int mmio_read_completed;
354 int mmio_is_write;
355 int mmio_cur_fragment;
356 int mmio_nr_fragments;
357 struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
358#endif
359
360#ifdef CONFIG_KVM_ASYNC_PF
361 struct {
362 u32 queued;
363 struct list_head queue;
364 struct list_head done;
365 spinlock_t lock;
366 } async_pf;
367#endif
368
369#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
370 /*
371 * CPU-relax intercept or pause-loop-exit optimization.
372 * in_spin_loop: set when a vcpu takes a pause loop exit or has its
373 *  cpu relax instruction intercepted.
374 * dy_eligible: indicates whether vcpu is eligible for directed yield.
375 */
376 struct {
377 bool in_spin_loop;
378 bool dy_eligible;
379 } spin_loop;
380#endif
381 bool wants_to_run;
382 bool preempted;
383 bool ready;
384 bool scheduled_out;
385 struct kvm_vcpu_arch arch;
386 struct kvm_vcpu_stat stat;
387 char stats_id[KVM_STATS_NAME_SIZE];
388 struct kvm_dirty_ring dirty_ring;
389
390 /*
391 * The most recently used memslot by this vCPU and the slots generation
392 * for which it is valid.
393 * No wraparound protection is needed since generations won't overflow in
394 * thousands of years, even assuming 1M memslot operations per second.
395 */
396 struct kvm_memory_slot *last_used_slot;
397 u64 last_used_slot_gen;
398};
399
400/*
401 * Start accounting time towards a guest.
402 * Must be called before entering guest context.
403 */
404static __always_inline void guest_timing_enter_irqoff(void)
405{
406 /*
407 * This is running in ioctl context, so it's safe to assume that the
408 * pending cputime to flush is stime.
409 */
410 instrumentation_begin();
411 vtime_account_guest_enter();
412 instrumentation_end();
413}
414
415/*
416 * Enter guest context and enter an RCU extended quiescent state.
417 *
418 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
419 * unsafe to use any code which may directly or indirectly use RCU, tracing
420 * (including IRQ flag tracing), or lockdep. All code in this period must be
421 * non-instrumentable.
422 */
423static __always_inline void guest_context_enter_irqoff(void)
424{
425 /*
426 * KVM does not hold any references to RCU-protected data when it
427 * switches the CPU into guest mode. In fact, switching to guest mode
428 * is very similar to exiting to userspace from RCU's point of view. In
429 * addition, the CPU may stay in guest mode for quite a long time (up to
430 * one time slice). Let's treat guest mode as a quiescent state, just
431 * like we do with user-mode execution.
432 */
433 if (!context_tracking_guest_enter()) {
434 instrumentation_begin();
435 rcu_virt_note_context_switch();
436 instrumentation_end();
437 }
438}
439
440/*
441 * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
442 * guest_state_enter_irqoff().
443 */
444static __always_inline void guest_enter_irqoff(void)
445{
446 guest_timing_enter_irqoff();
447 guest_context_enter_irqoff();
448}
449
450/**
451 * guest_state_enter_irqoff - Fixup state when entering a guest
452 *
453 * Entry to a guest will enable interrupts, but the kernel state is interrupts
454 * disabled when this is invoked. Also tell RCU about it.
455 *
456 * 1) Trace interrupts on state
457 * 2) Invoke context tracking if enabled to adjust RCU state
458 * 3) Tell lockdep that interrupts are enabled
459 *
460 * Invoked from architecture specific code before entering a guest.
461 * Must be called with interrupts disabled and the caller must be
462 * non-instrumentable.
463 * The caller has to invoke guest_timing_enter_irqoff() before this.
464 *
465 * Note: this is analogous to exit_to_user_mode().
466 */
467static __always_inline void guest_state_enter_irqoff(void)
468{
469 instrumentation_begin();
470 trace_hardirqs_on_prepare();
471 lockdep_hardirqs_on_prepare();
472 instrumentation_end();
473
474 guest_context_enter_irqoff();
475 lockdep_hardirqs_on(CALLER_ADDR0);
476}
477
478/*
479 * Exit guest context and exit an RCU extended quiescent state.
480 *
481 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
482 * unsafe to use any code which may directly or indirectly use RCU, tracing
483 * (including IRQ flag tracing), or lockdep. All code in this period must be
484 * non-instrumentable.
485 */
486static __always_inline void guest_context_exit_irqoff(void)
487{
488 /*
489 * Guest mode is treated as a quiescent state, see
490 * guest_context_enter_irqoff() for more details.
491 */
492 if (!context_tracking_guest_exit()) {
493 instrumentation_begin();
494 rcu_virt_note_context_switch();
495 instrumentation_end();
496 }
497}
498
499/*
500 * Stop accounting time towards a guest.
501 * Must be called after exiting guest context.
502 */
503static __always_inline void guest_timing_exit_irqoff(void)
504{
505 instrumentation_begin();
506 /* Flush the guest cputime we spent on the guest */
507 vtime_account_guest_exit();
508 instrumentation_end();
509}
510
511/*
512 * Deprecated. Architectures should move to guest_state_exit_irqoff() and
513 * guest_timing_exit_irqoff().
514 */
515static __always_inline void guest_exit_irqoff(void)
516{
517 guest_context_exit_irqoff();
518 guest_timing_exit_irqoff();
519}
520
521static inline void guest_exit(void)
522{
523 unsigned long flags;
524
525 local_irq_save(flags);
526 guest_exit_irqoff();
527 local_irq_restore(flags);
528}
529
530/**
531 * guest_state_exit_irqoff - Establish state when returning from guest mode
532 *
533 * Entry from a guest disables interrupts, but guest mode is traced as
534 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
535 *
536 * 1) Tell lockdep that interrupts are disabled
537 * 2) Invoke context tracking if enabled to reactivate RCU
538 * 3) Trace interrupts off state
539 *
540 * Invoked from architecture specific code after exiting a guest.
541 * Must be invoked with interrupts disabled and the caller must be
542 * non-instrumentable.
543 * The caller has to invoke guest_timing_exit_irqoff() after this.
544 *
545 * Note: this is analogous to enter_from_user_mode().
546 */
547static __always_inline void guest_state_exit_irqoff(void)
548{
549 lockdep_hardirqs_off(CALLER_ADDR0);
550 guest_context_exit_irqoff();
551
552 instrumentation_begin();
553 trace_hardirqs_off_finish();
554 instrumentation_end();
555}
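
/*
 * Sketch of the expected ordering in an architecture's vcpu-run loop; the
 * arch entry routine name is illustrative:
 *
 *	local_irq_disable();
 *	guest_timing_enter_irqoff();
 *	...
 *	guest_state_enter_irqoff();
 *	arch_enter_guest(vcpu);
 *	guest_state_exit_irqoff();
 *	...
 *	guest_timing_exit_irqoff();
 *	local_irq_enable();
 */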
556
557static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
558{
559 /*
560 * The memory barrier ensures a previous write to vcpu->requests cannot
561 * be reordered with the read of vcpu->mode. It pairs with the general
562 * memory barrier following the write of vcpu->mode in VCPU RUN.
563 */
564 smp_mb__before_atomic();
565 return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
566}
567
568/*
569 * Some of the bitops functions do not support overly long bitmaps;
570 * this limit must be chosen so that those limits are not exceeded.
571 */
572#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
573
574/*
575 * Since at idle each memslot belongs to two memslot sets it has to contain
576 * two embedded nodes for each data structure that it forms a part of.
577 *
578 * Two memslot sets (one active and one inactive) are necessary so the VM
579 * continues to run on one memslot set while the other is being modified.
580 *
581 * These two memslot sets normally point to the same set of memslots.
582 * They can, however, be desynchronized when performing a memslot management
583 * operation by replacing the memslot to be modified by its copy.
584 * After the operation is complete, both memslot sets once again point to
585 * the same, common set of memslot data.
586 *
587 * The memslots themselves are independent of each other so they can be
588 * individually added or deleted.
589 */
590struct kvm_memory_slot {
591 struct hlist_node id_node[2];
592 struct interval_tree_node hva_node[2];
593 struct rb_node gfn_node[2];
594 gfn_t base_gfn;
595 unsigned long npages;
596 unsigned long *dirty_bitmap;
597 struct kvm_arch_memory_slot arch;
598 unsigned long userspace_addr;
599 u32 flags;
600 short id;
601 u16 as_id;
602
603#ifdef CONFIG_KVM_PRIVATE_MEM
604 struct {
605 /*
606 * Writes protected by kvm->slots_lock. Acquiring a
607 * reference via kvm_gmem_get_file() is protected by
608 * either kvm->slots_lock or kvm->srcu.
609 */
610 struct file *file;
611 pgoff_t pgoff;
612 } gmem;
613#endif
614};
615
616static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot)
617{
618 return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
619}
620
621static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
622{
623 return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
624}
625
626static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
627{
628 return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
629}
630
631static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
632{
633 unsigned long len = kvm_dirty_bitmap_bytes(memslot);
634
635 return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
636}
637
638#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
639#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
640#endif
641
642struct kvm_s390_adapter_int {
643 u64 ind_addr;
644 u64 summary_addr;
645 u64 ind_offset;
646 u32 summary_offset;
647 u32 adapter_id;
648};
649
650struct kvm_hv_sint {
651 u32 vcpu;
652 u32 sint;
653};
654
655struct kvm_xen_evtchn {
656 u32 port;
657 u32 vcpu_id;
658 int vcpu_idx;
659 u32 priority;
660};
661
662struct kvm_kernel_irq_routing_entry {
663 u32 gsi;
664 u32 type;
665 int (*set)(struct kvm_kernel_irq_routing_entry *e,
666 struct kvm *kvm, int irq_source_id, int level,
667 bool line_status);
668 union {
669 struct {
670 unsigned irqchip;
671 unsigned pin;
672 } irqchip;
673 struct {
674 u32 address_lo;
675 u32 address_hi;
676 u32 data;
677 u32 flags;
678 u32 devid;
679 } msi;
680 struct kvm_s390_adapter_int adapter;
681 struct kvm_hv_sint hv_sint;
682 struct kvm_xen_evtchn xen_evtchn;
683 };
684 struct hlist_node link;
685};
686
687#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
688struct kvm_irq_routing_table {
689 int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
690 u32 nr_rt_entries;
691 /*
692 * Array indexed by gsi. Each entry contains a list of irq chips
693 * the gsi is connected to.
694 */
695 struct hlist_head map[] __counted_by(nr_rt_entries);
696};
697#endif
698
699bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
700
701#ifndef KVM_INTERNAL_MEM_SLOTS
702#define KVM_INTERNAL_MEM_SLOTS 0
703#endif
704
705#define KVM_MEM_SLOTS_NUM SHRT_MAX
706#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
707
708#if KVM_MAX_NR_ADDRESS_SPACES == 1
709static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
710{
711 return KVM_MAX_NR_ADDRESS_SPACES;
712}
713
714static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
715{
716 return 0;
717}
718#endif
719
720/*
721 * Arch code must define kvm_arch_has_private_mem if support for private memory
722 * is enabled.
723 */
724#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM)
725static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
726{
727 return false;
728}
729#endif
730
731#ifndef kvm_arch_has_readonly_mem
732static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
733{
734 return IS_ENABLED(CONFIG_HAVE_KVM_READONLY_MEM);
735}
736#endif
737
738struct kvm_memslots {
739 u64 generation;
740 atomic_long_t last_used_slot;
741 struct rb_root_cached hva_tree;
742 struct rb_root gfn_tree;
743 /*
744 * The mapping table from slot id to memslot.
745 *
746 * 7-bit bucket count matches the size of the old id to index array for
747 * 512 slots, while giving good performance with this slot count.
748 * Higher bucket counts bring only small performance improvements but
749 * always result in higher memory usage (even for lower memslot counts).
750 */
751 DECLARE_HASHTABLE(id_hash, 7);
752 int node_idx;
753};
754
755struct kvm {
756#ifdef KVM_HAVE_MMU_RWLOCK
757 rwlock_t mmu_lock;
758#else
759 spinlock_t mmu_lock;
760#endif /* KVM_HAVE_MMU_RWLOCK */
761
762 struct mutex slots_lock;
763
764 /*
765 * Protects the arch-specific fields of struct kvm_memory_slots in
766 * use by the VM. To be used under the slots_lock (above) or in a
767 * kvm->srcu critical section where acquiring the slots_lock would
768 * lead to deadlock with the synchronize_srcu in
769 * kvm_swap_active_memslots().
770 */
771 struct mutex slots_arch_lock;
772 struct mm_struct *mm; /* userspace tied to this vm */
773 unsigned long nr_memslot_pages;
774 /* The two memslot sets - active and inactive (per address space) */
775 struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
776 /* The current active memslot set for each address space */
777 struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
778 struct xarray vcpu_array;
779 /*
780 * Protected by slots_lock, but can be read outside if an
781 * incorrect answer is acceptable.
782 */
783 atomic_t nr_memslots_dirty_logging;
784
785 /* Used to wait for completion of MMU notifiers. */
786 spinlock_t mn_invalidate_lock;
787 unsigned long mn_active_invalidate_count;
788 struct rcuwait mn_memslots_update_rcuwait;
789
790 /* For management / invalidation of gfn_to_pfn_caches */
791 spinlock_t gpc_lock;
792 struct list_head gpc_list;
793
794 /*
795 * created_vcpus is protected by kvm->lock, and is incremented
796 * at the beginning of KVM_CREATE_VCPU. online_vcpus is only
797 * incremented after storing the kvm_vcpu pointer in vcpus,
798 * and is accessed atomically.
799 */
800 atomic_t online_vcpus;
801 int max_vcpus;
802 int created_vcpus;
803 int last_boosted_vcpu;
804 struct list_head vm_list;
805 struct mutex lock;
806 struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
807#ifdef CONFIG_HAVE_KVM_IRQCHIP
808 struct {
809 spinlock_t lock;
810 struct list_head items;
811 /* resampler_list update side is protected by resampler_lock. */
812 struct list_head resampler_list;
813 struct mutex resampler_lock;
814 } irqfds;
815#endif
816 struct list_head ioeventfds;
817 struct kvm_vm_stat stat;
818 struct kvm_arch arch;
819 refcount_t users_count;
820#ifdef CONFIG_KVM_MMIO
821 struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
822 spinlock_t ring_lock;
823 struct list_head coalesced_zones;
824#endif
825
826 struct mutex irq_lock;
827#ifdef CONFIG_HAVE_KVM_IRQCHIP
828 /*
829 * Update side is protected by irq_lock.
830 */
831 struct kvm_irq_routing_table __rcu *irq_routing;
832
833 struct hlist_head irq_ack_notifier_list;
834#endif
835
836#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
837 struct mmu_notifier mmu_notifier;
838 unsigned long mmu_invalidate_seq;
839 long mmu_invalidate_in_progress;
840 gfn_t mmu_invalidate_range_start;
841 gfn_t mmu_invalidate_range_end;
842#endif
843 struct list_head devices;
844 u64 manual_dirty_log_protect;
845 struct dentry *debugfs_dentry;
846 struct kvm_stat_data **debugfs_stat_data;
847 struct srcu_struct srcu;
848 struct srcu_struct irq_srcu;
849 pid_t userspace_pid;
850 bool override_halt_poll_ns;
851 unsigned int max_halt_poll_ns;
852 u32 dirty_ring_size;
853 bool dirty_ring_with_bitmap;
854 bool vm_bugged;
855 bool vm_dead;
856
857#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
858 struct notifier_block pm_notifier;
859#endif
860#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
861 /* Protected by slots_lock (for writes) and RCU (for reads) */
862 struct xarray mem_attr_array;
863#endif
864 char stats_id[KVM_STATS_NAME_SIZE];
865};
866
867#define kvm_err(fmt, ...) \
868 pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
869#define kvm_info(fmt, ...) \
870 pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
871#define kvm_debug(fmt, ...) \
872 pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
873#define kvm_debug_ratelimited(fmt, ...) \
874 pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
875 ## __VA_ARGS__)
876#define kvm_pr_unimpl(fmt, ...) \
877 pr_err_ratelimited("kvm [%i]: " fmt, \
878 task_tgid_nr(current), ## __VA_ARGS__)
879
880/* The guest did something we don't support. */
881#define vcpu_unimpl(vcpu, fmt, ...) \
882 kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
883 (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)
884
885#define vcpu_debug(vcpu, fmt, ...) \
886 kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
887#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
888 kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
889 ## __VA_ARGS__)
890#define vcpu_err(vcpu, fmt, ...) \
891 kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
892
893static inline void kvm_vm_dead(struct kvm *kvm)
894{
895 kvm->vm_dead = true;
896 kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
897}
898
899static inline void kvm_vm_bugged(struct kvm *kvm)
900{
901 kvm->vm_bugged = true;
902 kvm_vm_dead(kvm);
903}
904
905
906#define KVM_BUG(cond, kvm, fmt...) \
907({ \
908 bool __ret = !!(cond); \
909 \
910 if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
911 kvm_vm_bugged(kvm); \
912 unlikely(__ret); \
913})
914
915#define KVM_BUG_ON(cond, kvm) \
916({ \
917 bool __ret = !!(cond); \
918 \
919 if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
920 kvm_vm_bugged(kvm); \
921 unlikely(__ret); \
922})
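
/*
 * Usage sketch: KVM_BUG_ON() behaves like WARN_ON_ONCE() but also marks the
 * VM as bugged so that subsequent ioctls fail, e.g.:
 *
 *	if (KVM_BUG_ON(!slot, kvm))
 *		return -EIO;
 */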
923
924/*
925 * Note, "data corruption" refers to corruption of host kernel data structures,
926 * not guest data. Guest data corruption, suspected or confirmed, that is
927 * tied to and contained within a single VM should *never* BUG() and
928 * potentially panic the host, i.e. use this variant of KVM_BUG() if and
929 * only if a KVM data structure is corrupted and that corruption can have a
930 * cascading effect on other parts of the host and/or on other VMs.
931 */
932#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \
933({ \
934 bool __ret = !!(cond); \
935 \
936 if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \
937 BUG_ON(__ret); \
938 else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
939 kvm_vm_bugged(kvm); \
940 unlikely(__ret); \
941})
942
943static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
944{
945#ifdef CONFIG_PROVE_RCU
946 WARN_ONCE(vcpu->srcu_depth++,
947 "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
948#endif
949 vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
950}
951
952static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
953{
954 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
955
956#ifdef CONFIG_PROVE_RCU
957 WARN_ONCE(--vcpu->srcu_depth,
958 "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
959#endif
960}
961
962static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
963{
964 return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
965}
966
967static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
968{
969 return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
970 lockdep_is_held(&kvm->slots_lock) ||
971 !refcount_read(&kvm->users_count));
972}
973
974static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
975{
976 int num_vcpus = atomic_read(&kvm->online_vcpus);
977
978 /*
979 * Explicitly verify the target vCPU is online, as the anti-speculation
980 * logic only limits the CPU's ability to speculate, e.g. given a "bad"
981 * index, clamping the index to 0 would return vCPU0, not NULL.
982 */
983 if (i >= num_vcpus)
984 return NULL;
985
986 i = array_index_nospec(i, num_vcpus);
987
988 /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
989 smp_rmb();
990 return xa_load(&kvm->vcpu_array, i);
991}
992
993#define kvm_for_each_vcpu(idx, vcpup, kvm) \
994 if (atomic_read(&kvm->online_vcpus)) \
995 xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
996 (atomic_read(&kvm->online_vcpus) - 1))
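
/*
 * Example: iterate over all online vCPUs; the index must be an unsigned long
 * as required by xa_for_each_range():
 *
 *	unsigned long i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */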
997
998static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
999{
1000 struct kvm_vcpu *vcpu = NULL;
1001 unsigned long i;
1002
1003 if (id < 0)
1004 return NULL;
1005 if (id < KVM_MAX_VCPUS)
1006 vcpu = kvm_get_vcpu(kvm, id);
1007 if (vcpu && vcpu->vcpu_id == id)
1008 return vcpu;
1009 kvm_for_each_vcpu(i, vcpu, kvm)
1010 if (vcpu->vcpu_id == id)
1011 return vcpu;
1012 return NULL;
1013}
1014
1015void kvm_destroy_vcpus(struct kvm *kvm);
1016
1017void vcpu_load(struct kvm_vcpu *vcpu);
1018void vcpu_put(struct kvm_vcpu *vcpu);
1019
1020#ifdef __KVM_HAVE_IOAPIC
1021void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
1022void kvm_arch_post_irq_routing_update(struct kvm *kvm);
1023#else
1024static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
1025{
1026}
1027static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
1028{
1029}
1030#endif
1031
1032#ifdef CONFIG_HAVE_KVM_IRQCHIP
1033int kvm_irqfd_init(void);
1034void kvm_irqfd_exit(void);
1035#else
1036static inline int kvm_irqfd_init(void)
1037{
1038 return 0;
1039}
1040
1041static inline void kvm_irqfd_exit(void)
1042{
1043}
1044#endif
1045int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
1046void kvm_exit(void);
1047
1048void kvm_get_kvm(struct kvm *kvm);
1049bool kvm_get_kvm_safe(struct kvm *kvm);
1050void kvm_put_kvm(struct kvm *kvm);
1051bool file_is_kvm(struct file *file);
1052void kvm_put_kvm_no_destroy(struct kvm *kvm);
1053
1054static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
1055{
1056 as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
1057 return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
1058 lockdep_is_held(&kvm->slots_lock) ||
1059 !refcount_read(&kvm->users_count));
1060}
1061
1062static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
1063{
1064 return __kvm_memslots(kvm, 0);
1065}
1066
1067static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
1068{
1069 int as_id = kvm_arch_vcpu_memslots_id(vcpu);
1070
1071 return __kvm_memslots(vcpu->kvm, as_id);
1072}
1073
1074static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
1075{
1076 return RB_EMPTY_ROOT(&slots->gfn_tree);
1077}
1078
1079bool kvm_are_all_memslots_empty(struct kvm *kvm);
1080
1081#define kvm_for_each_memslot(memslot, bkt, slots) \
1082 hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
1083 if (WARN_ON_ONCE(!memslot->npages)) { \
1084 } else
1085
1086static inline
1087struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
1088{
1089 struct kvm_memory_slot *slot;
1090 int idx = slots->node_idx;
1091
1092 hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
1093 if (slot->id == id)
1094 return slot;
1095 }
1096
1097 return NULL;
1098}
1099
1100/* Iterator used for walking memslots that overlap a gfn range. */
1101struct kvm_memslot_iter {
1102 struct kvm_memslots *slots;
1103 struct rb_node *node;
1104 struct kvm_memory_slot *slot;
1105};
1106
1107static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
1108{
1109 iter->node = rb_next(iter->node);
1110 if (!iter->node)
1111 return;
1112
1113 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
1114}
1115
1116static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
1117 struct kvm_memslots *slots,
1118 gfn_t start)
1119{
1120 int idx = slots->node_idx;
1121 struct rb_node *tmp;
1122 struct kvm_memory_slot *slot;
1123
1124 iter->slots = slots;
1125
1126 /*
1127 * Find the so called "upper bound" of a key - the first node that has
1128 * its key strictly greater than the searched one (the start gfn in our case).
1129 */
1130 iter->node = NULL;
1131 for (tmp = slots->gfn_tree.rb_node; tmp; ) {
1132 slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
1133 if (start < slot->base_gfn) {
1134 iter->node = tmp;
1135 tmp = tmp->rb_left;
1136 } else {
1137 tmp = tmp->rb_right;
1138 }
1139 }
1140
1141 /*
1142 * Find the slot with the lowest gfn that can possibly intersect with
1143 * the range, so we'll ideally have slot start <= range start
1144 */
1145 if (iter->node) {
1146 /*
1147 * A NULL previous node means that the very first slot
1148 * already has a higher start gfn.
1149 * In this case slot start > range start.
1150 */
1151 tmp = rb_prev(iter->node);
1152 if (tmp)
1153 iter->node = tmp;
1154 } else {
1155 /* a NULL node below means no slots */
1156 iter->node = rb_last(&slots->gfn_tree);
1157 }
1158
1159 if (iter->node) {
1160 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
1161
1162 /*
1163 * It is possible in the slot start < range start case that the
1164 * found slot ends before or at range start (slot end <= range start)
1165 * and so it does not overlap the requested range.
1166 *
1167 * In such non-overlapping case the next slot (if it exists) will
1168 * already have slot start > range start, otherwise the logic above
1169 * would have found it instead of the current slot.
1170 */
1171 if (iter->slot->base_gfn + iter->slot->npages <= start)
1172 kvm_memslot_iter_next(iter);
1173 }
1174}
1175
1176static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
1177{
1178 if (!iter->node)
1179 return false;
1180
1181 /*
1182 * If this slot starts at or beyond the end of the range, so does
1183 * every following one.
1184 */
1185 return iter->slot->base_gfn < end;
1186}
1187
1188/* Iterate over each memslot at least partially intersecting [start, end) range */
1189#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
1190 for (kvm_memslot_iter_start(iter, slots, start); \
1191 kvm_memslot_iter_is_valid(iter, end); \
1192 kvm_memslot_iter_next(iter))
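
/*
 * Usage sketch: walk every memslot overlapping [start, end):
 *
 *	struct kvm_memslot_iter iter;
 *
 *	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
 *		struct kvm_memory_slot *slot = iter.slot;
 *		...
 *	}
 */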
1193
1194struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
1195struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
1196struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
1197
1198/*
1199 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
1200 * - create a new memory slot
1201 * - delete an existing memory slot
1202 * - modify an existing memory slot
1203 * -- move it in the guest physical memory space
1204 * -- just change its flags
1205 *
1206 * Since flags can be changed by some of these operations, the following
1207 * differentiation is the best we can do for kvm_set_memory_region():
1208 */
1209enum kvm_mr_change {
1210 KVM_MR_CREATE,
1211 KVM_MR_DELETE,
1212 KVM_MR_MOVE,
1213 KVM_MR_FLAGS_ONLY,
1214};
1215
1216int kvm_set_internal_memslot(struct kvm *kvm,
1217 const struct kvm_userspace_memory_region2 *mem);
1218void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
1219void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
1220int kvm_arch_prepare_memory_region(struct kvm *kvm,
1221 const struct kvm_memory_slot *old,
1222 struct kvm_memory_slot *new,
1223 enum kvm_mr_change change);
1224void kvm_arch_commit_memory_region(struct kvm *kvm,
1225 struct kvm_memory_slot *old,
1226 const struct kvm_memory_slot *new,
1227 enum kvm_mr_change change);
1228/* flush all memory translations */
1229void kvm_arch_flush_shadow_all(struct kvm *kvm);
1230/* flush memory translations pointing to 'slot' */
1231void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1232 struct kvm_memory_slot *slot);
1233
1234int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
1235 struct page **pages, int nr_pages);
1236
1237struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write);
1238static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1239{
1240 return __gfn_to_page(kvm, gfn, true);
1241}
1242
1243unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1244unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
1245unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
1246unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
1247 bool *writable);
1248
1249static inline void kvm_release_page_unused(struct page *page)
1250{
1251 if (!page)
1252 return;
1253
1254 put_page(page);
1255}
1256
1257void kvm_release_page_clean(struct page *page);
1258void kvm_release_page_dirty(struct page *page);
1259
1260static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page,
1261 bool unused, bool dirty)
1262{
1263 lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused);
1264
1265 if (!page)
1266 return;
1267
1268 /*
1269 * If the page that KVM got from the *primary MMU* is writable, and KVM
1270 * installed or reused a SPTE, mark the page/folio dirty. Note, this
1271 * may mark a folio dirty even if KVM created a read-only SPTE, e.g. if
1272 * the GFN is write-protected. Folios can't be safely marked dirty
1273 * outside of mmu_lock as doing so could race with writeback on the
1274 * folio. As a result, KVM can't mark folios dirty in the fast page
1275 * fault handler, and so KVM must (somewhat) speculatively mark the
1276 * folio dirty if KVM could locklessly make the SPTE writable.
1277 */
1278 if (unused)
1279 kvm_release_page_unused(page);
1280 else if (dirty)
1281 kvm_release_page_dirty(page);
1282 else
1283 kvm_release_page_clean(page);
1284}
1285
1286kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
1287 unsigned int foll, bool *writable,
1288 struct page **refcounted_page);
1289
1290static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
1291 bool write, bool *writable,
1292 struct page **refcounted_page)
1293{
1294 return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn,
1295 write ? FOLL_WRITE : 0, writable, refcounted_page);
1296}
1297
1298int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1299 int len);
1300int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
1301int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1302 void *data, unsigned long len);
1303int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1304 void *data, unsigned int offset,
1305 unsigned long len);
1306int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1307 int offset, int len);
1308int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1309 unsigned long len);
1310int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1311 void *data, unsigned long len);
1312int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1313 void *data, unsigned int offset,
1314 unsigned long len);
1315int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1316 gpa_t gpa, unsigned long len);
1317
1318#define __kvm_get_guest(kvm, gfn, offset, v) \
1319({ \
1320 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1321 typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
1322 int __ret = -EFAULT; \
1323 \
1324 if (!kvm_is_error_hva(__addr)) \
1325 __ret = get_user(v, __uaddr); \
1326 __ret; \
1327})
1328
1329#define kvm_get_guest(kvm, gpa, v) \
1330({ \
1331 gpa_t __gpa = gpa; \
1332 struct kvm *__kvm = kvm; \
1333 \
1334 __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
1335 offset_in_page(__gpa), v); \
1336})
1337
1338#define __kvm_put_guest(kvm, gfn, offset, v) \
1339({ \
1340 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1341 typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
1342 int __ret = -EFAULT; \
1343 \
1344 if (!kvm_is_error_hva(__addr)) \
1345 __ret = put_user(v, __uaddr); \
1346 if (!__ret) \
1347 mark_page_dirty(kvm, gfn); \
1348 __ret; \
1349})
1350
1351#define kvm_put_guest(kvm, gpa, v) \
1352({ \
1353 gpa_t __gpa = gpa; \
1354 struct kvm *__kvm = kvm; \
1355 \
1356 __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
1357 offset_in_page(__gpa), v); \
1358})
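
/*
 * Example (illustrative): read and update a u32 at a guest physical address;
 * the macros derive the access size from the type of the value argument and
 * return 0 on success or -EFAULT on failure:
 *
 *	u32 val;
 *
 *	if (!kvm_get_guest(kvm, gpa, val)) {
 *		val++;
 *		kvm_put_guest(kvm, gpa, val);
 *	}
 */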
1359
1360int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
1361bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
1362bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1363unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
1364void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
1365void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
1366
1367int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map,
1368 bool writable);
1369void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map);
1370
1371static inline int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa,
1372 struct kvm_host_map *map)
1373{
1374 return __kvm_vcpu_map(vcpu, gpa, map, true);
1375}
1376
1377static inline int kvm_vcpu_map_readonly(struct kvm_vcpu *vcpu, gpa_t gpa,
1378 struct kvm_host_map *map)
1379{
1380 return __kvm_vcpu_map(vcpu, gpa, map, false);
1381}
1382
1383unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
1384unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
1385int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
1386 int len);
1387int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
1388 unsigned long len);
1389int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
1390 unsigned long len);
1391int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
1392 int offset, int len);
1393int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
1394 unsigned long len);
1395void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
1396
1397/**
1398 * kvm_gpc_init - initialize gfn_to_pfn_cache.
1399 *
1400 * @gpc: struct gfn_to_pfn_cache object.
1401 * @kvm: pointer to kvm instance.
1402 *
1403 * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
1404 * immutable attributes. Note, the cache must be zero-allocated (or zeroed by
1405 * the caller before init).
1406 */
1407void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
1408
1409/**
1410 * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
1411 * physical address.
1412 *
1413 * @gpc: struct gfn_to_pfn_cache object.
1414 * @gpa: guest physical address to map.
1415 * @len: sanity check; the range being accessed must fit within a single page.
1416 *
1417 * @return: 0 for success.
1418 * -EINVAL for a mapping which would cross a page boundary.
1419 * -EFAULT for an untranslatable guest physical address.
1420 *
1421 * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
1422 * invalidations to be processed. Callers are required to use kvm_gpc_check()
1423 * to ensure that the cache is valid before accessing the target page.
1424 */
1425int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
1426
1427/**
1428 * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
1429 *
1430 * @gpc: struct gfn_to_pfn_cache object.
1431 * @hva: userspace virtual address to map.
1432 * @len: sanity check; the range being accessed must fit within a single page.
1433 *
1434 * @return: 0 for success.
1435 * -EINVAL for a mapping which would cross a page boundary.
1436 * -EFAULT for an untranslatable guest physical address.
1437 *
1438 * The semantics of this function are the same as those of kvm_gpc_activate(). It
1439 * merely bypasses a layer of address translation.
1440 */
1441int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
1442
1443/**
1444 * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
1445 *
1446 * @gpc: struct gfn_to_pfn_cache object.
1447 * @len: sanity check; the range being accessed must fit within a single page.
1448 *
1449 * @return: %true if the cache is still valid and the address matches.
1450 * %false if the cache is not valid.
1451 *
1452 * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
1453 * while calling this function, and then continue to hold the lock until the
1454 * access is complete.
1455 *
1456 * Callers in IN_GUEST_MODE may do so without locking, although they should
1457 * still hold a read lock on kvm->srcu for the memslot checks.
1458 */
1459bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
1460
1461/**
1462 * kvm_gpc_refresh - update a previously initialized cache.
1463 *
1464 * @gpc: struct gfn_to_pfn_cache object.
1465 * @len: sanity check; the range being accessed must fit within a single page.
1466 *
1467 * @return: 0 for success.
1468 * -EINVAL for a mapping which would cross a page boundary.
1469 * -EFAULT for an untranslatable guest physical address.
1470 *
1471 * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
1472 * return from this function does not mean the page can be immediately
1473 * accessed because it may have raced with an invalidation. Callers must
1474 * still lock and check the cache status, as this function does not return
1475 * with the lock still held to permit access.
1476 */
1477int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
1478
1479/**
1480 * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
1481 *
1482 * @gpc: struct gfn_to_pfn_cache object.
1483 *
1484 * This removes a cache from the VM's list to be processed on MMU notifier
1485 * invocation.
1486 */
1487void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
1488
1489static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc)
1490{
1491 return gpc->active && !kvm_is_error_gpa(gpc->gpa);
1492}
1493
1494static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc)
1495{
1496 return gpc->active && kvm_is_error_gpa(gpc->gpa);
1497}
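
/*
 * Typical gfn_to_pfn_cache usage sketch (locking and error handling trimmed;
 * see the function comments above for the exact rules):
 *
 *	kvm_gpc_init(&gpc, kvm);
 *	if (kvm_gpc_activate(&gpc, gpa, len))
 *		return -EFAULT;
 *
 *	read_lock(&gpc.lock);
 *	while (!kvm_gpc_check(&gpc, len)) {
 *		read_unlock(&gpc.lock);
 *		if (kvm_gpc_refresh(&gpc, len))
 *			goto out;
 *		read_lock(&gpc.lock);
 *	}
 *	... access the page via gpc.khva ...
 *	read_unlock(&gpc.lock);
 */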
1498
1499void kvm_sigset_activate(struct kvm_vcpu *vcpu);
1500void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
1501
1502void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
1503bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
1504void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
1505void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
1506bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
1507void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
1508int kvm_vcpu_yield_to(struct kvm_vcpu *target);
1509void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
1510
1511void kvm_flush_remote_tlbs(struct kvm *kvm);
1512void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1513void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
1514 const struct kvm_memory_slot *memslot);
1515
1516#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
1517int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
1518int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
1519int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
1520void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
1521void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
1522#endif
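
/*
 * Usage sketch for the MMU memory caches: top up the cache outside mmu_lock
 * (where allocation may sleep), then allocate from it in contexts that cannot
 * sleep, typically under mmu_lock. 'some_cache' is an illustrative arch
 * field, not an actual KVM member:
 *
 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.some_cache, min_objs);
 *	if (r)
 *		return r;
 *	...
 *	obj = kvm_mmu_memory_cache_alloc(&vcpu->arch.some_cache);
 */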
1523
1524void kvm_mmu_invalidate_begin(struct kvm *kvm);
1525void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
1526void kvm_mmu_invalidate_end(struct kvm *kvm);
1527bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
1528
1529long kvm_arch_dev_ioctl(struct file *filp,
1530 unsigned int ioctl, unsigned long arg);
1531long kvm_arch_vcpu_ioctl(struct file *filp,
1532 unsigned int ioctl, unsigned long arg);
1533vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
1534
1535int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
1536
1537void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1538 struct kvm_memory_slot *slot,
1539 gfn_t gfn_offset,
1540 unsigned long mask);
1541void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
1542
1543#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
1544int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
1545int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1546 int *is_dirty, struct kvm_memory_slot **memslot);
1547#endif
1548
1549int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1550 bool line_status);
1551int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1552 struct kvm_enable_cap *cap);
1553int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
1554long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
1555 unsigned long arg);
1556
1557int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
1558int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
1559
1560int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1561 struct kvm_translation *tr);
1562
1563int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
1564int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
1565int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1566 struct kvm_sregs *sregs);
1567int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1568 struct kvm_sregs *sregs);
1569int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1570 struct kvm_mp_state *mp_state);
1571int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1572 struct kvm_mp_state *mp_state);
1573int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1574 struct kvm_guest_debug *dbg);
1575int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
1576
1577void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
1578void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
1579int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
1580int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
1581void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
1582void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
1583
1584#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
1585int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
1586#endif
1587
1588#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
1589void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
1590#else
1591static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
1592#endif
1593
1594#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
1595/*
1596 * kvm_arch_{enable,disable}_virtualization() are called on one CPU, under
1597 * kvm_usage_lock, immediately after/before 0=>1 and 1=>0 transitions of
1598 * kvm_usage_count, i.e. at the beginning of the generic hardware enabling
1599 * sequence, and at the end of the generic hardware disabling sequence.
1600 */
1601void kvm_arch_enable_virtualization(void);
1602void kvm_arch_disable_virtualization(void);
1603/*
1604 * kvm_arch_{enable,disable}_virtualization_cpu() are called on "every" CPU to
1605 * do the actual twiddling of hardware bits. The hooks are called on all
1606 * online CPUs when KVM enables/disables virtualization, and on a single CPU
1607 * when that CPU is onlined/offlined (including for Resume/Suspend).
1608 */
1609int kvm_arch_enable_virtualization_cpu(void);
1610void kvm_arch_disable_virtualization_cpu(void);
1611#endif
1612int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
1613bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
1614int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
1615bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
1616bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
1617bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
1618int kvm_arch_post_init_vm(struct kvm *kvm);
1619void kvm_arch_pre_destroy_vm(struct kvm *kvm);
1620void kvm_arch_create_vm_debugfs(struct kvm *kvm);
1621
1622#ifndef __KVM_HAVE_ARCH_VM_ALLOC
1623/*
1624 * All architectures that want to use vzalloc currently also
1625 * need their own kvm_arch_alloc_vm implementation.
1626 */
1627static inline struct kvm *kvm_arch_alloc_vm(void)
1628{
1629 return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT);
1630}
1631#endif
1632
1633static inline void __kvm_arch_free_vm(struct kvm *kvm)
1634{
1635 kvfree(kvm);
1636}
1637
1638#ifndef __KVM_HAVE_ARCH_VM_FREE
1639static inline void kvm_arch_free_vm(struct kvm *kvm)
1640{
1641 __kvm_arch_free_vm(kvm);
1642}
1643#endif
1644
1645#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
1646static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
1647{
1648 return -ENOTSUPP;
1649}
1650#else
1651int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
1652#endif
1653
1654#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
1655static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
1656 gfn_t gfn, u64 nr_pages)
1657{
1658 return -EOPNOTSUPP;
1659}
1660#else
1661int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1662#endif
1663
1664#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
1665void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
1666void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
1667bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
1668#else
1669static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
1670{
1671}
1672
1673static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
1674{
1675}
1676
1677static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
1678{
1679 return false;
1680}
1681#endif
1682#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
1683void kvm_arch_start_assignment(struct kvm *kvm);
1684void kvm_arch_end_assignment(struct kvm *kvm);
1685bool kvm_arch_has_assigned_device(struct kvm *kvm);
1686#else
1687static inline void kvm_arch_start_assignment(struct kvm *kvm)
1688{
1689}
1690
1691static inline void kvm_arch_end_assignment(struct kvm *kvm)
1692{
1693}
1694
1695static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
1696{
1697 return false;
1698}
1699#endif
1700
1701static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
1702{
1703#ifdef __KVM_HAVE_ARCH_WQP
1704 return vcpu->arch.waitp;
1705#else
1706 return &vcpu->wait;
1707#endif
1708}
1709
1710/*
1711 * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns
1712 * true if the vCPU was blocking and was awakened, false otherwise.
1713 */
1714static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
1715{
1716 return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
1717}
1718
1719static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
1720{
1721 return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
1722}
1723
1724#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
1725/*
1726 * Returns true if the virtual interrupt controller is initialized and
1727 * ready to accept virtual IRQs. On some architectures the virtual interrupt
1728 * controller is dynamically instantiated and this is not always true.
1729 */
1730bool kvm_arch_intc_initialized(struct kvm *kvm);
1731#else
1732static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
1733{
1734 return true;
1735}
1736#endif
1737
1738#ifdef CONFIG_GUEST_PERF_EVENTS
1739unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
1740
1741void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
1742void kvm_unregister_perf_callbacks(void);
1743#else
1744static inline void kvm_register_perf_callbacks(void *ign) {}
1745static inline void kvm_unregister_perf_callbacks(void) {}
1746#endif /* CONFIG_GUEST_PERF_EVENTS */
1747
1748int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
1749void kvm_arch_destroy_vm(struct kvm *kvm);
1750void kvm_arch_sync_events(struct kvm *kvm);
1751
1752int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
1753
1754struct kvm_irq_ack_notifier {
1755 struct hlist_node link;
1756 unsigned gsi;
1757 void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
1758};
1759
1760int kvm_irq_map_gsi(struct kvm *kvm,
1761 struct kvm_kernel_irq_routing_entry *entries, int gsi);
1762int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
1763
1764int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1765 bool line_status);
1766int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
1767 int irq_source_id, int level, bool line_status);
1768int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
1769 struct kvm *kvm, int irq_source_id,
1770 int level, bool line_status);
1771bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
1772void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
1773void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
1774void kvm_register_irq_ack_notifier(struct kvm *kvm,
1775 struct kvm_irq_ack_notifier *kian);
1776void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
1777 struct kvm_irq_ack_notifier *kian);
1778int kvm_request_irq_source_id(struct kvm *kvm);
1779void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
1780bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1781
1782/*
1783 * Returns a pointer to the memslot if it contains gfn.
1784 * Otherwise returns NULL.
1785 */
1786static inline struct kvm_memory_slot *
1787try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1788{
1789 if (!slot)
1790 return NULL;
1791
1792 if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
1793 return slot;
1794 else
1795 return NULL;
1796}
1797
1798/*
1799 * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
1800 *
1801 * With "approx" set, a memslot is returned even when the address falls
1802 * in a hole; in that case, one of the memslots bordering the hole is
1803 * returned.
1804 */
1805static inline struct kvm_memory_slot *
1806search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1807{
1808 struct kvm_memory_slot *slot;
1809 struct rb_node *node;
1810 int idx = slots->node_idx;
1811
1812 slot = NULL;
1813 for (node = slots->gfn_tree.rb_node; node; ) {
1814 slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
1815 if (gfn >= slot->base_gfn) {
1816 if (gfn < slot->base_gfn + slot->npages)
1817 return slot;
1818 node = node->rb_right;
1819 } else
1820 node = node->rb_left;
1821 }
1822
1823 return approx ? slot : NULL;
1824}
1825
1826static inline struct kvm_memory_slot *
1827____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1828{
1829 struct kvm_memory_slot *slot;
1830
1831 slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
1832 slot = try_get_memslot(slot, gfn);
1833 if (slot)
1834 return slot;
1835
1836 slot = search_memslots(slots, gfn, approx);
1837 if (slot) {
1838 atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
1839 return slot;
1840 }
1841
1842 return NULL;
1843}
1844
1845/*
1846 * __gfn_to_memslot() and its descendants are here to allow arch code to inline
1847 * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
1848 * because that would bloat other code too much.
1849 */
1850static inline struct kvm_memory_slot *
1851__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
1852{
1853 return ____gfn_to_memslot(slots, gfn, false);
1854}
1855
1856static inline unsigned long
1857__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
1858{
1859 /*
1860	 * The index was originally checked in search_memslots. To prevent a
1861	 * malicious guest from building a Spectre gadget out of e.g. page
1862	 * table walks, do not let the processor speculate loads outside
1863	 * the guest's registered memslots.
1864 */
1865 unsigned long offset = gfn - slot->base_gfn;
1866 offset = array_index_nospec(offset, slot->npages);
1867 return slot->userspace_addr + offset * PAGE_SIZE;
1868}
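
/*
 * Illustrative usage only (error handling elided; "slots" and "gfn" are
 * assumed to come from the caller): arch fast paths typically combine the
 * inline helpers above roughly as follows:
 *
 *	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 *	unsigned long hva;
 *
 *	if (slot)
 *		hva = __gfn_to_hva_memslot(slot, gfn);
 */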
1869
1870static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
1871{
1872 return gfn_to_memslot(kvm, gfn)->id;
1873}
1874
1875static inline gfn_t
1876hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
1877{
1878 gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
1879
1880 return slot->base_gfn + gfn_offset;
1881}
1882
1883static inline gpa_t gfn_to_gpa(gfn_t gfn)
1884{
1885 return (gpa_t)gfn << PAGE_SHIFT;
1886}
1887
1888static inline gfn_t gpa_to_gfn(gpa_t gpa)
1889{
1890 return (gfn_t)(gpa >> PAGE_SHIFT);
1891}
1892
1893static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
1894{
1895 return (hpa_t)pfn << PAGE_SHIFT;
1896}
1897
1898static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
1899{
1900 unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
1901
1902 return !kvm_is_error_hva(hva);
1903}
1904
1905static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)
1906{
1907 lockdep_assert_held(&gpc->lock);
1908
1909 if (!gpc->memslot)
1910 return;
1911
1912 mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
1913}
1914
1915enum kvm_stat_kind {
1916 KVM_STAT_VM,
1917 KVM_STAT_VCPU,
1918};
1919
1920struct kvm_stat_data {
1921 struct kvm *kvm;
1922 const struct _kvm_stats_desc *desc;
1923 enum kvm_stat_kind kind;
1924};
1925
1926struct _kvm_stats_desc {
1927 struct kvm_stats_desc desc;
1928 char name[KVM_STATS_NAME_SIZE];
1929};
1930
1931#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
1932 .flags = type | unit | base | \
1933 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
1934 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
1935 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
1936 .exponent = exp, \
1937 .size = sz, \
1938 .bucket_size = bsz
1939
1940#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1941 { \
1942 { \
1943 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1944 .offset = offsetof(struct kvm_vm_stat, generic.stat) \
1945 }, \
1946 .name = #stat, \
1947 }
1948#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1949 { \
1950 { \
1951 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1952 .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
1953 }, \
1954 .name = #stat, \
1955 }
1956#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1957 { \
1958 { \
1959 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1960 .offset = offsetof(struct kvm_vm_stat, stat) \
1961 }, \
1962 .name = #stat, \
1963 }
1964#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1965 { \
1966 { \
1967 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1968 .offset = offsetof(struct kvm_vcpu_stat, stat) \
1969 }, \
1970 .name = #stat, \
1971 }
1972/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
1973#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
1974 SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
1975
1976#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
1977 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \
1978 unit, base, exponent, 1, 0)
1979#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
1980 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \
1981 unit, base, exponent, 1, 0)
1982#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
1983 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \
1984 unit, base, exponent, 1, 0)
1985#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
1986 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \
1987 unit, base, exponent, sz, bsz)
1988#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \
1989 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \
1990 unit, base, exponent, sz, 0)
1991
1992/* Cumulative counter, read/write */
1993#define STATS_DESC_COUNTER(SCOPE, name) \
1994 STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
1995 KVM_STATS_BASE_POW10, 0)
1996/* Instantaneous counter, read only */
1997#define STATS_DESC_ICOUNTER(SCOPE, name) \
1998 STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
1999 KVM_STATS_BASE_POW10, 0)
2000/* Peak counter, read/write */
2001#define STATS_DESC_PCOUNTER(SCOPE, name) \
2002 STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
2003 KVM_STATS_BASE_POW10, 0)
2004
2005/* Instantaneous boolean value, read only */
2006#define STATS_DESC_IBOOLEAN(SCOPE, name) \
2007 STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
2008 KVM_STATS_BASE_POW10, 0)
2009/* Peak (sticky) boolean value, read/write */
2010#define STATS_DESC_PBOOLEAN(SCOPE, name) \
2011 STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
2012 KVM_STATS_BASE_POW10, 0)
2013
2014/* Cumulative time in nanoseconds */
2015#define STATS_DESC_TIME_NSEC(SCOPE, name) \
2016 STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
2017 KVM_STATS_BASE_POW10, -9)
2018/* Linear histogram for time in nanoseconds */
2019#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \
2020 STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
2021 KVM_STATS_BASE_POW10, -9, sz, bsz)
2022/* Logarithmic histogram for time in nanoseconds */
2023#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \
2024 STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
2025 KVM_STATS_BASE_POW10, -9, sz)
2026
2027#define KVM_GENERIC_VM_STATS() \
2028 STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \
2029 STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)
2030
2031#define KVM_GENERIC_VCPU_STATS() \
2032 STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
2033 STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
2034 STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
2035 STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
2036 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
2037 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \
2038 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \
2039 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
2040 HALT_POLL_HIST_COUNT), \
2041 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
2042 HALT_POLL_HIST_COUNT), \
2043 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
2044 HALT_POLL_HIST_COUNT), \
2045 STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
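
/*
 * Illustrative only: architectures build their stats descriptor tables from
 * the macros above; a hypothetical table (the arch-specific stat name below
 * is made up) could look roughly like:
 *
 *	const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
 *		KVM_GENERIC_VM_STATS(),
 *		STATS_DESC_COUNTER(VM, mmu_pages_zapped)
 *	};
 *
 * where mmu_pages_zapped would be a field in the arch's struct kvm_vm_stat.
 */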
2046
2047ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
2048 const struct _kvm_stats_desc *desc,
2049 void *stats, size_t size_stats,
2050 char __user *user_buffer, size_t size, loff_t *offset);
2051
2052/**
2053 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
2054 * statistics data.
2055 *
2056 * @data: start address of the stats data
2057 * @size: the number of buckets in the stats data
2058 * @value: the new value used to update the linear histogram's bucket
2059 * @bucket_size: the size (width) of a bucket
2060 */
2061static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
2062 u64 value, size_t bucket_size)
2063{
2064 size_t index = div64_u64(value, bucket_size);
2065
2066 index = min(index, size - 1);
2067 ++data[index];
2068}
2069
2070/**
2071 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
2072 * statistics data.
2073 *
2074 * @data: start address of the stats data
2075 * @size: the number of buckets in the stats data
2076 * @value: the new value used to update the logarithmic histogram's bucket
2077 */
2078static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
2079{
2080 size_t index = fls64(value);
2081
2082 index = min(index, size - 1);
2083 ++data[index];
2084}
2085
2086#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \
2087 kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
2088#define KVM_STATS_LOG_HIST_UPDATE(array, value) \
2089 kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
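
/*
 * Worked example (illustrative): with bucket_size == 1000 and value == 2500,
 * kvm_stats_linear_hist_update() increments bucket index 2 (2500 / 1000).
 * For the same value, kvm_stats_log_hist_update() computes fls64(2500) == 12
 * and increments bucket index 12. In both cases the index is clamped to
 * size - 1.
 */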
2090
2091
2092extern const struct kvm_stats_header kvm_vm_stats_header;
2093extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
2094extern const struct kvm_stats_header kvm_vcpu_stats_header;
2095extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
2096
2097#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
2098static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
2099{
2100 if (unlikely(kvm->mmu_invalidate_in_progress))
2101 return 1;
2102 /*
2103 * Ensure the read of mmu_invalidate_in_progress happens before
2104 * the read of mmu_invalidate_seq. This interacts with the
2105 * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
2106 * that the caller either sees the old (non-zero) value of
2107 * mmu_invalidate_in_progress or the new (incremented) value of
2108 * mmu_invalidate_seq.
2109 *
2110 * PowerPC Book3s HV KVM calls this under a per-page lock rather
2111 * than under kvm->mmu_lock, for scalability, so can't rely on
2112 * kvm->mmu_lock to keep things ordered.
2113 */
2114 smp_rmb();
2115 if (kvm->mmu_invalidate_seq != mmu_seq)
2116 return 1;
2117 return 0;
2118}
2119
2120static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
2121 unsigned long mmu_seq,
2122 gfn_t gfn)
2123{
2124 lockdep_assert_held(&kvm->mmu_lock);
2125 /*
2126 * If mmu_invalidate_in_progress is non-zero, then the range maintained
2127 * by kvm_mmu_notifier_invalidate_range_start contains all addresses
2128 * that might be being invalidated. Note that it may include some false
2129	 * positives, due to shortcuts when handling concurrent invalidations.
2130 */
2131 if (unlikely(kvm->mmu_invalidate_in_progress)) {
2132 /*
2133 * Dropping mmu_lock after bumping mmu_invalidate_in_progress
2134 * but before updating the range is a KVM bug.
2135 */
2136 if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
2137 kvm->mmu_invalidate_range_end == INVALID_GPA))
2138 return 1;
2139
2140 if (gfn >= kvm->mmu_invalidate_range_start &&
2141 gfn < kvm->mmu_invalidate_range_end)
2142 return 1;
2143 }
2144
2145 if (kvm->mmu_invalidate_seq != mmu_seq)
2146 return 1;
2147 return 0;
2148}
2149
2150/*
2151 * This lockless version of the range-based retry check *must* be paired with a
2152 * call to the locked version after acquiring mmu_lock, i.e. this is safe to
2153 * use only as a pre-check to avoid contending mmu_lock. This version *will*
2154 * get false negatives and false positives.
2155 */
2156static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
2157 unsigned long mmu_seq,
2158 gfn_t gfn)
2159{
2160 /*
2161 * Use READ_ONCE() to ensure the in-progress flag and sequence counter
2162 * are always read from memory, e.g. so that checking for retry in a
2163 * loop won't result in an infinite retry loop. Don't force loads for
2164 * start+end, as the key to avoiding infinite retry loops is observing
2165 * the 1=>0 transition of in-progress, i.e. getting false negatives
2166 * due to stale start+end values is acceptable.
2167 */
2168 if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
2169 gfn >= kvm->mmu_invalidate_range_start &&
2170 gfn < kvm->mmu_invalidate_range_end)
2171 return true;
2172
2173 return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
2174}
2175#endif
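
/*
 * Illustrative only: an arch page fault handler typically pairs the helpers
 * above roughly as follows (details vary by architecture):
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *	...					lockless work, e.g. resolve the pfn
 *	write_lock(&kvm->mmu_lock);
 *	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn))
 *		goto out_unlock;		an invalidation raced, retry the fault
 *	...					install the new mapping
 *	write_unlock(&kvm->mmu_lock);
 */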
2176
2177#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2178
2179#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
2180
2181bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
2182int kvm_set_irq_routing(struct kvm *kvm,
2183 const struct kvm_irq_routing_entry *entries,
2184 unsigned nr,
2185 unsigned flags);
2186int kvm_init_irq_routing(struct kvm *kvm);
2187int kvm_set_routing_entry(struct kvm *kvm,
2188 struct kvm_kernel_irq_routing_entry *e,
2189 const struct kvm_irq_routing_entry *ue);
2190void kvm_free_irq_routing(struct kvm *kvm);
2191
2192#else
2193
2194static inline void kvm_free_irq_routing(struct kvm *kvm) {}
2195
2196static inline int kvm_init_irq_routing(struct kvm *kvm)
2197{
2198 return 0;
2199}
2200
2201#endif
2202
2203int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
2204
2205void kvm_eventfd_init(struct kvm *kvm);
2206int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
2207
2208#ifdef CONFIG_HAVE_KVM_IRQCHIP
2209int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
2210void kvm_irqfd_release(struct kvm *kvm);
2211bool kvm_notify_irqfd_resampler(struct kvm *kvm,
2212 unsigned int irqchip,
2213 unsigned int pin);
2214void kvm_irq_routing_update(struct kvm *);
2215#else
2216static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
2217{
2218 return -EINVAL;
2219}
2220
2221static inline void kvm_irqfd_release(struct kvm *kvm) {}
2222
2223static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
2224 unsigned int irqchip,
2225 unsigned int pin)
2226{
2227 return false;
2228}
2229#endif /* CONFIG_HAVE_KVM_IRQCHIP */
2230
2231void kvm_arch_irq_routing_update(struct kvm *kvm);
2232
2233static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
2234{
2235 /*
2236 * Ensure the rest of the request is published to kvm_check_request's
2237 * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
2238 */
2239 smp_wmb();
2240 set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2241}
2242
2243static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
2244{
2245 /*
2246	 * Requests that don't require vCPU action should never be logged in
2247 * vcpu->requests. The vCPU won't clear the request, so it will stay
2248 * logged indefinitely and prevent the vCPU from entering the guest.
2249 */
2250 BUILD_BUG_ON(!__builtin_constant_p(req) ||
2251 (req & KVM_REQUEST_NO_ACTION));
2252
2253 __kvm_make_request(req, vcpu);
2254}
2255
2256static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
2257{
2258 return READ_ONCE(vcpu->requests);
2259}
2260
2261static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
2262{
2263 return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2264}
2265
2266static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
2267{
2268 clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2269}
2270
2271static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
2272{
2273 if (kvm_test_request(req, vcpu)) {
2274 kvm_clear_request(req, vcpu);
2275
2276 /*
2277 * Ensure the rest of the request is visible to kvm_check_request's
2278 * caller. Paired with the smp_wmb in kvm_make_request.
2279 */
2280 smp_mb__after_atomic();
2281 return true;
2282 } else {
2283 return false;
2284 }
2285}
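
/*
 * Illustrative only: the usual flow is that some other context posts a
 * request (typically followed by kvm_vcpu_kick() so a running vCPU notices)
 * and the vCPU consumes it in its run loop before entering the guest, e.g.:
 *
 *	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);	requester side
 *	...
 *	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))	vCPU run loop
 *		handle_the_request();			hypothetical handler
 */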
2286
2287#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
2288extern bool kvm_rebooting;
2289#endif
2290
2291extern unsigned int halt_poll_ns;
2292extern unsigned int halt_poll_ns_grow;
2293extern unsigned int halt_poll_ns_grow_start;
2294extern unsigned int halt_poll_ns_shrink;
2295
2296struct kvm_device {
2297 const struct kvm_device_ops *ops;
2298 struct kvm *kvm;
2299 void *private;
2300 struct list_head vm_node;
2301};
2302
2303/* create, destroy, and name are mandatory */
2304struct kvm_device_ops {
2305 const char *name;
2306
2307 /*
2308 * create is called holding kvm->lock and any operations not suitable
2309 * to do while holding the lock should be deferred to init (see
2310 * below).
2311 */
2312 int (*create)(struct kvm_device *dev, u32 type);
2313
2314 /*
2315 * init is called after create if create is successful and is called
2316 * outside of holding kvm->lock.
2317 */
2318 void (*init)(struct kvm_device *dev);
2319
2320 /*
2321 * Destroy is responsible for freeing dev.
2322 *
2323 * Destroy may be called before or after destructors are called
2324 * on emulated I/O regions, depending on whether a reference is
2325 * held by a vcpu or other kvm component that gets destroyed
2326 * after the emulated I/O.
2327 */
2328 void (*destroy)(struct kvm_device *dev);
2329
2330 /*
2331 * Release is an alternative method to free the device. It is
2332 * called when the device file descriptor is closed. Once
2333 * release is called, the destroy method will not be called
2334 * anymore as the device is removed from the device list of
2335 * the VM. kvm->lock is held.
2336 */
2337 void (*release)(struct kvm_device *dev);
2338
2339 int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2340 int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2341 int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2342 long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
2343 unsigned long arg);
2344 int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
2345};
2346
2347struct kvm_device *kvm_device_from_filp(struct file *filp);
2348int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
2349void kvm_unregister_device_ops(u32 type);
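
/*
 * Illustrative only: an in-kernel device backend fills in a kvm_device_ops
 * and registers it against its device type; the names below are hypothetical:
 *
 *	static const struct kvm_device_ops my_dev_ops = {
 *		.name = "my-dev",
 *		.create = my_dev_create,
 *		.destroy = my_dev_destroy,
 *	};
 *
 *	kvm_register_device_ops(&my_dev_ops, KVM_DEV_TYPE_MY_DEV);
 */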
2350
2351extern struct kvm_device_ops kvm_mpic_ops;
2352extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
2353extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
2354
2355#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2356
2357static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
2358{
2359 vcpu->spin_loop.in_spin_loop = val;
2360}
2361static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
2362{
2363 vcpu->spin_loop.dy_eligible = val;
2364}
2365
2366#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
2367
2368static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
2369{
2370}
2371
2372static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
2373{
2374}
2375#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
2376
2377static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
2378{
2379 return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
2380 !(memslot->flags & KVM_MEMSLOT_INVALID));
2381}
2382
2383struct kvm_vcpu *kvm_get_running_vcpu(void);
2384struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
2385
2386#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
2387bool kvm_arch_has_irq_bypass(void);
2388int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
2389 struct irq_bypass_producer *);
2390void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
2391 struct irq_bypass_producer *);
2392void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
2393void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
2394int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
2395 uint32_t guest_irq, bool set);
2396bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
2397 struct kvm_kernel_irq_routing_entry *);
2398#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
2399
2400#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
2401/* If we wake up during the poll time, was it a successful poll? */
2402static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
2403{
2404 return vcpu->valid_wakeup;
2405}
2406
2407#else
2408static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
2409{
2410 return true;
2411}
2412#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
2413
2414#ifdef CONFIG_HAVE_KVM_NO_POLL
2415/* Callback that tells if we must not poll */
2416bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
2417#else
2418static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
2419{
2420 return false;
2421}
2422#endif /* CONFIG_HAVE_KVM_NO_POLL */
2423
2424#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
2425long kvm_arch_vcpu_async_ioctl(struct file *filp,
2426 unsigned int ioctl, unsigned long arg);
2427#else
2428static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
2429 unsigned int ioctl,
2430 unsigned long arg)
2431{
2432 return -ENOIOCTLCMD;
2433}
2434#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
2435
2436void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
2437
2438#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
2439int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
2440#else
2441static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
2442{
2443 return 0;
2444}
2445#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
2446
2447#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
2448static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
2449{
2450 vcpu->run->exit_reason = KVM_EXIT_INTR;
2451 vcpu->stat.signal_exits++;
2452}
2453#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
2454
2455/*
2456 * If more than one page is being (un)accounted, @virt must be the address of
2457 * the first page of a block of pages that were allocated together (i.e.
2458 * accounted together).
2459 *
2460 * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
2461 * is thread-safe.
2462 */
2463static inline void kvm_account_pgtable_pages(void *virt, int nr)
2464{
2465 mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
2466}
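
/*
 * For example (illustrative), an architecture that allocates a page for a
 * shadow page table would call kvm_account_pgtable_pages(va, +1) right after
 * allocating it and kvm_account_pgtable_pages(va, -1) when freeing it, where
 * "va" is the page's kernel virtual address.
 */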
2467
2468/*
2469 * This defines how many reserved entries we want to keep before we
2470 * kick the vCPU out to userspace to avoid the dirty ring becoming full.
2471 * This value can be tuned higher if e.g. PML is enabled on the host.
2472 */
2473#define KVM_DIRTY_RING_RSVD_ENTRIES 64
2474
2475/* Max number of entries allowed for each kvm dirty ring */
2476#define KVM_DIRTY_RING_MAX_ENTRIES 65536
2477
2478static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
2479 gpa_t gpa, gpa_t size,
2480 bool is_write, bool is_exec,
2481 bool is_private)
2482{
2483 vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
2484 vcpu->run->memory_fault.gpa = gpa;
2485 vcpu->run->memory_fault.size = size;
2486
2487 /* RWX flags are not (yet) defined or communicated to userspace. */
2488 vcpu->run->memory_fault.flags = 0;
2489 if (is_private)
2490 vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
2491}
2492
2493#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
2494static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
2495{
2496 return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
2497}
2498
2499bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2500 unsigned long mask, unsigned long attrs);
2501bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
2502 struct kvm_gfn_range *range);
2503bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
2504 struct kvm_gfn_range *range);
2505
2506static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
2507{
2508 return IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) &&
2509 kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
2510}
2511#else
2512static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
2513{
2514 return false;
2515}
2516#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
2517
2518#ifdef CONFIG_KVM_PRIVATE_MEM
2519int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
2520 gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
2521 int *max_order);
2522#else
2523static inline int kvm_gmem_get_pfn(struct kvm *kvm,
2524 struct kvm_memory_slot *slot, gfn_t gfn,
2525 kvm_pfn_t *pfn, struct page **page,
2526 int *max_order)
2527{
2528 KVM_BUG_ON(1, kvm);
2529 return -EIO;
2530}
2531#endif /* CONFIG_KVM_PRIVATE_MEM */
2532
2533#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
2534int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
2535#endif
2536
2537#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
2538/**
2539 * kvm_gmem_populate() - Populate/prepare a GPA range with guest data
2540 *
2541 * @kvm: KVM instance
2542 * @gfn: starting GFN to be populated
2543 * @src: userspace-provided buffer containing data to copy into GFN range
2544 * (passed to @post_populate, and incremented on each iteration
2545 * if not NULL)
2546 * @npages: number of pages to copy from the userspace buffer
2547 * @post_populate: callback to issue for each gmem page that backs the GPA
2548 * range
2549 * @opaque: opaque data to pass to @post_populate callback
2550 *
2551 * This is primarily intended for cases where a gmem-backed GPA range needs
2552 * to be initialized with userspace-provided data prior to being mapped into
2553 * the guest as a private page. This should be called with the slots->lock
2554 * held so that caller-enforced invariants regarding the expected memory
2555 * attributes of the GPA range do not race with KVM_SET_MEMORY_ATTRIBUTES.
2556 *
2557 * Returns the number of pages that were populated.
2558 */
2559typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
2560 void __user *src, int order, void *opaque);
2561
2562long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
2563 kvm_gmem_populate_cb post_populate, void *opaque);
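
/*
 * Illustrative only: a @post_populate callback might copy the source data
 * into the freshly allocated gmem page and then do any arch-specific
 * preparation (measurement, encryption, ...). A minimal single-page sketch,
 * with hypothetical helper names:
 *
 *	static int my_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
 *				    void __user *src, int order, void *opaque)
 *	{
 *		void *dst = kmap_local_pfn(pfn);
 *		int ret = 0;
 *
 *		if (src && copy_from_user(dst, src, PAGE_SIZE))
 *			ret = -EFAULT;
 *		kunmap_local(dst);
 *		return ret ?: my_arch_prepare_page(kvm, gfn, pfn, order, opaque);
 *	}
 */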
2564#endif
2565
2566#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
2567void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
2568#endif
2569
2570#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
2571long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
2572 struct kvm_pre_fault_memory *range);
2573#endif
2574
2575#endif