Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * Copyright (C) 2006 Qumranet, Inc.
9 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 *
11 * Authors:
12 * Avi Kivity <avi@qumranet.com>
13 * Yaniv Kamay <yaniv@qumranet.com>
14 */
15
16#include <kvm/iodev.h>
17
18#include <linux/kvm_host.h>
19#include <linux/kvm.h>
20#include <linux/module.h>
21#include <linux/errno.h>
22#include <linux/percpu.h>
23#include <linux/mm.h>
24#include <linux/miscdevice.h>
25#include <linux/vmalloc.h>
26#include <linux/reboot.h>
27#include <linux/debugfs.h>
28#include <linux/highmem.h>
29#include <linux/file.h>
30#include <linux/syscore_ops.h>
31#include <linux/cpu.h>
32#include <linux/sched/signal.h>
33#include <linux/sched/mm.h>
34#include <linux/sched/stat.h>
35#include <linux/cpumask.h>
36#include <linux/smp.h>
37#include <linux/anon_inodes.h>
38#include <linux/profile.h>
39#include <linux/kvm_para.h>
40#include <linux/pagemap.h>
41#include <linux/mman.h>
42#include <linux/swap.h>
43#include <linux/bitops.h>
44#include <linux/spinlock.h>
45#include <linux/compat.h>
46#include <linux/srcu.h>
47#include <linux/hugetlb.h>
48#include <linux/slab.h>
49#include <linux/sort.h>
50#include <linux/bsearch.h>
51#include <linux/io.h>
52#include <linux/lockdep.h>
53#include <linux/kthread.h>
54#include <linux/suspend.h>
55
56#include <asm/processor.h>
57#include <asm/ioctl.h>
58#include <linux/uaccess.h>
59
60#include "coalesced_mmio.h"
61#include "async_pf.h"
62#include "kvm_mm.h"
63#include "vfio.h"
64
65#define CREATE_TRACE_POINTS
66#include <trace/events/kvm.h>
67
68#include <linux/kvm_dirty_ring.h>
69
70/* Worst case buffer size needed for holding an integer. */
71#define ITOA_MAX_LEN 12
72
73MODULE_AUTHOR("Qumranet");
74MODULE_LICENSE("GPL");
75
76/* Architectures should define their poll value according to the halt latency */
77unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
78module_param(halt_poll_ns, uint, 0644);
79EXPORT_SYMBOL_GPL(halt_poll_ns);
80
81/* Default doubles per-vcpu halt_poll_ns. */
82unsigned int halt_poll_ns_grow = 2;
83module_param(halt_poll_ns_grow, uint, 0644);
84EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
85
86/* The start value to grow halt_poll_ns from */
87unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
88module_param(halt_poll_ns_grow_start, uint, 0644);
89EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
90
91/* Default resets per-vcpu halt_poll_ns . */
92unsigned int halt_poll_ns_shrink;
93module_param(halt_poll_ns_shrink, uint, 0644);
94EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
95
96/*
97 * Ordering of locks:
98 *
99 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
100 */
101
102DEFINE_MUTEX(kvm_lock);
103static DEFINE_RAW_SPINLOCK(kvm_count_lock);
104LIST_HEAD(vm_list);
105
106static cpumask_var_t cpus_hardware_enabled;
107static int kvm_usage_count;
108static atomic_t hardware_enable_failed;
109
110static struct kmem_cache *kvm_vcpu_cache;
111
112static __read_mostly struct preempt_ops kvm_preempt_ops;
113static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
114
115struct dentry *kvm_debugfs_dir;
116EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
117
118static const struct file_operations stat_fops_per_vm;
119
120static struct file_operations kvm_chardev_ops;
121
122static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
123 unsigned long arg);
124#ifdef CONFIG_KVM_COMPAT
125static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
126 unsigned long arg);
127#define KVM_COMPAT(c) .compat_ioctl = (c)
128#else
129/*
130 * For architectures that don't implement a compat infrastructure,
131 * adopt a double line of defense:
132 * - Prevent a compat task from opening /dev/kvm
133 * - If the open has been done by a 64bit task, and the KVM fd
134 * passed to a compat task, let the ioctls fail.
135 */
136static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
137 unsigned long arg) { return -EINVAL; }
138
139static int kvm_no_compat_open(struct inode *inode, struct file *file)
140{
141 return is_compat_task() ? -ENODEV : 0;
142}
143#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
144 .open = kvm_no_compat_open
145#endif
146static int hardware_enable_all(void);
147static void hardware_disable_all(void);
148
149static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
150
151__visible bool kvm_rebooting;
152EXPORT_SYMBOL_GPL(kvm_rebooting);
153
154#define KVM_EVENT_CREATE_VM 0
155#define KVM_EVENT_DESTROY_VM 1
156static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
157static unsigned long long kvm_createvm_count;
158static unsigned long long kvm_active_vms;
159
160static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
161
162__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
163 unsigned long start, unsigned long end)
164{
165}
166
167__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
168{
169}
170
171bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
172{
173 /*
174 * The metadata used by is_zone_device_page() to determine whether or
175 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
176 * the device has been pinned, e.g. by get_user_pages(). WARN if the
177 * page_count() is zero to help detect bad usage of this helper.
178 */
179 if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
180 return false;
181
182 return is_zone_device_page(pfn_to_page(pfn));
183}
184
185bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
186{
187 /*
188 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
189 * perspective they are "normal" pages, albeit with slightly different
190 * usage rules.
191 */
192 if (pfn_valid(pfn))
193 return PageReserved(pfn_to_page(pfn)) &&
194 !is_zero_pfn(pfn) &&
195 !kvm_is_zone_device_pfn(pfn);
196
197 return true;
198}
199
200/*
201 * Switches to specified vcpu, until a matching vcpu_put()
202 */
203void vcpu_load(struct kvm_vcpu *vcpu)
204{
205 int cpu = get_cpu();
206
207 __this_cpu_write(kvm_running_vcpu, vcpu);
208 preempt_notifier_register(&vcpu->preempt_notifier);
209 kvm_arch_vcpu_load(vcpu, cpu);
210 put_cpu();
211}
212EXPORT_SYMBOL_GPL(vcpu_load);
213
214void vcpu_put(struct kvm_vcpu *vcpu)
215{
216 preempt_disable();
217 kvm_arch_vcpu_put(vcpu);
218 preempt_notifier_unregister(&vcpu->preempt_notifier);
219 __this_cpu_write(kvm_running_vcpu, NULL);
220 preempt_enable();
221}
222EXPORT_SYMBOL_GPL(vcpu_put);
223
224/* TODO: merge with kvm_arch_vcpu_should_kick */
225static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
226{
227 int mode = kvm_vcpu_exiting_guest_mode(vcpu);
228
229 /*
230 * We need to wait for the VCPU to reenable interrupts and get out of
231 * READING_SHADOW_PAGE_TABLES mode.
232 */
233 if (req & KVM_REQUEST_WAIT)
234 return mode != OUTSIDE_GUEST_MODE;
235
236 /*
237 * Need to kick a running VCPU, but otherwise there is nothing to do.
238 */
239 return mode == IN_GUEST_MODE;
240}
241
242static void ack_flush(void *_completed)
243{
244}
245
246static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
247{
248 if (cpumask_empty(cpus))
249 return false;
250
251 smp_call_function_many(cpus, ack_flush, NULL, wait);
252 return true;
253}
254
255static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
256 struct cpumask *tmp, int current_cpu)
257{
258 int cpu;
259
260 if (likely(!(req & KVM_REQUEST_NO_ACTION)))
261 __kvm_make_request(req, vcpu);
262
263 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
264 return;
265
266 /*
267 * Note, the vCPU could get migrated to a different pCPU at any point
268 * after kvm_request_needs_ipi(), which could result in sending an IPI
269 * to the previous pCPU. But, that's OK because the purpose of the IPI
270 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
271 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
272 * after this point is also OK, as the requirement is only that KVM wait
273 * for vCPUs that were reading SPTEs _before_ any changes were
274 * finalized. See kvm_vcpu_kick() for more details on handling requests.
275 */
276 if (kvm_request_needs_ipi(vcpu, req)) {
277 cpu = READ_ONCE(vcpu->cpu);
278 if (cpu != -1 && cpu != current_cpu)
279 __cpumask_set_cpu(cpu, tmp);
280 }
281}
282
283bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
284 unsigned long *vcpu_bitmap)
285{
286 struct kvm_vcpu *vcpu;
287 struct cpumask *cpus;
288 int i, me;
289 bool called;
290
291 me = get_cpu();
292
293 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
294 cpumask_clear(cpus);
295
296 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
297 vcpu = kvm_get_vcpu(kvm, i);
298 if (!vcpu)
299 continue;
300 kvm_make_vcpu_request(vcpu, req, cpus, me);
301 }
302
303 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
304 put_cpu();
305
306 return called;
307}
308
309bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
310 struct kvm_vcpu *except)
311{
312 struct kvm_vcpu *vcpu;
313 struct cpumask *cpus;
314 unsigned long i;
315 bool called;
316 int me;
317
318 me = get_cpu();
319
320 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
321 cpumask_clear(cpus);
322
323 kvm_for_each_vcpu(i, vcpu, kvm) {
324 if (vcpu == except)
325 continue;
326 kvm_make_vcpu_request(vcpu, req, cpus, me);
327 }
328
329 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
330 put_cpu();
331
332 return called;
333}
334
335bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
336{
337 return kvm_make_all_cpus_request_except(kvm, req, NULL);
338}
339EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
340
341#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
342void kvm_flush_remote_tlbs(struct kvm *kvm)
343{
344 ++kvm->stat.generic.remote_tlb_flush_requests;
345
346 /*
347 * We want to publish modifications to the page tables before reading
348 * mode. Pairs with a memory barrier in arch-specific code.
349 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
350 * and smp_mb in walk_shadow_page_lockless_begin/end.
351 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
352 *
353 * There is already an smp_mb__after_atomic() before
354 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
355 * barrier here.
356 */
357 if (!kvm_arch_flush_remote_tlb(kvm)
358 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
359 ++kvm->stat.generic.remote_tlb_flush;
360}
361EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
362#endif
363
364static void kvm_flush_shadow_all(struct kvm *kvm)
365{
366 kvm_arch_flush_shadow_all(kvm);
367 kvm_arch_guest_memory_reclaimed(kvm);
368}
369
370#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
371static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
372 gfp_t gfp_flags)
373{
374 gfp_flags |= mc->gfp_zero;
375
376 if (mc->kmem_cache)
377 return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
378 else
379 return (void *)__get_free_page(gfp_flags);
380}
381
382int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
383{
384 void *obj;
385
386 if (mc->nobjs >= min)
387 return 0;
388 while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
389 obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
390 if (!obj)
391 return mc->nobjs >= min ? 0 : -ENOMEM;
392 mc->objects[mc->nobjs++] = obj;
393 }
394 return 0;
395}
396
397int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
398{
399 return mc->nobjs;
400}
401
402void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
403{
404 while (mc->nobjs) {
405 if (mc->kmem_cache)
406 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
407 else
408 free_page((unsigned long)mc->objects[--mc->nobjs]);
409 }
410}
411
412void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
413{
414 void *p;
415
416 if (WARN_ON(!mc->nobjs))
417 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
418 else
419 p = mc->objects[--mc->nobjs];
420 BUG_ON(!p);
421 return p;
422}
423#endif
424
425static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
426{
427 mutex_init(&vcpu->mutex);
428 vcpu->cpu = -1;
429 vcpu->kvm = kvm;
430 vcpu->vcpu_id = id;
431 vcpu->pid = NULL;
432#ifndef __KVM_HAVE_ARCH_WQP
433 rcuwait_init(&vcpu->wait);
434#endif
435 kvm_async_pf_vcpu_init(vcpu);
436
437 kvm_vcpu_set_in_spin_loop(vcpu, false);
438 kvm_vcpu_set_dy_eligible(vcpu, false);
439 vcpu->preempted = false;
440 vcpu->ready = false;
441 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
442 vcpu->last_used_slot = NULL;
443}
444
445static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
446{
447 kvm_arch_vcpu_destroy(vcpu);
448 kvm_dirty_ring_free(&vcpu->dirty_ring);
449
450 /*
451 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
452 * the vcpu->pid pointer, and at destruction time all file descriptors
453 * are already gone.
454 */
455 put_pid(rcu_dereference_protected(vcpu->pid, 1));
456
457 free_page((unsigned long)vcpu->run);
458 kmem_cache_free(kvm_vcpu_cache, vcpu);
459}
460
461void kvm_destroy_vcpus(struct kvm *kvm)
462{
463 unsigned long i;
464 struct kvm_vcpu *vcpu;
465
466 kvm_for_each_vcpu(i, vcpu, kvm) {
467 kvm_vcpu_destroy(vcpu);
468 xa_erase(&kvm->vcpu_array, i);
469 }
470
471 atomic_set(&kvm->online_vcpus, 0);
472}
473EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
474
475#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
476static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
477{
478 return container_of(mn, struct kvm, mmu_notifier);
479}
480
481static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
482 struct mm_struct *mm,
483 unsigned long start, unsigned long end)
484{
485 struct kvm *kvm = mmu_notifier_to_kvm(mn);
486 int idx;
487
488 idx = srcu_read_lock(&kvm->srcu);
489 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
490 srcu_read_unlock(&kvm->srcu, idx);
491}
492
493typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
494
495typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
496 unsigned long end);
497
498typedef void (*on_unlock_fn_t)(struct kvm *kvm);
499
500struct kvm_hva_range {
501 unsigned long start;
502 unsigned long end;
503 pte_t pte;
504 hva_handler_t handler;
505 on_lock_fn_t on_lock;
506 on_unlock_fn_t on_unlock;
507 bool flush_on_ret;
508 bool may_block;
509};
510
511/*
512 * Use a dedicated stub instead of NULL to indicate that there is no callback
513 * function/handler. The compiler technically can't guarantee that a real
514 * function will have a non-zero address, and so it will generate code to
515 * check for !NULL, whereas comparing against a stub will be elided at compile
516 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
517 */
518static void kvm_null_fn(void)
519{
520
521}
522#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
523
524/* Iterate over each memslot intersecting [start, last] (inclusive) range */
525#define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
526 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
527 node; \
528 node = interval_tree_iter_next(node, start, last)) \
529
530static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
531 const struct kvm_hva_range *range)
532{
533 bool ret = false, locked = false;
534 struct kvm_gfn_range gfn_range;
535 struct kvm_memory_slot *slot;
536 struct kvm_memslots *slots;
537 int i, idx;
538
539 if (WARN_ON_ONCE(range->end <= range->start))
540 return 0;
541
542 /* A null handler is allowed if and only if on_lock() is provided. */
543 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
544 IS_KVM_NULL_FN(range->handler)))
545 return 0;
546
547 idx = srcu_read_lock(&kvm->srcu);
548
549 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
550 struct interval_tree_node *node;
551
552 slots = __kvm_memslots(kvm, i);
553 kvm_for_each_memslot_in_hva_range(node, slots,
554 range->start, range->end - 1) {
555 unsigned long hva_start, hva_end;
556
557 slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
558 hva_start = max(range->start, slot->userspace_addr);
559 hva_end = min(range->end, slot->userspace_addr +
560 (slot->npages << PAGE_SHIFT));
561
562 /*
563 * To optimize for the likely case where the address
564 * range is covered by zero or one memslots, don't
565 * bother making these conditional (to avoid writes on
566 * the second or later invocation of the handler).
567 */
568 gfn_range.pte = range->pte;
569 gfn_range.may_block = range->may_block;
570
571 /*
572 * {gfn(page) | page intersects with [hva_start, hva_end)} =
573 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
574 */
575 gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
576 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
577 gfn_range.slot = slot;
578
579 if (!locked) {
580 locked = true;
581 KVM_MMU_LOCK(kvm);
582 if (!IS_KVM_NULL_FN(range->on_lock))
583 range->on_lock(kvm, range->start, range->end);
584 if (IS_KVM_NULL_FN(range->handler))
585 break;
586 }
587 ret |= range->handler(kvm, &gfn_range);
588 }
589 }
590
591 if (range->flush_on_ret && ret)
592 kvm_flush_remote_tlbs(kvm);
593
594 if (locked) {
595 KVM_MMU_UNLOCK(kvm);
596 if (!IS_KVM_NULL_FN(range->on_unlock))
597 range->on_unlock(kvm);
598 }
599
600 srcu_read_unlock(&kvm->srcu, idx);
601
602 /* The notifiers are averse to booleans. :-( */
603 return (int)ret;
604}
605
606static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
607 unsigned long start,
608 unsigned long end,
609 pte_t pte,
610 hva_handler_t handler)
611{
612 struct kvm *kvm = mmu_notifier_to_kvm(mn);
613 const struct kvm_hva_range range = {
614 .start = start,
615 .end = end,
616 .pte = pte,
617 .handler = handler,
618 .on_lock = (void *)kvm_null_fn,
619 .on_unlock = (void *)kvm_null_fn,
620 .flush_on_ret = true,
621 .may_block = false,
622 };
623
624 return __kvm_handle_hva_range(kvm, &range);
625}
626
627static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
628 unsigned long start,
629 unsigned long end,
630 hva_handler_t handler)
631{
632 struct kvm *kvm = mmu_notifier_to_kvm(mn);
633 const struct kvm_hva_range range = {
634 .start = start,
635 .end = end,
636 .pte = __pte(0),
637 .handler = handler,
638 .on_lock = (void *)kvm_null_fn,
639 .on_unlock = (void *)kvm_null_fn,
640 .flush_on_ret = false,
641 .may_block = false,
642 };
643
644 return __kvm_handle_hva_range(kvm, &range);
645}
646static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
647 struct mm_struct *mm,
648 unsigned long address,
649 pte_t pte)
650{
651 struct kvm *kvm = mmu_notifier_to_kvm(mn);
652
653 trace_kvm_set_spte_hva(address);
654
655 /*
656 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
657 * If mmu_notifier_count is zero, then no in-progress invalidations,
658 * including this one, found a relevant memslot at start(); rechecking
659 * memslots here is unnecessary. Note, a false positive (count elevated
660 * by a different invalidation) is sub-optimal but functionally ok.
661 */
662 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
663 if (!READ_ONCE(kvm->mmu_notifier_count))
664 return;
665
666 kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
667}
668
669void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
670 unsigned long end)
671{
672 /*
673 * The count increase must become visible at unlock time as no
674 * spte can be established without taking the mmu_lock and
675 * count is also read inside the mmu_lock critical section.
676 */
677 kvm->mmu_notifier_count++;
678 if (likely(kvm->mmu_notifier_count == 1)) {
679 kvm->mmu_notifier_range_start = start;
680 kvm->mmu_notifier_range_end = end;
681 } else {
682 /*
683 * Fully tracking multiple concurrent ranges has diminishing
684 * returns. Keep things simple and just find the minimal range
685 * which includes the current and new ranges. As there won't be
686 * enough information to subtract a range after its invalidate
687 * completes, any ranges invalidated concurrently will
688 * accumulate and persist until all outstanding invalidates
689 * complete.
690 */
691 kvm->mmu_notifier_range_start =
692 min(kvm->mmu_notifier_range_start, start);
693 kvm->mmu_notifier_range_end =
694 max(kvm->mmu_notifier_range_end, end);
695 }
696}
697
698static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
699 const struct mmu_notifier_range *range)
700{
701 struct kvm *kvm = mmu_notifier_to_kvm(mn);
702 const struct kvm_hva_range hva_range = {
703 .start = range->start,
704 .end = range->end,
705 .pte = __pte(0),
706 .handler = kvm_unmap_gfn_range,
707 .on_lock = kvm_inc_notifier_count,
708 .on_unlock = kvm_arch_guest_memory_reclaimed,
709 .flush_on_ret = true,
710 .may_block = mmu_notifier_range_blockable(range),
711 };
712
713 trace_kvm_unmap_hva_range(range->start, range->end);
714
715 /*
716 * Prevent memslot modification between range_start() and range_end()
717 * so that conditionally locking provides the same result in both
718 * functions. Without that guarantee, the mmu_notifier_count
719 * adjustments will be imbalanced.
720 *
721 * Pairs with the decrement in range_end().
722 */
723 spin_lock(&kvm->mn_invalidate_lock);
724 kvm->mn_active_invalidate_count++;
725 spin_unlock(&kvm->mn_invalidate_lock);
726
727 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
728 hva_range.may_block);
729
730 __kvm_handle_hva_range(kvm, &hva_range);
731
732 return 0;
733}
734
735void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
736 unsigned long end)
737{
738 /*
739 * This sequence increase will notify the kvm page fault that
740 * the page that is going to be mapped in the spte could have
741 * been freed.
742 */
743 kvm->mmu_notifier_seq++;
744 smp_wmb();
745 /*
746 * The above sequence increase must be visible before the
747 * below count decrease, which is ensured by the smp_wmb above
748 * in conjunction with the smp_rmb in mmu_notifier_retry().
749 */
750 kvm->mmu_notifier_count--;
751}
752
753static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
754 const struct mmu_notifier_range *range)
755{
756 struct kvm *kvm = mmu_notifier_to_kvm(mn);
757 const struct kvm_hva_range hva_range = {
758 .start = range->start,
759 .end = range->end,
760 .pte = __pte(0),
761 .handler = (void *)kvm_null_fn,
762 .on_lock = kvm_dec_notifier_count,
763 .on_unlock = (void *)kvm_null_fn,
764 .flush_on_ret = false,
765 .may_block = mmu_notifier_range_blockable(range),
766 };
767 bool wake;
768
769 __kvm_handle_hva_range(kvm, &hva_range);
770
771 /* Pairs with the increment in range_start(). */
772 spin_lock(&kvm->mn_invalidate_lock);
773 wake = (--kvm->mn_active_invalidate_count == 0);
774 spin_unlock(&kvm->mn_invalidate_lock);
775
776 /*
777 * There can only be one waiter, since the wait happens under
778 * slots_lock.
779 */
780 if (wake)
781 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
782
783 BUG_ON(kvm->mmu_notifier_count < 0);
784}
785
786static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
787 struct mm_struct *mm,
788 unsigned long start,
789 unsigned long end)
790{
791 trace_kvm_age_hva(start, end);
792
793 return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
794}
795
796static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
797 struct mm_struct *mm,
798 unsigned long start,
799 unsigned long end)
800{
801 trace_kvm_age_hva(start, end);
802
803 /*
804 * Even though we do not flush TLB, this will still adversely
805 * affect performance on pre-Haswell Intel EPT, where there is
806 * no EPT Access Bit to clear so that we have to tear down EPT
807 * tables instead. If we find this unacceptable, we can always
808 * add a parameter to kvm_age_hva so that it effectively doesn't
809 * do anything on clear_young.
810 *
811 * Also note that currently we never issue secondary TLB flushes
812 * from clear_young, leaving this job up to the regular system
813 * cadence. If we find this inaccurate, we might come up with a
814 * more sophisticated heuristic later.
815 */
816 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
817}
818
819static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
820 struct mm_struct *mm,
821 unsigned long address)
822{
823 trace_kvm_test_age_hva(address);
824
825 return kvm_handle_hva_range_no_flush(mn, address, address + 1,
826 kvm_test_age_gfn);
827}
828
829static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
830 struct mm_struct *mm)
831{
832 struct kvm *kvm = mmu_notifier_to_kvm(mn);
833 int idx;
834
835 idx = srcu_read_lock(&kvm->srcu);
836 kvm_flush_shadow_all(kvm);
837 srcu_read_unlock(&kvm->srcu, idx);
838}
839
840static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
841 .invalidate_range = kvm_mmu_notifier_invalidate_range,
842 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
843 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
844 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
845 .clear_young = kvm_mmu_notifier_clear_young,
846 .test_young = kvm_mmu_notifier_test_young,
847 .change_pte = kvm_mmu_notifier_change_pte,
848 .release = kvm_mmu_notifier_release,
849};
850
851static int kvm_init_mmu_notifier(struct kvm *kvm)
852{
853 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
854 return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
855}
856
857#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
858
859static int kvm_init_mmu_notifier(struct kvm *kvm)
860{
861 return 0;
862}
863
864#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
865
866#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
867static int kvm_pm_notifier_call(struct notifier_block *bl,
868 unsigned long state,
869 void *unused)
870{
871 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
872
873 return kvm_arch_pm_notifier(kvm, state);
874}
875
876static void kvm_init_pm_notifier(struct kvm *kvm)
877{
878 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
879 /* Suspend KVM before we suspend ftrace, RCU, etc. */
880 kvm->pm_notifier.priority = INT_MAX;
881 register_pm_notifier(&kvm->pm_notifier);
882}
883
884static void kvm_destroy_pm_notifier(struct kvm *kvm)
885{
886 unregister_pm_notifier(&kvm->pm_notifier);
887}
888#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
889static void kvm_init_pm_notifier(struct kvm *kvm)
890{
891}
892
893static void kvm_destroy_pm_notifier(struct kvm *kvm)
894{
895}
896#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
897
898static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
899{
900 if (!memslot->dirty_bitmap)
901 return;
902
903 kvfree(memslot->dirty_bitmap);
904 memslot->dirty_bitmap = NULL;
905}
906
907/* This does not remove the slot from struct kvm_memslots data structures */
908static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
909{
910 kvm_destroy_dirty_bitmap(slot);
911
912 kvm_arch_free_memslot(kvm, slot);
913
914 kfree(slot);
915}
916
917static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
918{
919 struct hlist_node *idnode;
920 struct kvm_memory_slot *memslot;
921 int bkt;
922
923 /*
924 * The same memslot objects live in both active and inactive sets,
925 * arbitrarily free using index '1' so the second invocation of this
926 * function isn't operating over a structure with dangling pointers
927 * (even though this function isn't actually touching them).
928 */
929 if (!slots->node_idx)
930 return;
931
932 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
933 kvm_free_memslot(kvm, memslot);
934}
935
936static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
937{
938 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
939 case KVM_STATS_TYPE_INSTANT:
940 return 0444;
941 case KVM_STATS_TYPE_CUMULATIVE:
942 case KVM_STATS_TYPE_PEAK:
943 default:
944 return 0644;
945 }
946}
947
948
949static void kvm_destroy_vm_debugfs(struct kvm *kvm)
950{
951 int i;
952 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
953 kvm_vcpu_stats_header.num_desc;
954
955 if (IS_ERR(kvm->debugfs_dentry))
956 return;
957
958 debugfs_remove_recursive(kvm->debugfs_dentry);
959
960 if (kvm->debugfs_stat_data) {
961 for (i = 0; i < kvm_debugfs_num_entries; i++)
962 kfree(kvm->debugfs_stat_data[i]);
963 kfree(kvm->debugfs_stat_data);
964 }
965}
966
967static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
968{
969 static DEFINE_MUTEX(kvm_debugfs_lock);
970 struct dentry *dent;
971 char dir_name[ITOA_MAX_LEN * 2];
972 struct kvm_stat_data *stat_data;
973 const struct _kvm_stats_desc *pdesc;
974 int i, ret;
975 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
976 kvm_vcpu_stats_header.num_desc;
977
978 if (!debugfs_initialized())
979 return 0;
980
981 snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
982 mutex_lock(&kvm_debugfs_lock);
983 dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
984 if (dent) {
985 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
986 dput(dent);
987 mutex_unlock(&kvm_debugfs_lock);
988 return 0;
989 }
990 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
991 mutex_unlock(&kvm_debugfs_lock);
992 if (IS_ERR(dent))
993 return 0;
994
995 kvm->debugfs_dentry = dent;
996 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
997 sizeof(*kvm->debugfs_stat_data),
998 GFP_KERNEL_ACCOUNT);
999 if (!kvm->debugfs_stat_data)
1000 return -ENOMEM;
1001
1002 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
1003 pdesc = &kvm_vm_stats_desc[i];
1004 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1005 if (!stat_data)
1006 return -ENOMEM;
1007
1008 stat_data->kvm = kvm;
1009 stat_data->desc = pdesc;
1010 stat_data->kind = KVM_STAT_VM;
1011 kvm->debugfs_stat_data[i] = stat_data;
1012 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1013 kvm->debugfs_dentry, stat_data,
1014 &stat_fops_per_vm);
1015 }
1016
1017 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
1018 pdesc = &kvm_vcpu_stats_desc[i];
1019 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1020 if (!stat_data)
1021 return -ENOMEM;
1022
1023 stat_data->kvm = kvm;
1024 stat_data->desc = pdesc;
1025 stat_data->kind = KVM_STAT_VCPU;
1026 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
1027 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1028 kvm->debugfs_dentry, stat_data,
1029 &stat_fops_per_vm);
1030 }
1031
1032 ret = kvm_arch_create_vm_debugfs(kvm);
1033 if (ret) {
1034 kvm_destroy_vm_debugfs(kvm);
1035 return i;
1036 }
1037
1038 return 0;
1039}
1040
1041/*
1042 * Called after the VM is otherwise initialized, but just before adding it to
1043 * the vm_list.
1044 */
1045int __weak kvm_arch_post_init_vm(struct kvm *kvm)
1046{
1047 return 0;
1048}
1049
1050/*
1051 * Called just after removing the VM from the vm_list, but before doing any
1052 * other destruction.
1053 */
1054void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
1055{
1056}
1057
1058/*
1059 * Called after per-vm debugfs created. When called kvm->debugfs_dentry should
1060 * be setup already, so we can create arch-specific debugfs entries under it.
1061 * Cleanup should be automatic done in kvm_destroy_vm_debugfs() recursively, so
1062 * a per-arch destroy interface is not needed.
1063 */
1064int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
1065{
1066 return 0;
1067}
1068
1069static struct kvm *kvm_create_vm(unsigned long type)
1070{
1071 struct kvm *kvm = kvm_arch_alloc_vm();
1072 struct kvm_memslots *slots;
1073 int r = -ENOMEM;
1074 int i, j;
1075
1076 if (!kvm)
1077 return ERR_PTR(-ENOMEM);
1078
1079 KVM_MMU_LOCK_INIT(kvm);
1080 mmgrab(current->mm);
1081 kvm->mm = current->mm;
1082 kvm_eventfd_init(kvm);
1083 mutex_init(&kvm->lock);
1084 mutex_init(&kvm->irq_lock);
1085 mutex_init(&kvm->slots_lock);
1086 mutex_init(&kvm->slots_arch_lock);
1087 spin_lock_init(&kvm->mn_invalidate_lock);
1088 rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1089 xa_init(&kvm->vcpu_array);
1090
1091 INIT_LIST_HEAD(&kvm->gpc_list);
1092 spin_lock_init(&kvm->gpc_lock);
1093
1094 INIT_LIST_HEAD(&kvm->devices);
1095 kvm->max_vcpus = KVM_MAX_VCPUS;
1096
1097 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
1098
1099 /*
1100 * Force subsequent debugfs file creations to fail if the VM directory
1101 * is not created (by kvm_create_vm_debugfs()).
1102 */
1103 kvm->debugfs_dentry = ERR_PTR(-ENOENT);
1104
1105 if (init_srcu_struct(&kvm->srcu))
1106 goto out_err_no_srcu;
1107 if (init_srcu_struct(&kvm->irq_srcu))
1108 goto out_err_no_irq_srcu;
1109
1110 refcount_set(&kvm->users_count, 1);
1111 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1112 for (j = 0; j < 2; j++) {
1113 slots = &kvm->__memslots[i][j];
1114
1115 atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
1116 slots->hva_tree = RB_ROOT_CACHED;
1117 slots->gfn_tree = RB_ROOT;
1118 hash_init(slots->id_hash);
1119 slots->node_idx = j;
1120
1121 /* Generations must be different for each address space. */
1122 slots->generation = i;
1123 }
1124
1125 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1126 }
1127
1128 for (i = 0; i < KVM_NR_BUSES; i++) {
1129 rcu_assign_pointer(kvm->buses[i],
1130 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
1131 if (!kvm->buses[i])
1132 goto out_err_no_arch_destroy_vm;
1133 }
1134
1135 kvm->max_halt_poll_ns = halt_poll_ns;
1136
1137 r = kvm_arch_init_vm(kvm, type);
1138 if (r)
1139 goto out_err_no_arch_destroy_vm;
1140
1141 r = hardware_enable_all();
1142 if (r)
1143 goto out_err_no_disable;
1144
1145#ifdef CONFIG_HAVE_KVM_IRQFD
1146 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1147#endif
1148
1149 r = kvm_init_mmu_notifier(kvm);
1150 if (r)
1151 goto out_err_no_mmu_notifier;
1152
1153 r = kvm_arch_post_init_vm(kvm);
1154 if (r)
1155 goto out_err;
1156
1157 mutex_lock(&kvm_lock);
1158 list_add(&kvm->vm_list, &vm_list);
1159 mutex_unlock(&kvm_lock);
1160
1161 preempt_notifier_inc();
1162 kvm_init_pm_notifier(kvm);
1163
1164 /*
1165 * When the fd passed to this ioctl() is opened it pins the module,
1166 * but try_module_get() also prevents getting a reference if the module
1167 * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait").
1168 */
1169 if (!try_module_get(kvm_chardev_ops.owner)) {
1170 r = -ENODEV;
1171 goto out_err;
1172 }
1173
1174 return kvm;
1175
1176out_err:
1177#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1178 if (kvm->mmu_notifier.ops)
1179 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1180#endif
1181out_err_no_mmu_notifier:
1182 hardware_disable_all();
1183out_err_no_disable:
1184 kvm_arch_destroy_vm(kvm);
1185out_err_no_arch_destroy_vm:
1186 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1187 for (i = 0; i < KVM_NR_BUSES; i++)
1188 kfree(kvm_get_bus(kvm, i));
1189 cleanup_srcu_struct(&kvm->irq_srcu);
1190out_err_no_irq_srcu:
1191 cleanup_srcu_struct(&kvm->srcu);
1192out_err_no_srcu:
1193 kvm_arch_free_vm(kvm);
1194 mmdrop(current->mm);
1195 return ERR_PTR(r);
1196}
1197
1198static void kvm_destroy_devices(struct kvm *kvm)
1199{
1200 struct kvm_device *dev, *tmp;
1201
1202 /*
1203 * We do not need to take the kvm->lock here, because nobody else
1204 * has a reference to the struct kvm at this point and therefore
1205 * cannot access the devices list anyhow.
1206 */
1207 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1208 list_del(&dev->vm_node);
1209 dev->ops->destroy(dev);
1210 }
1211}
1212
1213static void kvm_destroy_vm(struct kvm *kvm)
1214{
1215 int i;
1216 struct mm_struct *mm = kvm->mm;
1217
1218 kvm_destroy_pm_notifier(kvm);
1219 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
1220 kvm_destroy_vm_debugfs(kvm);
1221 kvm_arch_sync_events(kvm);
1222 mutex_lock(&kvm_lock);
1223 list_del(&kvm->vm_list);
1224 mutex_unlock(&kvm_lock);
1225 kvm_arch_pre_destroy_vm(kvm);
1226
1227 kvm_free_irq_routing(kvm);
1228 for (i = 0; i < KVM_NR_BUSES; i++) {
1229 struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1230
1231 if (bus)
1232 kvm_io_bus_destroy(bus);
1233 kvm->buses[i] = NULL;
1234 }
1235 kvm_coalesced_mmio_free(kvm);
1236#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1237 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1238 /*
1239 * At this point, pending calls to invalidate_range_start()
1240 * have completed but no more MMU notifiers will run, so
1241 * mn_active_invalidate_count may remain unbalanced.
1242 * No threads can be waiting in install_new_memslots as the
1243 * last reference on KVM has been dropped, but freeing
1244 * memslots would deadlock without this manual intervention.
1245 */
1246 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1247 kvm->mn_active_invalidate_count = 0;
1248#else
1249 kvm_flush_shadow_all(kvm);
1250#endif
1251 kvm_arch_destroy_vm(kvm);
1252 kvm_destroy_devices(kvm);
1253 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1254 kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
1255 kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
1256 }
1257 cleanup_srcu_struct(&kvm->irq_srcu);
1258 cleanup_srcu_struct(&kvm->srcu);
1259 kvm_arch_free_vm(kvm);
1260 preempt_notifier_dec();
1261 hardware_disable_all();
1262 mmdrop(mm);
1263 module_put(kvm_chardev_ops.owner);
1264}
1265
1266void kvm_get_kvm(struct kvm *kvm)
1267{
1268 refcount_inc(&kvm->users_count);
1269}
1270EXPORT_SYMBOL_GPL(kvm_get_kvm);
1271
1272/*
1273 * Make sure the vm is not during destruction, which is a safe version of
1274 * kvm_get_kvm(). Return true if kvm referenced successfully, false otherwise.
1275 */
1276bool kvm_get_kvm_safe(struct kvm *kvm)
1277{
1278 return refcount_inc_not_zero(&kvm->users_count);
1279}
1280EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
1281
1282void kvm_put_kvm(struct kvm *kvm)
1283{
1284 if (refcount_dec_and_test(&kvm->users_count))
1285 kvm_destroy_vm(kvm);
1286}
1287EXPORT_SYMBOL_GPL(kvm_put_kvm);
1288
1289/*
1290 * Used to put a reference that was taken on behalf of an object associated
1291 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
1292 * of the new file descriptor fails and the reference cannot be transferred to
1293 * its final owner. In such cases, the caller is still actively using @kvm and
1294 * will fail miserably if the refcount unexpectedly hits zero.
1295 */
1296void kvm_put_kvm_no_destroy(struct kvm *kvm)
1297{
1298 WARN_ON(refcount_dec_and_test(&kvm->users_count));
1299}
1300EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
1301
1302static int kvm_vm_release(struct inode *inode, struct file *filp)
1303{
1304 struct kvm *kvm = filp->private_data;
1305
1306 kvm_irqfd_release(kvm);
1307
1308 kvm_put_kvm(kvm);
1309 return 0;
1310}
1311
1312/*
1313 * Allocation size is twice as large as the actual dirty bitmap size.
1314 * See kvm_vm_ioctl_get_dirty_log() why this is needed.
1315 */
1316static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1317{
1318 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1319
1320 memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1321 if (!memslot->dirty_bitmap)
1322 return -ENOMEM;
1323
1324 return 0;
1325}
1326
1327static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
1328{
1329 struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1330 int node_idx_inactive = active->node_idx ^ 1;
1331
1332 return &kvm->__memslots[as_id][node_idx_inactive];
1333}
1334
1335/*
1336 * Helper to get the address space ID when one of memslot pointers may be NULL.
1337 * This also serves as a sanity that at least one of the pointers is non-NULL,
1338 * and that their address space IDs don't diverge.
1339 */
1340static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
1341 struct kvm_memory_slot *b)
1342{
1343 if (WARN_ON_ONCE(!a && !b))
1344 return 0;
1345
1346 if (!a)
1347 return b->as_id;
1348 if (!b)
1349 return a->as_id;
1350
1351 WARN_ON_ONCE(a->as_id != b->as_id);
1352 return a->as_id;
1353}
1354
1355static void kvm_insert_gfn_node(struct kvm_memslots *slots,
1356 struct kvm_memory_slot *slot)
1357{
1358 struct rb_root *gfn_tree = &slots->gfn_tree;
1359 struct rb_node **node, *parent;
1360 int idx = slots->node_idx;
1361
1362 parent = NULL;
1363 for (node = &gfn_tree->rb_node; *node; ) {
1364 struct kvm_memory_slot *tmp;
1365
1366 tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1367 parent = *node;
1368 if (slot->base_gfn < tmp->base_gfn)
1369 node = &(*node)->rb_left;
1370 else if (slot->base_gfn > tmp->base_gfn)
1371 node = &(*node)->rb_right;
1372 else
1373 BUG();
1374 }
1375
1376 rb_link_node(&slot->gfn_node[idx], parent, node);
1377 rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1378}
1379
1380static void kvm_erase_gfn_node(struct kvm_memslots *slots,
1381 struct kvm_memory_slot *slot)
1382{
1383 rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
1384}
1385
1386static void kvm_replace_gfn_node(struct kvm_memslots *slots,
1387 struct kvm_memory_slot *old,
1388 struct kvm_memory_slot *new)
1389{
1390 int idx = slots->node_idx;
1391
1392 WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1393
1394 rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
1395 &slots->gfn_tree);
1396}
1397
1398/*
1399 * Replace @old with @new in the inactive memslots.
1400 *
1401 * With NULL @old this simply adds @new.
1402 * With NULL @new this simply removes @old.
1403 *
1404 * If @new is non-NULL its hva_node[slots_idx] range has to be set
1405 * appropriately.
1406 */
1407static void kvm_replace_memslot(struct kvm *kvm,
1408 struct kvm_memory_slot *old,
1409 struct kvm_memory_slot *new)
1410{
1411 int as_id = kvm_memslots_get_as_id(old, new);
1412 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1413 int idx = slots->node_idx;
1414
1415 if (old) {
1416 hash_del(&old->id_node[idx]);
1417 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
1418
1419 if ((long)old == atomic_long_read(&slots->last_used_slot))
1420 atomic_long_set(&slots->last_used_slot, (long)new);
1421
1422 if (!new) {
1423 kvm_erase_gfn_node(slots, old);
1424 return;
1425 }
1426 }
1427
1428 /*
1429 * Initialize @new's hva range. Do this even when replacing an @old
1430 * slot, kvm_copy_memslot() deliberately does not touch node data.
1431 */
1432 new->hva_node[idx].start = new->userspace_addr;
1433 new->hva_node[idx].last = new->userspace_addr +
1434 (new->npages << PAGE_SHIFT) - 1;
1435
1436 /*
1437 * (Re)Add the new memslot. There is no O(1) interval_tree_replace(),
1438 * hva_node needs to be swapped with remove+insert even though hva can't
1439 * change when replacing an existing slot.
1440 */
1441 hash_add(slots->id_hash, &new->id_node[idx], new->id);
1442 interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1443
1444 /*
1445 * If the memslot gfn is unchanged, rb_replace_node() can be used to
1446 * switch the node in the gfn tree instead of removing the old and
1447 * inserting the new as two separate operations. Replacement is a
1448 * single O(1) operation versus two O(log(n)) operations for
1449 * remove+insert.
1450 */
1451 if (old && old->base_gfn == new->base_gfn) {
1452 kvm_replace_gfn_node(slots, old, new);
1453 } else {
1454 if (old)
1455 kvm_erase_gfn_node(slots, old);
1456 kvm_insert_gfn_node(slots, new);
1457 }
1458}
1459
1460static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
1461{
1462 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1463
1464#ifdef __KVM_HAVE_READONLY_MEM
1465 valid_flags |= KVM_MEM_READONLY;
1466#endif
1467
1468 if (mem->flags & ~valid_flags)
1469 return -EINVAL;
1470
1471 return 0;
1472}
1473
1474static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1475{
1476 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1477
1478 /* Grab the generation from the activate memslots. */
1479 u64 gen = __kvm_memslots(kvm, as_id)->generation;
1480
1481 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1482 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1483
1484 /*
1485 * Do not store the new memslots while there are invalidations in
1486 * progress, otherwise the locking in invalidate_range_start and
1487 * invalidate_range_end will be unbalanced.
1488 */
1489 spin_lock(&kvm->mn_invalidate_lock);
1490 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1491 while (kvm->mn_active_invalidate_count) {
1492 set_current_state(TASK_UNINTERRUPTIBLE);
1493 spin_unlock(&kvm->mn_invalidate_lock);
1494 schedule();
1495 spin_lock(&kvm->mn_invalidate_lock);
1496 }
1497 finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1498 rcu_assign_pointer(kvm->memslots[as_id], slots);
1499 spin_unlock(&kvm->mn_invalidate_lock);
1500
1501 /*
1502 * Acquired in kvm_set_memslot. Must be released before synchronize
1503 * SRCU below in order to avoid deadlock with another thread
1504 * acquiring the slots_arch_lock in an srcu critical section.
1505 */
1506 mutex_unlock(&kvm->slots_arch_lock);
1507
1508 synchronize_srcu_expedited(&kvm->srcu);
1509
1510 /*
1511 * Increment the new memslot generation a second time, dropping the
1512 * update in-progress flag and incrementing the generation based on
1513 * the number of address spaces. This provides a unique and easily
1514 * identifiable generation number while the memslots are in flux.
1515 */
1516 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1517
1518 /*
1519 * Generations must be unique even across address spaces. We do not need
1520 * a global counter for that, instead the generation space is evenly split
1521 * across address spaces. For example, with two address spaces, address
1522 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1523 * use generations 1, 3, 5, ...
1524 */
1525 gen += KVM_ADDRESS_SPACE_NUM;
1526
1527 kvm_arch_memslots_updated(kvm, gen);
1528
1529 slots->generation = gen;
1530}
1531
1532static int kvm_prepare_memory_region(struct kvm *kvm,
1533 const struct kvm_memory_slot *old,
1534 struct kvm_memory_slot *new,
1535 enum kvm_mr_change change)
1536{
1537 int r;
1538
1539 /*
1540 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1541 * will be freed on "commit". If logging is enabled in both old and
1542 * new, reuse the existing bitmap. If logging is enabled only in the
1543 * new and KVM isn't using a ring buffer, allocate and initialize a
1544 * new bitmap.
1545 */
1546 if (change != KVM_MR_DELETE) {
1547 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1548 new->dirty_bitmap = NULL;
1549 else if (old && old->dirty_bitmap)
1550 new->dirty_bitmap = old->dirty_bitmap;
1551 else if (!kvm->dirty_ring_size) {
1552 r = kvm_alloc_dirty_bitmap(new);
1553 if (r)
1554 return r;
1555
1556 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1557 bitmap_set(new->dirty_bitmap, 0, new->npages);
1558 }
1559 }
1560
1561 r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1562
1563 /* Free the bitmap on failure if it was allocated above. */
1564 if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1565 kvm_destroy_dirty_bitmap(new);
1566
1567 return r;
1568}
1569
1570static void kvm_commit_memory_region(struct kvm *kvm,
1571 struct kvm_memory_slot *old,
1572 const struct kvm_memory_slot *new,
1573 enum kvm_mr_change change)
1574{
1575 /*
1576 * Update the total number of memslot pages before calling the arch
1577 * hook so that architectures can consume the result directly.
1578 */
1579 if (change == KVM_MR_DELETE)
1580 kvm->nr_memslot_pages -= old->npages;
1581 else if (change == KVM_MR_CREATE)
1582 kvm->nr_memslot_pages += new->npages;
1583
1584 kvm_arch_commit_memory_region(kvm, old, new, change);
1585
1586 switch (change) {
1587 case KVM_MR_CREATE:
1588 /* Nothing more to do. */
1589 break;
1590 case KVM_MR_DELETE:
1591 /* Free the old memslot and all its metadata. */
1592 kvm_free_memslot(kvm, old);
1593 break;
1594 case KVM_MR_MOVE:
1595 case KVM_MR_FLAGS_ONLY:
1596 /*
1597 * Free the dirty bitmap as needed; the below check encompasses
1598 * both the flags and whether a ring buffer is being used)
1599 */
1600 if (old->dirty_bitmap && !new->dirty_bitmap)
1601 kvm_destroy_dirty_bitmap(old);
1602
1603 /*
1604 * The final quirk. Free the detached, old slot, but only its
1605 * memory, not any metadata. Metadata, including arch specific
1606 * data, may be reused by @new.
1607 */
1608 kfree(old);
1609 break;
1610 default:
1611 BUG();
1612 }
1613}
1614
1615/*
1616 * Activate @new, which must be installed in the inactive slots by the caller,
1617 * by swapping the active slots and then propagating @new to @old once @old is
1618 * unreachable and can be safely modified.
1619 *
1620 * With NULL @old this simply adds @new to @active (while swapping the sets).
1621 * With NULL @new this simply removes @old from @active and frees it
1622 * (while also swapping the sets).
1623 */
1624static void kvm_activate_memslot(struct kvm *kvm,
1625 struct kvm_memory_slot *old,
1626 struct kvm_memory_slot *new)
1627{
1628 int as_id = kvm_memslots_get_as_id(old, new);
1629
1630 kvm_swap_active_memslots(kvm, as_id);
1631
1632 /* Propagate the new memslot to the now inactive memslots. */
1633 kvm_replace_memslot(kvm, old, new);
1634}
1635
1636static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1637 const struct kvm_memory_slot *src)
1638{
1639 dest->base_gfn = src->base_gfn;
1640 dest->npages = src->npages;
1641 dest->dirty_bitmap = src->dirty_bitmap;
1642 dest->arch = src->arch;
1643 dest->userspace_addr = src->userspace_addr;
1644 dest->flags = src->flags;
1645 dest->id = src->id;
1646 dest->as_id = src->as_id;
1647}
1648
1649static void kvm_invalidate_memslot(struct kvm *kvm,
1650 struct kvm_memory_slot *old,
1651 struct kvm_memory_slot *invalid_slot)
1652{
1653 /*
1654 * Mark the current slot INVALID. As with all memslot modifications,
1655 * this must be done on an unreachable slot to avoid modifying the
1656 * current slot in the active tree.
1657 */
1658 kvm_copy_memslot(invalid_slot, old);
1659 invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1660 kvm_replace_memslot(kvm, old, invalid_slot);
1661
1662 /*
1663 * Activate the slot that is now marked INVALID, but don't propagate
1664 * the slot to the now inactive slots. The slot is either going to be
1665 * deleted or recreated as a new slot.
1666 */
1667 kvm_swap_active_memslots(kvm, old->as_id);
1668
1669 /*
1670 * From this point no new shadow pages pointing to a deleted, or moved,
1671 * memslot will be created. Validation of sp->gfn happens in:
1672 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1673 * - kvm_is_visible_gfn (mmu_check_root)
1674 */
1675 kvm_arch_flush_shadow_memslot(kvm, old);
1676 kvm_arch_guest_memory_reclaimed(kvm);
1677
1678 /* Was released by kvm_swap_active_memslots, reacquire. */
1679 mutex_lock(&kvm->slots_arch_lock);
1680
1681 /*
1682 * Copy the arch-specific field of the newly-installed slot back to the
1683 * old slot as the arch data could have changed between releasing
1684 * slots_arch_lock in install_new_memslots() and re-acquiring the lock
1685 * above. Writers are required to retrieve memslots *after* acquiring
1686 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1687 */
1688 old->arch = invalid_slot->arch;
1689}
1690
1691static void kvm_create_memslot(struct kvm *kvm,
1692 struct kvm_memory_slot *new)
1693{
1694 /* Add the new memslot to the inactive set and activate. */
1695 kvm_replace_memslot(kvm, NULL, new);
1696 kvm_activate_memslot(kvm, NULL, new);
1697}
1698
1699static void kvm_delete_memslot(struct kvm *kvm,
1700 struct kvm_memory_slot *old,
1701 struct kvm_memory_slot *invalid_slot)
1702{
1703 /*
1704 * Remove the old memslot (in the inactive memslots) by passing NULL as
1705 * the "new" slot, and for the invalid version in the active slots.
1706 */
1707 kvm_replace_memslot(kvm, old, NULL);
1708 kvm_activate_memslot(kvm, invalid_slot, NULL);
1709}
1710
1711static void kvm_move_memslot(struct kvm *kvm,
1712 struct kvm_memory_slot *old,
1713 struct kvm_memory_slot *new,
1714 struct kvm_memory_slot *invalid_slot)
1715{
1716 /*
1717 * Replace the old memslot in the inactive slots, and then swap slots
1718 * and replace the current INVALID with the new as well.
1719 */
1720 kvm_replace_memslot(kvm, old, new);
1721 kvm_activate_memslot(kvm, invalid_slot, new);
1722}
1723
1724static void kvm_update_flags_memslot(struct kvm *kvm,
1725 struct kvm_memory_slot *old,
1726 struct kvm_memory_slot *new)
1727{
1728 /*
1729 * Similar to the MOVE case, but the slot doesn't need to be zapped as
1730 * an intermediate step. Instead, the old memslot is simply replaced
1731 * with a new, updated copy in both memslot sets.
1732 */
1733 kvm_replace_memslot(kvm, old, new);
1734 kvm_activate_memslot(kvm, old, new);
1735}
1736
1737static int kvm_set_memslot(struct kvm *kvm,
1738 struct kvm_memory_slot *old,
1739 struct kvm_memory_slot *new,
1740 enum kvm_mr_change change)
1741{
1742 struct kvm_memory_slot *invalid_slot;
1743 int r;
1744
1745 /*
1746 * Released in kvm_swap_active_memslots.
1747 *
1748 * Must be held from before the current memslots are copied until
1749 * after the new memslots are installed with rcu_assign_pointer,
1750 * then released before the synchronize srcu in kvm_swap_active_memslots.
1751 *
1752 * When modifying memslots outside of the slots_lock, must be held
1753 * before reading the pointer to the current memslots until after all
1754 * changes to those memslots are complete.
1755 *
1756 * These rules ensure that installing new memslots does not lose
1757 * changes made to the previous memslots.
1758 */
1759 mutex_lock(&kvm->slots_arch_lock);
1760
1761 /*
1762 * Invalidate the old slot if it's being deleted or moved. This is
1763 * done prior to actually deleting/moving the memslot to allow vCPUs to
1764 * continue running by ensuring there are no mappings or shadow pages
1765 * for the memslot when it is deleted/moved. Without pre-invalidation
1766 * (and without a lock), a window would exist between effecting the
1767 * delete/move and committing the changes in arch code where KVM or a
1768 * guest could access a non-existent memslot.
1769 *
1770 * Modifications are done on a temporary, unreachable slot. The old
1771 * slot needs to be preserved in case a later step fails and the
1772 * invalidation needs to be reverted.
1773 */
1774 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1775 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1776 if (!invalid_slot) {
1777 mutex_unlock(&kvm->slots_arch_lock);
1778 return -ENOMEM;
1779 }
1780 kvm_invalidate_memslot(kvm, old, invalid_slot);
1781 }
1782
1783 r = kvm_prepare_memory_region(kvm, old, new, change);
1784 if (r) {
1785 /*
1786 * For DELETE/MOVE, revert the above INVALID change. No
1787 * modifications required since the original slot was preserved
1788 * in the inactive slots. Changing the active memslots also
1789 * release slots_arch_lock.
1790 */
1791 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1792 kvm_activate_memslot(kvm, invalid_slot, old);
1793 kfree(invalid_slot);
1794 } else {
1795 mutex_unlock(&kvm->slots_arch_lock);
1796 }
1797 return r;
1798 }
1799
1800 /*
1801 * For DELETE and MOVE, the working slot is now active as the INVALID
1802 * version of the old slot. MOVE is particularly special as it reuses
1803 * the old slot and returns a copy of the old slot (in working_slot).
1804 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the
1805 * old slot is detached but otherwise preserved.
1806 */
1807 if (change == KVM_MR_CREATE)
1808 kvm_create_memslot(kvm, new);
1809 else if (change == KVM_MR_DELETE)
1810 kvm_delete_memslot(kvm, old, invalid_slot);
1811 else if (change == KVM_MR_MOVE)
1812 kvm_move_memslot(kvm, old, new, invalid_slot);
1813 else if (change == KVM_MR_FLAGS_ONLY)
1814 kvm_update_flags_memslot(kvm, old, new);
1815 else
1816 BUG();
1817
1818 /* Free the temporary INVALID slot used for DELETE and MOVE. */
1819 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1820 kfree(invalid_slot);
1821
1822 /*
1823 * No need to refresh new->arch, changes after dropping slots_arch_lock
1824 * will directly hit the final, active memslot. Architectures are
1825 * responsible for knowing that new->arch may be stale.
1826 */
1827 kvm_commit_memory_region(kvm, old, new, change);
1828
1829 return 0;
1830}
1831
1832static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
1833 gfn_t start, gfn_t end)
1834{
1835 struct kvm_memslot_iter iter;
1836
1837 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
1838 if (iter.slot->id != id)
1839 return true;
1840 }
1841
1842 return false;
1843}
1844
1845/*
1846 * Allocate some memory and give it an address in the guest physical address
1847 * space.
1848 *
1849 * Discontiguous memory is allowed, mostly for framebuffers.
1850 *
1851 * Must be called holding kvm->slots_lock for write.
1852 */
1853int __kvm_set_memory_region(struct kvm *kvm,
1854 const struct kvm_userspace_memory_region *mem)
1855{
1856 struct kvm_memory_slot *old, *new;
1857 struct kvm_memslots *slots;
1858 enum kvm_mr_change change;
1859 unsigned long npages;
1860 gfn_t base_gfn;
1861 int as_id, id;
1862 int r;
1863
1864 r = check_memory_region_flags(mem);
1865 if (r)
1866 return r;
1867
1868 as_id = mem->slot >> 16;
1869 id = (u16)mem->slot;
1870
1871 /* General sanity checks */
1872 if ((mem->memory_size & (PAGE_SIZE - 1)) ||
1873 (mem->memory_size != (unsigned long)mem->memory_size))
1874 return -EINVAL;
1875 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
1876 return -EINVAL;
1877 /* We can read the guest memory with __xxx_user() later on. */
1878 if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
1879 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
1880 !access_ok((void __user *)(unsigned long)mem->userspace_addr,
1881 mem->memory_size))
1882 return -EINVAL;
1883 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
1884 return -EINVAL;
1885 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
1886 return -EINVAL;
1887 if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
1888 return -EINVAL;
1889
1890 slots = __kvm_memslots(kvm, as_id);
1891
1892 /*
1893 * Note, the old memslot (and the pointer itself!) may be invalidated
1894 * and/or destroyed by kvm_set_memslot().
1895 */
1896 old = id_to_memslot(slots, id);
1897
1898 if (!mem->memory_size) {
1899 if (!old || !old->npages)
1900 return -EINVAL;
1901
1902 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
1903 return -EIO;
1904
1905 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
1906 }
1907
1908 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
1909 npages = (mem->memory_size >> PAGE_SHIFT);
1910
1911 if (!old || !old->npages) {
1912 change = KVM_MR_CREATE;
1913
1914 /*
1915 * To simplify KVM internals, the total number of pages across
1916 * all memslots must fit in an unsigned long.
1917 */
1918 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
1919 return -EINVAL;
1920 } else { /* Modify an existing slot. */
1921 if ((mem->userspace_addr != old->userspace_addr) ||
1922 (npages != old->npages) ||
1923 ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
1924 return -EINVAL;
1925
1926 if (base_gfn != old->base_gfn)
1927 change = KVM_MR_MOVE;
1928 else if (mem->flags != old->flags)
1929 change = KVM_MR_FLAGS_ONLY;
1930 else /* Nothing to change. */
1931 return 0;
1932 }
1933
1934 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
1935 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
1936 return -EEXIST;
1937
1938 /* Allocate a slot that will persist in the memslot. */
1939 new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
1940 if (!new)
1941 return -ENOMEM;
1942
1943 new->as_id = as_id;
1944 new->id = id;
1945 new->base_gfn = base_gfn;
1946 new->npages = npages;
1947 new->flags = mem->flags;
1948 new->userspace_addr = mem->userspace_addr;
1949
1950 r = kvm_set_memslot(kvm, old, new, change);
1951 if (r)
1952 kfree(new);
1953 return r;
1954}
1955EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
1956
1957int kvm_set_memory_region(struct kvm *kvm,
1958 const struct kvm_userspace_memory_region *mem)
1959{
1960 int r;
1961
1962 mutex_lock(&kvm->slots_lock);
1963 r = __kvm_set_memory_region(kvm, mem);
1964 mutex_unlock(&kvm->slots_lock);
1965 return r;
1966}
1967EXPORT_SYMBOL_GPL(kvm_set_memory_region);
1968
1969static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
1970 struct kvm_userspace_memory_region *mem)
1971{
1972 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
1973 return -EINVAL;
1974
1975 return kvm_set_memory_region(kvm, mem);
1976}
1977
1978#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
1979/**
1980 * kvm_get_dirty_log - get a snapshot of dirty pages
1981 * @kvm: pointer to kvm instance
1982 * @log: slot id and address to which we copy the log
1983 * @is_dirty: set to '1' if any dirty pages were found
1984 * @memslot: set to the associated memslot, always valid on success
1985 */
1986int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1987 int *is_dirty, struct kvm_memory_slot **memslot)
1988{
1989 struct kvm_memslots *slots;
1990 int i, as_id, id;
1991 unsigned long n;
1992 unsigned long any = 0;
1993
1994 /* Dirty ring tracking is exclusive to dirty log tracking */
1995 if (kvm->dirty_ring_size)
1996 return -ENXIO;
1997
1998 *memslot = NULL;
1999 *is_dirty = 0;
2000
2001 as_id = log->slot >> 16;
2002 id = (u16)log->slot;
2003 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2004 return -EINVAL;
2005
2006 slots = __kvm_memslots(kvm, as_id);
2007 *memslot = id_to_memslot(slots, id);
2008 if (!(*memslot) || !(*memslot)->dirty_bitmap)
2009 return -ENOENT;
2010
2011 kvm_arch_sync_dirty_log(kvm, *memslot);
2012
2013 n = kvm_dirty_bitmap_bytes(*memslot);
2014
2015 for (i = 0; !any && i < n/sizeof(long); ++i)
2016 any = (*memslot)->dirty_bitmap[i];
2017
2018 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2019 return -EFAULT;
2020
2021 if (any)
2022 *is_dirty = 1;
2023 return 0;
2024}
2025EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
2026
2027#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2028/**
2029 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2030 * and reenable dirty page tracking for the corresponding pages.
2031 * @kvm: pointer to kvm instance
2032 * @log: slot id and address to which we copy the log
2033 *
2034 * We need to keep it in mind that VCPU threads can write to the bitmap
2035 * concurrently. So, to avoid losing track of dirty pages we keep the
2036 * following order:
2037 *
2038 * 1. Take a snapshot of the bit and clear it if needed.
2039 * 2. Write protect the corresponding page.
2040 * 3. Copy the snapshot to the userspace.
2041 * 4. Upon return caller flushes TLB's if needed.
2042 *
2043 * Between 2 and 4, the guest may write to the page using the remaining TLB
2044 * entry. This is not a problem because the page is reported dirty using
2045 * the snapshot taken before and step 4 ensures that writes done after
2046 * exiting to userspace will be logged for the next call.
2047 *
2048 */
2049static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2050{
2051 struct kvm_memslots *slots;
2052 struct kvm_memory_slot *memslot;
2053 int i, as_id, id;
2054 unsigned long n;
2055 unsigned long *dirty_bitmap;
2056 unsigned long *dirty_bitmap_buffer;
2057 bool flush;
2058
2059 /* Dirty ring tracking is exclusive to dirty log tracking */
2060 if (kvm->dirty_ring_size)
2061 return -ENXIO;
2062
2063 as_id = log->slot >> 16;
2064 id = (u16)log->slot;
2065 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2066 return -EINVAL;
2067
2068 slots = __kvm_memslots(kvm, as_id);
2069 memslot = id_to_memslot(slots, id);
2070 if (!memslot || !memslot->dirty_bitmap)
2071 return -ENOENT;
2072
2073 dirty_bitmap = memslot->dirty_bitmap;
2074
2075 kvm_arch_sync_dirty_log(kvm, memslot);
2076
2077 n = kvm_dirty_bitmap_bytes(memslot);
2078 flush = false;
2079 if (kvm->manual_dirty_log_protect) {
2080 /*
2081 * Unlike kvm_get_dirty_log, we always return false in *flush,
2082 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
2083 * is some code duplication between this function and
2084 * kvm_get_dirty_log, but hopefully all architecture
2085 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
2086 * can be eliminated.
2087 */
2088 dirty_bitmap_buffer = dirty_bitmap;
2089 } else {
2090 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2091 memset(dirty_bitmap_buffer, 0, n);
2092
2093 KVM_MMU_LOCK(kvm);
2094 for (i = 0; i < n / sizeof(long); i++) {
2095 unsigned long mask;
2096 gfn_t offset;
2097
2098 if (!dirty_bitmap[i])
2099 continue;
2100
2101 flush = true;
2102 mask = xchg(&dirty_bitmap[i], 0);
2103 dirty_bitmap_buffer[i] = mask;
2104
2105 offset = i * BITS_PER_LONG;
2106 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2107 offset, mask);
2108 }
2109 KVM_MMU_UNLOCK(kvm);
2110 }
2111
2112 if (flush)
2113 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
2114
2115 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2116 return -EFAULT;
2117 return 0;
2118}
2119
2120
2121/**
2122 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2123 * @kvm: kvm instance
2124 * @log: slot id and address to which we copy the log
2125 *
2126 * Steps 1-4 below provide general overview of dirty page logging. See
2127 * kvm_get_dirty_log_protect() function description for additional details.
2128 *
2129 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
2130 * always flush the TLB (step 4) even if previous step failed and the dirty
2131 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
2132 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
2133 * writes will be marked dirty for next log read.
2134 *
2135 * 1. Take a snapshot of the bit and clear it if needed.
2136 * 2. Write protect the corresponding page.
2137 * 3. Copy the snapshot to the userspace.
2138 * 4. Flush TLB's if needed.
2139 */
2140static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2141 struct kvm_dirty_log *log)
2142{
2143 int r;
2144
2145 mutex_lock(&kvm->slots_lock);
2146
2147 r = kvm_get_dirty_log_protect(kvm, log);
2148
2149 mutex_unlock(&kvm->slots_lock);
2150 return r;
2151}
2152
2153/**
2154 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2155 * and reenable dirty page tracking for the corresponding pages.
2156 * @kvm: pointer to kvm instance
2157 * @log: slot id and address from which to fetch the bitmap of dirty pages
2158 */
2159static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2160 struct kvm_clear_dirty_log *log)
2161{
2162 struct kvm_memslots *slots;
2163 struct kvm_memory_slot *memslot;
2164 int as_id, id;
2165 gfn_t offset;
2166 unsigned long i, n;
2167 unsigned long *dirty_bitmap;
2168 unsigned long *dirty_bitmap_buffer;
2169 bool flush;
2170
2171 /* Dirty ring tracking is exclusive to dirty log tracking */
2172 if (kvm->dirty_ring_size)
2173 return -ENXIO;
2174
2175 as_id = log->slot >> 16;
2176 id = (u16)log->slot;
2177 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2178 return -EINVAL;
2179
2180 if (log->first_page & 63)
2181 return -EINVAL;
2182
2183 slots = __kvm_memslots(kvm, as_id);
2184 memslot = id_to_memslot(slots, id);
2185 if (!memslot || !memslot->dirty_bitmap)
2186 return -ENOENT;
2187
2188 dirty_bitmap = memslot->dirty_bitmap;
2189
2190 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2191
2192 if (log->first_page > memslot->npages ||
2193 log->num_pages > memslot->npages - log->first_page ||
2194 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2195 return -EINVAL;
2196
2197 kvm_arch_sync_dirty_log(kvm, memslot);
2198
2199 flush = false;
2200 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2201 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2202 return -EFAULT;
2203
2204 KVM_MMU_LOCK(kvm);
2205 for (offset = log->first_page, i = offset / BITS_PER_LONG,
2206 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2207 i++, offset += BITS_PER_LONG) {
2208 unsigned long mask = *dirty_bitmap_buffer++;
2209 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2210 if (!mask)
2211 continue;
2212
2213 mask &= atomic_long_fetch_andnot(mask, p);
2214
2215 /*
2216 * mask contains the bits that really have been cleared. This
2217 * never includes any bits beyond the length of the memslot (if
2218 * the length is not aligned to 64 pages), therefore it is not
2219 * a problem if userspace sets them in log->dirty_bitmap.
2220 */
2221 if (mask) {
2222 flush = true;
2223 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2224 offset, mask);
2225 }
2226 }
2227 KVM_MMU_UNLOCK(kvm);
2228
2229 if (flush)
2230 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
2231
2232 return 0;
2233}
2234
2235static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2236 struct kvm_clear_dirty_log *log)
2237{
2238 int r;
2239
2240 mutex_lock(&kvm->slots_lock);
2241
2242 r = kvm_clear_dirty_log_protect(kvm, log);
2243
2244 mutex_unlock(&kvm->slots_lock);
2245 return r;
2246}
2247#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2248
2249struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2250{
2251 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2252}
2253EXPORT_SYMBOL_GPL(gfn_to_memslot);
2254
2255struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2256{
2257 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2258 u64 gen = slots->generation;
2259 struct kvm_memory_slot *slot;
2260
2261 /*
2262 * This also protects against using a memslot from a different address space,
2263 * since different address spaces have different generation numbers.
2264 */
2265 if (unlikely(gen != vcpu->last_used_slot_gen)) {
2266 vcpu->last_used_slot = NULL;
2267 vcpu->last_used_slot_gen = gen;
2268 }
2269
2270 slot = try_get_memslot(vcpu->last_used_slot, gfn);
2271 if (slot)
2272 return slot;
2273
2274 /*
2275 * Fall back to searching all memslots. We purposely use
2276 * search_memslots() instead of __gfn_to_memslot() to avoid
2277 * thrashing the VM-wide last_used_slot in kvm_memslots.
2278 */
2279 slot = search_memslots(slots, gfn, false);
2280 if (slot) {
2281 vcpu->last_used_slot = slot;
2282 return slot;
2283 }
2284
2285 return NULL;
2286}
2287
2288bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2289{
2290 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2291
2292 return kvm_is_visible_memslot(memslot);
2293}
2294EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
2295
2296bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2297{
2298 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2299
2300 return kvm_is_visible_memslot(memslot);
2301}
2302EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
2303
2304unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2305{
2306 struct vm_area_struct *vma;
2307 unsigned long addr, size;
2308
2309 size = PAGE_SIZE;
2310
2311 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2312 if (kvm_is_error_hva(addr))
2313 return PAGE_SIZE;
2314
2315 mmap_read_lock(current->mm);
2316 vma = find_vma(current->mm, addr);
2317 if (!vma)
2318 goto out;
2319
2320 size = vma_kernel_pagesize(vma);
2321
2322out:
2323 mmap_read_unlock(current->mm);
2324
2325 return size;
2326}
2327
2328static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2329{
2330 return slot->flags & KVM_MEM_READONLY;
2331}
2332
2333static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2334 gfn_t *nr_pages, bool write)
2335{
2336 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2337 return KVM_HVA_ERR_BAD;
2338
2339 if (memslot_is_readonly(slot) && write)
2340 return KVM_HVA_ERR_RO_BAD;
2341
2342 if (nr_pages)
2343 *nr_pages = slot->npages - (gfn - slot->base_gfn);
2344
2345 return __gfn_to_hva_memslot(slot, gfn);
2346}
2347
2348static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2349 gfn_t *nr_pages)
2350{
2351 return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2352}
2353
2354unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2355 gfn_t gfn)
2356{
2357 return gfn_to_hva_many(slot, gfn, NULL);
2358}
2359EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2360
2361unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2362{
2363 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2364}
2365EXPORT_SYMBOL_GPL(gfn_to_hva);
2366
2367unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2368{
2369 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2370}
2371EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
2372
2373/*
2374 * Return the hva of a @gfn and the R/W attribute if possible.
2375 *
2376 * @slot: the kvm_memory_slot which contains @gfn
2377 * @gfn: the gfn to be translated
2378 * @writable: used to return the read/write attribute of the @slot if the hva
2379 * is valid and @writable is not NULL
2380 */
2381unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2382 gfn_t gfn, bool *writable)
2383{
2384 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2385
2386 if (!kvm_is_error_hva(hva) && writable)
2387 *writable = !memslot_is_readonly(slot);
2388
2389 return hva;
2390}
2391
2392unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2393{
2394 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2395
2396 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2397}
2398
2399unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2400{
2401 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2402
2403 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2404}
2405
2406static inline int check_user_page_hwpoison(unsigned long addr)
2407{
2408 int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2409
2410 rc = get_user_pages(addr, 1, flags, NULL, NULL);
2411 return rc == -EHWPOISON;
2412}
2413
2414/*
2415 * The fast path to get the writable pfn which will be stored in @pfn,
2416 * true indicates success, otherwise false is returned. It's also the
2417 * only part that runs if we can in atomic context.
2418 */
2419static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2420 bool *writable, kvm_pfn_t *pfn)
2421{
2422 struct page *page[1];
2423
2424 /*
2425 * Fast pin a writable pfn only if it is a write fault request
2426 * or the caller allows to map a writable pfn for a read fault
2427 * request.
2428 */
2429 if (!(write_fault || writable))
2430 return false;
2431
2432 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2433 *pfn = page_to_pfn(page[0]);
2434
2435 if (writable)
2436 *writable = true;
2437 return true;
2438 }
2439
2440 return false;
2441}
2442
2443/*
2444 * The slow path to get the pfn of the specified host virtual address,
2445 * 1 indicates success, -errno is returned if error is detected.
2446 */
2447static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2448 bool *writable, kvm_pfn_t *pfn)
2449{
2450 unsigned int flags = FOLL_HWPOISON;
2451 struct page *page;
2452 int npages = 0;
2453
2454 might_sleep();
2455
2456 if (writable)
2457 *writable = write_fault;
2458
2459 if (write_fault)
2460 flags |= FOLL_WRITE;
2461 if (async)
2462 flags |= FOLL_NOWAIT;
2463
2464 npages = get_user_pages_unlocked(addr, 1, &page, flags);
2465 if (npages != 1)
2466 return npages;
2467
2468 /* map read fault as writable if possible */
2469 if (unlikely(!write_fault) && writable) {
2470 struct page *wpage;
2471
2472 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2473 *writable = true;
2474 put_page(page);
2475 page = wpage;
2476 }
2477 }
2478 *pfn = page_to_pfn(page);
2479 return npages;
2480}
2481
2482static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2483{
2484 if (unlikely(!(vma->vm_flags & VM_READ)))
2485 return false;
2486
2487 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2488 return false;
2489
2490 return true;
2491}
2492
2493static int kvm_try_get_pfn(kvm_pfn_t pfn)
2494{
2495 if (kvm_is_reserved_pfn(pfn))
2496 return 1;
2497 return get_page_unless_zero(pfn_to_page(pfn));
2498}
2499
2500static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2501 unsigned long addr, bool write_fault,
2502 bool *writable, kvm_pfn_t *p_pfn)
2503{
2504 kvm_pfn_t pfn;
2505 pte_t *ptep;
2506 spinlock_t *ptl;
2507 int r;
2508
2509 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2510 if (r) {
2511 /*
2512 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2513 * not call the fault handler, so do it here.
2514 */
2515 bool unlocked = false;
2516 r = fixup_user_fault(current->mm, addr,
2517 (write_fault ? FAULT_FLAG_WRITE : 0),
2518 &unlocked);
2519 if (unlocked)
2520 return -EAGAIN;
2521 if (r)
2522 return r;
2523
2524 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2525 if (r)
2526 return r;
2527 }
2528
2529 if (write_fault && !pte_write(*ptep)) {
2530 pfn = KVM_PFN_ERR_RO_FAULT;
2531 goto out;
2532 }
2533
2534 if (writable)
2535 *writable = pte_write(*ptep);
2536 pfn = pte_pfn(*ptep);
2537
2538 /*
2539 * Get a reference here because callers of *hva_to_pfn* and
2540 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
2541 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
2542 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
2543 * simply do nothing for reserved pfns.
2544 *
2545 * Whoever called remap_pfn_range is also going to call e.g.
2546 * unmap_mapping_range before the underlying pages are freed,
2547 * causing a call to our MMU notifier.
2548 *
2549 * Certain IO or PFNMAP mappings can be backed with valid
2550 * struct pages, but be allocated without refcounting e.g.,
2551 * tail pages of non-compound higher order allocations, which
2552 * would then underflow the refcount when the caller does the
2553 * required put_page. Don't allow those pages here.
2554 */
2555 if (!kvm_try_get_pfn(pfn))
2556 r = -EFAULT;
2557
2558out:
2559 pte_unmap_unlock(ptep, ptl);
2560 *p_pfn = pfn;
2561
2562 return r;
2563}
2564
2565/*
2566 * Pin guest page in memory and return its pfn.
2567 * @addr: host virtual address which maps memory to the guest
2568 * @atomic: whether this function can sleep
2569 * @async: whether this function need to wait IO complete if the
2570 * host page is not in the memory
2571 * @write_fault: whether we should get a writable host page
2572 * @writable: whether it allows to map a writable host page for !@write_fault
2573 *
2574 * The function will map a writable host page for these two cases:
2575 * 1): @write_fault = true
2576 * 2): @write_fault = false && @writable, @writable will tell the caller
2577 * whether the mapping is writable.
2578 */
2579kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
2580 bool write_fault, bool *writable)
2581{
2582 struct vm_area_struct *vma;
2583 kvm_pfn_t pfn = 0;
2584 int npages, r;
2585
2586 /* we can do it either atomically or asynchronously, not both */
2587 BUG_ON(atomic && async);
2588
2589 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2590 return pfn;
2591
2592 if (atomic)
2593 return KVM_PFN_ERR_FAULT;
2594
2595 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
2596 if (npages == 1)
2597 return pfn;
2598
2599 mmap_read_lock(current->mm);
2600 if (npages == -EHWPOISON ||
2601 (!async && check_user_page_hwpoison(addr))) {
2602 pfn = KVM_PFN_ERR_HWPOISON;
2603 goto exit;
2604 }
2605
2606retry:
2607 vma = vma_lookup(current->mm, addr);
2608
2609 if (vma == NULL)
2610 pfn = KVM_PFN_ERR_FAULT;
2611 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2612 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
2613 if (r == -EAGAIN)
2614 goto retry;
2615 if (r < 0)
2616 pfn = KVM_PFN_ERR_FAULT;
2617 } else {
2618 if (async && vma_is_valid(vma, write_fault))
2619 *async = true;
2620 pfn = KVM_PFN_ERR_FAULT;
2621 }
2622exit:
2623 mmap_read_unlock(current->mm);
2624 return pfn;
2625}
2626
2627kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
2628 bool atomic, bool *async, bool write_fault,
2629 bool *writable, hva_t *hva)
2630{
2631 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2632
2633 if (hva)
2634 *hva = addr;
2635
2636 if (addr == KVM_HVA_ERR_RO_BAD) {
2637 if (writable)
2638 *writable = false;
2639 return KVM_PFN_ERR_RO_FAULT;
2640 }
2641
2642 if (kvm_is_error_hva(addr)) {
2643 if (writable)
2644 *writable = false;
2645 return KVM_PFN_NOSLOT;
2646 }
2647
2648 /* Do not map writable pfn in the readonly memslot. */
2649 if (writable && memslot_is_readonly(slot)) {
2650 *writable = false;
2651 writable = NULL;
2652 }
2653
2654 return hva_to_pfn(addr, atomic, async, write_fault,
2655 writable);
2656}
2657EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
2658
2659kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
2660 bool *writable)
2661{
2662 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
2663 write_fault, writable, NULL);
2664}
2665EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
2666
2667kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
2668{
2669 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
2670}
2671EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
2672
2673kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
2674{
2675 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL);
2676}
2677EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
2678
2679kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
2680{
2681 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2682}
2683EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
2684
2685kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
2686{
2687 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
2688}
2689EXPORT_SYMBOL_GPL(gfn_to_pfn);
2690
2691kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2692{
2693 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2694}
2695EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
2696
2697int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2698 struct page **pages, int nr_pages)
2699{
2700 unsigned long addr;
2701 gfn_t entry = 0;
2702
2703 addr = gfn_to_hva_many(slot, gfn, &entry);
2704 if (kvm_is_error_hva(addr))
2705 return -1;
2706
2707 if (entry < nr_pages)
2708 return 0;
2709
2710 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
2711}
2712EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
2713
2714static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
2715{
2716 if (is_error_noslot_pfn(pfn))
2717 return KVM_ERR_PTR_BAD_PAGE;
2718
2719 if (kvm_is_reserved_pfn(pfn)) {
2720 WARN_ON(1);
2721 return KVM_ERR_PTR_BAD_PAGE;
2722 }
2723
2724 return pfn_to_page(pfn);
2725}
2726
2727struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2728{
2729 kvm_pfn_t pfn;
2730
2731 pfn = gfn_to_pfn(kvm, gfn);
2732
2733 return kvm_pfn_to_page(pfn);
2734}
2735EXPORT_SYMBOL_GPL(gfn_to_page);
2736
2737void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
2738{
2739 if (pfn == 0)
2740 return;
2741
2742 if (dirty)
2743 kvm_release_pfn_dirty(pfn);
2744 else
2745 kvm_release_pfn_clean(pfn);
2746}
2747
2748int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
2749{
2750 kvm_pfn_t pfn;
2751 void *hva = NULL;
2752 struct page *page = KVM_UNMAPPED_PAGE;
2753
2754 if (!map)
2755 return -EINVAL;
2756
2757 pfn = gfn_to_pfn(vcpu->kvm, gfn);
2758 if (is_error_noslot_pfn(pfn))
2759 return -EINVAL;
2760
2761 if (pfn_valid(pfn)) {
2762 page = pfn_to_page(pfn);
2763 hva = kmap(page);
2764#ifdef CONFIG_HAS_IOMEM
2765 } else {
2766 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
2767#endif
2768 }
2769
2770 if (!hva)
2771 return -EFAULT;
2772
2773 map->page = page;
2774 map->hva = hva;
2775 map->pfn = pfn;
2776 map->gfn = gfn;
2777
2778 return 0;
2779}
2780EXPORT_SYMBOL_GPL(kvm_vcpu_map);
2781
2782void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
2783{
2784 if (!map)
2785 return;
2786
2787 if (!map->hva)
2788 return;
2789
2790 if (map->page != KVM_UNMAPPED_PAGE)
2791 kunmap(map->page);
2792#ifdef CONFIG_HAS_IOMEM
2793 else
2794 memunmap(map->hva);
2795#endif
2796
2797 if (dirty)
2798 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
2799
2800 kvm_release_pfn(map->pfn, dirty);
2801
2802 map->hva = NULL;
2803 map->page = NULL;
2804}
2805EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
2806
2807struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2808{
2809 kvm_pfn_t pfn;
2810
2811 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
2812
2813 return kvm_pfn_to_page(pfn);
2814}
2815EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
2816
2817void kvm_release_page_clean(struct page *page)
2818{
2819 WARN_ON(is_error_page(page));
2820
2821 kvm_release_pfn_clean(page_to_pfn(page));
2822}
2823EXPORT_SYMBOL_GPL(kvm_release_page_clean);
2824
2825void kvm_release_pfn_clean(kvm_pfn_t pfn)
2826{
2827 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
2828 put_page(pfn_to_page(pfn));
2829}
2830EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
2831
2832void kvm_release_page_dirty(struct page *page)
2833{
2834 WARN_ON(is_error_page(page));
2835
2836 kvm_release_pfn_dirty(page_to_pfn(page));
2837}
2838EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
2839
2840void kvm_release_pfn_dirty(kvm_pfn_t pfn)
2841{
2842 kvm_set_pfn_dirty(pfn);
2843 kvm_release_pfn_clean(pfn);
2844}
2845EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
2846
2847void kvm_set_pfn_dirty(kvm_pfn_t pfn)
2848{
2849 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2850 SetPageDirty(pfn_to_page(pfn));
2851}
2852EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
2853
2854void kvm_set_pfn_accessed(kvm_pfn_t pfn)
2855{
2856 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2857 mark_page_accessed(pfn_to_page(pfn));
2858}
2859EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
2860
2861static int next_segment(unsigned long len, int offset)
2862{
2863 if (len > PAGE_SIZE - offset)
2864 return PAGE_SIZE - offset;
2865 else
2866 return len;
2867}
2868
2869static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
2870 void *data, int offset, int len)
2871{
2872 int r;
2873 unsigned long addr;
2874
2875 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2876 if (kvm_is_error_hva(addr))
2877 return -EFAULT;
2878 r = __copy_from_user(data, (void __user *)addr + offset, len);
2879 if (r)
2880 return -EFAULT;
2881 return 0;
2882}
2883
2884int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
2885 int len)
2886{
2887 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2888
2889 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2890}
2891EXPORT_SYMBOL_GPL(kvm_read_guest_page);
2892
2893int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
2894 int offset, int len)
2895{
2896 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2897
2898 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2899}
2900EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
2901
2902int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
2903{
2904 gfn_t gfn = gpa >> PAGE_SHIFT;
2905 int seg;
2906 int offset = offset_in_page(gpa);
2907 int ret;
2908
2909 while ((seg = next_segment(len, offset)) != 0) {
2910 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
2911 if (ret < 0)
2912 return ret;
2913 offset = 0;
2914 len -= seg;
2915 data += seg;
2916 ++gfn;
2917 }
2918 return 0;
2919}
2920EXPORT_SYMBOL_GPL(kvm_read_guest);
2921
2922int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
2923{
2924 gfn_t gfn = gpa >> PAGE_SHIFT;
2925 int seg;
2926 int offset = offset_in_page(gpa);
2927 int ret;
2928
2929 while ((seg = next_segment(len, offset)) != 0) {
2930 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
2931 if (ret < 0)
2932 return ret;
2933 offset = 0;
2934 len -= seg;
2935 data += seg;
2936 ++gfn;
2937 }
2938 return 0;
2939}
2940EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
2941
2942static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2943 void *data, int offset, unsigned long len)
2944{
2945 int r;
2946 unsigned long addr;
2947
2948 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2949 if (kvm_is_error_hva(addr))
2950 return -EFAULT;
2951 pagefault_disable();
2952 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
2953 pagefault_enable();
2954 if (r)
2955 return -EFAULT;
2956 return 0;
2957}
2958
2959int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
2960 void *data, unsigned long len)
2961{
2962 gfn_t gfn = gpa >> PAGE_SHIFT;
2963 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2964 int offset = offset_in_page(gpa);
2965
2966 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
2967}
2968EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
2969
2970static int __kvm_write_guest_page(struct kvm *kvm,
2971 struct kvm_memory_slot *memslot, gfn_t gfn,
2972 const void *data, int offset, int len)
2973{
2974 int r;
2975 unsigned long addr;
2976
2977 addr = gfn_to_hva_memslot(memslot, gfn);
2978 if (kvm_is_error_hva(addr))
2979 return -EFAULT;
2980 r = __copy_to_user((void __user *)addr + offset, data, len);
2981 if (r)
2982 return -EFAULT;
2983 mark_page_dirty_in_slot(kvm, memslot, gfn);
2984 return 0;
2985}
2986
2987int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
2988 const void *data, int offset, int len)
2989{
2990 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2991
2992 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
2993}
2994EXPORT_SYMBOL_GPL(kvm_write_guest_page);
2995
2996int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
2997 const void *data, int offset, int len)
2998{
2999 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3000
3001 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3002}
3003EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
3004
3005int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3006 unsigned long len)
3007{
3008 gfn_t gfn = gpa >> PAGE_SHIFT;
3009 int seg;
3010 int offset = offset_in_page(gpa);
3011 int ret;
3012
3013 while ((seg = next_segment(len, offset)) != 0) {
3014 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3015 if (ret < 0)
3016 return ret;
3017 offset = 0;
3018 len -= seg;
3019 data += seg;
3020 ++gfn;
3021 }
3022 return 0;
3023}
3024EXPORT_SYMBOL_GPL(kvm_write_guest);
3025
3026int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3027 unsigned long len)
3028{
3029 gfn_t gfn = gpa >> PAGE_SHIFT;
3030 int seg;
3031 int offset = offset_in_page(gpa);
3032 int ret;
3033
3034 while ((seg = next_segment(len, offset)) != 0) {
3035 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3036 if (ret < 0)
3037 return ret;
3038 offset = 0;
3039 len -= seg;
3040 data += seg;
3041 ++gfn;
3042 }
3043 return 0;
3044}
3045EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
3046
3047static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3048 struct gfn_to_hva_cache *ghc,
3049 gpa_t gpa, unsigned long len)
3050{
3051 int offset = offset_in_page(gpa);
3052 gfn_t start_gfn = gpa >> PAGE_SHIFT;
3053 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3054 gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3055 gfn_t nr_pages_avail;
3056
3057 /* Update ghc->generation before performing any error checks. */
3058 ghc->generation = slots->generation;
3059
3060 if (start_gfn > end_gfn) {
3061 ghc->hva = KVM_HVA_ERR_BAD;
3062 return -EINVAL;
3063 }
3064
3065 /*
3066 * If the requested region crosses two memslots, we still
3067 * verify that the entire region is valid here.
3068 */
3069 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3070 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3071 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3072 &nr_pages_avail);
3073 if (kvm_is_error_hva(ghc->hva))
3074 return -EFAULT;
3075 }
3076
3077 /* Use the slow path for cross page reads and writes. */
3078 if (nr_pages_needed == 1)
3079 ghc->hva += offset;
3080 else
3081 ghc->memslot = NULL;
3082
3083 ghc->gpa = gpa;
3084 ghc->len = len;
3085 return 0;
3086}
3087
3088int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3089 gpa_t gpa, unsigned long len)
3090{
3091 struct kvm_memslots *slots = kvm_memslots(kvm);
3092 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3093}
3094EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
3095
3096int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3097 void *data, unsigned int offset,
3098 unsigned long len)
3099{
3100 struct kvm_memslots *slots = kvm_memslots(kvm);
3101 int r;
3102 gpa_t gpa = ghc->gpa + offset;
3103
3104 if (WARN_ON_ONCE(len + offset > ghc->len))
3105 return -EINVAL;
3106
3107 if (slots->generation != ghc->generation) {
3108 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3109 return -EFAULT;
3110 }
3111
3112 if (kvm_is_error_hva(ghc->hva))
3113 return -EFAULT;
3114
3115 if (unlikely(!ghc->memslot))
3116 return kvm_write_guest(kvm, gpa, data, len);
3117
3118 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3119 if (r)
3120 return -EFAULT;
3121 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3122
3123 return 0;
3124}
3125EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
3126
3127int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3128 void *data, unsigned long len)
3129{
3130 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3131}
3132EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
3133
3134int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3135 void *data, unsigned int offset,
3136 unsigned long len)
3137{
3138 struct kvm_memslots *slots = kvm_memslots(kvm);
3139 int r;
3140 gpa_t gpa = ghc->gpa + offset;
3141
3142 if (WARN_ON_ONCE(len + offset > ghc->len))
3143 return -EINVAL;
3144
3145 if (slots->generation != ghc->generation) {
3146 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3147 return -EFAULT;
3148 }
3149
3150 if (kvm_is_error_hva(ghc->hva))
3151 return -EFAULT;
3152
3153 if (unlikely(!ghc->memslot))
3154 return kvm_read_guest(kvm, gpa, data, len);
3155
3156 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3157 if (r)
3158 return -EFAULT;
3159
3160 return 0;
3161}
3162EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
3163
3164int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3165 void *data, unsigned long len)
3166{
3167 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3168}
3169EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
3170
3171int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3172{
3173 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3174 gfn_t gfn = gpa >> PAGE_SHIFT;
3175 int seg;
3176 int offset = offset_in_page(gpa);
3177 int ret;
3178
3179 while ((seg = next_segment(len, offset)) != 0) {
3180 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
3181 if (ret < 0)
3182 return ret;
3183 offset = 0;
3184 len -= seg;
3185 ++gfn;
3186 }
3187 return 0;
3188}
3189EXPORT_SYMBOL_GPL(kvm_clear_guest);
3190
3191void mark_page_dirty_in_slot(struct kvm *kvm,
3192 const struct kvm_memory_slot *memslot,
3193 gfn_t gfn)
3194{
3195 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3196
3197#ifdef CONFIG_HAVE_KVM_DIRTY_RING
3198 if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm))
3199 return;
3200#endif
3201
3202 if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3203 unsigned long rel_gfn = gfn - memslot->base_gfn;
3204 u32 slot = (memslot->as_id << 16) | memslot->id;
3205
3206 if (kvm->dirty_ring_size)
3207 kvm_dirty_ring_push(&vcpu->dirty_ring,
3208 slot, rel_gfn);
3209 else
3210 set_bit_le(rel_gfn, memslot->dirty_bitmap);
3211 }
3212}
3213EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
3214
3215void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3216{
3217 struct kvm_memory_slot *memslot;
3218
3219 memslot = gfn_to_memslot(kvm, gfn);
3220 mark_page_dirty_in_slot(kvm, memslot, gfn);
3221}
3222EXPORT_SYMBOL_GPL(mark_page_dirty);
3223
3224void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3225{
3226 struct kvm_memory_slot *memslot;
3227
3228 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3229 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3230}
3231EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3232
3233void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3234{
3235 if (!vcpu->sigset_active)
3236 return;
3237
3238 /*
3239 * This does a lockless modification of ->real_blocked, which is fine
3240 * because, only current can change ->real_blocked and all readers of
3241 * ->real_blocked don't care as long ->real_blocked is always a subset
3242 * of ->blocked.
3243 */
3244 sigprocmask(SIG_SETMASK, &vcpu->sigset, ¤t->real_blocked);
3245}
3246
3247void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3248{
3249 if (!vcpu->sigset_active)
3250 return;
3251
3252 sigprocmask(SIG_SETMASK, ¤t->real_blocked, NULL);
3253 sigemptyset(¤t->real_blocked);
3254}
3255
3256static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3257{
3258 unsigned int old, val, grow, grow_start;
3259
3260 old = val = vcpu->halt_poll_ns;
3261 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3262 grow = READ_ONCE(halt_poll_ns_grow);
3263 if (!grow)
3264 goto out;
3265
3266 val *= grow;
3267 if (val < grow_start)
3268 val = grow_start;
3269
3270 if (val > vcpu->kvm->max_halt_poll_ns)
3271 val = vcpu->kvm->max_halt_poll_ns;
3272
3273 vcpu->halt_poll_ns = val;
3274out:
3275 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3276}
3277
3278static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3279{
3280 unsigned int old, val, shrink, grow_start;
3281
3282 old = val = vcpu->halt_poll_ns;
3283 shrink = READ_ONCE(halt_poll_ns_shrink);
3284 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3285 if (shrink == 0)
3286 val = 0;
3287 else
3288 val /= shrink;
3289
3290 if (val < grow_start)
3291 val = 0;
3292
3293 vcpu->halt_poll_ns = val;
3294 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3295}
3296
3297static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3298{
3299 int ret = -EINTR;
3300 int idx = srcu_read_lock(&vcpu->kvm->srcu);
3301
3302 if (kvm_arch_vcpu_runnable(vcpu)) {
3303 kvm_make_request(KVM_REQ_UNHALT, vcpu);
3304 goto out;
3305 }
3306 if (kvm_cpu_has_pending_timer(vcpu))
3307 goto out;
3308 if (signal_pending(current))
3309 goto out;
3310 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3311 goto out;
3312
3313 ret = 0;
3314out:
3315 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3316 return ret;
3317}
3318
3319/*
3320 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3321 * pending. This is mostly used when halting a vCPU, but may also be used
3322 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3323 */
3324bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3325{
3326 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3327 bool waited = false;
3328
3329 vcpu->stat.generic.blocking = 1;
3330
3331 preempt_disable();
3332 kvm_arch_vcpu_blocking(vcpu);
3333 prepare_to_rcuwait(wait);
3334 preempt_enable();
3335
3336 for (;;) {
3337 set_current_state(TASK_INTERRUPTIBLE);
3338
3339 if (kvm_vcpu_check_block(vcpu) < 0)
3340 break;
3341
3342 waited = true;
3343 schedule();
3344 }
3345
3346 preempt_disable();
3347 finish_rcuwait(wait);
3348 kvm_arch_vcpu_unblocking(vcpu);
3349 preempt_enable();
3350
3351 vcpu->stat.generic.blocking = 0;
3352
3353 return waited;
3354}
3355
3356static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3357 ktime_t end, bool success)
3358{
3359 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3360 u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3361
3362 ++vcpu->stat.generic.halt_attempted_poll;
3363
3364 if (success) {
3365 ++vcpu->stat.generic.halt_successful_poll;
3366
3367 if (!vcpu_valid_wakeup(vcpu))
3368 ++vcpu->stat.generic.halt_poll_invalid;
3369
3370 stats->halt_poll_success_ns += poll_ns;
3371 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3372 } else {
3373 stats->halt_poll_fail_ns += poll_ns;
3374 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3375 }
3376}
3377
3378/*
3379 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt
3380 * polling is enabled, busy wait for a short time before blocking to avoid the
3381 * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3382 * is halted.
3383 */
3384void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3385{
3386 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3387 bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3388 ktime_t start, cur, poll_end;
3389 bool waited = false;
3390 u64 halt_ns;
3391
3392 start = cur = poll_end = ktime_get();
3393 if (do_halt_poll) {
3394 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3395
3396 do {
3397 /*
3398 * This sets KVM_REQ_UNHALT if an interrupt
3399 * arrives.
3400 */
3401 if (kvm_vcpu_check_block(vcpu) < 0)
3402 goto out;
3403 cpu_relax();
3404 poll_end = cur = ktime_get();
3405 } while (kvm_vcpu_can_poll(cur, stop));
3406 }
3407
3408 waited = kvm_vcpu_block(vcpu);
3409
3410 cur = ktime_get();
3411 if (waited) {
3412 vcpu->stat.generic.halt_wait_ns +=
3413 ktime_to_ns(cur) - ktime_to_ns(poll_end);
3414 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3415 ktime_to_ns(cur) - ktime_to_ns(poll_end));
3416 }
3417out:
3418 /* The total time the vCPU was "halted", including polling time. */
3419 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3420
3421 /*
3422 * Note, halt-polling is considered successful so long as the vCPU was
3423 * never actually scheduled out, i.e. even if the wake event arrived
3424 * after of the halt-polling loop itself, but before the full wait.
3425 */
3426 if (do_halt_poll)
3427 update_halt_poll_stats(vcpu, start, poll_end, !waited);
3428
3429 if (halt_poll_allowed) {
3430 if (!vcpu_valid_wakeup(vcpu)) {
3431 shrink_halt_poll_ns(vcpu);
3432 } else if (vcpu->kvm->max_halt_poll_ns) {
3433 if (halt_ns <= vcpu->halt_poll_ns)
3434 ;
3435 /* we had a long block, shrink polling */
3436 else if (vcpu->halt_poll_ns &&
3437 halt_ns > vcpu->kvm->max_halt_poll_ns)
3438 shrink_halt_poll_ns(vcpu);
3439 /* we had a short halt and our poll time is too small */
3440 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
3441 halt_ns < vcpu->kvm->max_halt_poll_ns)
3442 grow_halt_poll_ns(vcpu);
3443 } else {
3444 vcpu->halt_poll_ns = 0;
3445 }
3446 }
3447
3448 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3449}
3450EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
3451
3452bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3453{
3454 if (__kvm_vcpu_wake_up(vcpu)) {
3455 WRITE_ONCE(vcpu->ready, true);
3456 ++vcpu->stat.generic.halt_wakeup;
3457 return true;
3458 }
3459
3460 return false;
3461}
3462EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3463
3464#ifndef CONFIG_S390
3465/*
3466 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3467 */
3468void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3469{
3470 int me, cpu;
3471
3472 if (kvm_vcpu_wake_up(vcpu))
3473 return;
3474
3475 me = get_cpu();
3476 /*
3477 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3478 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should
3479 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3480 * within the vCPU thread itself.
3481 */
3482 if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3483 if (vcpu->mode == IN_GUEST_MODE)
3484 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3485 goto out;
3486 }
3487
3488 /*
3489 * Note, the vCPU could get migrated to a different pCPU at any point
3490 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3491 * IPI to the previous pCPU. But, that's ok because the purpose of the
3492 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3493 * vCPU also requires it to leave IN_GUEST_MODE.
3494 */
3495 if (kvm_arch_vcpu_should_kick(vcpu)) {
3496 cpu = READ_ONCE(vcpu->cpu);
3497 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3498 smp_send_reschedule(cpu);
3499 }
3500out:
3501 put_cpu();
3502}
3503EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3504#endif /* !CONFIG_S390 */
3505
3506int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3507{
3508 struct pid *pid;
3509 struct task_struct *task = NULL;
3510 int ret = 0;
3511
3512 rcu_read_lock();
3513 pid = rcu_dereference(target->pid);
3514 if (pid)
3515 task = get_pid_task(pid, PIDTYPE_PID);
3516 rcu_read_unlock();
3517 if (!task)
3518 return ret;
3519 ret = yield_to(task, 1);
3520 put_task_struct(task);
3521
3522 return ret;
3523}
3524EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3525
3526/*
3527 * Helper that checks whether a VCPU is eligible for directed yield.
3528 * Most eligible candidate to yield is decided by following heuristics:
3529 *
3530 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently
3531 * (preempted lock holder), indicated by @in_spin_loop.
3532 * Set at the beginning and cleared at the end of interception/PLE handler.
3533 *
3534 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
3535 * chance last time (mostly it has become eligible now since we have probably
3536 * yielded to lockholder in last iteration. This is done by toggling
3537 * @dy_eligible each time a VCPU checked for eligibility.)
3538 *
3539 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
3540 * to preempted lock-holder could result in wrong VCPU selection and CPU
3541 * burning. Giving priority for a potential lock-holder increases lock
3542 * progress.
3543 *
3544 * Since algorithm is based on heuristics, accessing another VCPU data without
3545 * locking does not harm. It may result in trying to yield to same VCPU, fail
3546 * and continue with next VCPU and so on.
3547 */
3548static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3549{
3550#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3551 bool eligible;
3552
3553 eligible = !vcpu->spin_loop.in_spin_loop ||
3554 vcpu->spin_loop.dy_eligible;
3555
3556 if (vcpu->spin_loop.in_spin_loop)
3557 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3558
3559 return eligible;
3560#else
3561 return true;
3562#endif
3563}
3564
3565/*
3566 * Unlike kvm_arch_vcpu_runnable, this function is called outside
3567 * a vcpu_load/vcpu_put pair. However, for most architectures
3568 * kvm_arch_vcpu_runnable does not require vcpu_load.
3569 */
3570bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3571{
3572 return kvm_arch_vcpu_runnable(vcpu);
3573}
3574
3575static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3576{
3577 if (kvm_arch_dy_runnable(vcpu))
3578 return true;
3579
3580#ifdef CONFIG_KVM_ASYNC_PF
3581 if (!list_empty_careful(&vcpu->async_pf.done))
3582 return true;
3583#endif
3584
3585 return false;
3586}
3587
3588bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
3589{
3590 return false;
3591}
3592
3593void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
3594{
3595 struct kvm *kvm = me->kvm;
3596 struct kvm_vcpu *vcpu;
3597 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
3598 unsigned long i;
3599 int yielded = 0;
3600 int try = 3;
3601 int pass;
3602
3603 kvm_vcpu_set_in_spin_loop(me, true);
3604 /*
3605 * We boost the priority of a VCPU that is runnable but not
3606 * currently running, because it got preempted by something
3607 * else and called schedule in __vcpu_run. Hopefully that
3608 * VCPU is holding the lock that we need and will release it.
3609 * We approximate round-robin by starting at the last boosted VCPU.
3610 */
3611 for (pass = 0; pass < 2 && !yielded && try; pass++) {
3612 kvm_for_each_vcpu(i, vcpu, kvm) {
3613 if (!pass && i <= last_boosted_vcpu) {
3614 i = last_boosted_vcpu;
3615 continue;
3616 } else if (pass && i > last_boosted_vcpu)
3617 break;
3618 if (!READ_ONCE(vcpu->ready))
3619 continue;
3620 if (vcpu == me)
3621 continue;
3622 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
3623 continue;
3624 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
3625 !kvm_arch_dy_has_pending_interrupt(vcpu) &&
3626 !kvm_arch_vcpu_in_kernel(vcpu))
3627 continue;
3628 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
3629 continue;
3630
3631 yielded = kvm_vcpu_yield_to(vcpu);
3632 if (yielded > 0) {
3633 kvm->last_boosted_vcpu = i;
3634 break;
3635 } else if (yielded < 0) {
3636 try--;
3637 if (!try)
3638 break;
3639 }
3640 }
3641 }
3642 kvm_vcpu_set_in_spin_loop(me, false);
3643
3644 /* Ensure vcpu is not eligible during next spinloop */
3645 kvm_vcpu_set_dy_eligible(me, false);
3646}
3647EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
3648
3649static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
3650{
3651#ifdef CONFIG_HAVE_KVM_DIRTY_RING
3652 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
3653 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
3654 kvm->dirty_ring_size / PAGE_SIZE);
3655#else
3656 return false;
3657#endif
3658}
3659
3660static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
3661{
3662 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
3663 struct page *page;
3664
3665 if (vmf->pgoff == 0)
3666 page = virt_to_page(vcpu->run);
3667#ifdef CONFIG_X86
3668 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
3669 page = virt_to_page(vcpu->arch.pio_data);
3670#endif
3671#ifdef CONFIG_KVM_MMIO
3672 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
3673 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
3674#endif
3675 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
3676 page = kvm_dirty_ring_get_page(
3677 &vcpu->dirty_ring,
3678 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
3679 else
3680 return kvm_arch_vcpu_fault(vcpu, vmf);
3681 get_page(page);
3682 vmf->page = page;
3683 return 0;
3684}
3685
3686static const struct vm_operations_struct kvm_vcpu_vm_ops = {
3687 .fault = kvm_vcpu_fault,
3688};
3689
3690static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
3691{
3692 struct kvm_vcpu *vcpu = file->private_data;
3693 unsigned long pages = vma_pages(vma);
3694
3695 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
3696 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
3697 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
3698 return -EINVAL;
3699
3700 vma->vm_ops = &kvm_vcpu_vm_ops;
3701 return 0;
3702}
3703
3704static int kvm_vcpu_release(struct inode *inode, struct file *filp)
3705{
3706 struct kvm_vcpu *vcpu = filp->private_data;
3707
3708 kvm_put_kvm(vcpu->kvm);
3709 return 0;
3710}
3711
3712static const struct file_operations kvm_vcpu_fops = {
3713 .release = kvm_vcpu_release,
3714 .unlocked_ioctl = kvm_vcpu_ioctl,
3715 .mmap = kvm_vcpu_mmap,
3716 .llseek = noop_llseek,
3717 KVM_COMPAT(kvm_vcpu_compat_ioctl),
3718};
3719
3720/*
3721 * Allocates an inode for the vcpu.
3722 */
3723static int create_vcpu_fd(struct kvm_vcpu *vcpu)
3724{
3725 char name[8 + 1 + ITOA_MAX_LEN + 1];
3726
3727 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
3728 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
3729}
3730
3731static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
3732{
3733#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
3734 struct dentry *debugfs_dentry;
3735 char dir_name[ITOA_MAX_LEN * 2];
3736
3737 if (!debugfs_initialized())
3738 return;
3739
3740 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
3741 debugfs_dentry = debugfs_create_dir(dir_name,
3742 vcpu->kvm->debugfs_dentry);
3743
3744 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
3745#endif
3746}
3747
3748/*
3749 * Creates some virtual cpus. Good luck creating more than one.
3750 */
3751static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
3752{
3753 int r;
3754 struct kvm_vcpu *vcpu;
3755 struct page *page;
3756
3757 if (id >= KVM_MAX_VCPU_IDS)
3758 return -EINVAL;
3759
3760 mutex_lock(&kvm->lock);
3761 if (kvm->created_vcpus >= kvm->max_vcpus) {
3762 mutex_unlock(&kvm->lock);
3763 return -EINVAL;
3764 }
3765
3766 kvm->created_vcpus++;
3767 mutex_unlock(&kvm->lock);
3768
3769 r = kvm_arch_vcpu_precreate(kvm, id);
3770 if (r)
3771 goto vcpu_decrement;
3772
3773 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
3774 if (!vcpu) {
3775 r = -ENOMEM;
3776 goto vcpu_decrement;
3777 }
3778
3779 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
3780 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3781 if (!page) {
3782 r = -ENOMEM;
3783 goto vcpu_free;
3784 }
3785 vcpu->run = page_address(page);
3786
3787 kvm_vcpu_init(vcpu, kvm, id);
3788
3789 r = kvm_arch_vcpu_create(vcpu);
3790 if (r)
3791 goto vcpu_free_run_page;
3792
3793 if (kvm->dirty_ring_size) {
3794 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
3795 id, kvm->dirty_ring_size);
3796 if (r)
3797 goto arch_vcpu_destroy;
3798 }
3799
3800 mutex_lock(&kvm->lock);
3801 if (kvm_get_vcpu_by_id(kvm, id)) {
3802 r = -EEXIST;
3803 goto unlock_vcpu_destroy;
3804 }
3805
3806 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
3807 r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
3808 BUG_ON(r == -EBUSY);
3809 if (r)
3810 goto unlock_vcpu_destroy;
3811
3812 /* Fill the stats id string for the vcpu */
3813 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
3814 task_pid_nr(current), id);
3815
3816 /* Now it's all set up, let userspace reach it */
3817 kvm_get_kvm(kvm);
3818 r = create_vcpu_fd(vcpu);
3819 if (r < 0) {
3820 xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
3821 kvm_put_kvm_no_destroy(kvm);
3822 goto unlock_vcpu_destroy;
3823 }
3824
3825 /*
3826 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu
3827 * pointer before kvm->online_vcpu's incremented value.
3828 */
3829 smp_wmb();
3830 atomic_inc(&kvm->online_vcpus);
3831
3832 mutex_unlock(&kvm->lock);
3833 kvm_arch_vcpu_postcreate(vcpu);
3834 kvm_create_vcpu_debugfs(vcpu);
3835 return r;
3836
3837unlock_vcpu_destroy:
3838 mutex_unlock(&kvm->lock);
3839 kvm_dirty_ring_free(&vcpu->dirty_ring);
3840arch_vcpu_destroy:
3841 kvm_arch_vcpu_destroy(vcpu);
3842vcpu_free_run_page:
3843 free_page((unsigned long)vcpu->run);
3844vcpu_free:
3845 kmem_cache_free(kvm_vcpu_cache, vcpu);
3846vcpu_decrement:
3847 mutex_lock(&kvm->lock);
3848 kvm->created_vcpus--;
3849 mutex_unlock(&kvm->lock);
3850 return r;
3851}
3852
3853static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
3854{
3855 if (sigset) {
3856 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3857 vcpu->sigset_active = 1;
3858 vcpu->sigset = *sigset;
3859 } else
3860 vcpu->sigset_active = 0;
3861 return 0;
3862}
3863
3864static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
3865 size_t size, loff_t *offset)
3866{
3867 struct kvm_vcpu *vcpu = file->private_data;
3868
3869 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
3870 &kvm_vcpu_stats_desc[0], &vcpu->stat,
3871 sizeof(vcpu->stat), user_buffer, size, offset);
3872}
3873
3874static const struct file_operations kvm_vcpu_stats_fops = {
3875 .read = kvm_vcpu_stats_read,
3876 .llseek = noop_llseek,
3877};
3878
3879static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
3880{
3881 int fd;
3882 struct file *file;
3883 char name[15 + ITOA_MAX_LEN + 1];
3884
3885 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
3886
3887 fd = get_unused_fd_flags(O_CLOEXEC);
3888 if (fd < 0)
3889 return fd;
3890
3891 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
3892 if (IS_ERR(file)) {
3893 put_unused_fd(fd);
3894 return PTR_ERR(file);
3895 }
3896 file->f_mode |= FMODE_PREAD;
3897 fd_install(fd, file);
3898
3899 return fd;
3900}
3901
3902static long kvm_vcpu_ioctl(struct file *filp,
3903 unsigned int ioctl, unsigned long arg)
3904{
3905 struct kvm_vcpu *vcpu = filp->private_data;
3906 void __user *argp = (void __user *)arg;
3907 int r;
3908 struct kvm_fpu *fpu = NULL;
3909 struct kvm_sregs *kvm_sregs = NULL;
3910
3911 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
3912 return -EIO;
3913
3914 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
3915 return -EINVAL;
3916
3917 /*
3918 * Some architectures have vcpu ioctls that are asynchronous to vcpu
3919 * execution; mutex_lock() would break them.
3920 */
3921 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
3922 if (r != -ENOIOCTLCMD)
3923 return r;
3924
3925 if (mutex_lock_killable(&vcpu->mutex))
3926 return -EINTR;
3927 switch (ioctl) {
3928 case KVM_RUN: {
3929 struct pid *oldpid;
3930 r = -EINVAL;
3931 if (arg)
3932 goto out;
3933 oldpid = rcu_access_pointer(vcpu->pid);
3934 if (unlikely(oldpid != task_pid(current))) {
3935 /* The thread running this VCPU changed. */
3936 struct pid *newpid;
3937
3938 r = kvm_arch_vcpu_run_pid_change(vcpu);
3939 if (r)
3940 break;
3941
3942 newpid = get_task_pid(current, PIDTYPE_PID);
3943 rcu_assign_pointer(vcpu->pid, newpid);
3944 if (oldpid)
3945 synchronize_rcu();
3946 put_pid(oldpid);
3947 }
3948 r = kvm_arch_vcpu_ioctl_run(vcpu);
3949 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
3950 break;
3951 }
3952 case KVM_GET_REGS: {
3953 struct kvm_regs *kvm_regs;
3954
3955 r = -ENOMEM;
3956 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
3957 if (!kvm_regs)
3958 goto out;
3959 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
3960 if (r)
3961 goto out_free1;
3962 r = -EFAULT;
3963 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
3964 goto out_free1;
3965 r = 0;
3966out_free1:
3967 kfree(kvm_regs);
3968 break;
3969 }
3970 case KVM_SET_REGS: {
3971 struct kvm_regs *kvm_regs;
3972
3973 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
3974 if (IS_ERR(kvm_regs)) {
3975 r = PTR_ERR(kvm_regs);
3976 goto out;
3977 }
3978 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
3979 kfree(kvm_regs);
3980 break;
3981 }
3982 case KVM_GET_SREGS: {
3983 kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
3984 GFP_KERNEL_ACCOUNT);
3985 r = -ENOMEM;
3986 if (!kvm_sregs)
3987 goto out;
3988 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
3989 if (r)
3990 goto out;
3991 r = -EFAULT;
3992 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
3993 goto out;
3994 r = 0;
3995 break;
3996 }
3997 case KVM_SET_SREGS: {
3998 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
3999 if (IS_ERR(kvm_sregs)) {
4000 r = PTR_ERR(kvm_sregs);
4001 kvm_sregs = NULL;
4002 goto out;
4003 }
4004 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
4005 break;
4006 }
4007 case KVM_GET_MP_STATE: {
4008 struct kvm_mp_state mp_state;
4009
4010 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4011 if (r)
4012 goto out;
4013 r = -EFAULT;
4014 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4015 goto out;
4016 r = 0;
4017 break;
4018 }
4019 case KVM_SET_MP_STATE: {
4020 struct kvm_mp_state mp_state;
4021
4022 r = -EFAULT;
4023 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4024 goto out;
4025 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
4026 break;
4027 }
4028 case KVM_TRANSLATE: {
4029 struct kvm_translation tr;
4030
4031 r = -EFAULT;
4032 if (copy_from_user(&tr, argp, sizeof(tr)))
4033 goto out;
4034 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4035 if (r)
4036 goto out;
4037 r = -EFAULT;
4038 if (copy_to_user(argp, &tr, sizeof(tr)))
4039 goto out;
4040 r = 0;
4041 break;
4042 }
4043 case KVM_SET_GUEST_DEBUG: {
4044 struct kvm_guest_debug dbg;
4045
4046 r = -EFAULT;
4047 if (copy_from_user(&dbg, argp, sizeof(dbg)))
4048 goto out;
4049 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4050 break;
4051 }
4052 case KVM_SET_SIGNAL_MASK: {
4053 struct kvm_signal_mask __user *sigmask_arg = argp;
4054 struct kvm_signal_mask kvm_sigmask;
4055 sigset_t sigset, *p;
4056
4057 p = NULL;
4058 if (argp) {
4059 r = -EFAULT;
4060 if (copy_from_user(&kvm_sigmask, argp,
4061 sizeof(kvm_sigmask)))
4062 goto out;
4063 r = -EINVAL;
4064 if (kvm_sigmask.len != sizeof(sigset))
4065 goto out;
4066 r = -EFAULT;
4067 if (copy_from_user(&sigset, sigmask_arg->sigset,
4068 sizeof(sigset)))
4069 goto out;
4070 p = &sigset;
4071 }
4072 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4073 break;
4074 }
4075 case KVM_GET_FPU: {
4076 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
4077 r = -ENOMEM;
4078 if (!fpu)
4079 goto out;
4080 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4081 if (r)
4082 goto out;
4083 r = -EFAULT;
4084 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4085 goto out;
4086 r = 0;
4087 break;
4088 }
4089 case KVM_SET_FPU: {
4090 fpu = memdup_user(argp, sizeof(*fpu));
4091 if (IS_ERR(fpu)) {
4092 r = PTR_ERR(fpu);
4093 fpu = NULL;
4094 goto out;
4095 }
4096 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4097 break;
4098 }
4099 case KVM_GET_STATS_FD: {
4100 r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4101 break;
4102 }
4103 default:
4104 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4105 }
4106out:
4107 mutex_unlock(&vcpu->mutex);
4108 kfree(fpu);
4109 kfree(kvm_sregs);
4110 return r;
4111}
4112
4113#ifdef CONFIG_KVM_COMPAT
4114static long kvm_vcpu_compat_ioctl(struct file *filp,
4115 unsigned int ioctl, unsigned long arg)
4116{
4117 struct kvm_vcpu *vcpu = filp->private_data;
4118 void __user *argp = compat_ptr(arg);
4119 int r;
4120
4121 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4122 return -EIO;
4123
4124 switch (ioctl) {
4125 case KVM_SET_SIGNAL_MASK: {
4126 struct kvm_signal_mask __user *sigmask_arg = argp;
4127 struct kvm_signal_mask kvm_sigmask;
4128 sigset_t sigset;
4129
4130 if (argp) {
4131 r = -EFAULT;
4132 if (copy_from_user(&kvm_sigmask, argp,
4133 sizeof(kvm_sigmask)))
4134 goto out;
4135 r = -EINVAL;
4136 if (kvm_sigmask.len != sizeof(compat_sigset_t))
4137 goto out;
4138 r = -EFAULT;
4139 if (get_compat_sigset(&sigset,
4140 (compat_sigset_t __user *)sigmask_arg->sigset))
4141 goto out;
4142 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4143 } else
4144 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4145 break;
4146 }
4147 default:
4148 r = kvm_vcpu_ioctl(filp, ioctl, arg);
4149 }
4150
4151out:
4152 return r;
4153}
4154#endif
4155
4156static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4157{
4158 struct kvm_device *dev = filp->private_data;
4159
4160 if (dev->ops->mmap)
4161 return dev->ops->mmap(dev, vma);
4162
4163 return -ENODEV;
4164}
4165
4166static int kvm_device_ioctl_attr(struct kvm_device *dev,
4167 int (*accessor)(struct kvm_device *dev,
4168 struct kvm_device_attr *attr),
4169 unsigned long arg)
4170{
4171 struct kvm_device_attr attr;
4172
4173 if (!accessor)
4174 return -EPERM;
4175
4176 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4177 return -EFAULT;
4178
4179 return accessor(dev, &attr);
4180}
4181
4182static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4183 unsigned long arg)
4184{
4185 struct kvm_device *dev = filp->private_data;
4186
4187 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4188 return -EIO;
4189
4190 switch (ioctl) {
4191 case KVM_SET_DEVICE_ATTR:
4192 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4193 case KVM_GET_DEVICE_ATTR:
4194 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4195 case KVM_HAS_DEVICE_ATTR:
4196 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4197 default:
4198 if (dev->ops->ioctl)
4199 return dev->ops->ioctl(dev, ioctl, arg);
4200
4201 return -ENOTTY;
4202 }
4203}
4204
4205static int kvm_device_release(struct inode *inode, struct file *filp)
4206{
4207 struct kvm_device *dev = filp->private_data;
4208 struct kvm *kvm = dev->kvm;
4209
4210 if (dev->ops->release) {
4211 mutex_lock(&kvm->lock);
4212 list_del(&dev->vm_node);
4213 dev->ops->release(dev);
4214 mutex_unlock(&kvm->lock);
4215 }
4216
4217 kvm_put_kvm(kvm);
4218 return 0;
4219}
4220
4221static const struct file_operations kvm_device_fops = {
4222 .unlocked_ioctl = kvm_device_ioctl,
4223 .release = kvm_device_release,
4224 KVM_COMPAT(kvm_device_ioctl),
4225 .mmap = kvm_device_mmap,
4226};
4227
4228struct kvm_device *kvm_device_from_filp(struct file *filp)
4229{
4230 if (filp->f_op != &kvm_device_fops)
4231 return NULL;
4232
4233 return filp->private_data;
4234}
4235
4236static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4237#ifdef CONFIG_KVM_MPIC
4238 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
4239 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
4240#endif
4241};
4242
4243int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4244{
4245 if (type >= ARRAY_SIZE(kvm_device_ops_table))
4246 return -ENOSPC;
4247
4248 if (kvm_device_ops_table[type] != NULL)
4249 return -EEXIST;
4250
4251 kvm_device_ops_table[type] = ops;
4252 return 0;
4253}
4254
4255void kvm_unregister_device_ops(u32 type)
4256{
4257 if (kvm_device_ops_table[type] != NULL)
4258 kvm_device_ops_table[type] = NULL;
4259}
4260
4261static int kvm_ioctl_create_device(struct kvm *kvm,
4262 struct kvm_create_device *cd)
4263{
4264 const struct kvm_device_ops *ops = NULL;
4265 struct kvm_device *dev;
4266 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4267 int type;
4268 int ret;
4269
4270 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4271 return -ENODEV;
4272
4273 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4274 ops = kvm_device_ops_table[type];
4275 if (ops == NULL)
4276 return -ENODEV;
4277
4278 if (test)
4279 return 0;
4280
4281 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4282 if (!dev)
4283 return -ENOMEM;
4284
4285 dev->ops = ops;
4286 dev->kvm = kvm;
4287
4288 mutex_lock(&kvm->lock);
4289 ret = ops->create(dev, type);
4290 if (ret < 0) {
4291 mutex_unlock(&kvm->lock);
4292 kfree(dev);
4293 return ret;
4294 }
4295 list_add(&dev->vm_node, &kvm->devices);
4296 mutex_unlock(&kvm->lock);
4297
4298 if (ops->init)
4299 ops->init(dev);
4300
4301 kvm_get_kvm(kvm);
4302 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4303 if (ret < 0) {
4304 kvm_put_kvm_no_destroy(kvm);
4305 mutex_lock(&kvm->lock);
4306 list_del(&dev->vm_node);
4307 if (ops->release)
4308 ops->release(dev);
4309 mutex_unlock(&kvm->lock);
4310 if (ops->destroy)
4311 ops->destroy(dev);
4312 return ret;
4313 }
4314
4315 cd->fd = ret;
4316 return 0;
4317}
4318
4319static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4320{
4321 switch (arg) {
4322 case KVM_CAP_USER_MEMORY:
4323 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4324 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4325 case KVM_CAP_INTERNAL_ERROR_DATA:
4326#ifdef CONFIG_HAVE_KVM_MSI
4327 case KVM_CAP_SIGNAL_MSI:
4328#endif
4329#ifdef CONFIG_HAVE_KVM_IRQFD
4330 case KVM_CAP_IRQFD:
4331 case KVM_CAP_IRQFD_RESAMPLE:
4332#endif
4333 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4334 case KVM_CAP_CHECK_EXTENSION_VM:
4335 case KVM_CAP_ENABLE_CAP_VM:
4336 case KVM_CAP_HALT_POLL:
4337 return 1;
4338#ifdef CONFIG_KVM_MMIO
4339 case KVM_CAP_COALESCED_MMIO:
4340 return KVM_COALESCED_MMIO_PAGE_OFFSET;
4341 case KVM_CAP_COALESCED_PIO:
4342 return 1;
4343#endif
4344#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4345 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4346 return KVM_DIRTY_LOG_MANUAL_CAPS;
4347#endif
4348#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4349 case KVM_CAP_IRQ_ROUTING:
4350 return KVM_MAX_IRQ_ROUTES;
4351#endif
4352#if KVM_ADDRESS_SPACE_NUM > 1
4353 case KVM_CAP_MULTI_ADDRESS_SPACE:
4354 return KVM_ADDRESS_SPACE_NUM;
4355#endif
4356 case KVM_CAP_NR_MEMSLOTS:
4357 return KVM_USER_MEM_SLOTS;
4358 case KVM_CAP_DIRTY_LOG_RING:
4359#ifdef CONFIG_HAVE_KVM_DIRTY_RING
4360 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4361#else
4362 return 0;
4363#endif
4364 case KVM_CAP_BINARY_STATS_FD:
4365 case KVM_CAP_SYSTEM_EVENT_DATA:
4366 return 1;
4367 default:
4368 break;
4369 }
4370 return kvm_vm_ioctl_check_extension(kvm, arg);
4371}
4372
4373static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4374{
4375 int r;
4376
4377 if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4378 return -EINVAL;
4379
4380 /* the size should be power of 2 */
4381 if (!size || (size & (size - 1)))
4382 return -EINVAL;
4383
4384 /* Should be bigger to keep the reserved entries, or a page */
4385 if (size < kvm_dirty_ring_get_rsvd_entries() *
4386 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4387 return -EINVAL;
4388
4389 if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4390 sizeof(struct kvm_dirty_gfn))
4391 return -E2BIG;
4392
4393 /* We only allow it to set once */
4394 if (kvm->dirty_ring_size)
4395 return -EINVAL;
4396
4397 mutex_lock(&kvm->lock);
4398
4399 if (kvm->created_vcpus) {
4400 /* We don't allow to change this value after vcpu created */
4401 r = -EINVAL;
4402 } else {
4403 kvm->dirty_ring_size = size;
4404 r = 0;
4405 }
4406
4407 mutex_unlock(&kvm->lock);
4408 return r;
4409}
4410
4411static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4412{
4413 unsigned long i;
4414 struct kvm_vcpu *vcpu;
4415 int cleared = 0;
4416
4417 if (!kvm->dirty_ring_size)
4418 return -EINVAL;
4419
4420 mutex_lock(&kvm->slots_lock);
4421
4422 kvm_for_each_vcpu(i, vcpu, kvm)
4423 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4424
4425 mutex_unlock(&kvm->slots_lock);
4426
4427 if (cleared)
4428 kvm_flush_remote_tlbs(kvm);
4429
4430 return cleared;
4431}
4432
4433int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4434 struct kvm_enable_cap *cap)
4435{
4436 return -EINVAL;
4437}
4438
4439static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
4440 struct kvm_enable_cap *cap)
4441{
4442 switch (cap->cap) {
4443#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4444 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4445 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4446
4447 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4448 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4449
4450 if (cap->flags || (cap->args[0] & ~allowed_options))
4451 return -EINVAL;
4452 kvm->manual_dirty_log_protect = cap->args[0];
4453 return 0;
4454 }
4455#endif
4456 case KVM_CAP_HALT_POLL: {
4457 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
4458 return -EINVAL;
4459
4460 kvm->max_halt_poll_ns = cap->args[0];
4461 return 0;
4462 }
4463 case KVM_CAP_DIRTY_LOG_RING:
4464 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
4465 default:
4466 return kvm_vm_ioctl_enable_cap(kvm, cap);
4467 }
4468}
4469
4470static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
4471 size_t size, loff_t *offset)
4472{
4473 struct kvm *kvm = file->private_data;
4474
4475 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
4476 &kvm_vm_stats_desc[0], &kvm->stat,
4477 sizeof(kvm->stat), user_buffer, size, offset);
4478}
4479
4480static const struct file_operations kvm_vm_stats_fops = {
4481 .read = kvm_vm_stats_read,
4482 .llseek = noop_llseek,
4483};
4484
4485static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
4486{
4487 int fd;
4488 struct file *file;
4489
4490 fd = get_unused_fd_flags(O_CLOEXEC);
4491 if (fd < 0)
4492 return fd;
4493
4494 file = anon_inode_getfile("kvm-vm-stats",
4495 &kvm_vm_stats_fops, kvm, O_RDONLY);
4496 if (IS_ERR(file)) {
4497 put_unused_fd(fd);
4498 return PTR_ERR(file);
4499 }
4500 file->f_mode |= FMODE_PREAD;
4501 fd_install(fd, file);
4502
4503 return fd;
4504}
4505
4506static long kvm_vm_ioctl(struct file *filp,
4507 unsigned int ioctl, unsigned long arg)
4508{
4509 struct kvm *kvm = filp->private_data;
4510 void __user *argp = (void __user *)arg;
4511 int r;
4512
4513 if (kvm->mm != current->mm || kvm->vm_dead)
4514 return -EIO;
4515 switch (ioctl) {
4516 case KVM_CREATE_VCPU:
4517 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
4518 break;
4519 case KVM_ENABLE_CAP: {
4520 struct kvm_enable_cap cap;
4521
4522 r = -EFAULT;
4523 if (copy_from_user(&cap, argp, sizeof(cap)))
4524 goto out;
4525 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
4526 break;
4527 }
4528 case KVM_SET_USER_MEMORY_REGION: {
4529 struct kvm_userspace_memory_region kvm_userspace_mem;
4530
4531 r = -EFAULT;
4532 if (copy_from_user(&kvm_userspace_mem, argp,
4533 sizeof(kvm_userspace_mem)))
4534 goto out;
4535
4536 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
4537 break;
4538 }
4539 case KVM_GET_DIRTY_LOG: {
4540 struct kvm_dirty_log log;
4541
4542 r = -EFAULT;
4543 if (copy_from_user(&log, argp, sizeof(log)))
4544 goto out;
4545 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
4546 break;
4547 }
4548#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4549 case KVM_CLEAR_DIRTY_LOG: {
4550 struct kvm_clear_dirty_log log;
4551
4552 r = -EFAULT;
4553 if (copy_from_user(&log, argp, sizeof(log)))
4554 goto out;
4555 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4556 break;
4557 }
4558#endif
4559#ifdef CONFIG_KVM_MMIO
4560 case KVM_REGISTER_COALESCED_MMIO: {
4561 struct kvm_coalesced_mmio_zone zone;
4562
4563 r = -EFAULT;
4564 if (copy_from_user(&zone, argp, sizeof(zone)))
4565 goto out;
4566 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
4567 break;
4568 }
4569 case KVM_UNREGISTER_COALESCED_MMIO: {
4570 struct kvm_coalesced_mmio_zone zone;
4571
4572 r = -EFAULT;
4573 if (copy_from_user(&zone, argp, sizeof(zone)))
4574 goto out;
4575 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
4576 break;
4577 }
4578#endif
4579 case KVM_IRQFD: {
4580 struct kvm_irqfd data;
4581
4582 r = -EFAULT;
4583 if (copy_from_user(&data, argp, sizeof(data)))
4584 goto out;
4585 r = kvm_irqfd(kvm, &data);
4586 break;
4587 }
4588 case KVM_IOEVENTFD: {
4589 struct kvm_ioeventfd data;
4590
4591 r = -EFAULT;
4592 if (copy_from_user(&data, argp, sizeof(data)))
4593 goto out;
4594 r = kvm_ioeventfd(kvm, &data);
4595 break;
4596 }
4597#ifdef CONFIG_HAVE_KVM_MSI
4598 case KVM_SIGNAL_MSI: {
4599 struct kvm_msi msi;
4600
4601 r = -EFAULT;
4602 if (copy_from_user(&msi, argp, sizeof(msi)))
4603 goto out;
4604 r = kvm_send_userspace_msi(kvm, &msi);
4605 break;
4606 }
4607#endif
4608#ifdef __KVM_HAVE_IRQ_LINE
4609 case KVM_IRQ_LINE_STATUS:
4610 case KVM_IRQ_LINE: {
4611 struct kvm_irq_level irq_event;
4612
4613 r = -EFAULT;
4614 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
4615 goto out;
4616
4617 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
4618 ioctl == KVM_IRQ_LINE_STATUS);
4619 if (r)
4620 goto out;
4621
4622 r = -EFAULT;
4623 if (ioctl == KVM_IRQ_LINE_STATUS) {
4624 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
4625 goto out;
4626 }
4627
4628 r = 0;
4629 break;
4630 }
4631#endif
4632#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4633 case KVM_SET_GSI_ROUTING: {
4634 struct kvm_irq_routing routing;
4635 struct kvm_irq_routing __user *urouting;
4636 struct kvm_irq_routing_entry *entries = NULL;
4637
4638 r = -EFAULT;
4639 if (copy_from_user(&routing, argp, sizeof(routing)))
4640 goto out;
4641 r = -EINVAL;
4642 if (!kvm_arch_can_set_irq_routing(kvm))
4643 goto out;
4644 if (routing.nr > KVM_MAX_IRQ_ROUTES)
4645 goto out;
4646 if (routing.flags)
4647 goto out;
4648 if (routing.nr) {
4649 urouting = argp;
4650 entries = vmemdup_user(urouting->entries,
4651 array_size(sizeof(*entries),
4652 routing.nr));
4653 if (IS_ERR(entries)) {
4654 r = PTR_ERR(entries);
4655 goto out;
4656 }
4657 }
4658 r = kvm_set_irq_routing(kvm, entries, routing.nr,
4659 routing.flags);
4660 kvfree(entries);
4661 break;
4662 }
4663#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
4664 case KVM_CREATE_DEVICE: {
4665 struct kvm_create_device cd;
4666
4667 r = -EFAULT;
4668 if (copy_from_user(&cd, argp, sizeof(cd)))
4669 goto out;
4670
4671 r = kvm_ioctl_create_device(kvm, &cd);
4672 if (r)
4673 goto out;
4674
4675 r = -EFAULT;
4676 if (copy_to_user(argp, &cd, sizeof(cd)))
4677 goto out;
4678
4679 r = 0;
4680 break;
4681 }
4682 case KVM_CHECK_EXTENSION:
4683 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
4684 break;
4685 case KVM_RESET_DIRTY_RINGS:
4686 r = kvm_vm_ioctl_reset_dirty_pages(kvm);
4687 break;
4688 case KVM_GET_STATS_FD:
4689 r = kvm_vm_ioctl_get_stats_fd(kvm);
4690 break;
4691 default:
4692 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
4693 }
4694out:
4695 return r;
4696}
4697
4698#ifdef CONFIG_KVM_COMPAT
4699struct compat_kvm_dirty_log {
4700 __u32 slot;
4701 __u32 padding1;
4702 union {
4703 compat_uptr_t dirty_bitmap; /* one bit per page */
4704 __u64 padding2;
4705 };
4706};
4707
4708struct compat_kvm_clear_dirty_log {
4709 __u32 slot;
4710 __u32 num_pages;
4711 __u64 first_page;
4712 union {
4713 compat_uptr_t dirty_bitmap; /* one bit per page */
4714 __u64 padding2;
4715 };
4716};
4717
4718static long kvm_vm_compat_ioctl(struct file *filp,
4719 unsigned int ioctl, unsigned long arg)
4720{
4721 struct kvm *kvm = filp->private_data;
4722 int r;
4723
4724 if (kvm->mm != current->mm || kvm->vm_dead)
4725 return -EIO;
4726 switch (ioctl) {
4727#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4728 case KVM_CLEAR_DIRTY_LOG: {
4729 struct compat_kvm_clear_dirty_log compat_log;
4730 struct kvm_clear_dirty_log log;
4731
4732 if (copy_from_user(&compat_log, (void __user *)arg,
4733 sizeof(compat_log)))
4734 return -EFAULT;
4735 log.slot = compat_log.slot;
4736 log.num_pages = compat_log.num_pages;
4737 log.first_page = compat_log.first_page;
4738 log.padding2 = compat_log.padding2;
4739 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4740
4741 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4742 break;
4743 }
4744#endif
4745 case KVM_GET_DIRTY_LOG: {
4746 struct compat_kvm_dirty_log compat_log;
4747 struct kvm_dirty_log log;
4748
4749 if (copy_from_user(&compat_log, (void __user *)arg,
4750 sizeof(compat_log)))
4751 return -EFAULT;
4752 log.slot = compat_log.slot;
4753 log.padding1 = compat_log.padding1;
4754 log.padding2 = compat_log.padding2;
4755 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4756
4757 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
4758 break;
4759 }
4760 default:
4761 r = kvm_vm_ioctl(filp, ioctl, arg);
4762 }
4763 return r;
4764}
4765#endif
4766
4767static const struct file_operations kvm_vm_fops = {
4768 .release = kvm_vm_release,
4769 .unlocked_ioctl = kvm_vm_ioctl,
4770 .llseek = noop_llseek,
4771 KVM_COMPAT(kvm_vm_compat_ioctl),
4772};
4773
4774bool file_is_kvm(struct file *file)
4775{
4776 return file && file->f_op == &kvm_vm_fops;
4777}
4778EXPORT_SYMBOL_GPL(file_is_kvm);
4779
4780static int kvm_dev_ioctl_create_vm(unsigned long type)
4781{
4782 int r;
4783 struct kvm *kvm;
4784 struct file *file;
4785
4786 kvm = kvm_create_vm(type);
4787 if (IS_ERR(kvm))
4788 return PTR_ERR(kvm);
4789#ifdef CONFIG_KVM_MMIO
4790 r = kvm_coalesced_mmio_init(kvm);
4791 if (r < 0)
4792 goto put_kvm;
4793#endif
4794 r = get_unused_fd_flags(O_CLOEXEC);
4795 if (r < 0)
4796 goto put_kvm;
4797
4798 snprintf(kvm->stats_id, sizeof(kvm->stats_id),
4799 "kvm-%d", task_pid_nr(current));
4800
4801 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
4802 if (IS_ERR(file)) {
4803 put_unused_fd(r);
4804 r = PTR_ERR(file);
4805 goto put_kvm;
4806 }
4807
4808 /*
4809 * Don't call kvm_put_kvm anymore at this point; file->f_op is
4810 * already set, with ->release() being kvm_vm_release(). In error
4811 * cases it will be called by the final fput(file) and will take
4812 * care of doing kvm_put_kvm(kvm).
4813 */
4814 if (kvm_create_vm_debugfs(kvm, r) < 0) {
4815 put_unused_fd(r);
4816 fput(file);
4817 return -ENOMEM;
4818 }
4819 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
4820
4821 fd_install(r, file);
4822 return r;
4823
4824put_kvm:
4825 kvm_put_kvm(kvm);
4826 return r;
4827}
4828
4829static long kvm_dev_ioctl(struct file *filp,
4830 unsigned int ioctl, unsigned long arg)
4831{
4832 long r = -EINVAL;
4833
4834 switch (ioctl) {
4835 case KVM_GET_API_VERSION:
4836 if (arg)
4837 goto out;
4838 r = KVM_API_VERSION;
4839 break;
4840 case KVM_CREATE_VM:
4841 r = kvm_dev_ioctl_create_vm(arg);
4842 break;
4843 case KVM_CHECK_EXTENSION:
4844 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
4845 break;
4846 case KVM_GET_VCPU_MMAP_SIZE:
4847 if (arg)
4848 goto out;
4849 r = PAGE_SIZE; /* struct kvm_run */
4850#ifdef CONFIG_X86
4851 r += PAGE_SIZE; /* pio data page */
4852#endif
4853#ifdef CONFIG_KVM_MMIO
4854 r += PAGE_SIZE; /* coalesced mmio ring page */
4855#endif
4856 break;
4857 case KVM_TRACE_ENABLE:
4858 case KVM_TRACE_PAUSE:
4859 case KVM_TRACE_DISABLE:
4860 r = -EOPNOTSUPP;
4861 break;
4862 default:
4863 return kvm_arch_dev_ioctl(filp, ioctl, arg);
4864 }
4865out:
4866 return r;
4867}
4868
4869static struct file_operations kvm_chardev_ops = {
4870 .unlocked_ioctl = kvm_dev_ioctl,
4871 .llseek = noop_llseek,
4872 KVM_COMPAT(kvm_dev_ioctl),
4873};
4874
4875static struct miscdevice kvm_dev = {
4876 KVM_MINOR,
4877 "kvm",
4878 &kvm_chardev_ops,
4879};
4880
4881static void hardware_enable_nolock(void *junk)
4882{
4883 int cpu = raw_smp_processor_id();
4884 int r;
4885
4886 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
4887 return;
4888
4889 cpumask_set_cpu(cpu, cpus_hardware_enabled);
4890
4891 r = kvm_arch_hardware_enable();
4892
4893 if (r) {
4894 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
4895 atomic_inc(&hardware_enable_failed);
4896 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
4897 }
4898}
4899
4900static int kvm_starting_cpu(unsigned int cpu)
4901{
4902 raw_spin_lock(&kvm_count_lock);
4903 if (kvm_usage_count)
4904 hardware_enable_nolock(NULL);
4905 raw_spin_unlock(&kvm_count_lock);
4906 return 0;
4907}
4908
4909static void hardware_disable_nolock(void *junk)
4910{
4911 int cpu = raw_smp_processor_id();
4912
4913 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
4914 return;
4915 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
4916 kvm_arch_hardware_disable();
4917}
4918
4919static int kvm_dying_cpu(unsigned int cpu)
4920{
4921 raw_spin_lock(&kvm_count_lock);
4922 if (kvm_usage_count)
4923 hardware_disable_nolock(NULL);
4924 raw_spin_unlock(&kvm_count_lock);
4925 return 0;
4926}
4927
4928static void hardware_disable_all_nolock(void)
4929{
4930 BUG_ON(!kvm_usage_count);
4931
4932 kvm_usage_count--;
4933 if (!kvm_usage_count)
4934 on_each_cpu(hardware_disable_nolock, NULL, 1);
4935}
4936
4937static void hardware_disable_all(void)
4938{
4939 raw_spin_lock(&kvm_count_lock);
4940 hardware_disable_all_nolock();
4941 raw_spin_unlock(&kvm_count_lock);
4942}
4943
4944static int hardware_enable_all(void)
4945{
4946 int r = 0;
4947
4948 raw_spin_lock(&kvm_count_lock);
4949
4950 kvm_usage_count++;
4951 if (kvm_usage_count == 1) {
4952 atomic_set(&hardware_enable_failed, 0);
4953 on_each_cpu(hardware_enable_nolock, NULL, 1);
4954
4955 if (atomic_read(&hardware_enable_failed)) {
4956 hardware_disable_all_nolock();
4957 r = -EBUSY;
4958 }
4959 }
4960
4961 raw_spin_unlock(&kvm_count_lock);
4962
4963 return r;
4964}
4965
4966static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
4967 void *v)
4968{
4969 /*
4970 * Some (well, at least mine) BIOSes hang on reboot if
4971 * in vmx root mode.
4972 *
4973 * And Intel TXT required VMX off for all cpu when system shutdown.
4974 */
4975 pr_info("kvm: exiting hardware virtualization\n");
4976 kvm_rebooting = true;
4977 on_each_cpu(hardware_disable_nolock, NULL, 1);
4978 return NOTIFY_OK;
4979}
4980
4981static struct notifier_block kvm_reboot_notifier = {
4982 .notifier_call = kvm_reboot,
4983 .priority = 0,
4984};
4985
4986static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
4987{
4988 int i;
4989
4990 for (i = 0; i < bus->dev_count; i++) {
4991 struct kvm_io_device *pos = bus->range[i].dev;
4992
4993 kvm_iodevice_destructor(pos);
4994 }
4995 kfree(bus);
4996}
4997
4998static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
4999 const struct kvm_io_range *r2)
5000{
5001 gpa_t addr1 = r1->addr;
5002 gpa_t addr2 = r2->addr;
5003
5004 if (addr1 < addr2)
5005 return -1;
5006
5007 /* If r2->len == 0, match the exact address. If r2->len != 0,
5008 * accept any overlapping write. Any order is acceptable for
5009 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
5010 * we process all of them.
5011 */
5012 if (r2->len) {
5013 addr1 += r1->len;
5014 addr2 += r2->len;
5015 }
5016
5017 if (addr1 > addr2)
5018 return 1;
5019
5020 return 0;
5021}
5022
5023static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5024{
5025 return kvm_io_bus_cmp(p1, p2);
5026}
5027
5028static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5029 gpa_t addr, int len)
5030{
5031 struct kvm_io_range *range, key;
5032 int off;
5033
5034 key = (struct kvm_io_range) {
5035 .addr = addr,
5036 .len = len,
5037 };
5038
5039 range = bsearch(&key, bus->range, bus->dev_count,
5040 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5041 if (range == NULL)
5042 return -ENOENT;
5043
5044 off = range - bus->range;
5045
5046 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5047 off--;
5048
5049 return off;
5050}
5051
5052static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5053 struct kvm_io_range *range, const void *val)
5054{
5055 int idx;
5056
5057 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5058 if (idx < 0)
5059 return -EOPNOTSUPP;
5060
5061 while (idx < bus->dev_count &&
5062 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5063 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5064 range->len, val))
5065 return idx;
5066 idx++;
5067 }
5068
5069 return -EOPNOTSUPP;
5070}
5071
5072/* kvm_io_bus_write - called under kvm->slots_lock */
5073int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5074 int len, const void *val)
5075{
5076 struct kvm_io_bus *bus;
5077 struct kvm_io_range range;
5078 int r;
5079
5080 range = (struct kvm_io_range) {
5081 .addr = addr,
5082 .len = len,
5083 };
5084
5085 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5086 if (!bus)
5087 return -ENOMEM;
5088 r = __kvm_io_bus_write(vcpu, bus, &range, val);
5089 return r < 0 ? r : 0;
5090}
5091EXPORT_SYMBOL_GPL(kvm_io_bus_write);
5092
5093/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
5094int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5095 gpa_t addr, int len, const void *val, long cookie)
5096{
5097 struct kvm_io_bus *bus;
5098 struct kvm_io_range range;
5099
5100 range = (struct kvm_io_range) {
5101 .addr = addr,
5102 .len = len,
5103 };
5104
5105 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5106 if (!bus)
5107 return -ENOMEM;
5108
5109 /* First try the device referenced by cookie. */
5110 if ((cookie >= 0) && (cookie < bus->dev_count) &&
5111 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5112 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5113 val))
5114 return cookie;
5115
5116 /*
5117 * cookie contained garbage; fall back to search and return the
5118 * correct cookie value.
5119 */
5120 return __kvm_io_bus_write(vcpu, bus, &range, val);
5121}
5122
5123static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5124 struct kvm_io_range *range, void *val)
5125{
5126 int idx;
5127
5128 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5129 if (idx < 0)
5130 return -EOPNOTSUPP;
5131
5132 while (idx < bus->dev_count &&
5133 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5134 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5135 range->len, val))
5136 return idx;
5137 idx++;
5138 }
5139
5140 return -EOPNOTSUPP;
5141}
5142
5143/* kvm_io_bus_read - called under kvm->slots_lock */
5144int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5145 int len, void *val)
5146{
5147 struct kvm_io_bus *bus;
5148 struct kvm_io_range range;
5149 int r;
5150
5151 range = (struct kvm_io_range) {
5152 .addr = addr,
5153 .len = len,
5154 };
5155
5156 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5157 if (!bus)
5158 return -ENOMEM;
5159 r = __kvm_io_bus_read(vcpu, bus, &range, val);
5160 return r < 0 ? r : 0;
5161}
5162
5163/* Caller must hold slots_lock. */
5164int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5165 int len, struct kvm_io_device *dev)
5166{
5167 int i;
5168 struct kvm_io_bus *new_bus, *bus;
5169 struct kvm_io_range range;
5170
5171 bus = kvm_get_bus(kvm, bus_idx);
5172 if (!bus)
5173 return -ENOMEM;
5174
5175 /* exclude ioeventfd which is limited by maximum fd */
5176 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5177 return -ENOSPC;
5178
5179 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5180 GFP_KERNEL_ACCOUNT);
5181 if (!new_bus)
5182 return -ENOMEM;
5183
5184 range = (struct kvm_io_range) {
5185 .addr = addr,
5186 .len = len,
5187 .dev = dev,
5188 };
5189
5190 for (i = 0; i < bus->dev_count; i++)
5191 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
5192 break;
5193
5194 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5195 new_bus->dev_count++;
5196 new_bus->range[i] = range;
5197 memcpy(new_bus->range + i + 1, bus->range + i,
5198 (bus->dev_count - i) * sizeof(struct kvm_io_range));
5199 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5200 synchronize_srcu_expedited(&kvm->srcu);
5201 kfree(bus);
5202
5203 return 0;
5204}
5205
5206int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5207 struct kvm_io_device *dev)
5208{
5209 int i, j;
5210 struct kvm_io_bus *new_bus, *bus;
5211
5212 lockdep_assert_held(&kvm->slots_lock);
5213
5214 bus = kvm_get_bus(kvm, bus_idx);
5215 if (!bus)
5216 return 0;
5217
5218 for (i = 0; i < bus->dev_count; i++) {
5219 if (bus->range[i].dev == dev) {
5220 break;
5221 }
5222 }
5223
5224 if (i == bus->dev_count)
5225 return 0;
5226
5227 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
5228 GFP_KERNEL_ACCOUNT);
5229 if (new_bus) {
5230 memcpy(new_bus, bus, struct_size(bus, range, i));
5231 new_bus->dev_count--;
5232 memcpy(new_bus->range + i, bus->range + i + 1,
5233 flex_array_size(new_bus, range, new_bus->dev_count - i));
5234 }
5235
5236 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5237 synchronize_srcu_expedited(&kvm->srcu);
5238
5239 /* Destroy the old bus _after_ installing the (null) bus. */
5240 if (!new_bus) {
5241 pr_err("kvm: failed to shrink bus, removing it completely\n");
5242 for (j = 0; j < bus->dev_count; j++) {
5243 if (j == i)
5244 continue;
5245 kvm_iodevice_destructor(bus->range[j].dev);
5246 }
5247 }
5248
5249 kfree(bus);
5250 return new_bus ? 0 : -ENOMEM;
5251}
5252
5253struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5254 gpa_t addr)
5255{
5256 struct kvm_io_bus *bus;
5257 int dev_idx, srcu_idx;
5258 struct kvm_io_device *iodev = NULL;
5259
5260 srcu_idx = srcu_read_lock(&kvm->srcu);
5261
5262 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
5263 if (!bus)
5264 goto out_unlock;
5265
5266 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
5267 if (dev_idx < 0)
5268 goto out_unlock;
5269
5270 iodev = bus->range[dev_idx].dev;
5271
5272out_unlock:
5273 srcu_read_unlock(&kvm->srcu, srcu_idx);
5274
5275 return iodev;
5276}
5277EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
5278
5279static int kvm_debugfs_open(struct inode *inode, struct file *file,
5280 int (*get)(void *, u64 *), int (*set)(void *, u64),
5281 const char *fmt)
5282{
5283 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
5284 inode->i_private;
5285
5286 /*
5287 * The debugfs files are a reference to the kvm struct which
5288 * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe
5289 * avoids the race between open and the removal of the debugfs directory.
5290 */
5291 if (!kvm_get_kvm_safe(stat_data->kvm))
5292 return -ENOENT;
5293
5294 if (simple_attr_open(inode, file, get,
5295 kvm_stats_debugfs_mode(stat_data->desc) & 0222
5296 ? set : NULL,
5297 fmt)) {
5298 kvm_put_kvm(stat_data->kvm);
5299 return -ENOMEM;
5300 }
5301
5302 return 0;
5303}
5304
5305static int kvm_debugfs_release(struct inode *inode, struct file *file)
5306{
5307 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
5308 inode->i_private;
5309
5310 simple_attr_release(inode, file);
5311 kvm_put_kvm(stat_data->kvm);
5312
5313 return 0;
5314}
5315
5316static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
5317{
5318 *val = *(u64 *)((void *)(&kvm->stat) + offset);
5319
5320 return 0;
5321}
5322
5323static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
5324{
5325 *(u64 *)((void *)(&kvm->stat) + offset) = 0;
5326
5327 return 0;
5328}
5329
5330static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
5331{
5332 unsigned long i;
5333 struct kvm_vcpu *vcpu;
5334
5335 *val = 0;
5336
5337 kvm_for_each_vcpu(i, vcpu, kvm)
5338 *val += *(u64 *)((void *)(&vcpu->stat) + offset);
5339
5340 return 0;
5341}
5342
5343static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
5344{
5345 unsigned long i;
5346 struct kvm_vcpu *vcpu;
5347
5348 kvm_for_each_vcpu(i, vcpu, kvm)
5349 *(u64 *)((void *)(&vcpu->stat) + offset) = 0;
5350
5351 return 0;
5352}
5353
5354static int kvm_stat_data_get(void *data, u64 *val)
5355{
5356 int r = -EFAULT;
5357 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
5358
5359 switch (stat_data->kind) {
5360 case KVM_STAT_VM:
5361 r = kvm_get_stat_per_vm(stat_data->kvm,
5362 stat_data->desc->desc.offset, val);
5363 break;
5364 case KVM_STAT_VCPU:
5365 r = kvm_get_stat_per_vcpu(stat_data->kvm,
5366 stat_data->desc->desc.offset, val);
5367 break;
5368 }
5369
5370 return r;
5371}
5372
5373static int kvm_stat_data_clear(void *data, u64 val)
5374{
5375 int r = -EFAULT;
5376 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
5377
5378 if (val)
5379 return -EINVAL;
5380
5381 switch (stat_data->kind) {
5382 case KVM_STAT_VM:
5383 r = kvm_clear_stat_per_vm(stat_data->kvm,
5384 stat_data->desc->desc.offset);
5385 break;
5386 case KVM_STAT_VCPU:
5387 r = kvm_clear_stat_per_vcpu(stat_data->kvm,
5388 stat_data->desc->desc.offset);
5389 break;
5390 }
5391
5392 return r;
5393}
5394
5395static int kvm_stat_data_open(struct inode *inode, struct file *file)
5396{
5397 __simple_attr_check_format("%llu\n", 0ull);
5398 return kvm_debugfs_open(inode, file, kvm_stat_data_get,
5399 kvm_stat_data_clear, "%llu\n");
5400}
5401
5402static const struct file_operations stat_fops_per_vm = {
5403 .owner = THIS_MODULE,
5404 .open = kvm_stat_data_open,
5405 .release = kvm_debugfs_release,
5406 .read = simple_attr_read,
5407 .write = simple_attr_write,
5408 .llseek = no_llseek,
5409};
5410
5411static int vm_stat_get(void *_offset, u64 *val)
5412{
5413 unsigned offset = (long)_offset;
5414 struct kvm *kvm;
5415 u64 tmp_val;
5416
5417 *val = 0;
5418 mutex_lock(&kvm_lock);
5419 list_for_each_entry(kvm, &vm_list, vm_list) {
5420 kvm_get_stat_per_vm(kvm, offset, &tmp_val);
5421 *val += tmp_val;
5422 }
5423 mutex_unlock(&kvm_lock);
5424 return 0;
5425}
5426
5427static int vm_stat_clear(void *_offset, u64 val)
5428{
5429 unsigned offset = (long)_offset;
5430 struct kvm *kvm;
5431
5432 if (val)
5433 return -EINVAL;
5434
5435 mutex_lock(&kvm_lock);
5436 list_for_each_entry(kvm, &vm_list, vm_list) {
5437 kvm_clear_stat_per_vm(kvm, offset);
5438 }
5439 mutex_unlock(&kvm_lock);
5440
5441 return 0;
5442}
5443
5444DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
5445DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
5446
5447static int vcpu_stat_get(void *_offset, u64 *val)
5448{
5449 unsigned offset = (long)_offset;
5450 struct kvm *kvm;
5451 u64 tmp_val;
5452
5453 *val = 0;
5454 mutex_lock(&kvm_lock);
5455 list_for_each_entry(kvm, &vm_list, vm_list) {
5456 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
5457 *val += tmp_val;
5458 }
5459 mutex_unlock(&kvm_lock);
5460 return 0;
5461}
5462
5463static int vcpu_stat_clear(void *_offset, u64 val)
5464{
5465 unsigned offset = (long)_offset;
5466 struct kvm *kvm;
5467
5468 if (val)
5469 return -EINVAL;
5470
5471 mutex_lock(&kvm_lock);
5472 list_for_each_entry(kvm, &vm_list, vm_list) {
5473 kvm_clear_stat_per_vcpu(kvm, offset);
5474 }
5475 mutex_unlock(&kvm_lock);
5476
5477 return 0;
5478}
5479
5480DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
5481 "%llu\n");
5482DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
5483
5484static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
5485{
5486 struct kobj_uevent_env *env;
5487 unsigned long long created, active;
5488
5489 if (!kvm_dev.this_device || !kvm)
5490 return;
5491
5492 mutex_lock(&kvm_lock);
5493 if (type == KVM_EVENT_CREATE_VM) {
5494 kvm_createvm_count++;
5495 kvm_active_vms++;
5496 } else if (type == KVM_EVENT_DESTROY_VM) {
5497 kvm_active_vms--;
5498 }
5499 created = kvm_createvm_count;
5500 active = kvm_active_vms;
5501 mutex_unlock(&kvm_lock);
5502
5503 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
5504 if (!env)
5505 return;
5506
5507 add_uevent_var(env, "CREATED=%llu", created);
5508 add_uevent_var(env, "COUNT=%llu", active);
5509
5510 if (type == KVM_EVENT_CREATE_VM) {
5511 add_uevent_var(env, "EVENT=create");
5512 kvm->userspace_pid = task_pid_nr(current);
5513 } else if (type == KVM_EVENT_DESTROY_VM) {
5514 add_uevent_var(env, "EVENT=destroy");
5515 }
5516 add_uevent_var(env, "PID=%d", kvm->userspace_pid);
5517
5518 if (!IS_ERR(kvm->debugfs_dentry)) {
5519 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
5520
5521 if (p) {
5522 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
5523 if (!IS_ERR(tmp))
5524 add_uevent_var(env, "STATS_PATH=%s", tmp);
5525 kfree(p);
5526 }
5527 }
5528 /* no need for checks, since we are adding at most only 5 keys */
5529 env->envp[env->envp_idx++] = NULL;
5530 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
5531 kfree(env);
5532}
5533
5534static void kvm_init_debug(void)
5535{
5536 const struct file_operations *fops;
5537 const struct _kvm_stats_desc *pdesc;
5538 int i;
5539
5540 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
5541
5542 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
5543 pdesc = &kvm_vm_stats_desc[i];
5544 if (kvm_stats_debugfs_mode(pdesc) & 0222)
5545 fops = &vm_stat_fops;
5546 else
5547 fops = &vm_stat_readonly_fops;
5548 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
5549 kvm_debugfs_dir,
5550 (void *)(long)pdesc->desc.offset, fops);
5551 }
5552
5553 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
5554 pdesc = &kvm_vcpu_stats_desc[i];
5555 if (kvm_stats_debugfs_mode(pdesc) & 0222)
5556 fops = &vcpu_stat_fops;
5557 else
5558 fops = &vcpu_stat_readonly_fops;
5559 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
5560 kvm_debugfs_dir,
5561 (void *)(long)pdesc->desc.offset, fops);
5562 }
5563}
5564
5565static int kvm_suspend(void)
5566{
5567 if (kvm_usage_count)
5568 hardware_disable_nolock(NULL);
5569 return 0;
5570}
5571
5572static void kvm_resume(void)
5573{
5574 if (kvm_usage_count) {
5575 lockdep_assert_not_held(&kvm_count_lock);
5576 hardware_enable_nolock(NULL);
5577 }
5578}
5579
5580static struct syscore_ops kvm_syscore_ops = {
5581 .suspend = kvm_suspend,
5582 .resume = kvm_resume,
5583};
5584
5585static inline
5586struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
5587{
5588 return container_of(pn, struct kvm_vcpu, preempt_notifier);
5589}
5590
5591static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
5592{
5593 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
5594
5595 WRITE_ONCE(vcpu->preempted, false);
5596 WRITE_ONCE(vcpu->ready, false);
5597
5598 __this_cpu_write(kvm_running_vcpu, vcpu);
5599 kvm_arch_sched_in(vcpu, cpu);
5600 kvm_arch_vcpu_load(vcpu, cpu);
5601}
5602
5603static void kvm_sched_out(struct preempt_notifier *pn,
5604 struct task_struct *next)
5605{
5606 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
5607
5608 if (current->on_rq) {
5609 WRITE_ONCE(vcpu->preempted, true);
5610 WRITE_ONCE(vcpu->ready, true);
5611 }
5612 kvm_arch_vcpu_put(vcpu);
5613 __this_cpu_write(kvm_running_vcpu, NULL);
5614}
5615
5616/**
5617 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
5618 *
5619 * We can disable preemption locally around accessing the per-CPU variable,
5620 * and use the resolved vcpu pointer after enabling preemption again,
5621 * because even if the current thread is migrated to another CPU, reading
5622 * the per-CPU value later will give us the same value as we update the
5623 * per-CPU variable in the preempt notifier handlers.
5624 */
5625struct kvm_vcpu *kvm_get_running_vcpu(void)
5626{
5627 struct kvm_vcpu *vcpu;
5628
5629 preempt_disable();
5630 vcpu = __this_cpu_read(kvm_running_vcpu);
5631 preempt_enable();
5632
5633 return vcpu;
5634}
5635EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
5636
5637/**
5638 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
5639 */
5640struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
5641{
5642 return &kvm_running_vcpu;
5643}
5644
5645#ifdef CONFIG_GUEST_PERF_EVENTS
5646static unsigned int kvm_guest_state(void)
5647{
5648 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
5649 unsigned int state;
5650
5651 if (!kvm_arch_pmi_in_guest(vcpu))
5652 return 0;
5653
5654 state = PERF_GUEST_ACTIVE;
5655 if (!kvm_arch_vcpu_in_kernel(vcpu))
5656 state |= PERF_GUEST_USER;
5657
5658 return state;
5659}
5660
5661static unsigned long kvm_guest_get_ip(void)
5662{
5663 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
5664
5665 /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
5666 if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
5667 return 0;
5668
5669 return kvm_arch_vcpu_get_ip(vcpu);
5670}
5671
5672static struct perf_guest_info_callbacks kvm_guest_cbs = {
5673 .state = kvm_guest_state,
5674 .get_ip = kvm_guest_get_ip,
5675 .handle_intel_pt_intr = NULL,
5676};
5677
5678void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
5679{
5680 kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
5681 perf_register_guest_info_callbacks(&kvm_guest_cbs);
5682}
5683void kvm_unregister_perf_callbacks(void)
5684{
5685 perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
5686}
5687#endif
5688
5689struct kvm_cpu_compat_check {
5690 void *opaque;
5691 int *ret;
5692};
5693
5694static void check_processor_compat(void *data)
5695{
5696 struct kvm_cpu_compat_check *c = data;
5697
5698 *c->ret = kvm_arch_check_processor_compat(c->opaque);
5699}
5700
5701int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
5702 struct module *module)
5703{
5704 struct kvm_cpu_compat_check c;
5705 int r;
5706 int cpu;
5707
5708 r = kvm_arch_init(opaque);
5709 if (r)
5710 goto out_fail;
5711
5712 /*
5713 * kvm_arch_init makes sure there's at most one caller
5714 * for architectures that support multiple implementations,
5715 * like intel and amd on x86.
5716 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
5717 * conflicts in case kvm is already setup for another implementation.
5718 */
5719 r = kvm_irqfd_init();
5720 if (r)
5721 goto out_irqfd;
5722
5723 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
5724 r = -ENOMEM;
5725 goto out_free_0;
5726 }
5727
5728 r = kvm_arch_hardware_setup(opaque);
5729 if (r < 0)
5730 goto out_free_1;
5731
5732 c.ret = &r;
5733 c.opaque = opaque;
5734 for_each_online_cpu(cpu) {
5735 smp_call_function_single(cpu, check_processor_compat, &c, 1);
5736 if (r < 0)
5737 goto out_free_2;
5738 }
5739
5740 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
5741 kvm_starting_cpu, kvm_dying_cpu);
5742 if (r)
5743 goto out_free_2;
5744 register_reboot_notifier(&kvm_reboot_notifier);
5745
5746 /* A kmem cache lets us meet the alignment requirements of fx_save. */
5747 if (!vcpu_align)
5748 vcpu_align = __alignof__(struct kvm_vcpu);
5749 kvm_vcpu_cache =
5750 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
5751 SLAB_ACCOUNT,
5752 offsetof(struct kvm_vcpu, arch),
5753 offsetofend(struct kvm_vcpu, stats_id)
5754 - offsetof(struct kvm_vcpu, arch),
5755 NULL);
5756 if (!kvm_vcpu_cache) {
5757 r = -ENOMEM;
5758 goto out_free_3;
5759 }
5760
5761 for_each_possible_cpu(cpu) {
5762 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
5763 GFP_KERNEL, cpu_to_node(cpu))) {
5764 r = -ENOMEM;
5765 goto out_free_4;
5766 }
5767 }
5768
5769 r = kvm_async_pf_init();
5770 if (r)
5771 goto out_free_5;
5772
5773 kvm_chardev_ops.owner = module;
5774
5775 r = misc_register(&kvm_dev);
5776 if (r) {
5777 pr_err("kvm: misc device register failed\n");
5778 goto out_unreg;
5779 }
5780
5781 register_syscore_ops(&kvm_syscore_ops);
5782
5783 kvm_preempt_ops.sched_in = kvm_sched_in;
5784 kvm_preempt_ops.sched_out = kvm_sched_out;
5785
5786 kvm_init_debug();
5787
5788 r = kvm_vfio_ops_init();
5789 WARN_ON(r);
5790
5791 return 0;
5792
5793out_unreg:
5794 kvm_async_pf_deinit();
5795out_free_5:
5796 for_each_possible_cpu(cpu)
5797 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
5798out_free_4:
5799 kmem_cache_destroy(kvm_vcpu_cache);
5800out_free_3:
5801 unregister_reboot_notifier(&kvm_reboot_notifier);
5802 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5803out_free_2:
5804 kvm_arch_hardware_unsetup();
5805out_free_1:
5806 free_cpumask_var(cpus_hardware_enabled);
5807out_free_0:
5808 kvm_irqfd_exit();
5809out_irqfd:
5810 kvm_arch_exit();
5811out_fail:
5812 return r;
5813}
5814EXPORT_SYMBOL_GPL(kvm_init);
5815
5816void kvm_exit(void)
5817{
5818 int cpu;
5819
5820 debugfs_remove_recursive(kvm_debugfs_dir);
5821 misc_deregister(&kvm_dev);
5822 for_each_possible_cpu(cpu)
5823 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
5824 kmem_cache_destroy(kvm_vcpu_cache);
5825 kvm_async_pf_deinit();
5826 unregister_syscore_ops(&kvm_syscore_ops);
5827 unregister_reboot_notifier(&kvm_reboot_notifier);
5828 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5829 on_each_cpu(hardware_disable_nolock, NULL, 1);
5830 kvm_arch_hardware_unsetup();
5831 kvm_arch_exit();
5832 kvm_irqfd_exit();
5833 free_cpumask_var(cpus_hardware_enabled);
5834 kvm_vfio_ops_exit();
5835}
5836EXPORT_SYMBOL_GPL(kvm_exit);
5837
5838struct kvm_vm_worker_thread_context {
5839 struct kvm *kvm;
5840 struct task_struct *parent;
5841 struct completion init_done;
5842 kvm_vm_thread_fn_t thread_fn;
5843 uintptr_t data;
5844 int err;
5845};
5846
5847static int kvm_vm_worker_thread(void *context)
5848{
5849 /*
5850 * The init_context is allocated on the stack of the parent thread, so
5851 * we have to locally copy anything that is needed beyond initialization
5852 */
5853 struct kvm_vm_worker_thread_context *init_context = context;
5854 struct task_struct *parent;
5855 struct kvm *kvm = init_context->kvm;
5856 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
5857 uintptr_t data = init_context->data;
5858 int err;
5859
5860 err = kthread_park(current);
5861 /* kthread_park(current) is never supposed to return an error */
5862 WARN_ON(err != 0);
5863 if (err)
5864 goto init_complete;
5865
5866 err = cgroup_attach_task_all(init_context->parent, current);
5867 if (err) {
5868 kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
5869 __func__, err);
5870 goto init_complete;
5871 }
5872
5873 set_user_nice(current, task_nice(init_context->parent));
5874
5875init_complete:
5876 init_context->err = err;
5877 complete(&init_context->init_done);
5878 init_context = NULL;
5879
5880 if (err)
5881 goto out;
5882
5883 /* Wait to be woken up by the spawner before proceeding. */
5884 kthread_parkme();
5885
5886 if (!kthread_should_stop())
5887 err = thread_fn(kvm, data);
5888
5889out:
5890 /*
5891 * Move kthread back to its original cgroup to prevent it lingering in
5892 * the cgroup of the VM process, after the latter finishes its
5893 * execution.
5894 *
5895 * kthread_stop() waits on the 'exited' completion condition which is
5896 * set in exit_mm(), via mm_release(), in do_exit(). However, the
5897 * kthread is removed from the cgroup in the cgroup_exit() which is
5898 * called after the exit_mm(). This causes the kthread_stop() to return
5899 * before the kthread actually quits the cgroup.
5900 */
5901 rcu_read_lock();
5902 parent = rcu_dereference(current->real_parent);
5903 get_task_struct(parent);
5904 rcu_read_unlock();
5905 cgroup_attach_task_all(parent, current);
5906 put_task_struct(parent);
5907
5908 return err;
5909}
5910
5911int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
5912 uintptr_t data, const char *name,
5913 struct task_struct **thread_ptr)
5914{
5915 struct kvm_vm_worker_thread_context init_context = {};
5916 struct task_struct *thread;
5917
5918 *thread_ptr = NULL;
5919 init_context.kvm = kvm;
5920 init_context.parent = current;
5921 init_context.thread_fn = thread_fn;
5922 init_context.data = data;
5923 init_completion(&init_context.init_done);
5924
5925 thread = kthread_run(kvm_vm_worker_thread, &init_context,
5926 "%s-%d", name, task_pid_nr(current));
5927 if (IS_ERR(thread))
5928 return PTR_ERR(thread);
5929
5930 /* kthread_run is never supposed to return NULL */
5931 WARN_ON(thread == NULL);
5932
5933 wait_for_completion(&init_context.init_done);
5934
5935 if (!init_context.err)
5936 *thread_ptr = thread;
5937
5938 return init_context.err;
5939}