// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <linux/of.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/setup.h>

#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

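/*
 * These ops pointers are expected to be filled in by the HV and PR
 * implementations when they initialise; kvm_arch_init_vm() below picks
 * one of them for each new VM, defaulting to HV when both are loaded.
 */
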
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

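/*
 * Usage sketch for the helper above (pattern only, not a verbatim
 * caller): a subarch exit handler typically does
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;	(bail out to the host)
 *	(otherwise enter the guest with interrupts hard-disabled)
 */
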
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_halt(vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

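/*
 * For reference, kvmppc_kvm_pv() above implements the ePAPR-style
 * paravirtual hypercall ABI as visible in its body: the hypercall
 * number arrives in r11, up to four arguments in r3..r6 (truncated to
 * 32 bits when MSR_SF is clear), the primary status goes back in r3
 * via the caller, and a second return value is written to r4 here.
 */
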
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
				      last_inst);

		/*
		 * Injecting a Data Storage here is a bit more
		 * accurate since the instruction that caused the
		 * access could still be a valid one.
		 */
		if (!IS_ENABLED(CONFIG_BOOKE)) {
			ulong dsisr = DSISR_BADACCESS;

			if (vcpu->mmio_is_write)
				dsisr |= DSISR_ISSTORE;

			kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
		} else {
			/*
			 * BookE does not send a SIGBUS on a bad
			 * fault, so use a Program interrupt instead
			 * to avoid a fault loop.
			 */
			kvmppc_core_queue_program(vcpu, 0);
		}

		r = RESUME_GUEST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	kvm_vcpu_srcu_read_lock(vcpu);
	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (rc)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

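/*
 * Note on kvmppc_st()/kvmppc_ld() above: they mix two return
 * conventions. Negative errno values (-EPERM, -ENOEXEC, ...) indicate
 * translation failures, while EMULATE_DONE/EMULATE_DO_MMIO report
 * whether the access was handled in the kernel or must be bounced to
 * userspace as MMIO, so callers have to check both ranges.
 */
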
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	int r;

	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (!try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	r = kvmppc_core_init_vm(kvm);
	if (r)
		module_put(kvm_ops->owner);
	return r;
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_destroy_vcpus(kvm);

	mutex_lock(&kvm->lock);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
	case KVM_CAP_IRQFD_RESAMPLE:
		r = !xive_enabled();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
		       kvmppc_hv_ops->hash_v3_possible());
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
		else
			r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
		       !kvmppc_hv_ops->enable_dawr1(NULL));
		break;
	case KVM_CAP_PPC_RPT_INVALIDATE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_AIL_MODE_3:
		r = 0;
		/*
		 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
		 * The POWER9s can support it if the guest runs in hash mode,
		 * but QEMU doesn't necessarily query the capability in time.
		 */
		if (hv_enabled) {
			if (kvmhv_on_pseries()) {
				if (pseries_reloc_on_exception())
					r = 1;
			} else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
				  !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
				r = 1;
			}
		}
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, old, new, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	rcuwait_init(&vcpu->arch.wait);
	vcpu->arch.waitp = &vcpu->arch.wait;
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

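/*
 * Worked example for the two helpers above: on a little-endian host
 * (no __BIG_ENDIAN), dword index 0 maps to offset 1 and word index 0
 * maps to offset 3, i.e. architected element 0 lives in the
 * highest-numbered slot of the in-memory register image; on a
 * big-endian host the mapping is the identity.
 */
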
static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

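/*
 * The two helpers above let the FPU itself do the format conversion:
 * lfs expands single to double precision as it loads into fr0, and
 * stfs narrows double to single as it stores, with the
 * preempt/enable_kernel_fp() bracket making the transient use of fr0
 * safe. Without CONFIG_PPC_FPU the macros turn both conversions into
 * no-ops.
 */
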
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 gpr;

	if (run->mmio.len > sizeof(gpr))
		return;

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	struct kvm_run *run = vcpu->run;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data))
		return EMULATE_FAIL;

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
}

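/*
 * MMIO load round trip in brief: __kvmppc_handle_load() fills in
 * vcpu->run->mmio and records the destination in vcpu->arch.io_gpr;
 * if the in-kernel I/O bus cannot satisfy the access, the exit is
 * handed to userspace and the returned data is folded back into guest
 * state by kvmppc_complete_mmio_load() on the next KVM_RUN.
 */
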
#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data))
		return EMULATE_FAIL;

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(vcpu,
			 val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

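/*
 * Vector loads and stores wider than a single MMIO transaction are
 * emulated as a series of exits: mmio_vsx_copy_nums (and the VMX
 * equivalent below) counts the slices still outstanding, and
 * kvm_arch_vcpu_ioctl_run() keeps re-entering the loadstore helpers
 * until it drops to zero.
 */
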
#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return result;
}

int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;

			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(vcpu, val, bytes,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

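/*
 * Userspace reaches the two functions above via the generic
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls, roughly like this
 * (sketch, error handling omitted):
 *
 *	__u64 vrsave;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_VRSAVE,
 *		.addr = (__u64)(unsigned long)&vrsave,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */
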
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif

	/*
	 * We're already returning to userspace, don't pass the
	 * RESUME_HOST flags along.
	 */
	if (r > 0)
		r = 0;

	vcpu_put(vcpu);
	return r;
}

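/*
 * A note on the 'out' label above: it is only compiled in under
 * CONFIG_ALTIVEC, yet the VSX path also jumps to it. That is safe
 * provided CONFIG_VSX depends on CONFIG_ALTIVEC (as the powerpc
 * Kconfig appears to enforce), so the label exists whenever a goto
 * to it is compiled.
 */
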
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -ENXIO;
		if (!xive_enabled())
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

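/*
 * A guest would use the sequence assembled above roughly as follows
 * (sketch, following the convention in kvmppc_kvm_pv(): hypercall
 * number in r11, arguments in r3..r6, status returned in r3):
 *
 *	lis	r0, KVM_SC_MAGIC_R0@h
 *	ori	r0, r0, KVM_SC_MAGIC_R0@l
 *	sc
 *	nop
 */
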
2158bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
2159{
2160 int ret = 0;
2161
2162#ifdef CONFIG_KVM_MPIC
2163 ret = ret || (kvm->arch.mpic != NULL);
2164#endif
2165#ifdef CONFIG_KVM_XICS
2166 ret = ret || (kvm->arch.xics != NULL);
2167 ret = ret || (kvm->arch.xive != NULL);
2168#endif
2169 smp_rmb();
2170 return ret;
2171}
2172
2173int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2174 bool line_status)
2175{
2176 if (!kvm_arch_irqchip_in_kernel(kvm))
2177 return -ENXIO;
2178
2179 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2180 irq_event->irq, irq_event->level,
2181 line_status);
2182 return 0;
2183}
2184
2185
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
			break;
		r = kvm->arch.kvm_ops->enable_svm(kvm);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
			break;
		r = kvm->arch.kvm_ops->enable_dawr1(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
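
/*
 * Usage sketch (hypothetical userspace code): the per-VM capabilities
 * above are toggled with the KVM_ENABLE_CAP vm ioctl. For instance,
 * enabling in-kernel handling of the H_LOGICAL_CI_LOAD hcall; the
 * choice of hcall is illustrative, any implemented opcode that is a
 * multiple of 4 and at most MAX_HCALL_OPCODE works.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_PPC_ENABLE_HCALL,
 *		.args = { H_LOGICAL_CI_LOAD, 1 },	// opcode, enable=1
 *	};
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 */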

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}
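
/*
 * For illustration, have_fw_feat() walks a firmware-features node of
 * roughly this shape (the node and property names match what is probed
 * below; the exact set of children depends on the firmware):
 *
 *	ibm,opal {
 *		fw-features {
 *			inst-spec-barrier-ori31,31,0 {
 *				enabled;
 *			};
 *			fw-bcctrl-serialized {
 *				disabled;
 *			};
 *		};
 *	};
 *
 * have_fw_feat(fw_features, "enabled", "inst-spec-barrier-ori31,31,0")
 * then returns true iff that child node exists and carries an
 * "enabled" property.
 */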

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif
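
/*
 * Usage sketch (hypothetical userspace code): kvmppc_get_cpu_char()
 * feeds the KVM_PPC_GET_CPU_CHAR vm ioctl handled below, which a VMM
 * can use to decide which mitigation support to advertise to a guest:
 *
 *	struct kvm_ppc_cpu_char cc;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) == 0) {
 *		// Only bits set in the corresponding mask are valid.
 *		bool flush_pr = cc.behaviour & cc.behaviour_mask &
 *				KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
 *	}
 */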

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
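
	/*
	 * Worked example for the conversion above (illustrative numbers):
	 * a legacy KVM_CREATE_SPAPR_TCE request with window_size = 1 GiB
	 * (0x40000000) becomes a 64-bit request for
	 * 0x40000000 >> 12 = 0x40000 TCE entries of 4 KiB each, starting
	 * at bus offset 0.
	 */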
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_SVM_OFF: {
		struct kvm *kvm = filp->private_data;

		r = 0;
		if (!kvm->arch.kvm_ops->svm_off)
			goto out;

		r = kvm->arch.kvm_ops->svm_off(kvm);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static DEFINE_IDA(lpid_inuse);
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	int lpid;

	/* The host LPID must always be 0 (allocation starts at 1) */
	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
	if (lpid < 0) {
		if (lpid == -ENOMEM)
			pr_err("%s: Out of memory\n", __func__);
		else
			pr_err("%s: No LPIDs free\n", __func__);
		return -ENOMEM;
	}

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_free_lpid(long lpid)
{
	ida_free(&lpid_inuse, lpid);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

/* nr_lpids_param includes the host LPID */
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = nr_lpids_param;
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
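
/*
 * Lifecycle sketch (illustrative, kernel-internal callers): a platform
 * module sizes the LPID space once at init time, after which each VM
 * allocates one LPID and frees it on destruction. The sizing value
 * below is a made-up placeholder.
 *
 *	kvmppc_init_lpid(1UL << 12);		// e.g. 4096 LPIDs incl. host
 *
 *	long lpid = kvmppc_alloc_lpid();	// returns >= 1; 0 is the host's
 *	if (lpid < 0)
 *		return lpid;			// -ENOMEM when exhausted
 *	...
 *	kvmppc_free_lpid(lpid);
 */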

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);

void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
}

int kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	if (kvm->arch.kvm_ops->create_vm_debugfs)
		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
	return 0;
}