// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests in a preemptible context */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
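
/*
 * Illustrative sketch (not part of this file): a typical exit handler
 * re-enters the guest only when kvmppc_prepare_to_enter() returns 1;
 * anything <= 0 is propagated back to the host. The function name below
 * is hypothetical.
 *
 *	static int example_return_to_guest(struct kvm_vcpu *vcpu)
 *	{
 *		int s = kvmppc_prepare_to_enter(vcpu);
 *
 *		if (s <= 0)
 *			return s;	// e.g. -EINTR, exit_reason already set
 *		// s == 1: interrupts are hard-disabled, safe to enter
 *		return RESUME_GUEST;
 *	}
 */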

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
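
/*
 * Illustrative sketch (not part of this file): the register convention
 * decoded above. A paravirtualized guest passes the hypercall number in
 * r11 and up to four parameters in r3-r6; the return code comes back in
 * r3 and a second return value in r4 (stored via kvmppc_set_gpr() above).
 * Guest-side pseudo-assembly for an EV_IDLE request, with the actual
 * trap sequence taken from KVM_PPC_GET_PVINFO:
 *
 *	li	r11, <EV_HCALL_TOKEN(EV_IDLE)>
 *	<hcall sequence from pvinfo->hcall[]>
 *	# on return: r3 = EV_SUCCESS or EV_UNIMPLEMENTED, r4 = second value
 */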

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (rc)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
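
/*
 * Illustrative sketch (not part of this file): emulation code can pull
 * data through the guest MMU with kvmppc_ld()/kvmppc_st(). On success
 * *eaddr is rewritten to the translated real address. Assuming a Book3S
 * context where kvmppc_get_pc() is available:
 *
 *	u32 insn;
 *	ulong ea = kvmppc_get_pc(vcpu);
 *
 *	if (kvmppc_ld(vcpu, &ea, sizeof(insn), &insn, false) == EMULATE_DONE)
 *		;	// insn holds the instruction word at the guest PC
 */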

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}
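
/*
 * Illustrative sketch (not part of this file): userspace chooses between
 * the HV and PR implementations by passing the VM type to KVM_CREATE_VM;
 * type 0 falls back to whichever module is loaded, preferring HV, as
 * implemented above.
 *
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);
 *	if (vm_fd < 0 && errno == EINVAL)
 *		;	// PR KVM not available on this host
 */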

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
		       kvmppc_hv_ops->hash_v3_possible());
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
		       !kvmppc_hv_ops->enable_dawr1(NULL));
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}
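
/*
 * Illustrative sketch (not part of this file): the values computed above
 * reach userspace through KVM_CHECK_EXTENSION, issued either on the
 * system fd (kvm == NULL here, so hv_enabled is guessed) or on a VM fd
 * (kvm != NULL, so the real VM type is known).
 *
 *	int smt = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *	// smt: emulated or host SMT mode, 0 when the cap doesn't apply
 */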

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	vcpu->arch.waitp = &vcpu->wait;
	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
					u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
					     u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
					    u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
				       u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}
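
/*
 * Illustrative note on the offset helpers above: for a 16-byte vector
 * and element_size 4, elts == 4, so with kvmppc_need_byteswap() true
 * index 0 maps to offset 3 and index 3 to offset 0; without byteswap
 * the mapping is the identity.
 */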


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m"UPD_CONSTR (fprd) : "m"UPD_CONSTR (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m"UPD_CONSTR (fprs) : "m"UPD_CONSTR (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */
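
/*
 * Illustrative note: sp_to_dp()/dp_to_sp() bounce the value through
 * fr0 so the FPU performs the IEEE conversion ("lfs" widens to double
 * precision on load, "stfs" rounds to single on store). For example,
 * the 32-bit pattern 0x3f800000 (1.0f) becomes the 64-bit pattern
 * 0x3ff0000000000000 (1.0).
 */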

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 gpr;

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	struct kvm_run *run = vcpu->run;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
}
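
/*
 * Illustrative sketch (not part of this file): an instruction emulator
 * picks the variant matching the load's extension semantics; the
 * extension itself is applied later in kvmppc_complete_mmio_load().
 * The register number below is hypothetical.
 *
 *	// lhz rT -> zero-extended 2-byte load
 *	emulated = kvmppc_handle_load(vcpu, 5, 2, 1);
 *	// lha rT -> sign-extended 2-byte load
 *	emulated = kvmppc_handle_loads(vcpu, 5, 2, 1);
 */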

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
						is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
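
/*
 * Illustrative note: the value is always stored at the lowest bytes of
 * run->mmio.data, swabbed first when guest and host endianness differ.
 * E.g. a 2-byte store of 0x1234 with host_swabbed set is written out as
 * swab16(0x1234) == 0x3412.
 */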

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
			    int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(vcpu,
					       val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
				run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vsx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
						is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return result;
}

int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vsx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;

			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(vcpu, val, bytes,
					       is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
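
/*
 * Illustrative sketch (not part of this file): userspace reaches the
 * VRSAVE fallback above through the ONE_REG interface.
 *
 *	__u32 vrsave;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_PPC_VRSAVE,
 *		.addr = (__u64)(uintptr_t)&vrsave,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */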

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif
	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -ENXIO;
		if (!xive_enabled())
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		vcpu_load(vcpu);
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		vcpu_load(vcpu);
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
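
/*
 * Illustrative note: the instruction images above are formed by OR-ing
 * the immediate into the base opcode. Assuming KVM_SC_MAGIC_R0 is
 * 0x4b564d21 ("KVM!"), hcall[0] becomes 0x3c000000 | 0x4b56, i.e.
 * 0x3c004b56 ("lis r0, 0x4b56"), and hcall[1] becomes 0x60004d21
 * ("ori r0, r0, 0x4d21").
 */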
2129
2130int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2131 bool line_status)
2132{
2133 if (!irqchip_in_kernel(kvm))
2134 return -ENXIO;
2135
2136 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2137 irq_event->irq, irq_event->level,
2138 line_status);
2139 return 0;
2140}
2141
2142
2143int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2144 struct kvm_enable_cap *cap)
2145{
2146 int r;
2147
2148 if (cap->flags)
2149 return -EINVAL;
2150
2151 switch (cap->cap) {
2152#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2153 case KVM_CAP_PPC_ENABLE_HCALL: {
2154 unsigned long hcall = cap->args[0];
2155
2156 r = -EINVAL;
2157 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2158 cap->args[1] > 1)
2159 break;
2160 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2161 break;
2162 if (cap->args[1])
2163 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2164 else
2165 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2166 r = 0;
2167 break;
2168 }
2169 case KVM_CAP_PPC_SMT: {
2170 unsigned long mode = cap->args[0];
2171 unsigned long flags = cap->args[1];
2172
2173 r = -EINVAL;
2174 if (kvm->arch.kvm_ops->set_smt_mode)
2175 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2176 break;
2177 }
2178
2179 case KVM_CAP_PPC_NESTED_HV:
2180 r = -EINVAL;
2181 if (!is_kvmppc_hv_enabled(kvm) ||
2182 !kvm->arch.kvm_ops->enable_nested)
2183 break;
2184 r = kvm->arch.kvm_ops->enable_nested(kvm);
2185 break;
2186#endif
2187#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2188 case KVM_CAP_PPC_SECURE_GUEST:
2189 r = -EINVAL;
2190 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2191 break;
2192 r = kvm->arch.kvm_ops->enable_svm(kvm);
2193 break;
2194 case KVM_CAP_PPC_DAWR1:
2195 r = -EINVAL;
2196 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2197 break;
2198 r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2199 break;
2200#endif
2201 default:
2202 r = -EINVAL;
2203 break;
2204 }
2205
2206 return r;
2207}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	/* If the hcall failed, leave *cp zeroed: nothing is reported. */
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

/*
 * Return true if @fw_features has a child node @name carrying the
 * zero-length property @state ("enabled" or "disabled").
 */
static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}
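
/*
 * Shape of the firmware data probed below, for reference (a sketch;
 * the exact placement of the ibm,opal node in the tree is an
 * assumption here, but the node names match the strings passed to
 * have_fw_feat() and the state is a presence-only property):
 *
 *	ibm,opal {
 *		fw-features {
 *			inst-spec-barrier-ori31,31,0 {
 *				enabled;
 *			};
 *			needs-l1d-flush-msr-pr-0-to-1 {
 *				disabled;
 *			};
 *		};
 *	};
 */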

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
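
/*
 * Userspace-side usage, for reference (a minimal sketch): a VMM reads
 * these bits with KVM_PPC_GET_CPU_CHAR to decide what to report to the
 * guest, e.g. in response to H_GET_CPU_CHARACTERISTICS.
 *
 *	struct kvm_ppc_cpu_char cc;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) < 0)
 *		err(1, "KVM_PPC_GET_CPU_CHAR");
 *	if (cc.behaviour & cc.behaviour_mask & KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR)
 *		advertise_l1d_flush_to_guest();
 *
 * advertise_l1d_flush_to_guest() is a hypothetical VMM helper.
 */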
#endif

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		/* Convert the legacy 32-bit request to the 64-bit form. */
		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_SVM_OFF: {
		struct kvm *kvm = filp->private_data;

		r = 0;
		if (!kvm->arch.kvm_ops->svm_off)
			goto out;

		r = kvm->arch.kvm_ops->svm_off(kvm);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
		/* Retry if another caller grabbed the bit meanwhile. */
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
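
/*
 * Typical lifecycle of the LPID allocator, for reference (a sketch;
 * the actual bound passed to kvmppc_init_lpid() is platform specific):
 *
 *	kvmppc_init_lpid(KVMPPC_NR_LPIDS);	(once, at backend init)
 *	...
 *	lpid = kvmppc_alloc_lpid();		(per VM, at create time)
 *	if (lpid < 0)
 *		return lpid;
 *	...
 *	kvmppc_free_lpid(lpid);			(at VM destruction)
 */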

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);