/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("wait", wait_exits),
	VCPU_STAT("cache", cache_exits),
	VCPU_STAT("signal", signal_exits),
	VCPU_STAT("interrupt", int_exits),
	VCPU_STAT("cop_unusable", cop_unusable_exits),
	VCPU_STAT("tlbmod", tlbmod_exits),
	VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
	VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
	VCPU_STAT("addrerr_st", addrerr_st_exits),
	VCPU_STAT("addrerr_ld", addrerr_ld_exits),
	VCPU_STAT("syscall", syscall_exits),
	VCPU_STAT("resvd_inst", resvd_inst_exits),
	VCPU_STAT("break_inst", break_inst_exits),
	VCPU_STAT("trap_inst", trap_inst_exits),
	VCPU_STAT("msa_fpe", msa_fpe_exits),
	VCPU_STAT("fpe", fpe_exits),
	VCPU_STAT("msa_disabled", msa_disabled_exits),
	VCPU_STAT("flush_dcache", flush_dcache_exits),
#ifdef CONFIG_KVM_MIPS_VZ
	VCPU_STAT("vz_gpsi", vz_gpsi_exits),
	VCPU_STAT("vz_gsfc", vz_gsfc_exits),
	VCPU_STAT("vz_hc", vz_hc_exits),
	VCPU_STAT("vz_grr", vz_grr_exits),
	VCPU_STAT("vz_gva", vz_gva_exits),
	VCPU_STAT("vz_ghfc", vz_ghfc_exits),
	VCPU_STAT("vz_gpa", vz_gpa_exits),
	VCPU_STAT("vz_resvd", vz_resvd_exits),
#ifdef CONFIG_CPU_LOONGSON64
	VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
#endif
#endif
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	{NULL}
};

bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = true;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = false;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

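/* Always allow kvm_vcpu_kick() to IPI a VCPU that is running in guest mode */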
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void)
{
	return kvm_mips_callbacks->hardware_enable();
}

void kvm_arch_hardware_disable(void)
{
	kvm_mips_callbacks->hardware_disable();
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

extern void kvm_init_loongson_ipi(struct kvm *kvm);

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
#ifdef CONFIG_KVM_MIPS_VZ
	case KVM_VM_MIPS_VZ:
#else
	case KVM_VM_MIPS_TE:
#endif
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	}

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

#ifdef CONFIG_CPU_LOONGSON64
	kvm_init_loongson_ipi(kvm);
#endif

	return 0;
}

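/* Destroy each VCPU, then clear the VCPU array under kvm->lock */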
void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_vcpu_destroy(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);

	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_all(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		/* Let implementation do the rest */
		if (needs_flush)
			kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}

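/* Dump a generated handler as pseudo-assembly via pr_debug() for inspection */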
static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	rcuwait_wake_up(&vcpu->wait);

	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	kvm_debug("kvm @ %p: create cpu %d at %p\n",
		  vcpu->kvm, vcpu->vcpu_id, vcpu);

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_vcpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	/* Initial guest state */
	err = kvm_mips_callbacks->vcpu_setup(vcpu);
	if (err)
		goto out_free_commpage;

	return 0;

out_free_commpage:
	kfree(vcpu->arch.kseg0_commpage);
out_free_gebase:
	kfree(gebase);
out_uninit_vcpu:
	kvm_mips_callbacks->vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);

	kvm_mips_callbacks->vcpu_uninit(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r = -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->immediate_exit)
		goto out;

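	/* Save and disable the host FPU/MSA state before entering the guest */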
	lose_fpu(1);

	local_irq_disable();
	guest_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_callbacks->vcpu_run(run, vcpu);

	trace_kvm_out(vcpu);
	guest_exit_irqoff();
	local_irq_enable();

out:
	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
	    intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

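	/* Positive irq numbers raise the interrupt line, negative ones lower it */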
	if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	rcuwait_wake_up(&dvcpu->wait);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

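/* Base set of register indices reported to userspace via KVM_GET_REG_LIST */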
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};

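/* Count every register index that kvm_mips_copy_reg_indices() will emit */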
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}

static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		/* Read the value now; the switch below applies it */
		if (copy_from_user(vs, uaddr, 16))
			return -EFAULT;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
			       unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_mips_set_reg(vcpu, &reg);
		else
			r = kvm_mips_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

	vcpu_put(vcpu);
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	/* Let implementation handle TLB/GVA invalidation */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

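/* Runs once from kvm_init(); installs the emulation implementation callbacks */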
int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

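/* Set host Status bits (currently just DSP's MX) needed while handling exits */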
static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* re-enable HTW before enabling interrupts */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
		/*
		 * Do a privilege check, if in UM most of these exit conditions
		 * end up causing an exception to be delivered to the Guest
		 * Kernel
		 */
		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
		if (er == EMULATE_PRIV_FAIL) {
			goto skip_emul;
		} else if (er == EMULATE_FAIL) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
			goto skip_emul;
		}
	}

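	/* Dispatch on the CP0 Cause.ExcCode of this exit */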
	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

skip_emul:
	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the VCPU as outside of guest mode and not needing an IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(run, vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}

	/* Disable HTW before returning to guest or host */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_stop();

	return ret;
}

/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif

/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_config5(MIPS_CONF5_MSAEN);
			enable_fpu_hazard();
		}

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_status(ST0_CU1);
			enable_fpu_hazard();
		}

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

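/* Default mapping from virtual interrupt priority to Cause register IRQ line */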
static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1] = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};

static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1] = C_IRQ0,
	[MIPS_EXC_INT_IO_2] = C_IRQ1,
	[MIPS_EXC_INT_IPI_1] = C_IRQ4,
};

u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;

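/* Reverse lookup: match irq against each priority's Cause.IPn bit (bit irq + 8) */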
u32 kvm_irq_to_priority(u32 irq)
{
	int i;

	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
		if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
			return i;
	}

	return MIPS_EXC_MAX;
}

static int __init kvm_mips_init(void)
{
	int ret;

	if (cpu_has_mmid) {
		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
		return -EOPNOTSUPP;
	}

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	if (boot_cpu_type() == CPU_LOONGSON64)
		kvm_priority_to_irq = kvm_loongson3_priority_to_irq;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);