Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#ifndef __MIPS_KVM_HOST_H__
11#define __MIPS_KVM_HOST_H__
12
13#include <linux/cpumask.h>
14#include <linux/mutex.h>
15#include <linux/hrtimer.h>
16#include <linux/interrupt.h>
17#include <linux/types.h>
18#include <linux/kvm.h>
19#include <linux/kvm_types.h>
20#include <linux/threads.h>
21#include <linux/spinlock.h>
22
23#include <asm/inst.h>
24#include <asm/mipsregs.h>
25
26#include <kvm/iodev.h>
27
/* MIPS KVM register ids */
/*
 * KVM one-reg ids for guest CP0 registers: the low bits encode the
 * coprocessor-0 register/select pair as (8 * reg + sel), OR'd with the
 * KVM_REG_MIPS_CP0 namespace and a 32-bit or 64-bit size flag.
 */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6	MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_DIAG		MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)
83
84
#define KVM_MAX_VCPUS		16
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

/* Default halt-polling interval before blocking a halted vcpu, in ns */
#define KVM_HALT_POLL_NS_DEFAULT 500000
90
#ifdef CONFIG_KVM_MIPS_VZ
/*
 * GuestID allocation state, defined by the VZ implementation.
 * NOTE(review): presumably versioned like host ASIDs (mask + version
 * counter) -- confirm against the VZ code that initialises these.
 */
extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;
#endif
96
97
/*
 * Special address that contains the comm page, used for reducing # of traps
 * This needs to be within 32Kb of 0x0 (so the zero register can be used), but
 * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
 * caught.
 */
#define KVM_GUEST_COMMPAGE_ADDR		((PAGE_SIZE > 0x8000) ?	0 : \
					 (0x8000 - PAGE_SIZE))

/* Guest is in kernel mode if EXL/ERL is set, or KSU selects kernel (0) */
#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))

/*
 * Guest virtual segment bases in the compressed T&E guest address map.
 * NOTE(review): KSEG0 and KSEG1 intentionally share the 0x40000000 base
 * here -- confirm against the guest memory map before changing either.
 */
#define KVM_GUEST_KUSEG			0x00000000UL
#define KVM_GUEST_KSEG0			0x40000000UL
#define KVM_GUEST_KSEG1			0x40000000UL
#define KVM_GUEST_KSEG23		0x60000000UL
/* Segment of a guest virtual address (0x20000000-sized segments) */
#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0xe0000000)
/* Physical address corresponding to an unmapped guest segment address */
#define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)

#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/*
 * Map an address to a certain kernel segment
 */
#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/* Poison values marking an unmapped page / untranslated address */
#define KVM_INVALID_PAGE		0xdeadbeef
#define KVM_INVALID_ADDR		0xdeadbeef
130
/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */

/* Error host virtual addresses: the top two values of the address space */
#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)
139
140static inline bool kvm_is_error_hva(unsigned long addr)
141{
142 return IS_ERR_VALUE(addr);
143}
144
/* Per-VM statistics counters */
struct kvm_vm_stat {
	ulong remote_tlb_flush;
};
148
/*
 * Per-vcpu statistics counters, one per guest exit reason plus the
 * generic halt-polling counters.
 */
struct kvm_vcpu_stat {
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
#ifdef CONFIG_KVM_MIPS_VZ
	/* VZ-specific guest exit classes */
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#ifdef CONFIG_CPU_LOONGSON64
	u64 vz_cpucfg_exits;
#endif
#endif
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
};
188
/* MIPS keeps no per-memslot arch-specific state */
struct kvm_arch_memory_slot {
};
191
#ifdef CONFIG_CPU_LOONGSON64
/* Emulated Loongson inter-processor interrupt (IPI) register state */
struct ipi_state {
	uint32_t status;
	uint32_t en;
	uint32_t set;
	uint32_t clear;
	uint64_t buf[4];
};

struct loongson_kvm_ipi;

/* One MMIO device per node, all pointing back at the shared IPI state */
struct ipi_io_device {
	int node_id;
	struct loongson_kvm_ipi *ipi;
	struct kvm_io_device device;
};

struct loongson_kvm_ipi {
	/* NOTE(review): presumably serialises ipistate updates -- confirm */
	spinlock_t lock;
	struct kvm *kvm;
	struct ipi_state ipistate[16];
	struct ipi_io_device dev_ipi[4];
};
#endif
216
/* Per-VM architecture state */
struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
#ifdef CONFIG_CPU_LOONGSON64
	/* Emulated Loongson IPI device */
	struct loongson_kvm_ipi ipi;
#endif
};
226
#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

/* Saved guest CP0 state: one slot per (register number, select) pair */
struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	/* Per-register debug counters (CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS) */
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};
236
/*
 * Coprocessor 0 register names
 *
 * Note: several CP0 register numbers carry more than one name below
 * (e.g. 26 is both ECC and ERRCTL, 28 is both TAG_LO and DATA_LO) --
 * the same register number has different roles on different cores.
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_DIAG		22
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

/* VZ guest-control registers (register, select) */
#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7
287
/* Resume Flags */
/* Exit-handler return values: continue running the guest, or return to host */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST
295
/* Result codes returned by the instruction emulator */
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,	/* NOTE(review): privilege fault -- confirm in emulator */
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};
305
/* Convert between a physical address and the PFN field of a MIPS3 TLB entry */
#define mips3_paddr_to_tlbpfn(x) \
	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)

#define MIPS3_PG_SHIFT		6
#define MIPS3_PG_FRAME		0x3fffffc0
313
#if defined(CONFIG_64BIT)
#define VPN2_MASK		GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK		0xffffe000
#endif
#define KVM_ENTRYHI_ASID	cpu_asid_mask(&boot_cpu_data)
/*
 * Predicates on struct kvm_mips_tlb entries: an entry is global only if
 * both EntryLo halves have G set; TLB_LO_IDX selects the even/odd
 * EntryLo half covering virtual address @va.
 */
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
/* VPN2/ASID match, honouring the entry's page mask and global bit */
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))
330
/* Software copy of one guest TLB entry (PageMask/EntryHi/EntryLo pair) */
struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};

/* Flags for kvm_vcpu_arch.aux_inuse: which auxiliary state is loaded */
#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

/* Number of entries in the software guest TLB (guest_tlb[] below) */
#define KVM_MIPS_GUEST_TLB_SIZE	64
/*
 * Per-vcpu architecture state: guest register file, timer emulation
 * state, software guest TLB, and the host context saved across guest
 * mode execution.
 */
struct kvm_vcpu_arch {
	/* NOTE(review): base of generated exception entry code -- confirm */
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	/* Emulated CP0 Count/Compare timer */
	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Guest kernel/user [partial] mm */
	struct mm_struct guest_kernel_mm, guest_user_mm;

	/* Guest ASID of last user mode execution */
	unsigned int last_user_gasid;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

#ifdef CONFIG_KVM_MIPS_VZ
	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];
#endif

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	/* Userspace-controlled capability enables, read by
	 * kvm_mips_guest_can_have_fpu()/..._msa() below */
	u8 fpu_enabled;
	u8 msa_enabled;
};
437
/*
 * Atomically OR @val into the saved guest CP0 register word @reg using
 * an LL/SC sequence, retrying until the store-conditional succeeds
 * (temp is left zero by a failed SC).
 */
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		" .set push \n"
		" .set "MIPS_ISA_ARCH_LEVEL" \n"
		" " __LL "%0, %1 \n"
		" or %0, %2 \n"
		" " __SC "%0, %1 \n"
		" .set pop \n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}
454
/*
 * Atomically clear the bits in @val from the saved guest CP0 register
 * word @reg (LL/SC retry loop; note the AND uses ~val).
 */
static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		" .set push \n"
		" .set "MIPS_ISA_ARCH_LEVEL" \n"
		" " __LL "%0, %1 \n"
		" and %0, %2 \n"
		" " __SC "%0, %1 \n"
		" .set pop \n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}
471
/*
 * Atomically replace the bits selected by @change in the saved guest
 * CP0 register word @reg with the corresponding bits of @val
 * (read-modify-write via LL/SC, retried until the SC succeeds).
 */
static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		" .set push \n"
		" .set "MIPS_ISA_ARCH_LEVEL" \n"
		" " __LL "%0, %1 \n"
		" and %0, %2 \n"
		" or %0, %3 \n"
		" " __SC "%0, %1 \n"
		" .set pop \n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}
490
/* Guest register types, used in accessor build below: the "type" builder
 * argument is "32" (-> u32) or "l" (-> unsigned long, native width). */
#define __KVMT32	u32
#define __KVMTl	unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}

/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}

/* Generate saved context atomic bitwise modifiers (LL/SC helpers above) */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}
552
/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 * The (_reg, sel) arguments are unused by the VZ variants; they are kept
 * for signature parity with the _SAVED builders.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)				\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap them
 * with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
653
/* Select the implementation behind the common kvm_$op_c0_guest_$reg()
 * accessors: saved context in RAM (T&E) or live hardware state (VZ). */
#ifndef CONFIG_KVM_MIPS_VZ

/*
 * T&E (trap & emulate software based virtualisation)
 * We generate the common accessors operating exclusively on the saved context
 * in RAM.
 */

#define __BUILD_KVM_RW_HW	__BUILD_KVM_RW_SW
#define __BUILD_KVM_SET_HW	__BUILD_KVM_SET_SW
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_ATOMIC_SW

#else

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers),
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW

#endif
701
/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 * The "type" column is "32" for u32 accessors and "l" for native-width
 * (unsigned long) accessors; _SW entries always use the saved context in
 * RAM even under VZ.
 *
 * fns_hw/sw name type reg num select
 */
__BUILD_KVM_RW_HW(index, 32, MIPS_CP0_TLB_INDEX, 0)
__BUILD_KVM_RW_HW(entrylo0, l, MIPS_CP0_TLB_LO0, 0)
__BUILD_KVM_RW_HW(entrylo1, l, MIPS_CP0_TLB_LO1, 0)
__BUILD_KVM_RW_HW(context, l, MIPS_CP0_TLB_CONTEXT, 0)
__BUILD_KVM_RW_HW(contextconfig, 32, MIPS_CP0_TLB_CONTEXT, 1)
__BUILD_KVM_RW_HW(userlocal, l, MIPS_CP0_TLB_CONTEXT, 2)
__BUILD_KVM_RW_HW(xcontextconfig, l, MIPS_CP0_TLB_CONTEXT, 3)
__BUILD_KVM_RW_HW(pagemask, l, MIPS_CP0_TLB_PG_MASK, 0)
__BUILD_KVM_RW_HW(pagegrain, 32, MIPS_CP0_TLB_PG_MASK, 1)
__BUILD_KVM_RW_HW(segctl0, l, MIPS_CP0_TLB_PG_MASK, 2)
__BUILD_KVM_RW_HW(segctl1, l, MIPS_CP0_TLB_PG_MASK, 3)
__BUILD_KVM_RW_HW(segctl2, l, MIPS_CP0_TLB_PG_MASK, 4)
__BUILD_KVM_RW_HW(pwbase, l, MIPS_CP0_TLB_PG_MASK, 5)
__BUILD_KVM_RW_HW(pwfield, l, MIPS_CP0_TLB_PG_MASK, 6)
__BUILD_KVM_RW_HW(pwsize, l, MIPS_CP0_TLB_PG_MASK, 7)
__BUILD_KVM_RW_HW(wired, 32, MIPS_CP0_TLB_WIRED, 0)
__BUILD_KVM_RW_HW(pwctl, 32, MIPS_CP0_TLB_WIRED, 6)
__BUILD_KVM_RW_HW(hwrena, 32, MIPS_CP0_HWRENA, 0)
__BUILD_KVM_RW_HW(badvaddr, l, MIPS_CP0_BAD_VADDR, 0)
__BUILD_KVM_RW_HW(badinstr, 32, MIPS_CP0_BAD_VADDR, 1)
__BUILD_KVM_RW_HW(badinstrp, 32, MIPS_CP0_BAD_VADDR, 2)
__BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0)
__BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0)
__BUILD_KVM_RW_HW(compare, 32, MIPS_CP0_COMPARE, 0)
__BUILD_KVM_RW_HW(status, 32, MIPS_CP0_STATUS, 0)
__BUILD_KVM_RW_HW(intctl, 32, MIPS_CP0_STATUS, 1)
__BUILD_KVM_RW_HW(cause, 32, MIPS_CP0_CAUSE, 0)
__BUILD_KVM_RW_HW(epc, l, MIPS_CP0_EXC_PC, 0)
__BUILD_KVM_RW_SW(prid, 32, MIPS_CP0_PRID, 0)
__BUILD_KVM_RW_HW(ebase, l, MIPS_CP0_PRID, 1)
__BUILD_KVM_RW_HW(config, 32, MIPS_CP0_CONFIG, 0)
__BUILD_KVM_RW_HW(config1, 32, MIPS_CP0_CONFIG, 1)
__BUILD_KVM_RW_HW(config2, 32, MIPS_CP0_CONFIG, 2)
__BUILD_KVM_RW_HW(config3, 32, MIPS_CP0_CONFIG, 3)
__BUILD_KVM_RW_HW(config4, 32, MIPS_CP0_CONFIG, 4)
__BUILD_KVM_RW_HW(config5, 32, MIPS_CP0_CONFIG, 5)
__BUILD_KVM_RW_HW(config6, 32, MIPS_CP0_CONFIG, 6)
__BUILD_KVM_RW_HW(config7, 32, MIPS_CP0_CONFIG, 7)
__BUILD_KVM_RW_SW(maari, l, MIPS_CP0_LLADDR, 2)
__BUILD_KVM_RW_HW(xcontext, l, MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc, l, MIPS_CP0_ERROR_PC, 0)
__BUILD_KVM_RW_HW(kscratch1, l, MIPS_CP0_DESAVE, 2)
__BUILD_KVM_RW_HW(kscratch2, l, MIPS_CP0_DESAVE, 3)
__BUILD_KVM_RW_HW(kscratch3, l, MIPS_CP0_DESAVE, 4)
__BUILD_KVM_RW_HW(kscratch4, l, MIPS_CP0_DESAVE, 5)
__BUILD_KVM_RW_HW(kscratch5, l, MIPS_CP0_DESAVE, 6)
__BUILD_KVM_RW_HW(kscratch6, l, MIPS_CP0_DESAVE, 7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status, 32, MIPS_CP0_STATUS, 0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause, 32, MIPS_CP0_CAUSE, 0)
__BUILD_KVM_SET_HW(ebase, l, MIPS_CP0_PRID, 1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config, 32, MIPS_CP0_CONFIG, 0)
__BUILD_KVM_SET_SAVED(config1, 32, MIPS_CP0_CONFIG, 1)
__BUILD_KVM_SET_SAVED(config2, 32, MIPS_CP0_CONFIG, 2)
__BUILD_KVM_SET_SAVED(config3, 32, MIPS_CP0_CONFIG, 3)
__BUILD_KVM_SET_SAVED(config4, 32, MIPS_CP0_CONFIG, 4)
__BUILD_KVM_SET_SAVED(config5, 32, MIPS_CP0_CONFIG, 5)
770
771/* Helpers */
772
773static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
774{
775 return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
776 vcpu->fpu_enabled;
777}
778
779static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
780{
781 return kvm_mips_guest_can_have_fpu(vcpu) &&
782 kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
783}
784
785static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
786{
787 return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
788 vcpu->msa_enabled;
789}
790
791static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
792{
793 return kvm_mips_guest_can_have_msa(vcpu) &&
794 kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
795}
796
/*
 * Operations table implemented by each virtualisation backend
 * (T&E or VZ): guest exit handlers, vcpu lifecycle hooks, shadow
 * mapping flushes, interrupt queueing and one-reg access.
 */
struct kvm_mips_callbacks {
	/* Per-exit-reason guest exit handlers */
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*flush_shadow_all)(struct kvm *kvm);
	/*
	 * Must take care of flushing any cached GPA PTEs (e.g. guest entries in
	 * VZ root TLB, or T&E GVA page tables and corresponding root TLB
	 * mappings).
	 */
	void (*flush_shadow_memslot)(struct kvm *kvm,
				     const struct kvm_memory_slot *slot);
	gpa_t (*gva_to_gpa)(gva_t gva);
	/* Timer and device interrupt queueing/dequeueing */
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	/* One-reg ioctl register enumeration and access */
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
848extern struct kvm_mips_callbacks *kvm_mips_callbacks;
849int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
850
851/* Debug: dump vcpu state */
852int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
853
854extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
855
856/* Building of entry/exception code */
857int kvm_mips_entry_setup(void);
858void *kvm_mips_build_vcpu_run(void *addr);
859void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
860void *kvm_mips_build_exception(void *addr, void *handler);
861void *kvm_mips_build_exit(void *addr);
862
863/* FPU/MSA context management */
864void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
865void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
866void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
867void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
868void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
869void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
870void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
871void kvm_own_fpu(struct kvm_vcpu *vcpu);
872void kvm_own_msa(struct kvm_vcpu *vcpu);
873void kvm_drop_fpu(struct kvm_vcpu *vcpu);
874void kvm_lose_fpu(struct kvm_vcpu *vcpu);
875
876/* TLB handling */
877u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
878
879u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
880
881u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
882
883#ifdef CONFIG_KVM_MIPS_VZ
884int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
885 struct kvm_vcpu *vcpu, bool write_fault);
886#endif
887extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
888 struct kvm_vcpu *vcpu,
889 bool write_fault);
890
891extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
892 struct kvm_vcpu *vcpu);
893
894extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
895 struct kvm_mips_tlb *tlb,
896 unsigned long gva,
897 bool write_fault);
898
899extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
900 u32 *opc,
901 struct kvm_vcpu *vcpu,
902 bool write_fault);
903
904extern void kvm_mips_dump_host_tlbs(void);
905extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
906extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
907 bool user, bool kernel);
908
909extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
910 unsigned long entryhi);
911
912#ifdef CONFIG_KVM_MIPS_VZ
913int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
914int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
915 unsigned long *gpa);
916void kvm_vz_local_flush_roottlb_all_guests(void);
917void kvm_vz_local_flush_guesttlb_all(void);
918void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
919 unsigned int count);
920void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
921 unsigned int count);
922#ifdef CONFIG_CPU_LOONGSON64
923void kvm_loongson_clear_guest_vtlb(void);
924void kvm_loongson_clear_guest_ftlb(void);
925#endif
926#endif
927
928void kvm_mips_suspend_mm(int cpu);
929void kvm_mips_resume_mm(int cpu);
930
/* MMU handling */

/**
 * enum kvm_mips_flush - Types of MMU flushes.
 * @KMF_USER:	Flush guest user virtual memory mappings.
 *		Guest USeg only.
 * @KMF_KERN:	Flush guest kernel virtual memory mappings.
 *		Guest USeg and KSeg2/3.
 * @KMF_GPA:	Flush guest physical memory mappings.
 *		Also includes KSeg0 if KMF_KERN is set.
 */
enum kvm_mips_flush {
	KMF_USER	= 0x0,
	KMF_KERN	= 0x1,
	KMF_GPA		= 0x2,
};
/* Flush guest virtual address (GVA) page tables selected by @flags */
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
/* Flush / clean guest physical address (GPA) page tables over a GFN range */
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
/* Invalidate a single GVA mapping (user and/or kernel) for trap & emulate */
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user);
/* Bracket lockless GVA access — TODO confirm exact guarantees in mmu.c */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);
956
/* Outcome of resolving a guest virtual address fault */
enum kvm_mips_fault_result {
	KVM_MIPS_MAPPED = 0,	/* mapping resolved successfully */
	KVM_MIPS_GVA,		/* fault at guest virtual address level */
	KVM_MIPS_GPA,		/* fault at guest physical address level */
	KVM_MIPS_TLB,		/* no matching TLB entry */
	KVM_MIPS_TLBINV,	/* TLB entry matched but invalid */
	KVM_MIPS_TLBMOD,	/* TLB modified (write to clean page) */
};
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write);

/* MMU notifier hooks (generic KVM ↔ Linux MM integration) */
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
975
/* Emulation */
/* Fetch the guest instruction at @opc into *@out */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
/* Advance guest PC past the emulated instruction (honouring branch delay) */
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
/* Read CP0 BadInstr / BadInstrP for the faulting instruction word */
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
981
982/**
983 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
984 * @vcpu: Virtual CPU.
985 *
986 * Returns: Whether the TLBL exception was likely due to an instruction
987 * fetch fault rather than a data load fault.
988 */
989static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
990{
991 unsigned long badvaddr = vcpu->host_cp0_badvaddr;
992 unsigned long epc = msk_isa16_mode(vcpu->pc);
993 u32 cause = vcpu->host_cp0_cause;
994
995 if (epc == badvaddr)
996 return true;
997
998 /*
999 * Branches may be 32-bit or 16-bit instructions.
1000 * This isn't exact, but we don't really support MIPS16 or microMIPS yet
1001 * in KVM anyway.
1002 */
1003 if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
1004 return true;
1005
1006 return false;
1007}
1008
/* Top-level instruction emulation entry point */
extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
						   u32 *opc,
						   struct kvm_vcpu *vcpu);

/* Base address for delivering exceptions into the guest */
long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);

/*
 * Injection of the various guest exceptions, one helper per exception
 * class.  All share the (cause, opc, vcpu) signature.
 */
extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
						      u32 *opc,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
							 u32 *opc,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
							u32 *opc,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
							 u32 *opc,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
							u32 *opc,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
						     u32 *opc,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
						      u32 *opc,
						      struct kvm_vcpu *vcpu);

/* Reserved instruction: try to emulate, else inject RI into the guest */
extern enum emulation_result kvm_mips_handle_ri(u32 cause,
						u32 *opc,
						struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
						     u32 *opc,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
						     u32 *opc,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
						       u32 *opc,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
							 u32 *opc,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
						      u32 *opc,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
							 u32 *opc,
							 struct kvm_vcpu *vcpu);

/* Complete an MMIO load after userspace has filled in the data */
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);
1072
/* Guest CP0 Count/Compare timer emulation */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
/* @ack: whether writing Compare also acknowledges the timer interrupt */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);
1089
#ifdef CONFIG_KVM_MIPS_VZ
/* VZ hardware guest timer acquire/release */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
#else
/* No hardware guest timer without VZ: both hooks are no-ops */
static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
#endif
1097
/* Check guest privilege level before emulating a faulting instruction */
enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_vcpu *vcpu);

/* Emulation of individual instruction classes */
enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc,
					   u32 cause,
					   struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_vcpu *vcpu);

/* COP0 */
/* Emulate the WAIT instruction (guest idle) */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

/* Writable-bit masks for the guest Config1..Config5 registers */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);
1130
/* Dynamic binary translation */
/* Rewrite guest instructions in place to avoid repeated traps */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
				      u32 *opc, struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
				   struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
/* KVM_INTERRUPT ioctl: inject/clear a guest interrupt */
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_mips_interrupt *irq);
1146
/* Generic KVM arch hooks for which MIPS requires no action: empty stubs */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
1156
1157#endif /* __MIPS_KVM_HOST_H__ */