/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
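
/*
 * Example (a sketch of typical usage, not defined in this header): arch
 * code queues one of the requests above with the generic KVM request API
 * and kicks the vcpu, e.g.:
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * The vcpu run loop then consumes the request with kvm_check_request().
 */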

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);
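
/*
 * For example, booting with "kvm-arm.mode=protected" on the kernel command
 * line selects KVM_MODE_PROTECTED, and callers can branch on it:
 *
 *	if (kvm_get_mode() == KVM_MODE_PROTECTED)
 *		...
 */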

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64	vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	unsigned long flags;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	u8 pfr0_csv2;
	u8 pfr0_csv3;
};

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug register
	 * values we want to use while debugging the guest from the host; it
	 * is set via the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/*
	 * True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe.
	 */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
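
/*
 * Illustrative use (a sketch, not a fixed API contract): size and allocate
 * the per-vcpu SVE register storage, treating a zero size as an error:
 *
 *	size_t sz = vcpu_sve_state_size(vcpu);
 *
 *	if (!sz)
 *		return -EINVAL;
 *	vcpu->arch.sve_state = kzalloc(sz, GFP_KERNEL_ACCOUNT);
 */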

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)

#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */
#define KVM_ARM64_FP_FOREIGN_FPSTATE	(1 << 14)
#define KVM_ARM64_ON_UNSUPPORTED_CPU	(1 << 15) /* Physical CPU not in supported_cpus */
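
/*
 * For example (a sketch of how the encoding above composes): pending a
 * synchronous exception for the guest's EL1 sets the pending flag and the
 * target EL/mode in a single update:
 *
 *	vcpu->arch.flags |= (KVM_ARM64_PENDING_EXCEPTION |
 *			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
 *			     KVM_ARM64_EXCEPT_AA64_EL1);
 */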

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	((vcpu)->arch.flags & KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	((vcpu)->arch.flags |= KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	((vcpu)->arch.flags &= ~KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
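
/*
 * The distinction in practice (illustrative):
 *
 *	val = __vcpu_sys_reg(vcpu, SCTLR_EL1);	  always the memory-backed copy
 *	val = vcpu_read_sys_reg(vcpu, SCTLR_EL1); the live CPU copy when loaded,
 *						  the memory-backed copy otherwise
 */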

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
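
/*
 * A simplified sketch of how callers are expected to combine the helpers
 * above with the memory-backed copy (the real implementation lives in
 * sys_regs.c):
 *
 *	u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
 *	{
 *		u64 val;
 *
 *		if (vcpu->arch.sysregs_loaded_on_cpu &&
 *		    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *			return val;
 *
 *		return __vcpu_sys_reg(vcpu, reg);
 *	}
 */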

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
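
/*
 * Illustrative call sites (a sketch): on nVHE the calls below become HVCs
 * into the hypervisor via kvm_call_hyp_nvhe(); on VHE they are direct
 * function calls followed by an isb():
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	config = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
 */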

void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int kvm_arm_vmid_bits;
int kvm_arm_vmid_alloc_init(void);
void kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
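
/*
 * For instance (illustrative only; the callees named here are hypothetical):
 *
 *	if (kvm_has_mte(vcpu->kvm))
 *		handle_guest_tags(...);
 *	if (kvm_vcpu_has_pmu(vcpu))
 *		update_pmu_state(...);
 */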

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

#endif /* __ARM64_KVM_HOST_H__ */