Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/vncr_mapping.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7
#define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1)

#define KVM_REQ_SLEEP \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING     KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET      KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL    KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4    KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU      KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND         KVM_ARCH_REQ(6)
#define KVM_REQ_RESYNC_PMU_EL0  KVM_ARCH_REQ(7)

#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
                                   KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
        KVM_MODE_DEFAULT,
        KVM_MODE_PROTECTED,
        KVM_MODE_NV,
        KVM_MODE_NONE,
};
#ifdef CONFIG_KVM
enum kvm_mode kvm_get_mode(void);
#else
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; }
#endif

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int __ro_after_init kvm_sve_max_vl;
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
int __init kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_hyp_memcache {
        phys_addr_t head;
        unsigned long nr_pages;
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
                                     phys_addr_t *p,
                                     phys_addr_t (*to_pa)(void *virt))
{
        *p = mc->head;
        mc->head = to_pa(p);
        mc->nr_pages++;
}

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
                                     void *(*to_va)(phys_addr_t phys))
{
        phys_addr_t *p = to_va(mc->head);

        if (!mc->nr_pages)
                return NULL;

        mc->head = *p;
        mc->nr_pages--;

        return p;
}

static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
                                       unsigned long min_pages,
                                       void *(*alloc_fn)(void *arg),
                                       phys_addr_t (*to_pa)(void *virt),
                                       void *arg)
{
        while (mc->nr_pages < min_pages) {
                phys_addr_t *p = alloc_fn(arg);

                if (!p)
                        return -ENOMEM;
                push_hyp_memcache(mc, p, to_pa);
        }

        return 0;
}

static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
                                       void (*free_fn)(void *virt, void *arg),
                                       void *(*to_va)(phys_addr_t phys),
                                       void *arg)
{
        while (mc->nr_pages)
                free_fn(pop_hyp_memcache(mc, to_va), arg);
}
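
/*
 * Usage sketch (illustrative only; my_page_alloc/my_page_free and the
 * VA<->PA callbacks are hypothetical stand-ins for whatever the caller
 * provides): top the cache up before entering a context that cannot
 * allocate, then tear it down with the matching free callback:
 *
 *	struct kvm_hyp_memcache mc = {};
 *
 *	if (__topup_hyp_memcache(&mc, 4, my_page_alloc, my_virt_to_phys, NULL))
 *		return -ENOMEM;
 *	...
 *	__free_hyp_memcache(&mc, my_page_free, my_phys_to_virt, NULL);
 *
 * The wrappers declared just below (topup_hyp_memcache() and
 * free_hyp_memcache()) bundle the kernel's own callbacks for the
 * common case.
 */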

void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);

struct kvm_vmid {
        atomic64_t id;
};

struct kvm_s2_mmu {
        struct kvm_vmid vmid;

        /*
         * stage2 entry level table
         *
         * Two kvm_s2_mmu structures in the same VM can point to the same
         * pgd here. This happens when running a guest using a
         * translation regime that isn't affected by its own stage-2
         * translation, such as a non-VHE hypervisor running at vEL2, or
         * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
         * canonical stage-2 page tables.
         */
        phys_addr_t pgd_phys;
        struct kvm_pgtable *pgt;

        /*
         * VTCR value used on the host. For a non-NV guest (or a NV
         * guest that runs in a context where its own S2 doesn't
         * apply), its T0SZ value reflects that of the IPA size.
         *
         * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
         * the guest.
         */
        u64 vtcr;

        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;

#define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
        /*
         * Memory cache used to split
         * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
         * is used to allocate stage2 page tables while splitting huge
         * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
         * influences both the capacity of the split page cache, and
         * how often KVM reschedules. Be wary of raising CHUNK_SIZE
         * too high.
         *
         * Protected by kvm->slots_lock.
         */
        struct kvm_mmu_memory_cache split_page_cache;
        uint64_t split_page_chunk_size;

        struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 */
struct kvm_smccc_features {
        unsigned long std_bmap;
        unsigned long std_hyp_bmap;
        unsigned long vendor_hyp_bmap;
};

typedef unsigned int pkvm_handle_t;

struct kvm_protected_vm {
        pkvm_handle_t handle;
        struct kvm_hyp_memcache teardown_mc;
        bool enabled;
};

struct kvm_mpidr_data {
        u64 mpidr_mask;
        DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
};

static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
{
        unsigned long index = 0, mask = data->mpidr_mask;
        unsigned long aff = mpidr & MPIDR_HWID_BITMASK;

        bitmap_gather(&index, &aff, &mask, fls(mask));

        return index;
}
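
/*
 * Worked example (illustrative): if mpidr_mask == 0xff00 (only Aff1
 * differs between vCPUs), bitmap_gather() packs the Aff1 bits of the
 * affinity value down into the low bits of the result, so a vCPU with
 * Aff1 == 3 gets index 3. The mask is constructed so that this yields
 * a dense index into cmpidr_to_idx[].
 */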

struct kvm_sysreg_masks;

enum fgt_group_id {
        __NO_FGT_GROUP__,
        HFGxTR_GROUP,
        HDFGRTR_GROUP,
        HDFGWTR_GROUP = HDFGRTR_GROUP,
        HFGITR_GROUP,
        HAFGRTR_GROUP,

        /* Must be last */
        __NR_FGT_GROUP_IDS__
};

struct kvm_arch {
        struct kvm_s2_mmu mmu;

        /*
         * Fine-Grained UNDEF, mimicking the FGT layout defined by the
         * architecture. We track them globally, as we present the
         * same feature-set to all vcpus.
         *
         * Index 0 is currently spare.
         */
        u64 fgu[__NR_FGT_GROUP_IDS__];

        /* Interrupt controller */
        struct vgic_dist vgic;

        /* Timers */
        struct arch_timer_vm_data timer_data;

        /* Mandated version of PSCI */
        u32 psci_version;

        /* Protects VM-scoped configuration data */
        struct mutex config_lock;

        /*
         * If we encounter a data abort without valid instruction syndrome
         * information, report this to user space. User space can (and
         * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
         * supported.
         */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER 0
        /* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED 1
        /* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
        /* The vCPU feature set for the VM is configured */
#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED 3
        /* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 4
        /* VM counter offset */
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 5
        /* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
        /* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
        /* Fine-Grained UNDEF initialised */
#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
        unsigned long flags;

        /* VM-wide vCPU feature set */
        DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);

        /* MPIDR to vcpu index mapping, optional */
        struct kvm_mpidr_data *mpidr_data;

        /*
         * VM-wide PMU filter, implemented as a bitmap and big enough for
         * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
         */
        unsigned long *pmu_filter;
        struct arm_pmu *arm_pmu;

        cpumask_var_t supported_cpus;

        /* PMCR_EL0.N value for the guest */
        u8 pmcr_n;

        /* Iterator for idreg debugfs */
        u8 idreg_debugfs_iter;

        /* Hypercall features firmware registers' descriptor */
        struct kvm_smccc_features smccc_feat;
        struct maple_tree smccc_filter;

        /*
         * Emulated CPU ID registers per VM
         * (Op0, Op1, CRn, CRm, Op2) of the ID registers to be saved in it
         * is (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
         *
         * These emulated idregs are VM-wide, but accessed from the context of a vCPU.
         * Atomic access to multiple idregs is guarded by kvm_arch.config_lock.
         */
#define IDREG_IDX(id) (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
#define IDX_IDREG(idx) sys_reg(3, 0, 0, ((idx) >> 3) + 1, (idx) & Op2_mask)
#define IDREG(kvm, id) ((kvm)->arch.id_regs[IDREG_IDX(id)])
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
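        /*
         * Worked example (illustrative): ID_AA64PFR0_EL1 encodes as
         * (3, 0, 0, 4, 0), so IDREG_IDX() yields ((4 - 1) << 3) | 0 == 24
         * and IDREG(kvm, SYS_ID_AA64PFR0_EL1) reads id_regs[24].
         */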
        u64 id_regs[KVM_ARM_ID_REG_NUM];

        /* Masks for VNCR-backed sysregs */
        struct kvm_sysreg_masks *sysreg_masks;

        /*
         * For an untrusted host VM, 'pkvm.handle' is used to look up
         * the associated pKVM instance in the hypervisor.
         */
        struct kvm_protected_vm pkvm;
};

struct kvm_vcpu_fault_info {
        u64 esr_el2;            /* Hyp Syndrome Register */
        u64 far_el2;            /* Hyp Fault Address Register */
        u64 hpfar_el2;          /* Hyp IPA Fault Address Register */
        u64 disr_el1;           /* Deferred [SError] Status Register */
};

/*
 * VNCR() just places the VNCR-capable registers in the enum after
 * __VNCR_START__, with each value (after correction) being an 8-byte offset
 * from the VNCR base. As we don't require the enum to be otherwise ordered,
 * we need the terrible hack below to ensure that we correctly size the
 * sys_regs array, no matter what.
 *
 * The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful
 * treasure trove of bit hacks:
 * https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
 */
#define __MAX__(x,y)    ((x) ^ (((x) ^ (y)) & -((x) < (y))))
#define VNCR(r)                                         \
        __before_##r,                                   \
        r = __VNCR_START__ + ((VNCR_ ## r) / 8),        \
        __after_##r = __MAX__(__before_##r - 1, r)
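
/*
 * Illustrative expansion (with a made-up offset; the real ones live in
 * asm/vncr_mapping.h): if VNCR_TTBR0_EL1 were 0x200, VNCR(TTBR0_EL1)
 * would pin TTBR0_EL1 to __VNCR_START__ + 0x200 / 8 and make
 * __after_TTBR0_EL1 equal to max(__before_TTBR0_EL1 - 1, TTBR0_EL1),
 * so the next enumerator continues from whichever is larger and
 * NR_SYS_REGS always ends up large enough to cover every entry.
 */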

enum vcpu_sysreg {
        __INVALID_SYSREG__,     /* 0 is reserved as an invalid value */
        MPIDR_EL1,      /* MultiProcessor Affinity Register */
        CLIDR_EL1,      /* Cache Level ID Register */
        CSSELR_EL1,     /* Cache Size Selection Register */
        TPIDR_EL0,      /* Thread ID, User R/W */
        TPIDRRO_EL0,    /* Thread ID, User R/O */
        TPIDR_EL1,      /* Thread ID, Privileged */
        CNTKCTL_EL1,    /* Timer Control Register (EL1) */
        PAR_EL1,        /* Physical Address Register */
        MDCCINT_EL1,    /* Monitor Debug Comms Channel Interrupt Enable Reg */
        OSLSR_EL1,      /* OS Lock Status Register */
        DISR_EL1,       /* Deferred Interrupt Status Register */

        /* Performance Monitors Registers */
        PMCR_EL0,       /* Control Register */
        PMSELR_EL0,     /* Event Counter Selection Register */
        PMEVCNTR0_EL0,  /* Event Counter Register (0-30) */
        PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
        PMCCNTR_EL0,    /* Cycle Counter Register */
        PMEVTYPER0_EL0, /* Event Type Register (0-30) */
        PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
        PMCCFILTR_EL0,  /* Cycle Count Filter Register */
        PMCNTENSET_EL0, /* Count Enable Set Register */
        PMINTENSET_EL1, /* Interrupt Enable Set Register */
        PMOVSSET_EL0,   /* Overflow Flag Status Set Register */
        PMUSERENR_EL0,  /* User Enable Register */

        /* Pointer Authentication Registers in a strict increasing order. */
        APIAKEYLO_EL1,
        APIAKEYHI_EL1,
        APIBKEYLO_EL1,
        APIBKEYHI_EL1,
        APDAKEYLO_EL1,
        APDAKEYHI_EL1,
        APDBKEYLO_EL1,
        APDBKEYHI_EL1,
        APGAKEYLO_EL1,
        APGAKEYHI_EL1,

        /* Memory Tagging Extension registers */
        RGSR_EL1,       /* Random Allocation Tag Seed Register */
        GCR_EL1,        /* Tag Control Register */
        TFSRE0_EL1,     /* Tag Fault Status Register (EL0) */

        /* 32bit specific registers. */
        DACR32_EL2,     /* Domain Access Control Register */
        IFSR32_EL2,     /* Instruction Fault Status Register */
        FPEXC32_EL2,    /* Floating-Point Exception Control Register */
        DBGVCR32_EL2,   /* Debug Vector Catch Register */

        /* EL2 registers */
        SCTLR_EL2,      /* System Control Register (EL2) */
        ACTLR_EL2,      /* Auxiliary Control Register (EL2) */
        MDCR_EL2,       /* Monitor Debug Configuration Register (EL2) */
        CPTR_EL2,       /* Architectural Feature Trap Register (EL2) */
        HACR_EL2,       /* Hypervisor Auxiliary Control Register */
        TTBR0_EL2,      /* Translation Table Base Register 0 (EL2) */
        TTBR1_EL2,      /* Translation Table Base Register 1 (EL2) */
        TCR_EL2,        /* Translation Control Register (EL2) */
        SPSR_EL2,       /* EL2 saved program status register */
        ELR_EL2,        /* EL2 exception link register */
        AFSR0_EL2,      /* Auxiliary Fault Status Register 0 (EL2) */
        AFSR1_EL2,      /* Auxiliary Fault Status Register 1 (EL2) */
        ESR_EL2,        /* Exception Syndrome Register (EL2) */
        FAR_EL2,        /* Fault Address Register (EL2) */
        HPFAR_EL2,      /* Hypervisor IPA Fault Address Register */
        MAIR_EL2,       /* Memory Attribute Indirection Register (EL2) */
        AMAIR_EL2,      /* Auxiliary Memory Attribute Indirection Register (EL2) */
        VBAR_EL2,       /* Vector Base Address Register (EL2) */
        RVBAR_EL2,      /* Reset Vector Base Address Register */
        CONTEXTIDR_EL2, /* Context ID Register (EL2) */
        CNTHCTL_EL2,    /* Counter-timer Hypervisor Control register */
        SP_EL2,         /* EL2 Stack Pointer */
        CNTHP_CTL_EL2,
        CNTHP_CVAL_EL2,
        CNTHV_CTL_EL2,
        CNTHV_CVAL_EL2,

        __VNCR_START__, /* Any VNCR-capable reg goes after this point */

        VNCR(SCTLR_EL1),/* System Control Register */
        VNCR(ACTLR_EL1),/* Auxiliary Control Register */
        VNCR(CPACR_EL1),/* Coprocessor Access Control */
        VNCR(ZCR_EL1),  /* SVE Control */
        VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */
        VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
        VNCR(TCR_EL1),  /* Translation Control Register */
        VNCR(TCR2_EL1), /* Extended Translation Control Register */
        VNCR(ESR_EL1),  /* Exception Syndrome Register */
        VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
        VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
        VNCR(FAR_EL1),  /* Fault Address Register */
        VNCR(MAIR_EL1), /* Memory Attribute Indirection Register */
        VNCR(VBAR_EL1), /* Vector Base Address Register */
        VNCR(CONTEXTIDR_EL1), /* Context ID Register */
        VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */
        VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */
        VNCR(ELR_EL1),
        VNCR(SP_EL1),
        VNCR(SPSR_EL1),
        VNCR(TFSR_EL1), /* Tag Fault Status Register (EL1) */
        VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */
        VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */
        VNCR(HCR_EL2),  /* Hypervisor Configuration Register */
        VNCR(HSTR_EL2), /* Hypervisor System Trap Register */
        VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */
        VNCR(VTCR_EL2), /* Virtualization Translation Control Register */
        VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */
        VNCR(HCRX_EL2), /* Extended Hypervisor Configuration Register */

        /* Permission Indirection Extension registers */
        VNCR(PIR_EL1),  /* Permission Indirection Register 1 (EL1) */
        VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */

        VNCR(HFGRTR_EL2),
        VNCR(HFGWTR_EL2),
        VNCR(HFGITR_EL2),
        VNCR(HDFGRTR_EL2),
        VNCR(HDFGWTR_EL2),
        VNCR(HAFGRTR_EL2),

        VNCR(CNTVOFF_EL2),
        VNCR(CNTV_CVAL_EL0),
        VNCR(CNTV_CTL_EL0),
        VNCR(CNTP_CVAL_EL0),
        VNCR(CNTP_CTL_EL0),

        NR_SYS_REGS     /* Nothing after this line! */
};

struct kvm_sysreg_masks {
        struct {
                u64 res0;
                u64 res1;
        } mask[NR_SYS_REGS - __VNCR_START__];
};

struct kvm_cpu_context {
        struct user_pt_regs regs;       /* sp = sp_el0 */

        u64 spsr_abt;
        u64 spsr_und;
        u64 spsr_irq;
        u64 spsr_fiq;

        struct user_fpsimd_state fp_regs;

        u64 sys_regs[NR_SYS_REGS];

        struct kvm_vcpu *__hyp_running_vcpu;

        /* This pointer has to be 4kB aligned. */
        u64 *vncr_array;
};

struct cpu_sve_state {
        __u64 zcr_el1;

        /*
         * Ordering is important since __sve_save_state/__sve_restore_state
         * relies on it.
         */
        __u32 fpsr;
        __u32 fpcr;

        /* Must be SVE_VQ_BYTES (128 bit) aligned. */
        __u8 sve_regs[];
};

/*
 * This structure is instantiated on a per-CPU basis, and contains
 * data that is:
 *
 * - tied to a single physical CPU, and
 * - either has a lifetime that does not extend past vcpu_put()
 * - or is an invariant for the lifetime of the system
 *
 * Use host_data_ptr(field) as a way to access a pointer to such a
 * field.
 */
struct kvm_host_data {
        struct kvm_cpu_context host_ctxt;

        /*
         * All pointers in this union are hyp VA.
         * sve_state is only used in pKVM and if system_supports_sve().
         */
        union {
                struct user_fpsimd_state *fpsimd_state;
                struct cpu_sve_state *sve_state;
        };

        /* Ownership of the FP regs */
        enum {
                FP_STATE_FREE,
                FP_STATE_HOST_OWNED,
                FP_STATE_GUEST_OWNED,
        } fp_owner;

        /*
         * host_debug_state contains the host registers which are
         * saved and restored during world switches.
         */
        struct {
                /* {Break,watch}point registers */
                struct kvm_guest_debug_arch regs;
                /* Statistical profiling extension */
                u64 pmscr_el1;
                /* Self-hosted trace */
                u64 trfcr_el1;
                /* Values of trap registers for the host before guest entry. */
                u64 mdcr_el2;
        } host_debug_state;
};

struct kvm_host_psci_config {
        /* PSCI version used by host. */
        u32 version;
        u32 smccc_version;

        /* Function IDs used by host if version is v0.1. */
        struct psci_0_1_function_ids function_ids_0_1;

        bool psci_0_1_cpu_suspend_implemented;
        bool psci_0_1_cpu_on_implemented;
        bool psci_0_1_cpu_off_implemented;
        bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
        unsigned long pc;
        unsigned long r0;
        bool be;
        bool reset;
};

struct kvm_vcpu_arch {
        struct kvm_cpu_context ctxt;

        /*
         * Guest floating point state
         *
         * The architecture has two main floating point extensions,
         * the original FPSIMD and SVE. These have overlapping
         * register views, with the FPSIMD V registers occupying the
         * low 128 bits of the SVE Z registers. When the core
         * floating point code saves the register state of a task it
         * records which view it saved in fp_type.
         */
        void *sve_state;
        enum fp_type fp_type;
        unsigned int sve_max_vl;
        u64 svcr;
        u64 fpmr;

        /* Stage 2 paging state used by the hardware on next switch */
        struct kvm_s2_mmu *hw_mmu;

        /* Values of trap registers for the guest. */
        u64 hcr_el2;
        u64 hcrx_el2;
        u64 mdcr_el2;
        u64 cptr_el2;

        /* Exception Information */
        struct kvm_vcpu_fault_info fault;

        /* Configuration flags, set once and for all before the vcpu can run */
        u8 cflags;

        /* Input flags to the hypervisor code, potentially cleared after use */
        u8 iflags;

        /* State flags for kernel bookkeeping, unused by the hypervisor code */
        u8 sflags;

        /*
         * Don't run the guest (internal implementation need).
         *
         * Contrary to the flags above, this is set/cleared outside of
         * a vcpu context, and thus cannot be mixed with the flags
         * themselves (or the flag accesses need to be made atomic).
         */
        bool pause;

        /*
         * We maintain more than a single set of debug registers to support
         * debugging the guest from the host and to maintain separate host and
         * guest state during world switches. vcpu_debug_state are the debug
         * registers of the vcpu as the guest sees them.
         * external_debug_state contains the debug values we want to use to
         * debug the guest. This is set via the KVM_SET_GUEST_DEBUG ioctl.
         *
         * debug_ptr points to the set of debug registers that should be loaded
         * onto the hardware when running the guest.
         */
        struct kvm_guest_debug_arch *debug_ptr;
        struct kvm_guest_debug_arch vcpu_debug_state;
        struct kvm_guest_debug_arch external_debug_state;

        /* VGIC state */
        struct vgic_cpu vgic_cpu;
        struct arch_timer_cpu timer_cpu;
        struct kvm_pmu pmu;

        /*
         * Guest registers we preserve during guest debugging.
         *
         * These shadow registers are updated by the kvm_handle_sys_reg
         * trap handler if the guest accesses or updates them while we
         * are using guest debug.
         */
        struct {
                u32 mdscr_el1;
                bool pstate_ss;
        } guest_debug_preserved;

        /* vcpu power state */
        struct kvm_mp_state mp_state;
        spinlock_t mp_state_lock;

        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;

        /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
        u64 vsesr_el2;

        /* Additional reset state */
        struct vcpu_reset_state reset_state;

        /* Guest PV state */
        struct {
                u64 last_steal;
                gpa_t base;
        } steal;

        /* Per-vcpu CCSIDR override or NULL */
        u32 *ccsidr;
};

/*
 * Each 'flag' is composed of a comma-separated triplet:
 *
 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag
 *
 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
 * direct use outside of the flag accessors.
 */
#define __vcpu_single_flag(_set, _f) _set, (_f), (_f)

#define __unpack_flag(_set, _f, _m) _f
#define unpack_vcpu_flag(...) __unpack_flag(__VA_ARGS__)
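
/*
 * Illustrative expansion: __vcpu_single_flag(sflags, BIT(3)) produces
 * the triplet "sflags, BIT(3), BIT(3)", so vcpu_get_flag(v, IN_WFIT)
 * becomes __vcpu_get_flag(v, sflags, BIT(3), BIT(3)) and simply reads
 * v->arch.sflags & BIT(3).
 */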

#define __build_check_flag(v, flagset, f, m)                    \
        do {                                                    \
                typeof(v->arch.flagset) *_fset;                 \
                                                                \
                /* Check that the flags fit in the mask */      \
                BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m))); \
                /* Check that the flags fit in the type */      \
                BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m)); \
        } while (0)

#define __vcpu_get_flag(v, flagset, f, m)                       \
        ({                                                      \
                __build_check_flag(v, flagset, f, m);           \
                                                                \
                READ_ONCE(v->arch.flagset) & (m);               \
        })

/*
 * Note that the set/clear accessors must be preempt-safe in order to
 * avoid nesting them with load/put which also manipulate flags...
 */
#ifdef __KVM_NVHE_HYPERVISOR__
/* the nVHE hypervisor is always non-preemptible */
#define __vcpu_flags_preempt_disable()
#define __vcpu_flags_preempt_enable()
#else
#define __vcpu_flags_preempt_disable()  preempt_disable()
#define __vcpu_flags_preempt_enable()   preempt_enable()
#endif

#define __vcpu_set_flag(v, flagset, f, m)                       \
        do {                                                    \
                typeof(v->arch.flagset) *fset;                  \
                                                                \
                __build_check_flag(v, flagset, f, m);           \
                                                                \
                fset = &v->arch.flagset;                        \
                __vcpu_flags_preempt_disable();                 \
                if (HWEIGHT(m) > 1)                             \
                        *fset &= ~(m);                          \
                *fset |= (f);                                   \
                __vcpu_flags_preempt_enable();                  \
        } while (0)

#define __vcpu_clear_flag(v, flagset, f, m)                     \
        do {                                                    \
                typeof(v->arch.flagset) *fset;                  \
                                                                \
                __build_check_flag(v, flagset, f, m);           \
                                                                \
                fset = &v->arch.flagset;                        \
                __vcpu_flags_preempt_disable();                 \
                *fset &= ~(m);                                  \
                __vcpu_flags_preempt_enable();                  \
        } while (0)

#define vcpu_get_flag(v, ...)   __vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)   __vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__)
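
/*
 * Usage sketch (illustrative):
 *
 *	vcpu_set_flag(vcpu, IN_WFIT);
 *	if (vcpu_get_flag(vcpu, IN_WFIT))
 *		...;
 *	vcpu_clear_flag(vcpu, IN_WFIT);
 *
 * The flag name expands to its (flagset, value, mask) triplet, so the
 * accessors know both which of cflags/iflags/sflags to touch and which
 * bits within it.
 */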

/* SVE exposed to guest */
#define GUEST_HAS_SVE           __vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED      __vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH       __vcpu_single_flag(cflags, BIT(2))
/* KVM_ARM_VCPU_INIT completed */
#define VCPU_INITIALIZED        __vcpu_single_flag(cflags, BIT(3))

/* Exception pending */
#define PENDING_EXCEPTION       __vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
 * be set together with an exception...
 */
#define INCREMENT_PC            __vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE (not a single flag, but let's abuse the macro) */
#define EXCEPT_MASK             __vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL       unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT          __builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f) iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL

/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
 * values:
 *
 * For AArch32 EL1:
 */
#define EXCEPT_AA32_UND         __vcpu_except_flags(0)
#define EXCEPT_AA32_IABT        __vcpu_except_flags(1)
#define EXCEPT_AA32_DABT        __vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC    __vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ     __vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ     __vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR    __vcpu_except_flags(3)
/* For AArch64 with NV: */
#define EXCEPT_AA64_EL2_SYNC    __vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ     __vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ     __vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR    __vcpu_except_flags(7)
/* Guest debug is live */
#define DEBUG_DIRTY             __vcpu_single_flag(iflags, BIT(4))
/* Save SPE context if active */
#define DEBUG_STATE_SAVE_SPE    __vcpu_single_flag(iflags, BIT(5))
/* Save TRBE context if active */
#define DEBUG_STATE_SAVE_TRBE   __vcpu_single_flag(iflags, BIT(6))

/* SVE enabled for host EL0 */
#define HOST_SVE_ENABLED        __vcpu_single_flag(sflags, BIT(0))
/* SME enabled for EL0 */
#define HOST_SME_ENABLED        __vcpu_single_flag(sflags, BIT(1))
/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU      __vcpu_single_flag(sflags, BIT(2))
/* WFIT instruction trapped */
#define IN_WFIT                 __vcpu_single_flag(sflags, BIT(3))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU          __vcpu_single_flag(sflags, BIT(4))
/* Software step state is Active-pending */
#define DBG_SS_ACTIVE_PENDING   __vcpu_single_flag(sflags, BIT(5))
/* PMUSERENR for the guest EL0 is on physical CPU */
#define PMUSERENR_ON_CPU        __vcpu_single_flag(sflags, BIT(6))
/* WFI instruction trapped */
#define IN_WFI                  __vcpu_single_flag(sflags, BIT(7))


/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
                             sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)   sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({                                    \
        size_t __size_ret;                                              \
        unsigned int __vcpu_vq;                                         \
                                                                        \
        if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {          \
                __size_ret = 0;                                         \
        } else {                                                        \
                __vcpu_vq = vcpu_sve_max_vq(vcpu);                      \
                __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);              \
        }                                                               \
                                                                        \
        __size_ret;                                                     \
})
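
/*
 * Worked example (illustrative): for a 512-bit maximum vector length,
 * sve_vq_from_vl() gives VQ == 4, and SVE_SIG_REGS_SIZE(4) covers the
 * 32 Z registers (64 bytes each at VQ == 4) plus the 16 P registers
 * and FFR (8 bytes each), which is what must be allocated behind
 * vcpu->arch.sve_state.
 */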

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
                                 KVM_GUESTDBG_USE_SW_BP | \
                                 KVM_GUESTDBG_USE_HW | \
                                 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() && \
                            vcpu_get_flag(vcpu, GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)                                          \
        ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||                \
          cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&               \
         vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
#else
#define vcpu_has_ptrauth(vcpu) false
#endif

#define vcpu_on_unsupported_cpu(vcpu)                                   \
        vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)                               \
        vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)                             \
        vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)         (&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 *
 * Don't bother with VNCR-based accesses in the nVHE code, it has no
 * business dealing with NV.
 */
static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
{
#if !defined (__KVM_NVHE_HYPERVISOR__)
        if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
                     r >= __VNCR_START__ && ctxt->vncr_array))
                return &ctxt->vncr_array[r - __VNCR_START__];
#endif
        return (u64 *)&ctxt->sys_regs[r];
}

#define __ctxt_sys_reg(c,r)                                             \
        ({                                                              \
                BUILD_BUG_ON(__builtin_constant_p(r) &&                 \
                             (r) >= NR_SYS_REGS);                       \
                ___ctxt_sys_reg(c, r);                                  \
        })

#define ctxt_sys_reg(c,r)       (*__ctxt_sys_reg(c,r))

u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
#define __vcpu_sys_reg(v,r)                                             \
        (*({                                                            \
                const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;   \
                u64 *__r = __ctxt_sys_reg(ctxt, (r));                   \
                if (vcpu_has_nv((v)) && (r) >= __VNCR_START__)          \
                        *__r = kvm_vcpu_sanitise_vncr_reg((v), (r));    \
                __r;                                                    \
        }))
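
/*
 * Usage sketch (illustrative): code dealing with the memory-backed view
 * reads the backing store directly, e.g.
 *
 *	u64 oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1);
 *
 * whereas code that may run while the vcpu is loaded should prefer
 * vcpu_read_sys_reg()/vcpu_write_sys_reg() (declared below), which can
 * fall back to the live CPU copy when SYSREGS_ON_CPU is set.
 */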

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
        /*
         * *** VHE ONLY ***
         *
         * System registers listed in the switch are not saved on every
         * exit from the guest but are only saved on vcpu_put.
         *
         * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
         * should never be listed below, because the guest cannot modify its
         * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
         * thread when emulating cross-VCPU communication.
         */
        if (!has_vhe())
                return false;

        switch (reg) {
        case SCTLR_EL1:         *val = read_sysreg_s(SYS_SCTLR_EL12);   break;
        case CPACR_EL1:         *val = read_sysreg_s(SYS_CPACR_EL12);   break;
        case TTBR0_EL1:         *val = read_sysreg_s(SYS_TTBR0_EL12);   break;
        case TTBR1_EL1:         *val = read_sysreg_s(SYS_TTBR1_EL12);   break;
        case TCR_EL1:           *val = read_sysreg_s(SYS_TCR_EL12);     break;
        case ESR_EL1:           *val = read_sysreg_s(SYS_ESR_EL12);     break;
        case AFSR0_EL1:         *val = read_sysreg_s(SYS_AFSR0_EL12);   break;
        case AFSR1_EL1:         *val = read_sysreg_s(SYS_AFSR1_EL12);   break;
        case FAR_EL1:           *val = read_sysreg_s(SYS_FAR_EL12);     break;
        case MAIR_EL1:          *val = read_sysreg_s(SYS_MAIR_EL12);    break;
        case VBAR_EL1:          *val = read_sysreg_s(SYS_VBAR_EL12);    break;
        case CONTEXTIDR_EL1:    *val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
        case TPIDR_EL0:         *val = read_sysreg_s(SYS_TPIDR_EL0);    break;
        case TPIDRRO_EL0:       *val = read_sysreg_s(SYS_TPIDRRO_EL0);  break;
        case TPIDR_EL1:         *val = read_sysreg_s(SYS_TPIDR_EL1);    break;
        case AMAIR_EL1:         *val = read_sysreg_s(SYS_AMAIR_EL12);   break;
        case CNTKCTL_EL1:       *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
        case ELR_EL1:           *val = read_sysreg_s(SYS_ELR_EL12);     break;
        case SPSR_EL1:          *val = read_sysreg_s(SYS_SPSR_EL12);    break;
        case PAR_EL1:           *val = read_sysreg_par();               break;
        case DACR32_EL2:        *val = read_sysreg_s(SYS_DACR32_EL2);   break;
        case IFSR32_EL2:        *val = read_sysreg_s(SYS_IFSR32_EL2);   break;
        case DBGVCR32_EL2:      *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
        default:                return false;
        }

        return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
        /*
         * *** VHE ONLY ***
         *
         * System registers listed in the switch are not restored on every
         * entry to the guest but are only restored on vcpu_load.
         *
         * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
         * should never be listed below, because the MPIDR should only be set
         * once, before running the VCPU, and never changed later.
         */
        if (!has_vhe())
                return false;

        switch (reg) {
        case SCTLR_EL1:         write_sysreg_s(val, SYS_SCTLR_EL12);    break;
        case CPACR_EL1:         write_sysreg_s(val, SYS_CPACR_EL12);    break;
        case TTBR0_EL1:         write_sysreg_s(val, SYS_TTBR0_EL12);    break;
        case TTBR1_EL1:         write_sysreg_s(val, SYS_TTBR1_EL12);    break;
        case TCR_EL1:           write_sysreg_s(val, SYS_TCR_EL12);      break;
        case ESR_EL1:           write_sysreg_s(val, SYS_ESR_EL12);      break;
        case AFSR0_EL1:         write_sysreg_s(val, SYS_AFSR0_EL12);    break;
        case AFSR1_EL1:         write_sysreg_s(val, SYS_AFSR1_EL12);    break;
        case FAR_EL1:           write_sysreg_s(val, SYS_FAR_EL12);      break;
        case MAIR_EL1:          write_sysreg_s(val, SYS_MAIR_EL12);     break;
        case VBAR_EL1:          write_sysreg_s(val, SYS_VBAR_EL12);     break;
        case CONTEXTIDR_EL1:    write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
        case TPIDR_EL0:         write_sysreg_s(val, SYS_TPIDR_EL0);     break;
        case TPIDRRO_EL0:       write_sysreg_s(val, SYS_TPIDRRO_EL0);   break;
        case TPIDR_EL1:         write_sysreg_s(val, SYS_TPIDR_EL1);     break;
        case AMAIR_EL1:         write_sysreg_s(val, SYS_AMAIR_EL12);    break;
        case CNTKCTL_EL1:       write_sysreg_s(val, SYS_CNTKCTL_EL12);  break;
        case ELR_EL1:           write_sysreg_s(val, SYS_ELR_EL12);      break;
        case SPSR_EL1:          write_sysreg_s(val, SYS_SPSR_EL12);     break;
        case PAR_EL1:           write_sysreg_s(val, SYS_PAR_EL1);       break;
        case DACR32_EL2:        write_sysreg_s(val, SYS_DACR32_EL2);    break;
        case IFSR32_EL2:        write_sysreg_s(val, SYS_IFSR32_EL2);    break;
        case DBGVCR32_EL2:      write_sysreg_s(val, SYS_DBGVCR32_EL2);  break;
        default:                return false;
        }

        return true;
}

struct kvm_vm_stat {
        struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
        struct kvm_vcpu_stat_generic generic;
        u64 hvc_exit_stat;
        u64 wfe_exit_stat;
        u64 wfi_exit_stat;
        u64 mmio_exit_user;
        u64 mmio_exit_kernel;
        u64 signal_exits;
        u64 exits;
};

unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu) !!rcu_access_pointer((vcpu)->pid)

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)                                       \
        ({                                                              \
                struct arm_smccc_res res;                               \
                                                                        \
                arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),               \
                                  ##__VA_ARGS__, &res);                 \
                WARN_ON(res.a0 != SMCCC_RET_SUCCESS);                   \
                                                                        \
                res.a1;                                                 \
        })

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)                                            \
        do {                                                            \
                if (has_vhe()) {                                        \
                        f(__VA_ARGS__);                                 \
                        isb();                                          \
                } else {                                                \
                        kvm_call_hyp_nvhe(f, ##__VA_ARGS__);            \
                }                                                       \
        } while (0)

#define kvm_call_hyp_ret(f, ...)                                        \
        ({                                                              \
                typeof(f(__VA_ARGS__)) ret;                             \
                                                                        \
                if (has_vhe()) {                                        \
                        ret = f(__VA_ARGS__);                           \
                        isb();                                          \
                } else {                                                \
                        ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);      \
                }                                                       \
                                                                        \
                ret;                                                    \
        })
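
/*
 * Usage sketch (illustrative): callers name the hyp function directly;
 * on nVHE the macro turns this into an HVC identified by the function's
 * SMCCC ID, while on VHE the function is called in place:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 */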
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_sys_regs_create_debugfs(struct kvm *kvm);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int __init kvm_sys_reg_table_init(void);
struct sys_reg_desc;
int __init populate_sysreg_config(const struct sys_reg_desc *sr,
                                  unsigned int idx);
int __init populate_nv_trap_config(void);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

void kvm_init_sysreg(struct kvm_vcpu *);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
        return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);

extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
        vcpu_arch->steal.base = INVALID_GPA;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
        return (vcpu_arch->steal.base != INVALID_GPA);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

/*
 * How we access per-CPU host data depends on where we access it from,
 * and the mode we're in:
 *
 * - VHE and nVHE hypervisor bits use their locally defined instance
 *
 * - the rest of the kernel uses either the VHE or nVHE one, depending on
 *   the mode we're running in.
 *
 *   Unless we're in protected mode, where the host is fully deprivileged
 *   and the nVHE per-CPU stuff is exclusively accessible to the protected
 *   EL2 code. In this case, the EL1 code uses the *VHE* data as its
 *   private state (which makes sense in a way, as there shouldn't be any
 *   shared state between the host and the hypervisor).
 *
 * Yes, this is all totally trivial. Shoot me now.
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
#define host_data_ptr(f) (&this_cpu_ptr(&kvm_host_data)->f)
#else
#define host_data_ptr(f)                                                \
        (static_branch_unlikely(&kvm_protected_mode_initialized) ?      \
         &this_cpu_ptr(&kvm_host_data)->f :                             \
         &this_cpu_ptr_hyp_sym(kvm_host_data)->f)
#endif
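
/*
 * Usage sketch (illustrative):
 *
 *	struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
 *	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
 *
 * i.e. host_data_ptr() always yields a pointer to the named field of
 * this CPU's kvm_host_data instance, whichever copy is in effect.
 */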

/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(void)
{
        return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
}

/* Check whether the FP regs are owned by the host */
static inline bool host_owns_fp_regs(void)
{
        return *host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED;
}

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
        /* The host's MPIDR is immutable, so let's set it up at boot time */
        ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
        return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)          \
        (!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);

int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
                               struct kvm_arm_copy_mte_tags *copy_tags);
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
                                    struct kvm_arm_counter_offset *offset);
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
                                        struct reg_mask_range *range);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
        return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
bool kvm_set_pmuserenr(u64 val);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
static inline bool kvm_set_pmuserenr(u64 val)
{
        return false;
}
#endif

void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);

int __init kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE

#define kvm_vm_is_protected(kvm) (is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)

#define vcpu_is_protected(vcpu) kvm_vm_is_protected((vcpu)->kvm)

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)                                        \
        (system_supports_mte() &&                               \
         test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()                                \
        (system_supports_32bit_el0() &&                         \
         !static_branch_unlikely(&arm64_mismatched_32bit_el0))

#define kvm_vm_has_ran_once(kvm)                                \
        (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))

static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
{
        return test_bit(feature, ka->vcpu_features);
}

#define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))

#define kvm_vcpu_initialized(v) vcpu_get_flag((v), VCPU_INITIALIZED)

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

#define __expand_field_sign_unsigned(id, fld, val)              \
        ((u64)SYS_FIELD_VALUE(id, fld, val))

#define __expand_field_sign_signed(id, fld, val)                \
        ({                                                      \
                u64 __val = SYS_FIELD_VALUE(id, fld, val);      \
                sign_extend64(__val, id##_##fld##_WIDTH - 1);   \
        })

#define expand_field_sign(id, fld, val)                         \
        (id##_##fld##_SIGNED ?                                  \
         __expand_field_sign_signed(id, fld, val) :             \
         __expand_field_sign_unsigned(id, fld, val))

#define get_idreg_field_unsigned(kvm, id, fld)                  \
        ({                                                      \
                u64 __val = IDREG((kvm), SYS_##id);             \
                FIELD_GET(id##_##fld##_MASK, __val);            \
        })

#define get_idreg_field_signed(kvm, id, fld)                    \
        ({                                                      \
                u64 __val = get_idreg_field_unsigned(kvm, id, fld); \
                sign_extend64(__val, id##_##fld##_WIDTH - 1);   \
        })

#define get_idreg_field_enum(kvm, id, fld)                      \
        get_idreg_field_unsigned(kvm, id, fld)

#define get_idreg_field(kvm, id, fld)                           \
        (id##_##fld##_SIGNED ?                                  \
         get_idreg_field_signed(kvm, id, fld) :                 \
         get_idreg_field_unsigned(kvm, id, fld))

#define kvm_has_feat(kvm, id, fld, limit)                       \
        (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))

#define kvm_has_feat_enum(kvm, id, fld, val)                    \
        (get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))

#define kvm_has_feat_range(kvm, id, fld, min, max)              \
        (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
         get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
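
/*
 * Usage sketch (illustrative):
 *
 *	if (kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
 *		...; // the VM is presented with FEAT_FGT
 *
 * i.e. the named field of this VM's emulated ID register is extracted
 * and compared, with the correct signedness, against the given limit.
 */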

/*
 * Check for a given level of PAuth support. An implementation can
 * provide at most one of the address authentication algorithms (APA,
 * API or APA3), hence the "exactly one" check below.
 */
#define kvm_has_pauth(k, l)                                             \
        ({                                                              \
                bool pa, pi, pa3;                                       \
                                                                        \
                pa = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l);       \
                pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP);    \
                pi = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l);       \
                pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP);    \
                pa3 = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l);     \
                pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP);  \
                                                                        \
                (pa + pi + pa3) == 1;                                   \
        })

#endif /* __ARM64_KVM_HOST_H__ */