/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/vncr_mapping.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7
#define KVM_VCPU_VALID_FEATURES	(BIT(KVM_VCPU_MAX_FEATURES) - 1)

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
#define KVM_REQ_RESYNC_PMU_EL0	KVM_ARCH_REQ(7)

#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				   KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NV,
	KVM_MODE_NONE,
};
#ifdef CONFIG_KVM
enum kvm_mode kvm_get_mode(void);
#else
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; }
#endif

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int __ro_after_init kvm_sve_max_vl;
int __init kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
				     phys_addr_t *p,
				     phys_addr_t (*to_pa)(void *virt))
{
	*p = mc->head;
	mc->head = to_pa(p);
	mc->nr_pages++;
}

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head);

	if (!mc->nr_pages)
		return NULL;

	mc->head = *p;
	mc->nr_pages--;

	return p;
}

static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
				       unsigned long min_pages,
				       void *(*alloc_fn)(void *arg),
				       phys_addr_t (*to_pa)(void *virt),
				       void *arg)
{
	while (mc->nr_pages < min_pages) {
		phys_addr_t *p = alloc_fn(arg);

		if (!p)
			return -ENOMEM;
		push_hyp_memcache(mc, p, to_pa);
	}

	return 0;
}

static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
				       void (*free_fn)(void *virt, void *arg),
				       void *(*to_va)(phys_addr_t phys),
				       void *arg)
{
	while (mc->nr_pages)
		free_fn(pop_hyp_memcache(mc, to_va), arg);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
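
/*
 * Editor's illustration (a sketch, not part of this header): the memcache
 * is a stack of pages linked through their own first words by physical
 * address. A hypothetical user, with __pa() standing in for the real
 * conversion callback, could look like this:
 *
 *	static void *example_alloc(void *arg)
 *	{
 *		return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
 *	}
 *
 *	static phys_addr_t example_to_pa(void *virt)
 *	{
 *		return __pa(virt);
 *	}
 *
 *	struct kvm_hyp_memcache mc = {};
 *
 *	if (__topup_hyp_memcache(&mc, 4, example_alloc, example_to_pa, NULL))
 *		return -ENOMEM;
 *
 * pop_hyp_memcache() with a __va()-style wrapper then hands the pages
 * out one at a time.
 */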

struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t pgd_phys;
	struct kvm_pgtable *pgt;

	/*
	 * VTCR value used on the host. For a non-NV guest (or an NV
	 * guest that runs in a context where its own S2 doesn't
	 * apply), its T0SZ value reflects that of the IPA size.
	 *
	 * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
	 * the guest.
	 */
	u64 vtcr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

#define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
	/*
	 * Memory cache used to split
	 * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
	 * is used to allocate stage2 page tables while splitting huge
	 * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
	 * influences both the capacity of the split page cache, and
	 * how often KVM reschedules. Be wary of raising CHUNK_SIZE
	 * too high.
	 *
	 * Protected by kvm->slots_lock.
	 */
	struct kvm_mmu_memory_cache split_page_cache;
	uint64_t split_page_chunk_size;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 */
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap;
};

typedef unsigned int pkvm_handle_t;

struct kvm_protected_vm {
	pkvm_handle_t handle;
	struct kvm_hyp_memcache teardown_mc;
};

struct kvm_mpidr_data {
	u64 mpidr_mask;
	DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
};

static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
{
	unsigned long mask = data->mpidr_mask;
	u64 aff = mpidr & MPIDR_HWID_BITMASK;
	int nbits, bit, bit_idx = 0;
	u16 index = 0;

	/*
	 * If this looks like RISC-V's BEXT or x86's PEXT
	 * instructions, it isn't by accident.
	 */
	nbits = fls(mask);
	for_each_set_bit(bit, &mask, nbits) {
		index |= (aff & BIT(bit)) >> (bit - bit_idx);
		bit_idx++;
	}

	return index;
}
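
/*
 * Editor's worked example: with mpidr_mask = 0x0f0f, the two populated
 * affinity nibbles are squeezed together. For aff = 0x0304, bits [3:0]
 * (0x4) keep their place (shift of 0), while bits [11:8] (0x3) shift
 * down by 4 to sit next to them, giving index = 0x34: a parallel bit
 * extract of 'aff' under 'mask'.
 */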

struct kvm_sysreg_masks;

enum fgt_group_id {
	__NO_FGT_GROUP__,
	HFGxTR_GROUP,
	HDFGRTR_GROUP,
	HDFGWTR_GROUP = HDFGRTR_GROUP,
	HFGITR_GROUP,
	HAFGRTR_GROUP,

	/* Must be last */
	__NR_FGT_GROUP_IDS__
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/*
	 * Fine-Grained UNDEF, mimicking the FGT layout defined by the
	 * architecture. We track them globally, as we present the
	 * same feature-set to all vcpus.
	 *
	 * Index 0 is currently spare.
	 */
	u64 fgu[__NR_FGT_GROUP_IDS__];

	/* Interrupt controller */
	struct vgic_dist vgic;

	/* Timers */
	struct arch_timer_vm_data timer_data;

	/* Mandated version of PSCI */
	u32 psci_version;

	/* Protects VM-scoped configuration data */
	struct mutex config_lock;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER 0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED 1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
	/* The vCPU feature set for the VM is configured */
#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED 3
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 4
	/* VM counter offset */
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 5
	/* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
	/* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
	/* Fine-Grained UNDEF initialised */
#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
	unsigned long flags;

	/* VM-wide vCPU feature set */
	DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);

	/* MPIDR to vcpu index mapping, optional */
	struct kvm_mpidr_data *mpidr_data;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	/* PMCR_EL0.N value for the guest */
	u8 pmcr_n;

	/* Iterator for idreg debugfs */
	u8 idreg_debugfs_iter;

	/* Hypercall features firmware registers' descriptor */
	struct kvm_smccc_features smccc_feat;
	struct maple_tree smccc_filter;

	/*
	 * Emulated CPU ID registers per VM
	 * (Op0, Op1, CRn, CRm, Op2) of the ID registers to be saved in it
	 * is (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
	 *
	 * These emulated idregs are VM-wide, but accessed from the context
	 * of a vCPU. Atomic access to multiple idregs is guarded by
	 * kvm_arch.config_lock; see the worked example after this
	 * structure for the index arithmetic.
	 */
#define IDREG_IDX(id)		(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
#define IDX_IDREG(idx)		sys_reg(3, 0, 0, ((idx) >> 3) + 1, (idx) & Op2_mask)
#define IDREG(kvm, id)		((kvm)->arch.id_regs[IDREG_IDX(id)])
#define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
	u64 id_regs[KVM_ARM_ID_REG_NUM];

	/* Masks for VNCR-backed sysregs */
	struct kvm_sysreg_masks	*sysreg_masks;

	/*
	 * For an untrusted host VM, 'pkvm.handle' is used to look up
	 * the associated pKVM instance in the hypervisor.
	 */
	struct kvm_protected_vm pkvm;
};
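
/*
 * Editor's worked example for the macros above: ID_AA64PFR0_EL1 is
 * encoded as (Op0=3, Op1=0, CRn=0, CRm=4, Op2=0), so IDREG_IDX() yields
 * ((4 - 1) << 3) | 0 = 24, and IDREG(kvm, SYS_ID_AA64PFR0_EL1) reads
 * kvm->arch.id_regs[24]. KVM_ARM_ID_REG_NUM covers CRm values 1-7 with
 * eight Op2 values each, i.e. 56 slots.
 */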

struct kvm_vcpu_fault_info {
	u64 esr_el2;	/* Hyp Syndrome Register */
	u64 far_el2;	/* Hyp Fault Address Register */
	u64 hpfar_el2;	/* Hyp IPA Fault Address Register */
	u64 disr_el1;	/* Deferred [SError] Status Register */
};

/*
 * VNCR() just places the VNCR_capable registers in the enum after
 * __VNCR_START__, and sets each value (after correction) to an 8-byte
 * offset from the VNCR base. As we don't require the enum to be
 * otherwise ordered, we need the terrible hack below to ensure that we
 * correctly size the sys_regs array, no matter what.
 *
 * The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful
 * treasure trove of bit hacks:
 * https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
 */
#define __MAX__(x,y)	((x) ^ (((x) ^ (y)) & -((x) < (y))))
#define VNCR(r)						\
	__before_##r,					\
	r = __VNCR_START__ + ((VNCR_ ## r) / 8),	\
	__after_##r = __MAX__(__before_##r - 1, r)
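
/*
 * Editor's expansion example: VNCR(SCTLR_EL1) emits three enumerators,
 *
 *	__before_SCTLR_EL1,
 *	SCTLR_EL1 = __VNCR_START__ + (VNCR_SCTLR_EL1 / 8),
 *	__after_SCTLR_EL1 = __MAX__(__before_SCTLR_EL1 - 1, SCTLR_EL1)
 *
 * so the enumerator following a VNCR() entry implicitly starts at
 * __after_##r + 1. The running counter therefore never moves backwards
 * even when the VNCR offsets appear out of order, and NR_SYS_REGS is
 * guaranteed to be large enough for every entry.
 */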

enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CLIDR_EL1,	/* Cache Level ID Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	/* EL2 registers */
	SCTLR_EL2,	/* System Control Register (EL2) */
	ACTLR_EL2,	/* Auxiliary Control Register (EL2) */
	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */
	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
	TCR_EL2,	/* Translation Control Register (EL2) */
	SPSR_EL2,	/* EL2 saved program status register */
	ELR_EL2,	/* EL2 exception link register */
	AFSR0_EL2,	/* Auxiliary Fault Status Register 0 (EL2) */
	AFSR1_EL2,	/* Auxiliary Fault Status Register 1 (EL2) */
	ESR_EL2,	/* Exception Syndrome Register (EL2) */
	FAR_EL2,	/* Fault Address Register (EL2) */
	HPFAR_EL2,	/* Hypervisor IPA Fault Address Register */
	MAIR_EL2,	/* Memory Attribute Indirection Register (EL2) */
	AMAIR_EL2,	/* Auxiliary Memory Attribute Indirection Register (EL2) */
	VBAR_EL2,	/* Vector Base Address Register (EL2) */
	RVBAR_EL2,	/* Reset Vector Base Address Register */
	CONTEXTIDR_EL2,	/* Context ID Register (EL2) */
	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */
	SP_EL2,		/* EL2 Stack Pointer */
	CNTHP_CTL_EL2,
	CNTHP_CVAL_EL2,
	CNTHV_CTL_EL2,
	CNTHV_CVAL_EL2,

	__VNCR_START__,	/* Any VNCR-capable reg goes after this point */

	VNCR(SCTLR_EL1),/* System Control Register */
	VNCR(ACTLR_EL1),/* Auxiliary Control Register */
	VNCR(CPACR_EL1),/* Coprocessor Access Control */
	VNCR(ZCR_EL1),	/* SVE Control */
	VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */
	VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
	VNCR(TCR_EL1),	/* Translation Control Register */
	VNCR(TCR2_EL1),	/* Extended Translation Control Register */
	VNCR(ESR_EL1),	/* Exception Syndrome Register */
	VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
	VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
	VNCR(FAR_EL1),	/* Fault Address Register */
	VNCR(MAIR_EL1),	/* Memory Attribute Indirection Register */
	VNCR(VBAR_EL1),	/* Vector Base Address Register */
	VNCR(CONTEXTIDR_EL1),	/* Context ID Register */
	VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */
	VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */
	VNCR(ELR_EL1),
	VNCR(SP_EL1),
	VNCR(SPSR_EL1),
	VNCR(TFSR_EL1),	/* Tag Fault Status Register (EL1) */
	VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */
	VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */
	VNCR(HCR_EL2),	/* Hypervisor Configuration Register */
	VNCR(HSTR_EL2),	/* Hypervisor System Trap Register */
	VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */
	VNCR(VTCR_EL2),	/* Virtualization Translation Control Register */
	VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */
	VNCR(HCRX_EL2),	/* Extended Hypervisor Configuration Register */

	/* Permission Indirection Extension registers */
	VNCR(PIR_EL1),	 /* Permission Indirection Register 1 (EL1) */
	VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */

	VNCR(HFGRTR_EL2),
	VNCR(HFGWTR_EL2),
	VNCR(HFGITR_EL2),
	VNCR(HDFGRTR_EL2),
	VNCR(HDFGWTR_EL2),
	VNCR(HAFGRTR_EL2),

	VNCR(CNTVOFF_EL2),
	VNCR(CNTV_CVAL_EL0),
	VNCR(CNTV_CTL_EL0),
	VNCR(CNTP_CVAL_EL0),
	VNCR(CNTP_CTL_EL0),

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_sysreg_masks {
	struct {
		u64	res0;
		u64	res1;
	} mask[NR_SYS_REGS - __VNCR_START__];
};
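
/*
 * Editor's sketch of how a res0/res1 pair is meant to be applied (along
 * the lines of kvm_vcpu_sanitise_vncr_reg()): clear the RES0 bits and
 * set the RES1 bits of the backing value, e.g. for a VNCR-backed reg r:
 *
 *	int i = r - __VNCR_START__;
 *
 *	v &= ~masks->mask[i].res0;
 *	v |= masks->mask[i].res1;
 */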

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;

	/* This pointer has to be 4kB aligned. */
	u64 *vncr_array;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;
	u32 smccc_version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/*
	 * Guest floating point state
	 *
	 * The architecture has two main floating point extensions,
	 * the original FPSIMD and SVE. These have overlapping
	 * register views, with the FPSIMD V registers occupying the
	 * low 128 bits of the SVE Z registers. When the core
	 * floating point code saves the register state of a task it
	 * records which view it saved in fp_type.
	 */
	void *sve_state;
	enum fp_type fp_type;
	unsigned int sve_max_vl;
	u64 svcr;
	u64 fpmr;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 hcrx_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Ownership of the FP regs */
	enum {
		FP_STATE_FREE,
		FP_STATE_HOST_OWNED,
		FP_STATE_GUEST_OWNED,
	} fp_state;

	/* Configuration flags, set once and for all before the vcpu can run */
	u8 cflags;

	/* Input flags to the hypervisor code, potentially cleared after use */
	u8 iflags;

	/* State flags for kernel bookkeeping, unused by the hypervisor code */
	u8 sflags;

	/*
	 * Don't run the guest (internal implementation need).
	 *
	 * Contrary to the flags above, this is set/cleared outside of
	 * a vcpu context, and thus cannot be mixed with the flags
	 * themselves (or the flag accesses need to be made atomic).
	 */
	bool pause;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug
	 * values we want to debug the guest. This is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
		bool	pstate_ss;
	} guest_debug_preserved;

	/* vcpu power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;

	/* Per-vcpu CCSIDR override or NULL */
	u32 *ccsidr;
};

/*
 * Each 'flag' is composed of a comma-separated triplet:
 *
 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag
 *
 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
 * direct use outside of the flag accessors.
 */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __unpack_flag(_set, _f, _m)	_f
#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)

#define __build_check_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *_fset;			\
								\
		/* Check that the flags fit in the mask */	\
		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
		/* Check that the flags fit in the type */	\
		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
	} while (0)

#define __vcpu_get_flag(v, flagset, f, m)			\
	({							\
		__build_check_flag(v, flagset, f, m);		\
								\
		READ_ONCE(v->arch.flagset) & (m);		\
	})

/*
 * Note that the set/clear accessors must be preempt-safe in order to
 * avoid nesting them with load/put which also manipulate flags...
 */
#ifdef __KVM_NVHE_HYPERVISOR__
/* the nVHE hypervisor is always non-preemptible */
#define __vcpu_flags_preempt_disable()
#define __vcpu_flags_preempt_enable()
#else
#define __vcpu_flags_preempt_disable()	preempt_disable()
#define __vcpu_flags_preempt_enable()	preempt_enable()
#endif

#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		if (HWEIGHT(m) > 1)				\
			*fset &= ~(m);				\
		*fset |= (f);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define __vcpu_clear_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		*fset &= ~(m);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
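
/*
 * Editor's expansion example: since each flag is a (flagset, value,
 * mask) triplet, vcpu_set_flag(vcpu, IN_WFIT) expands to
 * __vcpu_set_flag(vcpu, sflags, BIT(3), BIT(3)), i.e. "set bit 3 of
 * vcpu->arch.sflags" under the preemption guards. Multi-bit flags such
 * as EXCEPT_MASK get their whole mask cleared before the new value is
 * OR-ed in.
 */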

/* SVE exposed to guest */
#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
/* KVM_ARM_VCPU_INIT completed */
#define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(3))

/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
 * be set together with an exception...
 */
#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE (not a single flag, but let's abuse the macro) */
#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f)	iflags, ((_f) << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL

/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
 * values:
 *
 * For AArch32 EL1:
 */
#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
/* For AArch64 with NV: */
#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
/* Guest debug is live */
#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
/* Save SPE context if active */
#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
/* Save TRBE context if active */
#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
/* vcpu running in HYP context */
#define VCPU_HYP_CONTEXT	__vcpu_single_flag(iflags, BIT(7))

/* SVE enabled for host EL0 */
#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
/* SME enabled for EL0 */
#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
/* WFIT instruction trapped */
#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
/* Software step state is Active-pending */
#define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))
/* PMUSERENR for the guest EL0 is on physical CPU */
#define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(6))
/* WFI instruction trapped */
#define IN_WFI			__vcpu_single_flag(sflags, BIT(7))


/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
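
/*
 * Editor's usage sketch (modelled on the SVE finalization path): the
 * size computed above is what the sve_state backing buffer must be
 * allocated with before the vcpu is allowed to run:
 *
 *	vcpu->arch.sve_state = kzalloc(vcpu_sve_state_size(vcpu),
 *				       GFP_KERNEL_ACCOUNT);
 *	if (!vcpu->arch.sve_state)
 *		return -ENOMEM;
 *
 * A zero return from vcpu_sve_state_size() means the vector length was
 * invalid and must be treated as an error.
 */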

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |		\
				 KVM_GUESTDBG_USE_SW_BP |	\
				 KVM_GUESTDBG_USE_HW |		\
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&		\
			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)					\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||	\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&	\
	 vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
#else
#define vcpu_has_ptrauth(vcpu)	false
#endif

#define vcpu_on_unsupported_cpu(vcpu)				\
	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)			\
	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)			\
	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory-backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context-switched, but only
 * emulated.
 *
 * Don't bother with VNCR-based accesses in the nVHE code, it has no
 * business dealing with NV.
 */
static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
{
#if !defined (__KVM_NVHE_HYPERVISOR__)
	if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		     r >= __VNCR_START__ && ctxt->vncr_array))
		return &ctxt->vncr_array[r - __VNCR_START__];
#endif
	return (u64 *)&ctxt->sys_regs[r];
}

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
#define __vcpu_sys_reg(v,r)						\
	(*({								\
		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
		if (vcpu_has_nv((v)) && (r) >= __VNCR_START__)		\
			*__r = kvm_vcpu_sanitise_vncr_reg((v), (r));	\
		__r;							\
	}))
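
/*
 * Editor's note: userspace-facing accessors want the backing store no
 * matter where the vcpu last ran, e.g.
 *
 *	u64 val = __vcpu_sys_reg(vcpu, TPIDR_EL1);
 *
 * while vcpu_read_sys_reg()/vcpu_write_sys_reg() below prefer the
 * in-CPU copy whenever the vcpu is loaded on a VHE host.
 */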

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case SPSR_EL1:		*val = read_sysreg_s(SYS_SPSR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case SPSR_EL1:		write_sysreg_s(val, SYS_SPSR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
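
/*
 * Editor's sketch of the usual read pattern (along the lines of
 * sys_regs.c): try the in-CPU copy first, then fall back to the
 * memory-backed value:
 *
 *	u64 val;
 *
 *	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
 *	    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *		return val;
 *
 *	return __vcpu_sys_reg(vcpu, reg);
 */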

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The isb() calls below guarantee the same behaviour on VHE as on !VHE,
 * where the eret to EL1 acts as a context synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
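
/*
 * Editor's usage example: callers are oblivious to the mode; the same
 * invocation becomes a direct function call (plus isb()) on VHE and an
 * HVC-based hypercall on nVHE:
 *
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 *	kvm_call_hyp(__kvm_flush_vm_context);
 */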

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_sys_regs_create_debugfs(struct kvm *kvm);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int __init kvm_sys_reg_table_init(void);
struct sys_reg_desc;
int __init populate_sysreg_config(const struct sys_reg_desc *sr,
				  unsigned int idx);
int __init populate_nv_trap_config(void);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

void kvm_init_sysreg(struct kvm_vcpu *);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = INVALID_GPA;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != INVALID_GPA);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags);
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset);
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
					struct reg_mask_range *range);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
bool kvm_set_pmuserenr(u64 val);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
static inline bool kvm_set_pmuserenr(u64 val)
{
	return false;
}
#endif

void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);

int __init kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

#define kvm_vm_has_ran_once(kvm)				\
	(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))

static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
{
	return test_bit(feature, ka->vcpu_features);
}

#define vcpu_has_feature(v, f)	__vcpu_has_feature(&(v)->kvm->arch, (f))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

#define __expand_field_sign_unsigned(id, fld, val)			\
	((u64)SYS_FIELD_VALUE(id, fld, val))

#define __expand_field_sign_signed(id, fld, val)			\
	({								\
		u64 __val = SYS_FIELD_VALUE(id, fld, val);		\
		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
	})

#define expand_field_sign(id, fld, val)					\
	(id##_##fld##_SIGNED ?						\
	 __expand_field_sign_signed(id, fld, val) :			\
	 __expand_field_sign_unsigned(id, fld, val))

#define get_idreg_field_unsigned(kvm, id, fld)				\
	({								\
		u64 __val = IDREG((kvm), SYS_##id);			\
		FIELD_GET(id##_##fld##_MASK, __val);			\
	})

#define get_idreg_field_signed(kvm, id, fld)				\
	({								\
		u64 __val = get_idreg_field_unsigned(kvm, id, fld);	\
		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
	})

#define get_idreg_field_enum(kvm, id, fld)				\
	get_idreg_field_unsigned(kvm, id, fld)

#define get_idreg_field(kvm, id, fld)					\
	(id##_##fld##_SIGNED ?						\
	 get_idreg_field_signed(kvm, id, fld) :				\
	 get_idreg_field_unsigned(kvm, id, fld))

#define kvm_has_feat(kvm, id, fld, limit)				\
	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))

#define kvm_has_feat_enum(kvm, id, fld, val)				\
	(get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))

#define kvm_has_feat_range(kvm, id, fld, min, max)			\
	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
	 get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
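
/*
 * Editor's usage example: feature checks against the VM's sanitised ID
 * registers read naturally, e.g.
 *
 *	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, SVE, IMP))
 *		...
 *
 * The signed/unsigned distinction matters for fields such as
 * ID_AA64PFR0_EL1.FP, where 0xf sign-extends to -1 ("not implemented")
 * and must compare as less than IMP (0).
 */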

#endif /* __ARM64_KVM_HOST_H__ */