/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/bitfield.h>
#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

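/*
 * Base offsets of the four vector groups within an EL1 vector table, as
 * defined by VBAR_ELx: exceptions taken from the current EL using SP_EL0
 * or SP_ELx, and exceptions taken from a lower EL in AArch64 or AArch32
 * state. enum exception_type below supplies the per-type offset added to
 * the base when an exception is emulated.
 */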
#define CURRENT_EL_SP_EL0_VECTOR        0x0
#define CURRENT_EL_SP_ELx_VECTOR        0x200
#define LOWER_EL_AArch64_VECTOR         0x400
#define LOWER_EL_AArch32_VECTOR         0x600

enum exception_type {
        except_type_sync        = 0,
        except_type_irq         = 0x80,
        except_type_fiq         = 0x100,
        except_type_serror      = 0x180,
};

#define kvm_exception_type_names                \
        { except_type_sync,     "SYNC"   },     \
        { except_type_irq,      "IRQ"    },     \
        { except_type_fiq,      "FIQ"    },     \
        { except_type_serror,   "SERROR" }

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

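/*
 * Convenience wrappers around kvm_inject_sea(): inject a synchronous
 * external abort as either a data abort (DABT) or an instruction abort
 * (IABT) at the given faulting address.
 */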
static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
{
        return kvm_inject_sea(vcpu, false, addr);
}

static inline int kvm_inject_sea_iabt(struct kvm_vcpu *vcpu, u64 addr)
{
        return kvm_inject_sea(vcpu, true, addr);
}

static inline int kvm_inject_serror(struct kvm_vcpu *vcpu)
{
        /*
         * ESR_ELx.ISV (later renamed to IDS) indicates whether or not
         * ESR_ELx.ISS contains IMPLEMENTATION DEFINED syndrome information.
         *
         * Set the bit when injecting an SError w/o an ESR to indicate ISS
         * does not follow the architected format.
         */
        return kvm_inject_serror_esr(vcpu, ESR_ELx_ISV);
}

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
int kvm_inject_nested_serror(struct kvm_vcpu *vcpu, u64 esr);

static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
{
        u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SVE) |
                  ESR_ELx_IL;

        kvm_inject_nested_sync(vcpu, esr);
}

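/*
 * When built for the EL2 object (VHE or nVHE), derive the EL1 register
 * width from the vCPU's HCR_EL2 image (HCR_EL2.RW clear means an
 * AArch32 EL1); the host-side variant reads the vCPU feature flag
 * instead.
 */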
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        if (!vcpu_has_run_once(vcpu))
                vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

        /*
         * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
         * get set in SCTLR_EL1 such that we can detect when the guest
         * MMU gets turned on and do the necessary cache maintenance
         * then.
         */
        if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                vcpu->arch.hcr_el2 |= HCR_TVM;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu->arch.hcr_el2;
}

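/*
 * WFx trap management: WFE traps are unconditionally cleared here,
 * while WFI traps are only cleared when the vCPU may have interrupts
 * delivered directly in hardware (GICv4 vLPIs, or vSGIs when nASSGIreq
 * is in effect), in which case trapping WFI would be counterproductive.
 */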
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~HCR_TWE;
        if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
            vcpu->kvm->arch.vgic.nassgireq)
                vcpu->arch.hcr_el2 &= ~HCR_TWI;
        else
                vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= HCR_TWE;
        vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
        vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return kvm_condition_valid32(vcpu);

        return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                                  u8 reg_num)
{
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                         unsigned long val)
{
        if (reg_num != 31)
                vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

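/*
 * Nested virt helpers: a vCPU is considered to be "in EL2" when its
 * emulated PSTATE.M indicates EL2h or EL2t.
 */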
static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
        switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
        case PSR_MODE_EL2h:
        case PSR_MODE_EL2t:
                return true;
        default:
                return false;
        }
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
        return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
        return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
                (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_E2H));
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
        return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_amo_is_set(const struct kvm_vcpu *vcpu)
{
        return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_AMO;
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
        bool e2h, tge;
        u64 hcr;

        if (!vcpu_has_nv(vcpu))
                return false;

        hcr = __vcpu_sys_reg(vcpu, HCR_EL2);

        e2h = (hcr & HCR_E2H);
        tge = (hcr & HCR_TGE);

        /*
         * We are in a hypervisor context if the vcpu mode is EL2, or if
         * the E2H and TGE bits are both set. The latter means we are in
         * the user space of the VHE kernel. ARMv8.1 ARM describes this
         * as 'InHost'.
         *
         * Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the
         * rest of the KVM code, and will result in a misbehaving guest.
         */
        return vcpu_is_el2(vcpu) || (e2h && tge);
}

static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
{
        return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
}

static inline bool is_nested_ctxt(struct kvm_vcpu *vcpu)
{
        return vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
}

static inline bool vserror_state_is_nested(struct kvm_vcpu *vcpu)
{
        if (!is_nested_ctxt(vcpu))
                return false;

        return vcpu_el2_amo_is_set(vcpu) ||
               (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA);
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
        const unsigned long overlap = BIT(24) | BIT(21);
        unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

        spsr &= ~overlap;

        spsr |= dit << 21;

        return spsr;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
        u32 mode;

        if (vcpu_mode_is_32bit(vcpu)) {
                mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
                return mode > PSR_AA32_MODE_USR;
        }

        mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

        return mode != PSR_MODE_EL0t;
}

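/*
 * Accessors for the fault state snapshotted at guest exit into
 * vcpu->arch.fault, starting with the ESR_EL2 syndrome.
 */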
static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.esr_el2;
}

static inline bool guest_hyp_wfx_traps_enabled(const struct kvm_vcpu *vcpu)
{
        u64 esr = kvm_vcpu_get_esr(vcpu);
        bool is_wfe = !!(esr & ESR_ELx_WFx_ISS_WFE);
        u64 hcr_el2 = __vcpu_sys_reg(vcpu, HCR_EL2);

        if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
                return false;

        return ((is_wfe && (hcr_el2 & HCR_TWE)) ||
                (!is_wfe && (hcr_el2 & HCR_TWI)));
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
        u64 esr = kvm_vcpu_get_esr(vcpu);

        if (esr & ESR_ELx_CV)
                return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

        return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
        u64 hpfar = vcpu->arch.fault.hpfar_el2;

        if (unlikely(!(hpfar & HPFAR_EL2_NS)))
                return INVALID_GPA;

        return FIELD_GET(HPFAR_EL2_FIPA, hpfar) << 12;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

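/*
 * Helpers decoding the ISS fields of the saved ESR_EL2 for aborts:
 * syndrome validity, access width, sign extension, register numbers,
 * write/read direction and fault status.
 */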
static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
        return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
        return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static inline
bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
{
        return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu)
{
        return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu)
{
        unsigned long esr = kvm_vcpu_get_esr(vcpu);

        BUG_ON(!esr_fsc_is_permission_fault(esr));
        return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL));
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
        case ESR_ELx_FSC_EXTABT:
        case ESR_ELx_FSC_SEA_TTW(-1) ... ESR_ELx_FSC_SEA_TTW(3):
        case ESR_ELx_FSC_SECC:
        case ESR_ELx_FSC_SECC_TTW(-1) ... ESR_ELx_FSC_SECC_TTW(3):
                return true;
        default:
                return false;
        }
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
        u64 esr = kvm_vcpu_get_esr(vcpu);
        return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
        if (kvm_vcpu_abt_iss1tw(vcpu)) {
                /*
                 * Only a permission fault on a S1PTW should be
                 * considered as a write. Otherwise, page tables baked
                 * in a read-only memslot will result in an exception
                 * being delivered in the guest.
                 *
                 * The drawback is that we end-up faulting twice if the
                 * guest is using any of HW AF/DB: a translation fault
                 * to map the page containing the PT (read only at
                 * first), then a permission fault to allow the flags
                 * to be set.
                 */
                return kvm_vcpu_trap_is_permission_fault(vcpu);
        }

        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;

        return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
        return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

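/*
 * Guest endianness: for an AArch32 mode it is the PSTATE/CPSR E bit;
 * for AArch64 it is SCTLR_EL1.EE (privileged) or SCTLR_EL1.E0E (EL0).
 */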
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
        } else {
                u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
                sctlr |= SCTLR_ELx_EE;
                vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
        }
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

        if (vcpu_mode_priv(vcpu))
                return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
        else
                return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

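/*
 * Translate MMIO data between guest and host byte order for the given
 * access width, truncating to the access size. Little-endian data is
 * left untouched.
 */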
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return be16_to_cpu(data & 0xffff);
                case 4:
                        return be32_to_cpu(data & 0xffffffff);
                default:
                        return be64_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return le16_to_cpu(data & 0xffff);
                case 4:
                        return le32_to_cpu(data & 0xffffffff);
                default:
                        return le64_to_cpu(data);
                }
        }

        return data;    /* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_be16(data & 0xffff);
                case 4:
                        return cpu_to_be32(data & 0xffffffff);
                default:
                        return cpu_to_be64(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_le16(data & 0xffff);
                case 4:
                        return cpu_to_le32(data & 0xffffffff);
                default:
                        return cpu_to_le64(data);
                }
        }

        return data;    /* Leave LE untouched */
}

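/*
 * PC adjustment and exception injection are deferred to the next guest
 * entry and recorded as mutually exclusive vCPU flags: either the PC is
 * incremented past the trapped instruction, or an exception is pended,
 * never both (hence the WARN_ONs).
 */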
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
        WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
        vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)                                        \
        do {                                                            \
                WARN_ON(vcpu_get_flag((v), INCREMENT_PC));              \
                vcpu_set_flag((v), PENDING_EXCEPTION);                  \
                vcpu_set_flag((v), e);                                  \
        } while (0)

/*
 * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
 * format if E2H isn't set.
 */
static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu)
{
        u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);

        if (!vcpu_el2_e2h_is_set(vcpu))
                cptr = translate_cptr_el2_to_cpacr_el1(cptr);

        return cptr;
}

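/*
 * Evaluate a CPACR_EL1-format xEN field (FPEN/ZEN) for a vCPU running
 * under a guest hypervisor: 0b00 and 0b10 trap at all exception levels,
 * 0b01 only traps EL0 (here: the guest hypervisor's EL0, i.e. TGE set
 * while not at EL2), and 0b11 traps nothing.
 */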
static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
                                             unsigned int xen)
{
        switch (xen) {
        case 0b00:
        case 0b10:
                return true;
        case 0b01:
                return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu);
        case 0b11:
        default:
                return false;
        }
}

#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen)                    \
        (!vcpu_has_nv(vcpu) ? false :                                   \
         ____cptr_xen_trap_enabled(vcpu,                                \
                                   SYS_FIELD_GET(CPACR_EL1, xen,        \
                                                 vcpu_sanitised_cptr_el2(vcpu))))

static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
{
        return __guest_hyp_cptr_xen_trap_enabled(vcpu, FPEN);
}

static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
{
        return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
}

static inline void vcpu_set_hcrx(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;

        if (cpus_have_final_cap(ARM64_HAS_HCX)) {
                /*
                 * In general, all HCRX_EL2 bits are gated by a feature.
                 * The only reason we can set SMPME without checking any
                 * feature is that its effects are not directly observable
                 * from the guest.
                 */
                vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;

                if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
                        vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);

                if (kvm_has_tcr2(kvm))
                        vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;

                if (kvm_has_fpmr(kvm))
                        vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;

                if (kvm_has_sctlr2(kvm))
                        vcpu->arch.hcrx_el2 |= HCRX_EL2_SCTLR2En;
        }
}
#endif /* __ARM64_KVM_EMULATE_H__ */