// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_host.h>
#include <asm/sigcontext.h>

#include "trace.h"

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(halt_successful_poll),
	VCPU_STAT(halt_attempted_poll),
	VCPU_STAT(halt_poll_invalid),
	VCPU_STAT(halt_wakeup),
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}
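
/*
 * Worked example of the IS_ALIGNED() check above (illustrative only):
 * offsets are expressed in 32-bit units, so an 8-byte register such as
 * regs.pc (size / sizeof(__u32) == 2) must sit at an even offset, and a
 * 16-byte V-register (size / sizeof(__u32) == 4) at a multiple of four.
 * A reg ID naming an odd offset into a 64-bit field is rejected here
 * with -EINVAL rather than exposing a misaligned slice of the field.
 */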

static int validate_core_offset(const struct kvm_vcpu *vcpu,
				const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) != size)
		return -EINVAL;

	return 0;
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
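
/*
 * Illustrative (hypothetical) userspace counterpart of get_core_reg():
 * reading the guest PC through KVM_GET_ONE_REG. vcpu_fd is assumed to
 * be an open vCPU file descriptor; the reg ID encoding follows the
 * UAPI <asm/kvm.h> definitions used above.
 *
 *	__u64 pc;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
 *			KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(regs.pc),
 *		.addr = (__u64)&pc,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg))
 *		err(1, "KVM_GET_ONE_REG");
 */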

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
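
/*
 * Worked example of the bitmap encoding above (illustrative only):
 * with SVE_VQ_MIN == 1, vq 1 maps to bit 0 of vqs[0], vq 64 to bit 63
 * of vqs[0], and vq 65 to bit 0 of vqs[1]. Each set bit advertises one
 * supported vector length of vq * 128 bits.
 */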

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}
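
/*
 * Illustrative (hypothetical) userspace sequence driving set_sve_vls():
 * the VLS pseudo-register must be written before the vcpu's SVE
 * configuration is finalized. vcpu_fd is assumed to be an open vCPU
 * file descriptor created with the KVM_ARM_VCPU_SVE feature enabled.
 *
 *	__u64 vqs[KVM_ARM64_SVE_VLS_WORDS] = { 0x3 };	// vq 1 and vq 2
 *	struct kvm_one_reg vls = {
 *		.id   = KVM_REG_ARM64_SVE_VLS,
 *		.addr = (__u64)&vqs,
 *	};
 *	int feature = KVM_ARM_VCPU_SVE;
 *
 *	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &vls) ||
 *	    ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature))
 *		err(1, "SVE setup");
 */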

#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
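
/*
 * Worked example of the ID layout above (illustrative only): the low
 * ten bits of an SVE reg ID hold the slice index in bits [4:0] and the
 * register number in bits [9:5], so KVM_REG_ARM64_SVE_ZREG(3, 0) has
 * reg_num == 3 and slice == 0. With 5 bits each, up to 32 registers
 * and 32 slices can be encoded, though only slice 0 exists today.
 */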

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
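
/*
 * Illustrative (hypothetical) userspace read of the virtual counter
 * via the timer pseudo-registers handled above; vcpu_fd is assumed to
 * be an open vCPU file descriptor. KVM_REG_ARM_TIMER_CNT already
 * carries the arch and size bits, so no further encoding is needed.
 *
 *	__u64 cnt;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM_TIMER_CNT,
 *		.addr = (__u64)&cnt,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg))
 *		err(1, "KVM_GET_ONE_REG");
 */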

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
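
/*
 * Illustrative (hypothetical) userspace enumeration matching the two
 * functions above: KVM_GET_REG_LIST is first called with n == 0 to
 * learn the required count (the ioctl fails with E2BIG and updates n),
 * then again with a suitably sized buffer; vcpu_fd is assumed open.
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	// errno == E2BIG
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list))
 *		err(1, "KVM_GET_REG_LIST");
 */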

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}
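
/*
 * Illustrative (hypothetical) userspace use of the event interface
 * above: injecting a pending SError without an ESR, which is accepted
 * whether or not the host has the RAS extension; vcpu_fd is assumed
 * to be an open vCPU file descriptor.
 *
 *	struct kvm_vcpu_events events = { 0 };
 *
 *	events.exception.serror_pending = 1;
 *	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events))
 *		err(1, "KVM_SET_VCPU_EVENTS");
 */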

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |	   \
				 KVM_GUESTDBG_USE_SW_BP |  \
				 KVM_GUESTDBG_USE_HW |	   \
				 KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU pointer
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}
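
/*
 * Illustrative (hypothetical) userspace call into the handler above:
 * single-stepping the guest. The control flags must stay within
 * KVM_GUESTDBG_VALID_MASK or the ioctl fails with -EINVAL; vcpu_fd is
 * assumed to be an open vCPU file descriptor.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg))
 *		err(1, "KVM_SET_GUEST_DEBUG");
 */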

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}