// SPDX-License-Identifier: GPL-2.0-only
/*
 * set_id_regs - Test for setting ID register from userspace.
 *
 * Copyright (c) 2023 Google LLC.
 *
 * Test that KVM supports setting ID registers from userspace and handles the
 * feature set correctly.
 */

#include <stdint.h>
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
#include <linux/bitfield.h>

enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
	FTR_END,			/* Mark the last ftr bits */
};

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

struct reg_ftr_bits {
	char *name;
	bool sign;
	enum ftr_type type;
	uint8_t shift;
	uint64_t mask;
	/*
	 * For FTR_EXACT, safe_val is used as the exact safe value.
	 * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
	 */
	int64_t safe_val;
};

struct test_feature_reg {
	uint32_t reg;
	const struct reg_ftr_bits *ftr_bits;
};

#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL)	\
	{								\
		.name = #NAME,						\
		.sign = SIGNED,						\
		.type = TYPE,						\
		.shift = SHIFT,						\
		.mask = MASK,						\
		.safe_val = SAFE_VAL,					\
	}

#define REG_FTR_BITS(type, reg, field, safe_val) \
	__REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
		       reg##_##field##_MASK, safe_val)

#define S_REG_FTR_BITS(type, reg, field, safe_val) \
	__REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
		       reg##_##field##_MASK, safe_val)

#define REG_FTR_END				\
	{					\
		.type = FTR_END,		\
	}

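/*
 * Tables of the feature fields exercised for each ID register. As an
 * illustration of the token pasting above, REG_FTR_BITS(FTR_LOWER_SAFE,
 * ID_AA64ISAR0_EL1, AES, 0) expands to .name = "ID_AA64ISAR0_EL1_AES",
 * .sign = FTR_UNSIGNED, .type = FTR_LOWER_SAFE,
 * .shift = ID_AA64ISAR0_EL1_AES_SHIFT, .mask = ID_AA64ISAR0_EL1_AES_MASK,
 * .safe_val = 0.
 */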
static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, ID_AA64DFR0_EL1_DebugVer_IMP),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, ID_DFR0_EL1_PerfMon_PMUv3),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, ID_DFR0_EL1_CopDbg_Armv8),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RNDR, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TLB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, FHM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, DP, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM4, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TME, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA1, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar1_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LS64, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, XS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, I8MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DGH, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, BF16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SPECRES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FRINTTS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LRCPC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FCMA, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, JSCVT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DPB, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, BC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, RPRES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, WFxT, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0),
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0),
	REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HPDS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HAFDBS, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, E0PD, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, BBM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, TTL, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, AT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, ST, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, VARange, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, IESB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, LSM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, UAO, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, CnP, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, I8MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SM4, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SHA3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BF16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BitPerm, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, AES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SVEver, 0),
	REG_FTR_END,
};

#define TEST_REG(id, table)			\
	{					\
		.reg = id,			\
		.ftr_bits = &((table)[0]),	\
	}

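/* Map each tested ID register encoding to its table of feature fields. */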
static struct test_feature_reg test_regs[] = {
	TEST_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0_el1),
	TEST_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0_el1),
	TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1),
	TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
	TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
	TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
	TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
	TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
	TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
	TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
};

#define GUEST_REG_SYNC(id) GUEST_SYNC_ARGS(0, id, read_sysreg_s(id), 0, 0);

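/*
 * Read each feature ID register from inside the guest and report the
 * (encoding, value) pair back to the host via a ucall, so the host can
 * check that the values it wrote are what the guest actually observes.
 */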
static void guest_code(void)
{
	GUEST_REG_SYNC(SYS_ID_AA64DFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_DFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);

	GUEST_DONE();
}

/* Return a safe value for a given ftr_bits and an ftr value */
uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

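	/*
	 * The field is adjusted as a raw value. For signed fields, the
	 * all-ones pattern (ftr_max) encodes -1, so it is returned unchanged
	 * and increments stop one short of it, presumably to avoid stepping
	 * across the sign boundary.
	 */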
	if (ftr_bits->sign == FTR_UNSIGNED) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = ftr_bits->safe_val;
			break;
		case FTR_LOWER_SAFE:
			if (ftr > ftr_bits->safe_val)
				ftr--;
			break;
		case FTR_HIGHER_SAFE:
			if (ftr < ftr_max)
				ftr++;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr == ftr_max)
				ftr = 0;
			else if (ftr != 0)
				ftr++;
			break;
		default:
			break;
		}
	} else if (ftr != ftr_max) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = ftr_bits->safe_val;
			break;
		case FTR_LOWER_SAFE:
			if (ftr > ftr_bits->safe_val)
				ftr--;
			break;
		case FTR_HIGHER_SAFE:
			if (ftr < ftr_max - 1)
				ftr++;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr != 0 && ftr != ftr_max - 1)
				ftr++;
			break;
		default:
			break;
		}
	}

	return ftr;
}

/* Return an invalid value for a given ftr_bits and an ftr value */
uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

	if (ftr_bits->sign == FTR_UNSIGNED) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
			break;
		case FTR_LOWER_SAFE:
			ftr++;
			break;
		case FTR_HIGHER_SAFE:
			ftr--;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr == 0)
				ftr = ftr_max;
			else
				ftr--;
			break;
		default:
			break;
		}
	} else if (ftr != ftr_max) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
			break;
		case FTR_LOWER_SAFE:
			ftr++;
			break;
		case FTR_HIGHER_SAFE:
			ftr--;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr == 0)
				ftr = ftr_max - 1;
			else
				ftr--;
			break;
		default:
			break;
		}
	} else {
		ftr = 0;
	}

	return ftr;
}

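/*
 * Write a safe value back into the field and check that KVM accepts it and
 * that the register reads back exactly what was written.
 */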
static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
				     const struct reg_ftr_bits *ftr_bits)
{
	uint8_t shift = ftr_bits->shift;
	uint64_t mask = ftr_bits->mask;
	uint64_t val, new_val, ftr;

	vcpu_get_reg(vcpu, reg, &val);
	ftr = (val & mask) >> shift;

	ftr = get_safe_value(ftr_bits, ftr);

	ftr <<= shift;
	val &= ~mask;
	val |= ftr;

	vcpu_set_reg(vcpu, reg, val);
	vcpu_get_reg(vcpu, reg, &new_val);
	TEST_ASSERT_EQ(new_val, val);

	return new_val;
}

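/*
 * Attempt to write an invalid value into the field and check that
 * KVM_SET_ONE_REG fails with EINVAL and that the register is left untouched.
 */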
static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
			      const struct reg_ftr_bits *ftr_bits)
{
	uint8_t shift = ftr_bits->shift;
	uint64_t mask = ftr_bits->mask;
	uint64_t val, old_val, ftr;
	int r;

	vcpu_get_reg(vcpu, reg, &val);
	ftr = (val & mask) >> shift;

	ftr = get_invalid_value(ftr_bits, ftr);

	old_val = val;
	ftr <<= shift;
	val &= ~mask;
	val |= ftr;

	r = __vcpu_set_reg(vcpu, reg, val);
	TEST_ASSERT(r < 0 && errno == EINVAL,
		    "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);

	vcpu_get_reg(vcpu, reg, &val);
	TEST_ASSERT_EQ(val, old_val);
}

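/*
 * Cache of the values written from userspace, indexed by the feature ID
 * range index, used later to check the guest view and the post-reset values.
 */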
static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];

#define encoding_to_range_idx(encoding)						\
	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding),	\
				     sys_reg_CRn(encoding), sys_reg_CRm(encoding),	\
				     sys_reg_Op2(encoding))

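/*
 * For every feature ID register in test_regs[]: check that KVM rejects a
 * KVM_ARM_GET_REG_WRITABLE_MASKS call with a non-zero reserved field, fetch
 * the writable masks, then for each feature field verify that the field is
 * writable, that an invalid value is rejected and that a safe value sticks.
 */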
static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
{
	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
	struct reg_mask_range range = {
		.addr = (__u64)masks,
	};
	int ret;

	/* KVM should return an error when a reserved field is not zero */
	range.reserved[0] = 1;
	ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
	TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");

	/* Get writable masks for feature ID registers */
	memset(range.reserved, 0, sizeof(range.reserved));
	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);

	for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
		const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
		uint32_t reg_id = test_regs[i].reg;
		uint64_t reg = KVM_ARM64_SYS_REG(reg_id);
		int idx;

		/* Get the index into the masks array for the idreg */
		idx = encoding_to_range_idx(reg_id);

		for (int j = 0; ftr_bits[j].type != FTR_END; j++) {
			/* Skip aarch32 regs on an aarch64-only system, since they are RAZ/WI. */
			if (aarch64_only && sys_reg_CRm(reg_id) < 4) {
				ksft_test_result_skip("%s on AARCH64 only system\n",
						      ftr_bits[j].name);
				continue;
			}

			/* Make sure the feature field is writable */
			TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);

			test_reg_set_fail(vcpu, reg, &ftr_bits[j]);

			test_reg_vals[idx] = test_reg_set_success(vcpu, reg,
								  &ftr_bits[j]);

			ksft_test_result_pass("%s\n", ftr_bits[j].name);
		}
	}
}

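/*
 * Run the guest and check that every value it reports back matches the value
 * the host wrote (cached in test_reg_vals[]).
 */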
static void test_guest_reg_read(struct kvm_vcpu *vcpu)
{
	bool done = false;
	struct ucall uc;

	while (!done) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_SYNC:
			/* Make sure the written values are seen by guest */
			TEST_ASSERT_EQ(test_reg_vals[encoding_to_range_idx(uc.args[2])],
				       uc.args[3]);
			break;
		case UCALL_DONE:
			done = true;
			break;
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}

/* Politely lifted from arch/arm64/include/asm/cache.h */
/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
#define CLIDR_CTYPE_SHIFT(level)	(3 * (level - 1))
#define CLIDR_CTYPE_MASK(level)		(7 << CLIDR_CTYPE_SHIFT(level))
#define CLIDR_CTYPE(clidr, level)	\
	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))

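/*
 * Rewrite CLIDR_EL1 from userspace: find the first empty cache level and
 * advertise a unified cache there, then remember the value for later checks.
 */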
static void test_clidr(struct kvm_vcpu *vcpu)
{
	uint64_t clidr;
	int level;

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), &clidr);

	/* find the first empty level in the cache hierarchy */
	for (level = 1; level < 7; level++) {
		if (!CLIDR_CTYPE(clidr, level))
			break;
	}

	/*
	 * If you have a mind-boggling 7 levels of cache, congratulations, you
	 * get to fix this.
	 */
	TEST_ASSERT(level <= 7, "can't find an empty level in cache hierarchy");

	/* stick in a unified cache level */
	clidr |= BIT(2) << CLIDR_CTYPE_SHIFT(level);

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), clidr);
	test_reg_vals[encoding_to_range_idx(SYS_CLIDR_EL1)] = clidr;
}

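/*
 * Exercise ID registers handled per-vCPU (CLIDR_EL1, MPIDR_EL1): modify them
 * from userspace and cache the values for the reset check below.
 */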
static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
{
	u64 val;

	test_clidr(vcpu);

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &val);
	val++;
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), val);

	test_reg_vals[encoding_to_range_idx(SYS_MPIDR_EL1)] = val;
	ksft_test_result_pass("%s\n", __func__);
}

static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding)
{
	size_t idx = encoding_to_range_idx(encoding);
	uint64_t observed;

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding), &observed);
	TEST_ASSERT_EQ(test_reg_vals[idx], observed);
}

static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * Calls KVM_ARM_VCPU_INIT behind the scenes, which will do an
	 * architectural reset of the vCPU.
	 */
	aarch64_vcpu_setup(vcpu, NULL);

	for (int i = 0; i < ARRAY_SIZE(test_regs); i++)
		test_assert_id_reg_unchanged(vcpu, test_regs[i].reg);

	test_assert_id_reg_unchanged(vcpu, SYS_CLIDR_EL1);

	ksft_test_result_pass("%s\n", __func__);
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	bool aarch64_only;
	uint64_t val, el0;
	int test_cnt;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/* Check for an AARCH64-only system */
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
	aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	ksft_print_header();

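	/*
	 * One test per feature field. The REG_FTR_END terminator in each table
	 * is not a test, hence the "- ARRAY_SIZE(test_regs)"; the "+ 2" covers
	 * test_vcpu_ftr_id_regs() and test_reset_preserves_id_regs().
	 */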
	test_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
		   ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
		   ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
		   ARRAY_SIZE(ftr_id_aa64mmfr0_el1) + ARRAY_SIZE(ftr_id_aa64mmfr1_el1) +
		   ARRAY_SIZE(ftr_id_aa64mmfr2_el1) + ARRAY_SIZE(ftr_id_aa64zfr0_el1) -
		   ARRAY_SIZE(test_regs) + 2;

	ksft_set_plan(test_cnt);

	test_vm_ftr_id_regs(vcpu, aarch64_only);
	test_vcpu_ftr_id_regs(vcpu);

	test_guest_reg_read(vcpu);

	test_reset_preserves_id_regs(vcpu);

	kvm_vm_free(vm);

	ksft_finished();
}