Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Add aarch64 get-reg-list test

Check for KVM_GET_REG_LIST regressions. The blessed list was
created by running on v4.15 with the --core-reg-fixup option.
The following script was also used to annotate system
registers with their names where possible. When new system
registers are added, their names can be added manually using
the same grep.

while read reg; do
if [[ ! $reg =~ ARM64_SYS_REG ]]; then
printf "\t$reg\n"
continue
fi
encoding=$(echo "$reg" | sed "s/ARM64_SYS_REG(//;s/),//")
if ! name=$(grep "$encoding" ../../../../arch/arm64/include/asm/sysreg.h); then
printf "\t$reg\n"
continue
fi
name=$(echo "$name" | sed "s/.*SYS_//;s/[\t ]*sys_reg($encoding)$//")
printf "\t$reg\t/* $name */\n"
done < <(aarch64/get-reg-list --core-reg-fixup --list)

Signed-off-by: Andrew Jones <drjones@redhat.com>
Message-Id: <20201029201703.102716-3-drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Andrew Jones, committed by Paolo Bonzini
fd02029a ac4a4d6d

+703
+1
tools/testing/selftests/kvm/.gitignore
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 + /aarch64/get-reg-list 2 3 /s390x/memop 3 4 /s390x/resets 4 5 /s390x/sync_regs_test
+1
tools/testing/selftests/kvm/Makefile
··· 66 66 TEST_GEN_PROGS_x86_64 += set_memory_region_test 67 67 TEST_GEN_PROGS_x86_64 += steal_time 68 68 69 + TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list 69 70 TEST_GEN_PROGS_aarch64 += clear_dirty_log_test 70 71 TEST_GEN_PROGS_aarch64 += demand_paging_test 71 72 TEST_GEN_PROGS_aarch64 += dirty_log_test
+671
tools/testing/selftests/kvm/aarch64/get-reg-list.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Check for KVM_GET_REG_LIST regressions. 4 + * 5 + * Copyright (C) 2020, Red Hat, Inc. 6 + * 7 + * When attempting to migrate from a host with an older kernel to a host 8 + * with a newer kernel we allow the newer kernel on the destination to 9 + * list new registers with get-reg-list. We assume they'll be unused, at 10 + * least until the guest reboots, and so they're relatively harmless. 11 + * However, if the destination host with the newer kernel is missing 12 + * registers which the source host with the older kernel has, then that's 13 + * a regression in get-reg-list. This test checks for that regression by 14 + * checking the current list against a blessed list. We should never have 15 + * missing registers, but if new ones appear then they can probably be 16 + * added to the blessed list. A completely new blessed list can be created 17 + * by running the test with the --list command line argument. 18 + * 19 + * Note, the blessed list should be created from the oldest possible 20 + * kernel. We can't go older than v4.15, though, because that's the first 21 + * release to expose the ID system registers in KVM_GET_REG_LIST, see 22 + * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features 23 + * from guests"). 
Also, one must use the --core-reg-fixup command line 24 + * option when running on an older kernel that doesn't include df205b5c6328 25 + * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST") 26 + */ 27 + #include <stdio.h> 28 + #include <stdlib.h> 29 + #include <string.h> 30 + #include "kvm_util.h" 31 + #include "test_util.h" 32 + 33 + #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK) 34 + 35 + #define for_each_reg(i) \ 36 + for ((i) = 0; (i) < reg_list->n; ++(i)) 37 + 38 + #define for_each_missing_reg(i) \ 39 + for ((i) = 0; (i) < blessed_n; ++(i)) \ 40 + if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i])) 41 + 42 + #define for_each_new_reg(i) \ 43 + for ((i) = 0; (i) < reg_list->n; ++(i)) \ 44 + if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i])) 45 + 46 + 47 + static struct kvm_reg_list *reg_list; 48 + 49 + static __u64 blessed_reg[]; 50 + static __u64 blessed_n; 51 + 52 + static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg) 53 + { 54 + int i; 55 + 56 + for (i = 0; i < nr_regs; ++i) 57 + if (reg == regs[i]) 58 + return true; 59 + return false; 60 + } 61 + 62 + static const char *str_with_index(const char *template, __u64 index) 63 + { 64 + char *str, *p; 65 + int n; 66 + 67 + str = strdup(template); 68 + p = strstr(str, "##"); 69 + n = sprintf(p, "%lld", index); 70 + strcat(p + n, strstr(template, "##") + 2); 71 + 72 + return (const char *)str; 73 + } 74 + 75 + #define CORE_REGS_XX_NR_WORDS 2 76 + #define CORE_SPSR_XX_NR_WORDS 2 77 + #define CORE_FPREGS_XX_NR_WORDS 4 78 + 79 + static const char *core_id_to_str(__u64 id) 80 + { 81 + __u64 core_off = id & ~REG_MASK, idx; 82 + 83 + /* 84 + * core_off is the offset into struct kvm_regs 85 + */ 86 + switch (core_off) { 87 + case KVM_REG_ARM_CORE_REG(regs.regs[0]) ... 
88 + KVM_REG_ARM_CORE_REG(regs.regs[30]): 89 + idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS; 90 + TEST_ASSERT(idx < 31, "Unexpected regs.regs index: %lld", idx); 91 + return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx); 92 + case KVM_REG_ARM_CORE_REG(regs.sp): 93 + return "KVM_REG_ARM_CORE_REG(regs.sp)"; 94 + case KVM_REG_ARM_CORE_REG(regs.pc): 95 + return "KVM_REG_ARM_CORE_REG(regs.pc)"; 96 + case KVM_REG_ARM_CORE_REG(regs.pstate): 97 + return "KVM_REG_ARM_CORE_REG(regs.pstate)"; 98 + case KVM_REG_ARM_CORE_REG(sp_el1): 99 + return "KVM_REG_ARM_CORE_REG(sp_el1)"; 100 + case KVM_REG_ARM_CORE_REG(elr_el1): 101 + return "KVM_REG_ARM_CORE_REG(elr_el1)"; 102 + case KVM_REG_ARM_CORE_REG(spsr[0]) ... 103 + KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]): 104 + idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS; 105 + TEST_ASSERT(idx < KVM_NR_SPSR, "Unexpected spsr index: %lld", idx); 106 + return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx); 107 + case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ... 
108 + KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]): 109 + idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS; 110 + TEST_ASSERT(idx < 32, "Unexpected fp_regs.vregs index: %lld", idx); 111 + return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx); 112 + case KVM_REG_ARM_CORE_REG(fp_regs.fpsr): 113 + return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)"; 114 + case KVM_REG_ARM_CORE_REG(fp_regs.fpcr): 115 + return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)"; 116 + } 117 + 118 + TEST_FAIL("Unknown core reg id: 0x%llx", id); 119 + return NULL; 120 + } 121 + 122 + static void print_reg(__u64 id) 123 + { 124 + unsigned op0, op1, crn, crm, op2; 125 + const char *reg_size = NULL; 126 + 127 + TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64, 128 + "KVM_REG_ARM64 missing in reg id: 0x%llx", id); 129 + 130 + switch (id & KVM_REG_SIZE_MASK) { 131 + case KVM_REG_SIZE_U8: 132 + reg_size = "KVM_REG_SIZE_U8"; 133 + break; 134 + case KVM_REG_SIZE_U16: 135 + reg_size = "KVM_REG_SIZE_U16"; 136 + break; 137 + case KVM_REG_SIZE_U32: 138 + reg_size = "KVM_REG_SIZE_U32"; 139 + break; 140 + case KVM_REG_SIZE_U64: 141 + reg_size = "KVM_REG_SIZE_U64"; 142 + break; 143 + case KVM_REG_SIZE_U128: 144 + reg_size = "KVM_REG_SIZE_U128"; 145 + break; 146 + case KVM_REG_SIZE_U256: 147 + reg_size = "KVM_REG_SIZE_U256"; 148 + break; 149 + case KVM_REG_SIZE_U512: 150 + reg_size = "KVM_REG_SIZE_U512"; 151 + break; 152 + case KVM_REG_SIZE_U1024: 153 + reg_size = "KVM_REG_SIZE_U1024"; 154 + break; 155 + case KVM_REG_SIZE_U2048: 156 + reg_size = "KVM_REG_SIZE_U2048"; 157 + break; 158 + default: 159 + TEST_FAIL("Unexpected reg size: 0x%llx in reg id: 0x%llx", 160 + (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id); 161 + } 162 + 163 + switch (id & KVM_REG_ARM_COPROC_MASK) { 164 + case KVM_REG_ARM_CORE: 165 + printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(id)); 166 + break; 167 + case KVM_REG_ARM_DEMUX: 168 + TEST_ASSERT(!(id & ~(REG_MASK | 
KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)), 169 + "Unexpected bits set in DEMUX reg id: 0x%llx", id); 170 + printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n", 171 + reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK); 172 + break; 173 + case KVM_REG_ARM64_SYSREG: 174 + op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT; 175 + op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT; 176 + crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT; 177 + crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT; 178 + op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT; 179 + TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2), 180 + "Unexpected bits set in SYSREG reg id: 0x%llx", id); 181 + printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2); 182 + break; 183 + case KVM_REG_ARM_FW: 184 + TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff), 185 + "Unexpected bits set in FW reg id: 0x%llx", id); 186 + printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff); 187 + break; 188 + case KVM_REG_ARM64_SVE: 189 + TEST_FAIL("KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", id); 190 + break; 191 + default: 192 + TEST_FAIL("Unexpected coproc type: 0x%llx in reg id: 0x%llx", 193 + (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id); 194 + } 195 + } 196 + 197 + /* 198 + * Older kernels listed each 32-bit word of CORE registers separately. 199 + * For 64 and 128-bit registers we need to ignore the extra words. We 200 + * also need to fixup the sizes, because the older kernels stated all 201 + * registers were 64-bit, even when they weren't. 
202 + */ 203 + static void core_reg_fixup(void) 204 + { 205 + struct kvm_reg_list *tmp; 206 + __u64 id, core_off; 207 + int i; 208 + 209 + tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64)); 210 + 211 + for (i = 0; i < reg_list->n; ++i) { 212 + id = reg_list->reg[i]; 213 + 214 + if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) { 215 + tmp->reg[tmp->n++] = id; 216 + continue; 217 + } 218 + 219 + core_off = id & ~REG_MASK; 220 + 221 + switch (core_off) { 222 + case 0x52: case 0xd2: case 0xd6: 223 + /* 224 + * These offsets are pointing at padding. 225 + * We need to ignore them too. 226 + */ 227 + continue; 228 + case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ... 229 + KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]): 230 + if (core_off & 3) 231 + continue; 232 + id &= ~KVM_REG_SIZE_MASK; 233 + id |= KVM_REG_SIZE_U128; 234 + tmp->reg[tmp->n++] = id; 235 + continue; 236 + case KVM_REG_ARM_CORE_REG(fp_regs.fpsr): 237 + case KVM_REG_ARM_CORE_REG(fp_regs.fpcr): 238 + id &= ~KVM_REG_SIZE_MASK; 239 + id |= KVM_REG_SIZE_U32; 240 + tmp->reg[tmp->n++] = id; 241 + continue; 242 + default: 243 + if (core_off & 1) 244 + continue; 245 + tmp->reg[tmp->n++] = id; 246 + break; 247 + } 248 + } 249 + 250 + free(reg_list); 251 + reg_list = tmp; 252 + } 253 + 254 + int main(int ac, char **av) 255 + { 256 + int new_regs = 0, missing_regs = 0, i; 257 + int failed_get = 0, failed_set = 0; 258 + bool print_list = false, fixup_core_regs = false; 259 + struct kvm_vm *vm; 260 + 261 + for (i = 1; i < ac; ++i) { 262 + if (strcmp(av[i], "--core-reg-fixup") == 0) 263 + fixup_core_regs = true; 264 + else if (strcmp(av[i], "--list") == 0) 265 + print_list = true; 266 + else 267 + fprintf(stderr, "Ignoring unknown option: %s\n", av[i]); 268 + } 269 + 270 + vm = vm_create_default(0, 0, NULL); 271 + reg_list = vcpu_get_reg_list(vm, 0); 272 + 273 + if (fixup_core_regs) 274 + core_reg_fixup(); 275 + 276 + if (print_list) { 277 + putchar('\n'); 278 + for_each_reg(i) 279 + print_reg(reg_list->reg[i]); 
280 + putchar('\n'); 281 + return 0; 282 + } 283 + 284 + /* 285 + * We only test that we can get the register and then write back the 286 + * same value. Some registers may allow other values to be written 287 + * back, but others only allow some bits to be changed, and at least 288 + * for ID registers set will fail if the value does not exactly match 289 + * what was returned by get. If registers that allow other values to 290 + * be written need to have the other values tested, then we should 291 + * create a new set of tests for those in a new independent test 292 + * executable. 293 + */ 294 + for_each_reg(i) { 295 + uint8_t addr[2048 / 8]; 296 + struct kvm_one_reg reg = { 297 + .id = reg_list->reg[i], 298 + .addr = (__u64)&addr, 299 + }; 300 + int ret; 301 + 302 + ret = _vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg); 303 + if (ret) { 304 + puts("Failed to get "); 305 + print_reg(reg.id); 306 + putchar('\n'); 307 + ++failed_get; 308 + } 309 + 310 + ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg); 311 + if (ret) { 312 + puts("Failed to set "); 313 + print_reg(reg.id); 314 + putchar('\n'); 315 + ++failed_set; 316 + } 317 + } 318 + 319 + for_each_new_reg(i) 320 + ++new_regs; 321 + 322 + for_each_missing_reg(i) 323 + ++missing_regs; 324 + 325 + if (new_regs || missing_regs) { 326 + printf("Number blessed registers: %5lld\n", blessed_n); 327 + printf("Number registers: %5lld\n", reg_list->n); 328 + } 329 + 330 + if (new_regs) { 331 + printf("\nThere are %d new registers.\n" 332 + "Consider adding them to the blessed reg " 333 + "list with the following lines:\n\n", new_regs); 334 + for_each_new_reg(i) 335 + print_reg(reg_list->reg[i]); 336 + putchar('\n'); 337 + } 338 + 339 + if (missing_regs) { 340 + printf("\nThere are %d missing registers.\n" 341 + "The following lines are missing registers:\n\n", missing_regs); 342 + for_each_missing_reg(i) 343 + print_reg(blessed_reg[i]); 344 + putchar('\n'); 345 + } 346 + 347 + TEST_ASSERT(!missing_regs && !failed_get && 
!failed_set, 348 + "There are %d missing registers; %d registers failed get; %d registers failed set", 349 + missing_regs, failed_get, failed_set); 350 + 351 + return 0; 352 + } 353 + 354 + /* 355 + * The current blessed list was primed with the output of kernel version 356 + * v4.15 with --core-reg-fixup and then later updated with new registers. 357 + */ 358 + static __u64 blessed_reg[] = { 359 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]), 360 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]), 361 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]), 362 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]), 363 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]), 364 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]), 365 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]), 366 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]), 367 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]), 368 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]), 369 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]), 370 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]), 371 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]), 372 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]), 373 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]), 374 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]), 375 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | 
KVM_REG_ARM_CORE_REG(regs.regs[16]), 376 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]), 377 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]), 378 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]), 379 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]), 380 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]), 381 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]), 382 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]), 383 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]), 384 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]), 385 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]), 386 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]), 387 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]), 388 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]), 389 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]), 390 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp), 391 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc), 392 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate), 393 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1), 394 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1), 395 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]), 396 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | 
KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]), 397 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]), 398 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]), 399 + KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]), 400 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]), 401 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]), 402 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]), 403 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]), 404 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]), 405 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]), 406 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]), 407 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]), 408 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]), 409 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]), 410 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]), 411 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]), 412 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]), 413 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]), 414 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]), 415 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]), 416 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | 
KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]), 417 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]), 418 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]), 419 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]), 420 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]), 421 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]), 422 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]), 423 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]), 424 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]), 425 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]), 426 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]), 427 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]), 428 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]), 429 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]), 430 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]), 431 + KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]), 432 + KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr), 433 + KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr), 434 + KVM_REG_ARM_FW_REG(0), 435 + KVM_REG_ARM_FW_REG(1), 436 + KVM_REG_ARM_FW_REG(2), 437 + ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */ 438 + ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */ 439 + 
ARM64_SYS_REG(3, 3, 14, 0, 2), 440 + ARM64_SYS_REG(3, 0, 0, 0, 0), /* MIDR_EL1 */ 441 + ARM64_SYS_REG(3, 0, 0, 0, 6), /* REVIDR_EL1 */ 442 + ARM64_SYS_REG(3, 1, 0, 0, 1), /* CLIDR_EL1 */ 443 + ARM64_SYS_REG(3, 1, 0, 0, 7), /* AIDR_EL1 */ 444 + ARM64_SYS_REG(3, 3, 0, 0, 1), /* CTR_EL0 */ 445 + ARM64_SYS_REG(2, 0, 0, 0, 4), 446 + ARM64_SYS_REG(2, 0, 0, 0, 5), 447 + ARM64_SYS_REG(2, 0, 0, 0, 6), 448 + ARM64_SYS_REG(2, 0, 0, 0, 7), 449 + ARM64_SYS_REG(2, 0, 0, 1, 4), 450 + ARM64_SYS_REG(2, 0, 0, 1, 5), 451 + ARM64_SYS_REG(2, 0, 0, 1, 6), 452 + ARM64_SYS_REG(2, 0, 0, 1, 7), 453 + ARM64_SYS_REG(2, 0, 0, 2, 0), /* MDCCINT_EL1 */ 454 + ARM64_SYS_REG(2, 0, 0, 2, 2), /* MDSCR_EL1 */ 455 + ARM64_SYS_REG(2, 0, 0, 2, 4), 456 + ARM64_SYS_REG(2, 0, 0, 2, 5), 457 + ARM64_SYS_REG(2, 0, 0, 2, 6), 458 + ARM64_SYS_REG(2, 0, 0, 2, 7), 459 + ARM64_SYS_REG(2, 0, 0, 3, 4), 460 + ARM64_SYS_REG(2, 0, 0, 3, 5), 461 + ARM64_SYS_REG(2, 0, 0, 3, 6), 462 + ARM64_SYS_REG(2, 0, 0, 3, 7), 463 + ARM64_SYS_REG(2, 0, 0, 4, 4), 464 + ARM64_SYS_REG(2, 0, 0, 4, 5), 465 + ARM64_SYS_REG(2, 0, 0, 4, 6), 466 + ARM64_SYS_REG(2, 0, 0, 4, 7), 467 + ARM64_SYS_REG(2, 0, 0, 5, 4), 468 + ARM64_SYS_REG(2, 0, 0, 5, 5), 469 + ARM64_SYS_REG(2, 0, 0, 5, 6), 470 + ARM64_SYS_REG(2, 0, 0, 5, 7), 471 + ARM64_SYS_REG(2, 0, 0, 6, 4), 472 + ARM64_SYS_REG(2, 0, 0, 6, 5), 473 + ARM64_SYS_REG(2, 0, 0, 6, 6), 474 + ARM64_SYS_REG(2, 0, 0, 6, 7), 475 + ARM64_SYS_REG(2, 0, 0, 7, 4), 476 + ARM64_SYS_REG(2, 0, 0, 7, 5), 477 + ARM64_SYS_REG(2, 0, 0, 7, 6), 478 + ARM64_SYS_REG(2, 0, 0, 7, 7), 479 + ARM64_SYS_REG(2, 0, 0, 8, 4), 480 + ARM64_SYS_REG(2, 0, 0, 8, 5), 481 + ARM64_SYS_REG(2, 0, 0, 8, 6), 482 + ARM64_SYS_REG(2, 0, 0, 8, 7), 483 + ARM64_SYS_REG(2, 0, 0, 9, 4), 484 + ARM64_SYS_REG(2, 0, 0, 9, 5), 485 + ARM64_SYS_REG(2, 0, 0, 9, 6), 486 + ARM64_SYS_REG(2, 0, 0, 9, 7), 487 + ARM64_SYS_REG(2, 0, 0, 10, 4), 488 + ARM64_SYS_REG(2, 0, 0, 10, 5), 489 + ARM64_SYS_REG(2, 0, 0, 10, 6), 490 + ARM64_SYS_REG(2, 0, 0, 10, 7), 491 + 
ARM64_SYS_REG(2, 0, 0, 11, 4), 492 + ARM64_SYS_REG(2, 0, 0, 11, 5), 493 + ARM64_SYS_REG(2, 0, 0, 11, 6), 494 + ARM64_SYS_REG(2, 0, 0, 11, 7), 495 + ARM64_SYS_REG(2, 0, 0, 12, 4), 496 + ARM64_SYS_REG(2, 0, 0, 12, 5), 497 + ARM64_SYS_REG(2, 0, 0, 12, 6), 498 + ARM64_SYS_REG(2, 0, 0, 12, 7), 499 + ARM64_SYS_REG(2, 0, 0, 13, 4), 500 + ARM64_SYS_REG(2, 0, 0, 13, 5), 501 + ARM64_SYS_REG(2, 0, 0, 13, 6), 502 + ARM64_SYS_REG(2, 0, 0, 13, 7), 503 + ARM64_SYS_REG(2, 0, 0, 14, 4), 504 + ARM64_SYS_REG(2, 0, 0, 14, 5), 505 + ARM64_SYS_REG(2, 0, 0, 14, 6), 506 + ARM64_SYS_REG(2, 0, 0, 14, 7), 507 + ARM64_SYS_REG(2, 0, 0, 15, 4), 508 + ARM64_SYS_REG(2, 0, 0, 15, 5), 509 + ARM64_SYS_REG(2, 0, 0, 15, 6), 510 + ARM64_SYS_REG(2, 0, 0, 15, 7), 511 + ARM64_SYS_REG(2, 4, 0, 7, 0), /* DBGVCR32_EL2 */ 512 + ARM64_SYS_REG(3, 0, 0, 0, 5), /* MPIDR_EL1 */ 513 + ARM64_SYS_REG(3, 0, 0, 1, 0), /* ID_PFR0_EL1 */ 514 + ARM64_SYS_REG(3, 0, 0, 1, 1), /* ID_PFR1_EL1 */ 515 + ARM64_SYS_REG(3, 0, 0, 1, 2), /* ID_DFR0_EL1 */ 516 + ARM64_SYS_REG(3, 0, 0, 1, 3), /* ID_AFR0_EL1 */ 517 + ARM64_SYS_REG(3, 0, 0, 1, 4), /* ID_MMFR0_EL1 */ 518 + ARM64_SYS_REG(3, 0, 0, 1, 5), /* ID_MMFR1_EL1 */ 519 + ARM64_SYS_REG(3, 0, 0, 1, 6), /* ID_MMFR2_EL1 */ 520 + ARM64_SYS_REG(3, 0, 0, 1, 7), /* ID_MMFR3_EL1 */ 521 + ARM64_SYS_REG(3, 0, 0, 2, 0), /* ID_ISAR0_EL1 */ 522 + ARM64_SYS_REG(3, 0, 0, 2, 1), /* ID_ISAR1_EL1 */ 523 + ARM64_SYS_REG(3, 0, 0, 2, 2), /* ID_ISAR2_EL1 */ 524 + ARM64_SYS_REG(3, 0, 0, 2, 3), /* ID_ISAR3_EL1 */ 525 + ARM64_SYS_REG(3, 0, 0, 2, 4), /* ID_ISAR4_EL1 */ 526 + ARM64_SYS_REG(3, 0, 0, 2, 5), /* ID_ISAR5_EL1 */ 527 + ARM64_SYS_REG(3, 0, 0, 2, 6), /* ID_MMFR4_EL1 */ 528 + ARM64_SYS_REG(3, 0, 0, 2, 7), /* ID_ISAR6_EL1 */ 529 + ARM64_SYS_REG(3, 0, 0, 3, 0), /* MVFR0_EL1 */ 530 + ARM64_SYS_REG(3, 0, 0, 3, 1), /* MVFR1_EL1 */ 531 + ARM64_SYS_REG(3, 0, 0, 3, 2), /* MVFR2_EL1 */ 532 + ARM64_SYS_REG(3, 0, 0, 3, 3), 533 + ARM64_SYS_REG(3, 0, 0, 3, 4), /* ID_PFR2_EL1 */ 534 + ARM64_SYS_REG(3, 0, 0, 3, 5), 
/* ID_DFR1_EL1 */ 535 + ARM64_SYS_REG(3, 0, 0, 3, 6), /* ID_MMFR5_EL1 */ 536 + ARM64_SYS_REG(3, 0, 0, 3, 7), 537 + ARM64_SYS_REG(3, 0, 0, 4, 0), /* ID_AA64PFR0_EL1 */ 538 + ARM64_SYS_REG(3, 0, 0, 4, 1), /* ID_AA64PFR1_EL1 */ 539 + ARM64_SYS_REG(3, 0, 0, 4, 2), 540 + ARM64_SYS_REG(3, 0, 0, 4, 3), 541 + ARM64_SYS_REG(3, 0, 0, 4, 4), /* ID_AA64ZFR0_EL1 */ 542 + ARM64_SYS_REG(3, 0, 0, 4, 5), 543 + ARM64_SYS_REG(3, 0, 0, 4, 6), 544 + ARM64_SYS_REG(3, 0, 0, 4, 7), 545 + ARM64_SYS_REG(3, 0, 0, 5, 0), /* ID_AA64DFR0_EL1 */ 546 + ARM64_SYS_REG(3, 0, 0, 5, 1), /* ID_AA64DFR1_EL1 */ 547 + ARM64_SYS_REG(3, 0, 0, 5, 2), 548 + ARM64_SYS_REG(3, 0, 0, 5, 3), 549 + ARM64_SYS_REG(3, 0, 0, 5, 4), /* ID_AA64AFR0_EL1 */ 550 + ARM64_SYS_REG(3, 0, 0, 5, 5), /* ID_AA64AFR1_EL1 */ 551 + ARM64_SYS_REG(3, 0, 0, 5, 6), 552 + ARM64_SYS_REG(3, 0, 0, 5, 7), 553 + ARM64_SYS_REG(3, 0, 0, 6, 0), /* ID_AA64ISAR0_EL1 */ 554 + ARM64_SYS_REG(3, 0, 0, 6, 1), /* ID_AA64ISAR1_EL1 */ 555 + ARM64_SYS_REG(3, 0, 0, 6, 2), 556 + ARM64_SYS_REG(3, 0, 0, 6, 3), 557 + ARM64_SYS_REG(3, 0, 0, 6, 4), 558 + ARM64_SYS_REG(3, 0, 0, 6, 5), 559 + ARM64_SYS_REG(3, 0, 0, 6, 6), 560 + ARM64_SYS_REG(3, 0, 0, 6, 7), 561 + ARM64_SYS_REG(3, 0, 0, 7, 0), /* ID_AA64MMFR0_EL1 */ 562 + ARM64_SYS_REG(3, 0, 0, 7, 1), /* ID_AA64MMFR1_EL1 */ 563 + ARM64_SYS_REG(3, 0, 0, 7, 2), /* ID_AA64MMFR2_EL1 */ 564 + ARM64_SYS_REG(3, 0, 0, 7, 3), 565 + ARM64_SYS_REG(3, 0, 0, 7, 4), 566 + ARM64_SYS_REG(3, 0, 0, 7, 5), 567 + ARM64_SYS_REG(3, 0, 0, 7, 6), 568 + ARM64_SYS_REG(3, 0, 0, 7, 7), 569 + ARM64_SYS_REG(3, 0, 1, 0, 0), /* SCTLR_EL1 */ 570 + ARM64_SYS_REG(3, 0, 1, 0, 1), /* ACTLR_EL1 */ 571 + ARM64_SYS_REG(3, 0, 1, 0, 2), /* CPACR_EL1 */ 572 + ARM64_SYS_REG(3, 0, 2, 0, 0), /* TTBR0_EL1 */ 573 + ARM64_SYS_REG(3, 0, 2, 0, 1), /* TTBR1_EL1 */ 574 + ARM64_SYS_REG(3, 0, 2, 0, 2), /* TCR_EL1 */ 575 + ARM64_SYS_REG(3, 0, 5, 1, 0), /* AFSR0_EL1 */ 576 + ARM64_SYS_REG(3, 0, 5, 1, 1), /* AFSR1_EL1 */ 577 + ARM64_SYS_REG(3, 0, 5, 2, 0), /* ESR_EL1 */ 578 + 
ARM64_SYS_REG(3, 0, 6, 0, 0), /* FAR_EL1 */ 579 + ARM64_SYS_REG(3, 0, 7, 4, 0), /* PAR_EL1 */ 580 + ARM64_SYS_REG(3, 0, 9, 14, 1), /* PMINTENSET_EL1 */ 581 + ARM64_SYS_REG(3, 0, 9, 14, 2), /* PMINTENCLR_EL1 */ 582 + ARM64_SYS_REG(3, 0, 10, 2, 0), /* MAIR_EL1 */ 583 + ARM64_SYS_REG(3, 0, 10, 3, 0), /* AMAIR_EL1 */ 584 + ARM64_SYS_REG(3, 0, 12, 0, 0), /* VBAR_EL1 */ 585 + ARM64_SYS_REG(3, 0, 12, 1, 1), /* DISR_EL1 */ 586 + ARM64_SYS_REG(3, 0, 13, 0, 1), /* CONTEXTIDR_EL1 */ 587 + ARM64_SYS_REG(3, 0, 13, 0, 4), /* TPIDR_EL1 */ 588 + ARM64_SYS_REG(3, 0, 14, 1, 0), /* CNTKCTL_EL1 */ 589 + ARM64_SYS_REG(3, 2, 0, 0, 0), /* CSSELR_EL1 */ 590 + ARM64_SYS_REG(3, 3, 9, 12, 0), /* PMCR_EL0 */ 591 + ARM64_SYS_REG(3, 3, 9, 12, 1), /* PMCNTENSET_EL0 */ 592 + ARM64_SYS_REG(3, 3, 9, 12, 2), /* PMCNTENCLR_EL0 */ 593 + ARM64_SYS_REG(3, 3, 9, 12, 3), /* PMOVSCLR_EL0 */ 594 + ARM64_SYS_REG(3, 3, 9, 12, 4), /* PMSWINC_EL0 */ 595 + ARM64_SYS_REG(3, 3, 9, 12, 5), /* PMSELR_EL0 */ 596 + ARM64_SYS_REG(3, 3, 9, 13, 0), /* PMCCNTR_EL0 */ 597 + ARM64_SYS_REG(3, 3, 9, 14, 0), /* PMUSERENR_EL0 */ 598 + ARM64_SYS_REG(3, 3, 9, 14, 3), /* PMOVSSET_EL0 */ 599 + ARM64_SYS_REG(3, 3, 13, 0, 2), /* TPIDR_EL0 */ 600 + ARM64_SYS_REG(3, 3, 13, 0, 3), /* TPIDRRO_EL0 */ 601 + ARM64_SYS_REG(3, 3, 14, 8, 0), 602 + ARM64_SYS_REG(3, 3, 14, 8, 1), 603 + ARM64_SYS_REG(3, 3, 14, 8, 2), 604 + ARM64_SYS_REG(3, 3, 14, 8, 3), 605 + ARM64_SYS_REG(3, 3, 14, 8, 4), 606 + ARM64_SYS_REG(3, 3, 14, 8, 5), 607 + ARM64_SYS_REG(3, 3, 14, 8, 6), 608 + ARM64_SYS_REG(3, 3, 14, 8, 7), 609 + ARM64_SYS_REG(3, 3, 14, 9, 0), 610 + ARM64_SYS_REG(3, 3, 14, 9, 1), 611 + ARM64_SYS_REG(3, 3, 14, 9, 2), 612 + ARM64_SYS_REG(3, 3, 14, 9, 3), 613 + ARM64_SYS_REG(3, 3, 14, 9, 4), 614 + ARM64_SYS_REG(3, 3, 14, 9, 5), 615 + ARM64_SYS_REG(3, 3, 14, 9, 6), 616 + ARM64_SYS_REG(3, 3, 14, 9, 7), 617 + ARM64_SYS_REG(3, 3, 14, 10, 0), 618 + ARM64_SYS_REG(3, 3, 14, 10, 1), 619 + ARM64_SYS_REG(3, 3, 14, 10, 2), 620 + ARM64_SYS_REG(3, 3, 14, 10, 3), 621 + 
ARM64_SYS_REG(3, 3, 14, 10, 4), 622 + ARM64_SYS_REG(3, 3, 14, 10, 5), 623 + ARM64_SYS_REG(3, 3, 14, 10, 6), 624 + ARM64_SYS_REG(3, 3, 14, 10, 7), 625 + ARM64_SYS_REG(3, 3, 14, 11, 0), 626 + ARM64_SYS_REG(3, 3, 14, 11, 1), 627 + ARM64_SYS_REG(3, 3, 14, 11, 2), 628 + ARM64_SYS_REG(3, 3, 14, 11, 3), 629 + ARM64_SYS_REG(3, 3, 14, 11, 4), 630 + ARM64_SYS_REG(3, 3, 14, 11, 5), 631 + ARM64_SYS_REG(3, 3, 14, 11, 6), 632 + ARM64_SYS_REG(3, 3, 14, 12, 0), 633 + ARM64_SYS_REG(3, 3, 14, 12, 1), 634 + ARM64_SYS_REG(3, 3, 14, 12, 2), 635 + ARM64_SYS_REG(3, 3, 14, 12, 3), 636 + ARM64_SYS_REG(3, 3, 14, 12, 4), 637 + ARM64_SYS_REG(3, 3, 14, 12, 5), 638 + ARM64_SYS_REG(3, 3, 14, 12, 6), 639 + ARM64_SYS_REG(3, 3, 14, 12, 7), 640 + ARM64_SYS_REG(3, 3, 14, 13, 0), 641 + ARM64_SYS_REG(3, 3, 14, 13, 1), 642 + ARM64_SYS_REG(3, 3, 14, 13, 2), 643 + ARM64_SYS_REG(3, 3, 14, 13, 3), 644 + ARM64_SYS_REG(3, 3, 14, 13, 4), 645 + ARM64_SYS_REG(3, 3, 14, 13, 5), 646 + ARM64_SYS_REG(3, 3, 14, 13, 6), 647 + ARM64_SYS_REG(3, 3, 14, 13, 7), 648 + ARM64_SYS_REG(3, 3, 14, 14, 0), 649 + ARM64_SYS_REG(3, 3, 14, 14, 1), 650 + ARM64_SYS_REG(3, 3, 14, 14, 2), 651 + ARM64_SYS_REG(3, 3, 14, 14, 3), 652 + ARM64_SYS_REG(3, 3, 14, 14, 4), 653 + ARM64_SYS_REG(3, 3, 14, 14, 5), 654 + ARM64_SYS_REG(3, 3, 14, 14, 6), 655 + ARM64_SYS_REG(3, 3, 14, 14, 7), 656 + ARM64_SYS_REG(3, 3, 14, 15, 0), 657 + ARM64_SYS_REG(3, 3, 14, 15, 1), 658 + ARM64_SYS_REG(3, 3, 14, 15, 2), 659 + ARM64_SYS_REG(3, 3, 14, 15, 3), 660 + ARM64_SYS_REG(3, 3, 14, 15, 4), 661 + ARM64_SYS_REG(3, 3, 14, 15, 5), 662 + ARM64_SYS_REG(3, 3, 14, 15, 6), 663 + ARM64_SYS_REG(3, 3, 14, 15, 7), /* PMCCFILTR_EL0 */ 664 + ARM64_SYS_REG(3, 4, 3, 0, 0), /* DACR32_EL2 */ 665 + ARM64_SYS_REG(3, 4, 5, 0, 1), /* IFSR32_EL2 */ 666 + ARM64_SYS_REG(3, 4, 5, 3, 0), /* FPEXC32_EL2 */ 667 + KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 0, 668 + KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 1, 669 
+ KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 2, 670 + }; 671 + static __u64 blessed_n = ARRAY_SIZE(blessed_reg);
+1
tools/testing/selftests/kvm/include/kvm_util.h
··· 152 152 struct kvm_guest_debug *debug); 153 153 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, 154 154 struct kvm_mp_state *mp_state); 155 + struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid); 155 156 void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); 156 157 void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); 157 158
+29
tools/testing/selftests/kvm/lib/kvm_util.c
··· 1292 1292 } 1293 1293 1294 1294 /* 1295 + * VM VCPU Get Reg List 1296 + * 1297 + * Input Args: 1298 + * vm - Virtual Machine 1299 + * vcpuid - VCPU ID 1300 + * 1301 + * Output Args: 1302 + * None 1303 + * 1304 + * Return: 1305 + * A pointer to an allocated struct kvm_reg_list 1306 + * 1307 + * Get the list of guest registers which are supported for 1308 + * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls 1309 + */ 1310 + struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid) 1311 + { 1312 + struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list; 1313 + int ret; 1314 + 1315 + ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n); 1316 + TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0"); 1317 + reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64)); 1318 + reg_list->n = reg_list_n.n; 1319 + vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list); 1320 + return reg_list; 1321 + } 1322 + 1323 + /* 1295 1324 * VM VCPU Regs Get 1296 1325 * 1297 1326 * Input Args: