// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018, Google LLC.
 */

#include "linux/bitmap.h"
#include "test_util.h"
#include "kvm_util.h"
#include "pmu.h"
#include "processor.h"
#include "sev.h"

#ifndef NUM_INTERRUPTS
#define NUM_INTERRUPTS 256
#endif

#define KERNEL_CS	0x8
#define KERNEL_DS	0x10
#define KERNEL_TSS	0x18

vm_vaddr_t exception_handlers;
bool host_cpu_is_amd;
bool host_cpu_is_intel;
bool is_forced_emulation_enabled;
uint64_t guest_tsc_khz;

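/*
 * Return a human-readable name for an exception vector, e.g. for use in
 * assertion messages.  Vector 0 (#DE) is special-cased: the guest exception
 * fixup remaps #DE to KVM_MAGIC_DE_VECTOR so that a raw '0' unambiguously
 * means "no exception occurred".
 */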
const char *ex_str(int vector)
{
	switch (vector) {
#define VEC_STR(v)	case v##_VECTOR: return "#" #v
	case DE_VECTOR: return "no exception";
	case KVM_MAGIC_DE_VECTOR: return "#DE";
	VEC_STR(DB);
	VEC_STR(NMI);
	VEC_STR(BP);
	VEC_STR(OF);
	VEC_STR(BR);
	VEC_STR(UD);
	VEC_STR(NM);
	VEC_STR(DF);
	VEC_STR(TS);
	VEC_STR(NP);
	VEC_STR(SS);
	VEC_STR(GP);
	VEC_STR(PF);
	VEC_STR(MF);
	VEC_STR(AC);
	VEC_STR(MC);
	VEC_STR(XM);
	VEC_STR(VE);
	VEC_STR(CP);
	VEC_STR(HV);
	VEC_STR(VC);
	VEC_STR(SX);
	default: return "#??";
#undef VEC_STR
	}
}

static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
{
	fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
		"rcx: 0x%.16llx rdx: 0x%.16llx\n",
		indent, "",
		regs->rax, regs->rbx, regs->rcx, regs->rdx);
	fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
		"rsp: 0x%.16llx rbp: 0x%.16llx\n",
		indent, "",
		regs->rsi, regs->rdi, regs->rsp, regs->rbp);
	fprintf(stream, "%*sr8:  0x%.16llx r9:  0x%.16llx "
		"r10: 0x%.16llx r11: 0x%.16llx\n",
		indent, "",
		regs->r8, regs->r9, regs->r10, regs->r11);
	fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
		"r14: 0x%.16llx r15: 0x%.16llx\n",
		indent, "",
		regs->r12, regs->r13, regs->r14, regs->r15);
	fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
		indent, "",
		regs->rip, regs->rflags);
}

static void segment_dump(FILE *stream, struct kvm_segment *segment,
			 uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
		"selector: 0x%.4x type: 0x%.2x\n",
		indent, "", segment->base, segment->limit,
		segment->selector, segment->type);
	fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
		"db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
		indent, "", segment->present, segment->dpl,
		segment->db, segment->s, segment->l);
	fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
		"unusable: 0x%.2x padding: 0x%.2x\n",
		indent, "", segment->g, segment->avl,
		segment->unusable, segment->padding);
}

static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
			uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
		"padding: 0x%.4x 0x%.4x 0x%.4x\n",
		indent, "", dtable->base, dtable->limit,
		dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}

static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
{
	unsigned int i;

	fprintf(stream, "%*scs:\n", indent, "");
	segment_dump(stream, &sregs->cs, indent + 2);
	fprintf(stream, "%*sds:\n", indent, "");
	segment_dump(stream, &sregs->ds, indent + 2);
	fprintf(stream, "%*ses:\n", indent, "");
	segment_dump(stream, &sregs->es, indent + 2);
	fprintf(stream, "%*sfs:\n", indent, "");
	segment_dump(stream, &sregs->fs, indent + 2);
	fprintf(stream, "%*sgs:\n", indent, "");
	segment_dump(stream, &sregs->gs, indent + 2);
	fprintf(stream, "%*sss:\n", indent, "");
	segment_dump(stream, &sregs->ss, indent + 2);
	fprintf(stream, "%*str:\n", indent, "");
	segment_dump(stream, &sregs->tr, indent + 2);
	fprintf(stream, "%*sldt:\n", indent, "");
	segment_dump(stream, &sregs->ldt, indent + 2);

	fprintf(stream, "%*sgdt:\n", indent, "");
	dtable_dump(stream, &sregs->gdt, indent + 2);
	fprintf(stream, "%*sidt:\n", indent, "");
	dtable_dump(stream, &sregs->idt, indent + 2);

	fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
		"cr3: 0x%.16llx cr4: 0x%.16llx\n",
		indent, "",
		sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
	fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
		"apic_base: 0x%.16llx\n",
		indent, "",
		sregs->cr8, sregs->efer, sregs->apic_base);

	fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
	for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
		fprintf(stream, "%*s%.16llx\n", indent + 2, "",
			sregs->interrupt_bitmap[i]);
	}
}

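/*
 * TDP (two-dimensional paging) is EPT on Intel and NPT on AMD; query the
 * appropriate vendor module's param to determine whether it's enabled.
 */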
bool kvm_is_tdp_enabled(void)
{
	if (host_cpu_is_intel)
		return get_kvm_intel_param_bool("ept");
	else
		return get_kvm_amd_param_bool("npt");
}

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
		    "Unknown or unsupported guest mode: 0x%x", vm->mode);

	/* If needed, create the top-level page table. */
	if (!vm->pgd_created) {
		vm->pgd = vm_alloc_page_table(vm);
		vm->pgd_created = true;
	}
}

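/*
 * Return a host pointer to the PTE for @vaddr at @level within the page
 * table referenced by @parent_pte.  Each x86-64 paging level is indexed by
 * nine bits of the virtual address, hence the 0x1ff mask.
 */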
static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte,
			  uint64_t vaddr, int level)
{
	uint64_t pt_gpa = PTE_GET_PA(*parent_pte);
	uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
	int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;

	TEST_ASSERT((*parent_pte & PTE_PRESENT_MASK) || parent_pte == &vm->pgd,
		    "Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
		    level + 1, vaddr);

	return &page_table[index];
}

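/*
 * Install an upper level page table entry for @vaddr, allocating a new page
 * table if the entry is not present.  If @current_level is the caller's
 * @target_level, create a hugepage mapping instead of a page table.
 */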
static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
				       uint64_t *parent_pte,
				       uint64_t vaddr,
				       uint64_t paddr,
				       int current_level,
				       int target_level)
{
	uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level);

	paddr = vm_untag_gpa(vm, paddr);

	if (!(*pte & PTE_PRESENT_MASK)) {
		*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
		if (current_level == target_level)
			*pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
		else
			*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
	} else {
		/*
		 * Entry already present.  Assert that the caller doesn't want
		 * a hugepage at this level, and that there isn't a hugepage at
		 * this level.
		 */
		TEST_ASSERT(current_level != target_level,
			    "Cannot create hugepage at level: %u, vaddr: 0x%lx",
			    current_level, vaddr);
		TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
			    "Cannot create page table at level: %u, vaddr: 0x%lx",
			    current_level, vaddr);
	}
	return pte;
}

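/*
 * Map @vaddr to @paddr with a page of size PG_LEVEL_SIZE(@level), creating
 * intermediate page tables as needed.  Both addresses must be aligned to the
 * mapping size, and @paddr must be an untagged GPA.
 */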
void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
{
	const uint64_t pg_size = PG_LEVEL_SIZE(level);
	uint64_t *pte = &vm->pgd;
	int current_level;

	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
		    "Unknown or unsupported guest mode: 0x%x", vm->mode);

	TEST_ASSERT((vaddr % pg_size) == 0,
		    "Virtual address not aligned,\n"
		    "  vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % pg_size) == 0,
		    "Physical address not aligned,\n"
		    "  paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		    "Physical address beyond maximum supported,\n"
		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    paddr, vm->max_gfn, vm->page_size);
	TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
		    "Unexpected bits in paddr: %lx", paddr);

	/*
	 * Allocate upper level page tables, if not already present.  Return
	 * early if a hugepage was created.
	 */
	for (current_level = vm->pgtable_levels;
	     current_level > PG_LEVEL_4K;
	     current_level--) {
		pte = virt_create_upper_pte(vm, pte, vaddr, paddr,
					    current_level, level);
		if (*pte & PTE_LARGE_MASK)
			return;
	}

	/* Fill in page table entry. */
	pte = virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K);
	TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
		    "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
	*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);

	/*
	 * Neither SEV nor TDX supports shared page tables, so only the final
	 * leaf PTE needs to have the C/S-bit set manually.
	 */
	if (vm_is_gpa_protected(vm, paddr))
		*pte |= vm->arch.c_bit;
	else
		*pte |= vm->arch.s_bit;
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	__virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
}

void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		    uint64_t nr_bytes, int level)
{
	uint64_t pg_size = PG_LEVEL_SIZE(level);
	uint64_t nr_pages = nr_bytes / pg_size;
	int i;

	TEST_ASSERT(nr_bytes % pg_size == 0,
		    "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx",
		    nr_bytes, pg_size);

	for (i = 0; i < nr_pages; i++) {
		__virt_pg_map(vm, vaddr, paddr, level);
		/*
		 * Mark only the vpages backing this mapping; marking the full
		 * region's worth of pages on every iteration would flag pages
		 * beyond the end of the region as mapped.
		 */
		sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
				  pg_size >> vm->page_shift);

		vaddr += pg_size;
		paddr += pg_size;
	}
}

static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
{
	if (*pte & PTE_LARGE_MASK) {
		TEST_ASSERT(*level == PG_LEVEL_NONE ||
			    *level == current_level,
			    "Unexpected hugepage at level %d", current_level);
		*level = current_level;
	}

	return *level == current_level;
}

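/*
 * Walk the guest page tables to find the PTE that maps @vaddr.  On input,
 * *@level is the target level (or PG_LEVEL_NONE to walk to the leaf); on
 * output it holds the level at which the walk terminated, e.g. if a
 * hugepage was encountered.
 */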
uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
				    int *level)
{
	int va_width = 12 + vm->pgtable_levels * 9;
	uint64_t *pte = &vm->pgd;
	int current_level;

	TEST_ASSERT(!vm->arch.is_pt_protected,
		    "Walking page tables of protected guests is impossible");

	TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= vm->pgtable_levels,
		    "Invalid PG_LEVEL_* '%d'", *level);

	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
		    "Unknown or unsupported guest mode: 0x%x", vm->mode);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
				     (vaddr >> vm->page_shift)),
		    "Invalid virtual address, vaddr: 0x%lx",
		    vaddr);
	/*
	 * Check that the vaddr is a sign-extended va_width value.
	 */
	TEST_ASSERT(vaddr ==
		    (((int64_t)vaddr << (64 - va_width)) >> (64 - va_width)),
		    "Canonical check failed.  The virtual address is invalid.");

	for (current_level = vm->pgtable_levels;
	     current_level > PG_LEVEL_4K;
	     current_level--) {
		pte = virt_get_pte(vm, pte, vaddr, current_level);
		if (vm_is_target_pte(pte, level, current_level))
			return pte;
	}

	return virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K);
}

uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
{
	int level = PG_LEVEL_4K;

	return __vm_get_page_table_entry(vm, vaddr, &level);
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	uint64_t *pml4e, *pml4e_start;
	uint64_t *pdpe, *pdpe_start;
	uint64_t *pde, *pde_start;
	uint64_t *pte, *pte_start;

	if (!vm->pgd_created)
		return;

	fprintf(stream, "%*s                                          "
		"                no\n", indent, "");
	fprintf(stream, "%*s      index hvaddr         gpaddr         "
		"addr         w exec dirty\n",
		indent, "");
	pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);
	for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
		pml4e = &pml4e_start[n1];
		if (!(*pml4e & PTE_PRESENT_MASK))
			continue;
		fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
			" %u\n",
			indent, "",
			pml4e - pml4e_start, pml4e,
			addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
			!!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK));

		pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
		for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
			pdpe = &pdpe_start[n2];
			if (!(*pdpe & PTE_PRESENT_MASK))
				continue;
			fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10llx "
				"%u  %u\n",
				indent, "",
				pdpe - pdpe_start, pdpe,
				addr_hva2gpa(vm, pdpe),
				PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK),
				!!(*pdpe & PTE_NX_MASK));

			pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
			for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
				pde = &pde_start[n3];
				if (!(*pde & PTE_PRESENT_MASK))
					continue;
				fprintf(stream, "%*spde   0x%-3zx %p "
					"0x%-12lx 0x%-10llx %u  %u\n",
					indent, "", pde - pde_start, pde,
					addr_hva2gpa(vm, pde),
					PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK),
					!!(*pde & PTE_NX_MASK));

				pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
				for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
					pte = &pte_start[n4];
					if (!(*pte & PTE_PRESENT_MASK))
						continue;
					fprintf(stream, "%*spte   0x%-3zx %p "
						"0x%-12lx 0x%-10llx %u  %u "
						"    %u    0x%-10lx\n",
						indent, "",
						pte - pte_start, pte,
						addr_hva2gpa(vm, pte),
						PTE_GET_PFN(*pte),
						!!(*pte & PTE_WRITABLE_MASK),
						!!(*pte & PTE_NX_MASK),
						!!(*pte & PTE_DIRTY_MASK),
						((uint64_t) n1 << 27)
						| ((uint64_t) n2 << 18)
						| ((uint64_t) n3 << 9)
						| ((uint64_t) n4));
				}
			}
		}
	}
}

/*
 * Set Unusable Segment
 *
 * Input Args: None
 *
 * Output Args:
 *   segp - Pointer to segment register
 *
 * Return: None
 *
 * Sets the segment register pointed to by @segp to an unusable state.
 */
static void kvm_seg_set_unusable(struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->unusable = true;
}

static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
{
	void *gdt = addr_gva2hva(vm, vm->arch.gdt);
	struct desc64 *desc = gdt + (segp->selector >> 3) * 8;

	desc->limit0 = segp->limit & 0xFFFF;
	desc->base0 = segp->base & 0xFFFF;
	desc->base1 = segp->base >> 16;
	desc->type = segp->type;
	desc->s = segp->s;
	desc->dpl = segp->dpl;
	desc->p = segp->present;
	desc->limit1 = segp->limit >> 16;
	desc->avl = segp->avl;
	desc->l = segp->l;
	desc->db = segp->db;
	desc->g = segp->g;
	desc->base2 = segp->base >> 24;
	if (!segp->s)
		desc->base3 = segp->base >> 32;
}

static void kvm_seg_set_kernel_code_64bit(struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = KERNEL_CS;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
					  * | kFlagCodeReadable
					  */
	segp->g = true;
	segp->l = true;
	segp->present = 1;
}

static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = KERNEL_DS;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
					  * | kFlagDataWritable
					  */
	segp->g = true;
	segp->present = true;
}

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	int level = PG_LEVEL_NONE;
	uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level);

	TEST_ASSERT(*pte & PTE_PRESENT_MASK,
		    "Leaf PTE not PRESENT for gva: 0x%08lx", gva);

	/*
	 * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
	 * address bits to be zero.
	 */
	return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
}

static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->base = base;
	segp->limit = 0x67;
	segp->selector = KERNEL_TSS;
	segp->type = 0xb;
	segp->present = 1;
}

static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct kvm_sregs sregs;

	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
		    "Unknown or unsupported guest mode: 0x%x", vm->mode);

	/* Set mode specific system register values. */
	vcpu_sregs_get(vcpu, &sregs);

	sregs.idt.base = vm->arch.idt;
	sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
	sregs.gdt.base = vm->arch.gdt;
	sregs.gdt.limit = getpagesize() - 1;

	sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
	sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
	if (kvm_cpu_has(X86_FEATURE_XSAVE))
		sregs.cr4 |= X86_CR4_OSXSAVE;
	if (vm->pgtable_levels == 5)
		sregs.cr4 |= X86_CR4_LA57;
	sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);

	kvm_seg_set_unusable(&sregs.ldt);
	kvm_seg_set_kernel_code_64bit(&sregs.cs);
	kvm_seg_set_kernel_data_64bit(&sregs.ds);
	kvm_seg_set_kernel_data_64bit(&sregs.es);
	kvm_seg_set_kernel_data_64bit(&sregs.gs);
	kvm_seg_set_tss_64bit(vm->arch.tss, &sregs.tr);

	sregs.cr3 = vm->pgd;
	vcpu_sregs_set(vcpu, &sregs);
}

static void vcpu_init_xcrs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct kvm_xcrs xcrs = {
		.nr_xcrs = 1,
		.xcrs[0].xcr = 0,
		.xcrs[0].value = kvm_cpu_supported_xcr0(),
	};

	if (!kvm_cpu_has(X86_FEATURE_XSAVE))
		return;

	vcpu_xcrs_set(vcpu, &xcrs);
}

static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
			  int dpl, unsigned short selector)
{
	struct idt_entry *base =
		(struct idt_entry *)addr_gva2hva(vm, vm->arch.idt);
	struct idt_entry *e = &base[vector];

	memset(e, 0, sizeof(*e));
	e->offset0 = addr;
	e->selector = selector;
	e->ist = 0;
	e->type = 14;
	e->dpl = dpl;
	e->p = 1;
	e->offset1 = addr >> 16;
	e->offset2 = addr >> 32;
}

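/*
 * Fix up an expected guest exception.  The guest stashes KVM_EXCEPTION_MAGIC
 * in r9 and the faulting RIP in r10, with the resume RIP in r11; on a match,
 * resume at r11 and report the vector and error code back via r9/r10.  #DE
 * is remapped to KVM_MAGIC_DE_VECTOR, as vector 0 is used to indicate "no
 * exception".
 */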
static bool kvm_fixup_exception(struct ex_regs *regs)
{
	if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10)
		return false;

	if (regs->vector == DE_VECTOR)
		regs->vector = KVM_MAGIC_DE_VECTOR;

	regs->rip = regs->r11;
	regs->r9 = regs->vector;
	regs->r10 = regs->error_code;
	return true;
}

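/*
 * Common guest-side exception dispatcher: invoke the test's handler for the
 * vector if one is installed, else attempt the exception fixup, else report
 * an unhandled exception to the host.
 */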
void route_exception(struct ex_regs *regs)
{
	typedef void (*handler)(struct ex_regs *);
	handler *handlers = (handler *)exception_handlers;

	if (handlers && handlers[regs->vector]) {
		handlers[regs->vector](regs);
		return;
	}

	if (kvm_fixup_exception(regs))
		return;

	GUEST_FAIL("Unhandled exception '0x%lx' at guest RIP '0x%lx'",
		   regs->vector, regs->rip);
}

static void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	extern void *idt_handlers;
	struct kvm_segment seg;
	int i;

	vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
	vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
	vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
	vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);

	/* Handlers have the same address in both address spaces. */
	for (i = 0; i < NUM_INTERRUPTS; i++)
		set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS);

	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;

	kvm_seg_set_kernel_code_64bit(&seg);
	kvm_seg_fill_gdt_64bit(vm, &seg);

	kvm_seg_set_kernel_data_64bit(&seg);
	kvm_seg_fill_gdt_64bit(vm, &seg);

	kvm_seg_set_tss_64bit(vm->arch.tss, &seg);
	kvm_seg_fill_gdt_64bit(vm, &seg);
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *))
{
	vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);

	handlers[vector] = (vm_vaddr_t)handler;
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) == UCALL_ABORT)
		REPORT_GUEST_ASSERT(uc);
}

void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	int r;

	TEST_ASSERT(kvm_has_cap(KVM_CAP_GET_TSC_KHZ),
		    "Require KVM_GET_TSC_KHZ to provide udelay() to guest.");

	vm_create_irqchip(vm);
	vm_init_descriptor_tables(vm);

	sync_global_to_guest(vm, host_cpu_is_intel);
	sync_global_to_guest(vm, host_cpu_is_amd);
	sync_global_to_guest(vm, is_forced_emulation_enabled);
	sync_global_to_guest(vm, pmu_errata_mask);

	if (is_sev_vm(vm)) {
		struct kvm_sev_init init = { 0 };

		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
	}

	r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL);
	TEST_ASSERT(r > 0, "KVM_GET_TSC_KHZ did not provide a valid TSC frequency.");
	guest_tsc_khz = r;
	sync_global_to_guest(vm, guest_tsc_khz);
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	struct kvm_regs regs;

	vcpu_regs_get(vcpu, &regs);
	regs.rip = (unsigned long) guest_code;
	vcpu_regs_set(vcpu, &regs);
}

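/*
 * Create a vCPU with a 64-bit, paging-enabled initial state: supported
 * CPUID, long mode segments and sregs, XCR0, a guest stack, and RUNNABLE
 * MP state (mp_state == 0).
 */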
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	vm_vaddr_t stack_vaddr;
	struct kvm_vcpu *vcpu;

	stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
				       DEFAULT_GUEST_STACK_VADDR_MIN,
				       MEM_REGION_DATA);

	stack_vaddr += DEFAULT_STACK_PGS * getpagesize();

	/*
	 * Align stack to match calling sequence requirements in section "The
	 * Stack Frame" of the System V ABI AMD64 Architecture Processor
	 * Supplement, which requires the value (%rsp + 8) to be a multiple of
	 * 16 when control is transferred to the function entry point.
	 *
	 * If this code is ever used to launch a vCPU with 32-bit entry point it
	 * may need to subtract 4 bytes instead of 8 bytes.
	 */
	TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE),
		    "__vm_vaddr_alloc() did not provide a page-aligned address");
	stack_vaddr -= 8;

	vcpu = __vm_vcpu_add(vm, vcpu_id);
	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
	vcpu_init_sregs(vm, vcpu);
	vcpu_init_xcrs(vm, vcpu);

	/* Setup guest general purpose registers */
	vcpu_regs_get(vcpu, &regs);
	regs.rflags = regs.rflags | 0x2;
	regs.rsp = stack_vaddr;
	vcpu_regs_set(vcpu, &regs);

	/* Setup the MP state */
	mp_state.mp_state = 0;
	vcpu_mp_state_set(vcpu, &mp_state);

	/*
	 * Refresh CPUID after setting SREGS and XCR0, so that KVM's "runtime"
	 * updates to guest CPUID, e.g. for OSXSAVE and XSAVE state size, are
	 * reflected into selftests' vCPU CPUID cache, i.e. so that the cache
	 * is consistent with vCPU state.
	 */
	vcpu_get_cpuid(vcpu);
	return vcpu;
}

struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());

	return vcpu;
}

void vcpu_arch_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->cpuid)
		free(vcpu->cpuid);
}

/* Do not use kvm_supported_cpuid directly except for validity checks. */
static void *kvm_supported_cpuid;

const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
{
	int kvm_fd;

	if (kvm_supported_cpuid)
		return kvm_supported_cpuid;

	kvm_supported_cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
	kvm_fd = open_kvm_dev_path_or_exit();

	kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID,
		  (struct kvm_cpuid2 *)kvm_supported_cpuid);

	close(kvm_fd);
	return kvm_supported_cpuid;
}

static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
			      uint32_t function, uint32_t index,
			      uint8_t reg, uint8_t lo, uint8_t hi)
{
	const struct kvm_cpuid_entry2 *entry;
	int i;

	for (i = 0; i < cpuid->nent; i++) {
		entry = &cpuid->entries[i];

		/*
		 * The output registers in kvm_cpuid_entry2 are in alphabetical
		 * order, but kvm_x86_cpu_feature matches that mess, so yay
		 * pointer shenanigans!
		 */
		if (entry->function == function && entry->index == index)
			return ((&entry->eax)[reg] & GENMASK(hi, lo)) >> lo;
	}

	return 0;
}

bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
		   struct kvm_x86_cpu_feature feature)
{
	return __kvm_cpu_has(cpuid, feature.function, feature.index,
			     feature.reg, feature.bit, feature.bit);
}

uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
			    struct kvm_x86_cpu_property property)
{
	return __kvm_cpu_has(cpuid, property.function, property.index,
			     property.reg, property.lo_bit, property.hi_bit);
}

uint64_t kvm_get_feature_msr(uint64_t msr_index)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};
	int r, kvm_fd;

	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	kvm_fd = open_kvm_dev_path_or_exit();

	r = __kvm_ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
	TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_GET_MSRS, r));

	close(kvm_fd);
	return buffer.entry.data;
}

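/*
 * Request permission to use a dynamically enabled XSAVE feature, e.g. AMX
 * tile data, in the guest.  Skip the test if KVM or the kernel doesn't
 * support the feature.  Must be called before kvm_get_supported_cpuid(),
 * as the cached CPUID would not reflect the newly granted permission.
 */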
void __vm_xsave_require_permission(uint64_t xfeature, const char *name)
{
	int kvm_fd;
	u64 bitmask;
	long rc;
	struct kvm_device_attr attr = {
		.group = 0,
		.attr = KVM_X86_XCOMP_GUEST_SUPP,
		.addr = (unsigned long) &bitmask,
	};

	TEST_ASSERT(!kvm_supported_cpuid,
		    "kvm_get_supported_cpuid() cannot be used before ARCH_REQ_XCOMP_GUEST_PERM");

	TEST_ASSERT(is_power_of_2(xfeature),
		    "Dynamic XFeatures must be enabled one at a time");

	kvm_fd = open_kvm_dev_path_or_exit();
	rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
	close(kvm_fd);

	if (rc == -1 && (errno == ENXIO || errno == EINVAL))
		__TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported");

	TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);

	__TEST_REQUIRE(bitmask & xfeature,
		       "Required XSAVE feature '%s' not supported", name);

	TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, ilog2(xfeature)));

	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
	TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
	TEST_ASSERT(bitmask & xfeature,
		    "'%s' (0x%lx) not permitted after prctl(ARCH_REQ_XCOMP_GUEST_PERM) permitted=0x%lx",
		    name, xfeature, bitmask);
}

void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
{
	TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID");

	/* Allow overriding the default CPUID. */
	if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) {
		free(vcpu->cpuid);
		vcpu->cpuid = NULL;
	}

	if (!vcpu->cpuid)
		vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent);

	memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));
	vcpu_set_cpuid(vcpu);
}

void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
			     struct kvm_x86_cpu_property property,
			     uint32_t value)
{
	struct kvm_cpuid_entry2 *entry;

	entry = __vcpu_get_cpuid_entry(vcpu, property.function, property.index);

	(&entry->eax)[property.reg] &= ~GENMASK(property.hi_bit, property.lo_bit);
	(&entry->eax)[property.reg] |= value << property.lo_bit;

	vcpu_set_cpuid(vcpu);

	/* Sanity check that @value doesn't exceed the bounds in any way. */
	TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value);
}

void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function)
{
	struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);

	entry->eax = 0;
	entry->ebx = 0;
	entry->ecx = 0;
	entry->edx = 0;
	vcpu_set_cpuid(vcpu);
}

void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
				     struct kvm_x86_cpu_feature feature,
				     bool set)
{
	struct kvm_cpuid_entry2 *entry;
	u32 *reg;

	entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
	reg = (&entry->eax) + feature.reg;

	if (set)
		*reg |= BIT(feature.bit);
	else
		*reg &= ~BIT(feature.bit);

	vcpu_set_cpuid(vcpu);
}

uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};

	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;

	vcpu_msrs_get(vcpu, &buffer.header);

	return buffer.entry.data;
}

int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};

	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	buffer.entry.data = msr_value;

	return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header);
}

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;

	TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
		    "  num: %u", num);

	va_start(ap, num);
	vcpu_regs_get(vcpu, &regs);

	if (num >= 1)
		regs.rdi = va_arg(ap, uint64_t);

	if (num >= 2)
		regs.rsi = va_arg(ap, uint64_t);

	if (num >= 3)
		regs.rdx = va_arg(ap, uint64_t);

	if (num >= 4)
		regs.rcx = va_arg(ap, uint64_t);

	if (num >= 5)
		regs.r8 = va_arg(ap, uint64_t);

	if (num >= 6)
		regs.r9 = va_arg(ap, uint64_t);

	vcpu_regs_set(vcpu, &regs);
	va_end(ap);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	struct kvm_regs regs;
	struct kvm_sregs sregs;

	fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id);

	fprintf(stream, "%*sregs:\n", indent + 2, "");
	vcpu_regs_get(vcpu, &regs);
	regs_dump(stream, &regs, indent + 4);

	fprintf(stream, "%*ssregs:\n", indent + 2, "");
	vcpu_sregs_get(vcpu, &sregs);
	sregs_dump(stream, &sregs, indent + 4);
}

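/*
 * Retrieve the save/restore or feature MSR index list from KVM.  The ioctl
 * is invoked twice: first with nmsrs = 0, which is expected to fail with
 * E2BIG while filling in the required count, and then again with an
 * appropriately sized buffer.
 */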
static struct kvm_msr_list *__kvm_get_msr_index_list(bool feature_msrs)
{
	struct kvm_msr_list *list;
	struct kvm_msr_list nmsrs;
	int kvm_fd, r;

	kvm_fd = open_kvm_dev_path_or_exit();

	nmsrs.nmsrs = 0;
	if (!feature_msrs)
		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
	else
		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, &nmsrs);

	TEST_ASSERT(r == -1 && errno == E2BIG,
		    "Expected -E2BIG, got rc: %i errno: %i (%s)",
		    r, errno, strerror(errno));

	list = malloc(sizeof(*list) + nmsrs.nmsrs * sizeof(list->indices[0]));
	TEST_ASSERT(list, "-ENOMEM when allocating MSR index list");
	list->nmsrs = nmsrs.nmsrs;

	if (!feature_msrs)
		kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
	else
		kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
	close(kvm_fd);

	TEST_ASSERT(list->nmsrs == nmsrs.nmsrs,
		    "Number of MSRs in list changed, was %d, now %d",
		    nmsrs.nmsrs, list->nmsrs);
	return list;
}

const struct kvm_msr_list *kvm_get_msr_index_list(void)
{
	static const struct kvm_msr_list *list;

	if (!list)
		list = __kvm_get_msr_index_list(false);
	return list;
}

const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
{
	static const struct kvm_msr_list *list;

	if (!list)
		list = __kvm_get_msr_index_list(true);
	return list;
}

bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)
{
	const struct kvm_msr_list *list = kvm_get_msr_index_list();
	int i;

	for (i = 0; i < list->nmsrs; ++i) {
		if (list->indices[i] == msr_index)
			return true;
	}

	return false;
}

static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu,
				  struct kvm_x86_state *state)
{
	int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);

	if (size) {
		state->xsave = malloc(size);
		vcpu_xsave2_get(vcpu, state->xsave);
	} else {
		state->xsave = malloc(sizeof(struct kvm_xsave));
		vcpu_xsave_get(vcpu, state->xsave);
	}
}

struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu)
{
	const struct kvm_msr_list *msr_list = kvm_get_msr_index_list();
	struct kvm_x86_state *state;
	int i;

	static int nested_size = -1;

	if (nested_size == -1) {
		nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
		TEST_ASSERT(nested_size <= sizeof(state->nested_),
			    "Nested state size too big, %i > %zi",
			    nested_size, sizeof(state->nested_));
	}

	/*
	 * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
	 * guest state is consistent only after userspace re-enters the
	 * kernel with KVM_RUN.  Complete IO prior to migrating state
	 * to a new VM.
	 */
	vcpu_run_complete_io(vcpu);

	state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0]));
	TEST_ASSERT(state, "-ENOMEM when allocating kvm state");

	vcpu_events_get(vcpu, &state->events);
	vcpu_mp_state_get(vcpu, &state->mp_state);
	vcpu_regs_get(vcpu, &state->regs);
	vcpu_save_xsave_state(vcpu, state);

	if (kvm_has_cap(KVM_CAP_XCRS))
		vcpu_xcrs_get(vcpu, &state->xcrs);

	vcpu_sregs_get(vcpu, &state->sregs);

	if (nested_size) {
		state->nested.size = sizeof(state->nested_);

		vcpu_nested_state_get(vcpu, &state->nested);
		TEST_ASSERT(state->nested.size <= nested_size,
			    "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
			    state->nested.size, nested_size);
	} else {
		state->nested.size = 0;
	}

	state->msrs.nmsrs = msr_list->nmsrs;
	for (i = 0; i < msr_list->nmsrs; i++)
		state->msrs.entries[i].index = msr_list->indices[i];
	vcpu_msrs_get(vcpu, &state->msrs);

	vcpu_debugregs_get(vcpu, &state->debugregs);

	return state;
}

void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state)
{
	vcpu_sregs_set(vcpu, &state->sregs);
	vcpu_msrs_set(vcpu, &state->msrs);

	if (kvm_has_cap(KVM_CAP_XCRS))
		vcpu_xcrs_set(vcpu, &state->xcrs);

	vcpu_xsave_set(vcpu, state->xsave);
	vcpu_events_set(vcpu, &state->events);
	vcpu_mp_state_set(vcpu, &state->mp_state);
	vcpu_debugregs_set(vcpu, &state->debugregs);
	vcpu_regs_set(vcpu, &state->regs);

	if (state->nested.size)
		vcpu_nested_state_set(vcpu, &state->nested);
}

void kvm_x86_state_cleanup(struct kvm_x86_state *state)
{
	free(state->xsave);
	free(state);
}

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
{
	if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
		*pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
		*va_bits = 32;
	} else {
		*pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
		*va_bits = kvm_cpu_property(X86_PROPERTY_MAX_VIRT_ADDR);
	}
}

void kvm_init_vm_address_properties(struct kvm_vm *vm)
{
	if (is_sev_vm(vm)) {
		vm->arch.sev_fd = open_sev_dev_path_or_exit();
		vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT));
		vm->gpa_tag_mask = vm->arch.c_bit;
	} else {
		vm->arch.sev_fd = -1;
	}
}

const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
					       uint32_t function, uint32_t index)
{
	int i;

	for (i = 0; i < cpuid->nent; i++) {
		if (cpuid->entries[i].function == function &&
		    cpuid->entries[i].index == index)
			return &cpuid->entries[i];
	}

	TEST_FAIL("CPUID function 0x%x index 0x%x not found", function, index);

	return NULL;
}

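/*
 * Execute a hypercall from the guest using the vendor-appropriate
 * instruction: VMMCALL on AMD, VMCALL on Intel.  The guest checks
 * host_cpu_is_amd at runtime rather than hardcoding either instruction.
 */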
#define X86_HYPERCALL(inputs...)					\
({									\
	uint64_t r;							\
									\
	asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t"		\
		     "jnz 1f\n\t"					\
		     "vmcall\n\t"					\
		     "jmp 2f\n\t"					\
		     "1: vmmcall\n\t"					\
		     "2:"						\
		     : "=a"(r)						\
		     : [use_vmmcall] "r" (host_cpu_is_amd), inputs);	\
									\
	r;								\
})

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
		       uint64_t a3)
{
	return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
}

uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
{
	return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1));
}

void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
{
	GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
}

unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
	const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
	unsigned long ht_gfn, max_gfn, max_pfn;
	uint8_t maxphyaddr, guest_maxphyaddr;

	/*
	 * Use "guest MAXPHYADDR" from KVM if it's available.  Guest MAXPHYADDR
	 * enumerates the max _mappable_ GPA, which can be less than the raw
	 * MAXPHYADDR, e.g. if MAXPHYADDR=52, KVM is using TDP, and the CPU
	 * doesn't support 5-level TDP.
	 */
	guest_maxphyaddr = kvm_cpu_property(X86_PROPERTY_GUEST_MAX_PHY_ADDR);
	guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits;
	TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits,
		    "Guest MAXPHYADDR should never be greater than raw MAXPHYADDR");

	max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1;

	/* Avoid reserved HyperTransport region on AMD processors. */
	if (!host_cpu_is_amd)
		return max_gfn;

	/* On parts with <40 physical address bits, the area is fully hidden */
	if (vm->pa_bits < 40)
		return max_gfn;

	/* Before family 17h, the HyperTransport area is just below 1T. */
	ht_gfn = (1 << 28) - num_ht_pages;
	if (this_cpu_family() < 0x17)
		goto done;

	/*
	 * Otherwise it's at the top of the physical address space, possibly
	 * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX.  Use
	 * the old conservative value if MAXPHYADDR is not enumerated.
	 */
	if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
		goto done;

	maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
	max_pfn = (1ULL << (maxphyaddr - vm->page_shift)) - 1;

	if (this_cpu_has_p(X86_PROPERTY_PHYS_ADDR_REDUCTION))
		max_pfn >>= this_cpu_property(X86_PROPERTY_PHYS_ADDR_REDUCTION);

	ht_gfn = max_pfn - num_ht_pages;
done:
	return min(max_gfn, ht_gfn - 1);
}

void kvm_selftest_arch_init(void)
{
	host_cpu_is_intel = this_cpu_is_intel();
	host_cpu_is_amd = this_cpu_is_amd();
	is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();

	kvm_init_pmu_errata();
}

/*
 * The clocksource name read from sysfs includes a trailing newline, hence
 * the newlines in the string literals below.
 */
bool sys_clocksource_is_based_on_tsc(void)
{
	char *clk_name = sys_get_cur_clocksource();
	bool ret = !strcmp(clk_name, "tsc\n") ||
		   !strcmp(clk_name, "hyperv_clocksource_tsc_page\n");

	free(clk_name);

	return ret;
}

bool kvm_arch_has_default_irqchip(void)
{
	return true;
}