Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "smm.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>
#include <linux/cc_platform.h>
#include <linux/smp.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>
#include <asm/fpu/api.h>

#include <asm/virtext.h>

#include <trace/events/ipi.h>

#include "trace.h"

#include "svm.h"
#include "svm_ops.h"

#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);

#define X2APIC_MSR(x)	(APIC_BASE_MSR + (x >> 4))
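/*
 * Worked example: APIC_TASKPRI is MMIO offset 0x80, so X2APIC_MSR(APIC_TASKPRI)
 * is 0x800 + (0x80 >> 4) = 0x808, the architectural x2APIC TPR MSR; each
 * 16-byte APIC MMIO register maps to one MSR starting at APIC_BASE_MSR.
 */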

static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is initially cleared */
} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
	{ .index = MSR_IA32_SYSENTER_EIP,		.always = false },
	{ .index = MSR_IA32_SYSENTER_ESP,		.always = false },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
	{ .index = MSR_IA32_PRED_CMD,			.always = false },
	{ .index = MSR_IA32_FLUSH_CMD,			.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_EFER,				.always = false },
	{ .index = MSR_IA32_CR_PAT,			.always = false },
	{ .index = MSR_AMD64_SEV_ES_GHCB,		.always = true  },
	{ .index = MSR_TSC_AUX,				.always = false },
	{ .index = X2APIC_MSR(APIC_ID),			.always = false },
	{ .index = X2APIC_MSR(APIC_LVR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TASKPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_ARBPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_PROCPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_EOI),		.always = false },
	{ .index = X2APIC_MSR(APIC_RRR),		.always = false },
	{ .index = X2APIC_MSR(APIC_LDR),		.always = false },
	{ .index = X2APIC_MSR(APIC_DFR),		.always = false },
	{ .index = X2APIC_MSR(APIC_SPIV),		.always = false },
	{ .index = X2APIC_MSR(APIC_ISR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMR),		.always = false },
	{ .index = X2APIC_MSR(APIC_IRR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ESR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ICR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ICR2),		.always = false },

	/*
	 * Note:
	 * AMD does not virtualize APIC TSC-deadline timer mode, but it is
	 * emulated by KVM. Setting bit 18 of the APIC LVTT register (0x832)
	 * would cause the AVIC hardware to generate a #GP fault. Therefore,
	 * always intercept MSR 0x832 and do not set up a direct_access_msr
	 * entry for it.
	 */
	{ .index = X2APIC_MSR(APIC_LVTTHMR),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVTPC),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVT0),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVT1),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVTERR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMICT),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMCCT),		.always = false },
	{ .index = X2APIC_MSR(APIC_TDCR),		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 *
 * pause_filter_count: On processors that support Pause filtering (indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
 *	count value. On VMRUN this value is loaded into an internal counter.
 *	Each time a pause instruction is executed, this counter is decremented
 *	until it reaches zero, at which time a #VMEXIT is generated if pause
 *	intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *	Intercept Filtering for more details.
 *	This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *	upper bound on the amount of time a guest is allowed to execute in a
 *	pause loop. In this mode, a 16-bit pause filter threshold field is
 *	added in the VMCB. The threshold value is a cycle count that is used
 *	to reset the pause counter. As with simple pause filtering, VMRUN
 *	loads the pause count value from the VMCB into an internal counter.
 *	Then, on each pause instruction the hardware checks the elapsed
 *	number of cycles since the most recent pause instruction against the
 *	pause filter threshold. If the elapsed cycle count is greater than
 *	the pause filter threshold, then the internal pause count is reloaded
 *	from the VMCB and execution continues. If the elapsed cycle count is
 *	less than the pause filter threshold, then the internal pause count
 *	is decremented. If the count value is less than zero and PAUSE
 *	intercept is enabled, a #VMEXIT is triggered. If advanced pause
 *	filtering is supported and the pause filter threshold field is set to
 *	zero, the filter will operate in the simpler, count-only mode.
 */

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);
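
/*
 * A worked example, assuming the stock defaults (KVM_SVM_DEFAULT_PLE_WINDOW
 * of 3000 cycles, KVM_DEFAULT_PLE_WINDOW_GROW of 2): a vCPU that keeps
 * triggering pause exits has its window doubled, 3000 -> 6000 -> 12000 ...,
 * capped at pause_filter_count_max, and shrunk back toward pause_filter_count
 * when it halts instead.
 */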

/*
 * Use nested page tables by default. Note, NPT may get forced off by
 * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
 */
bool npt_enabled = true;
module_param_named(npt, npt_enabled, bool, 0444);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable LBR virtualization */
static int lbrv = true;
module_param(lbrv, int, 0444);

static int tsc_scaling = true;
module_param(tsc_scaling, int, 0444);

/*
 * enable/disable AVIC. Because the defaults differ for APICv
 * support between VMX and SVM, we cannot use module_param_named.
 */
static bool avic;
module_param(avic, bool, 0444);

bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);

bool intercept_smi = true;
module_param(intercept_smi, bool, 0444);

bool vnmi = true;
module_param(vnmi, bool, 0444);

static bool svm_gp_erratum_intercept = true;

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static unsigned long iopm_base;

DEFINE_PER_CPU(struct svm_cpu_data, svm_data);

/*
 * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via
 * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
 *
 * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
 * defer the restoration of TSC_AUX until the CPU returns to userspace.
 */
static int tsc_aux_uret_slot __read_mostly = -1;

static const u32 msrpm_ranges[] = { 0, 0xc0000000, 0xc0010000 };

#define NUM_MSR_MAPS	ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE	2048
#define MSRS_IN_RANGE	(MSRS_RANGE_SIZE * 8 / 2)
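
/*
 * Each 2K-byte range of the permission map covers MSRS_IN_RANGE
 * (2048 * 8 / 2 = 8192) MSRs at two bits (read and write intercept) per
 * MSR. For example, MSR_STAR (0xc0000081) lands in the second range:
 * byte offset (0x81 / 4) + 2048 = 2080, i.e. u32 offset 520 as returned
 * by svm_msrpm_offset() below.
 */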

u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

static void svm_flush_tlb_current(struct kvm_vcpu *vcpu);

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 old_efer = vcpu->arch.efer;

	vcpu->arch.efer = efer;

	if (!npt_enabled) {
		/* Shadow paging assumes NX to be available. */
		efer |= EFER_NX;

		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}

	if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
		if (!(efer & EFER_SVME)) {
			svm_leave_nested(vcpu);
			svm_set_gif(svm, true);
			/* #GP intercept is still needed for vmware backdoor */
			if (!enable_vmware_backdoor)
				clr_exception_intercept(svm, GP_VECTOR);

			/*
			 * Free the nested guest state, unless we are in SMM.
			 * In this case we will return to the nested guest
			 * as soon as we leave SMM.
			 */
			if (!is_smm(vcpu))
				svm_free_nested(svm);

		} else {
			int ret = svm_allocate_nested(svm);

			if (ret) {
				vcpu->arch.efer = old_efer;
				return ret;
			}

			/*
			 * Never intercept #GP for SEV guests, KVM can't
			 * decrypt guest memory to work around the erratum.
			 */
			if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
				set_exception_intercept(svm, GP_VECTOR);
		}
	}

	svm->vmcb->save.efer = efer | EFER_SVME;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
	return 0;
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
					   bool commit_side_effects)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long old_rflags;

	/*
	 * SEV-ES does not expose the next RIP. The RIP update is controlled by
	 * the type of exit and the #VC handler in the guest.
	 */
	if (sev_es_guest(vcpu->kvm))
		goto done;

	if (nrips && svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (unlikely(!commit_side_effects))
			old_rflags = svm->vmcb->save.rflags;

		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
			return 0;

		if (unlikely(!commit_side_effects))
			svm->vmcb->save.rflags = old_rflags;
	} else {
		kvm_rip_write(vcpu, svm->next_rip);
	}

done:
	if (likely(commit_side_effects))
		svm_set_interrupt_shadow(vcpu, 0);

	return 1;
}

static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	return __svm_skip_emulated_instruction(vcpu, true);
}

static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
{
	unsigned long rip, old_rip = kvm_rip_read(vcpu);
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Due to architectural shortcomings, the CPU doesn't always provide
	 * NextRIP, e.g. if KVM intercepted an exception that occurred while
	 * the CPU was vectoring an INTO/INT3 in the guest. Temporarily skip
	 * the instruction even if NextRIP is supported to acquire the next
	 * RIP so that it can be shoved into the NextRIP field, otherwise
	 * hardware will fail to advance guest RIP during event injection.
	 * Drop the exception/interrupt if emulation fails and effectively
	 * retry the instruction, it's the least awful option. If NRIPS is
	 * in use, the skip must not commit any side effects such as clearing
	 * the interrupt shadow or RFLAGS.RF.
	 */
	if (!__svm_skip_emulated_instruction(vcpu, !nrips))
		return -EIO;

	rip = kvm_rip_read(vcpu);

	/*
	 * Save the injection information, even when using next_rip, as the
	 * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection
	 * doesn't complete due to a VM-Exit occurring while the CPU is
	 * vectoring the event. Decoding the instruction isn't guaranteed to
	 * work as there may be no backing instruction, e.g. if the event is
	 * being injected by L1 for L2, or if the guest is patching INT3 into
	 * a different instruction.
	 */
	svm->soft_int_injected = true;
	svm->soft_int_csbase = svm->vmcb->save.cs.base;
	svm->soft_int_old_rip = old_rip;
	svm->soft_int_next_rip = rip;

	if (nrips)
		kvm_rip_write(vcpu, old_rip);

	if (static_cpu_has(X86_FEATURE_NRIPS))
		svm->vmcb->control.next_rip = rip;

	return 0;
}

static void svm_inject_exception(struct kvm_vcpu *vcpu)
{
	struct kvm_queued_exception *ex = &vcpu->arch.exception;
	struct vcpu_svm *svm = to_svm(vcpu);

	kvm_deliver_exception_payload(vcpu, ex);

	if (kvm_exception_is_soft(ex->vector) &&
	    svm_update_soft_interrupt_rip(vcpu))
		return;

	svm->vmcb->control.event_inj = ex->vector
		| SVM_EVTINJ_VALID
		| (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = ex->error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing the vCPU's osvw.length to 3 we are telling the guest
	 * that all osvw.status bits inside that length, including bit 0
	 * (which is reserved for erratum 298), are valid. However, if the
	 * host processor's osvw_len is 0 then osvw_status[0] carries no
	 * information. We need to be conservative here and therefore we tell
	 * the guest that erratum 298 is present (because we really don't
	 * know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static bool kvm_is_svm_supported(void)
{
	int cpu = raw_smp_processor_id();
	const char *msg;
	u64 vm_cr;

	if (!cpu_has_svm(&msg)) {
		pr_err("SVM not supported by CPU %d, %s\n", cpu, msg);
		return false;
	}

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		pr_info("KVM is unsupported when running as an SEV guest\n");
		return false;
	}

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) {
		pr_err("SVM disabled (by BIOS) in MSR_VM_CR on CPU %d\n", cpu);
		return false;
	}

	return true;
}

static int svm_check_processor_compat(void)
{
	if (!kvm_is_svm_supported())
		return -EIO;

	return 0;
}

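/*
 * MSR_AMD64_TSC_RATIO holds a fixed-point multiplier with 32 fractional
 * bits, so a ratio of 1.0 (SVM_TSC_RATIO_DEFAULT) is 1ULL << 32 and, for
 * instance, running the guest TSC at half the host frequency would use
 * 1ULL << 31.
 */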
void __svm_write_tsc_multiplier(u64 multiplier)
{
	preempt_disable();

	if (multiplier == __this_cpu_read(current_tsc_ratio))
		goto out;

	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
	__this_cpu_write(current_tsc_ratio, multiplier);
out:
	preempt_enable();
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (tsc_scaling)
		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{
	struct svm_cpu_data *sd;
	uint64_t efer;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	sd = per_cpu_ptr(&svm_data, me);
	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		/*
		 * Set the default value, even if we don't use TSC scaling,
		 * to avoid leaving a stale value in the MSR.
		 */
		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);

	if (!sd->save_area)
		return;

	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	sd->save_area_pa = 0;
	sd->save_area = NULL;
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
	int ret = -ENOMEM;

	memset(sd, 0, sizeof(struct svm_cpu_data));
	sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!sd->save_area)
		return ret;

	ret = sev_cpu_init(sd);
	if (ret)
		goto free_save_area;

	sd->save_area_pa = __sme_page_pa(sd->save_area);
	return 0;

free_save_area:
	__free_page(sd->save_area);
	sd->save_area = NULL;
	return ret;
}

static int direct_access_msr_slot(u32 msr)
{
	u32 i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == msr)
			return i;

	return -ENOENT;
}

static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
				     int write)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int slot = direct_access_msr_slot(msr);

	if (slot == -ENOENT)
		return;

	/* Set the shadow bitmaps to the desired intercept states */
	if (read)
		set_bit(slot, svm->shadow_msr_intercept.read);
	else
		clear_bit(slot, svm->shadow_msr_intercept.read);

	if (write)
		set_bit(slot, svm->shadow_msr_intercept.write);
	else
		clear_bit(slot, svm->shadow_msr_intercept.write);
}

static bool valid_msr_intercept(u32 index)
{
	return direct_access_msr_slot(index) != -ENOENT;
}

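/*
 * Each u32 of the permission map covers 16 MSRs, two bits apiece: bit 2n
 * intercepts reads and bit 2n + 1 intercepts writes of the n-th MSR in
 * the chunk, hence the "2 * (msr & 0x0f)" arithmetic below.
 */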
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
	u8 bit_write;
	unsigned long tmp;
	u32 offset;
	u32 *msrpm;

	/*
	 * For the non-nested case:
	 * If the L01 MSR bitmap does not intercept the MSR, then we need to
	 * save it.
	 *
	 * For the nested case:
	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
	 * save it.
	 */
	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
				      to_svm(vcpu)->msrpm;

	offset    = svm_msrpm_offset(msr);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	return test_bit(bit_write, &tmp);
}

static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
					u32 msr, int read, int write)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at the
	 * beginning of the file.
	 */
	WARN_ON(!valid_msr_intercept(msr));

	/* Force intercepts for MSRs that are not allowed by the MSR filter */
	if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
		read = 0;

	if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
		write = 0;

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;

	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
	svm->nested.force_msr_bitmap_recalc = true;
}

void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write)
{
	set_shadow_msr_intercept(vcpu, msr, read, write);
	set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
}

u32 *svm_vcpu_alloc_msrpm(void)
{
	unsigned int order = get_order(MSRPM_SIZE);
	struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
	u32 *msrpm;

	if (!pages)
		return NULL;

	msrpm = page_address(pages);
	memset(msrpm, 0xff, PAGE_SIZE * (1 << order));

	return msrpm;
}

void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;
		set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
{
	int i;

	if (intercept == svm->x2avic_msrs_intercepted)
		return;

	if (!x2avic_enabled ||
	    !apic_x2apic_mode(svm->vcpu.arch.apic))
		return;

	for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
		int index = direct_access_msrs[i].index;

		if ((index < APIC_BASE_MSR) ||
		    (index > APIC_BASE_MSR + 0xff))
			continue;
		set_msr_interception(&svm->vcpu, svm->msrpm, index,
				     !intercept, !intercept);
	}

	svm->x2avic_msrs_intercepted = intercept;
}

void svm_vcpu_free_msrpm(u32 *msrpm)
{
	__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
}

static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 i;

	/*
	 * Set intercept permissions for all direct access MSRs again. They
	 * will automatically get filtered through the MSR filter, so we are
	 * back in sync after this.
	 */
	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 msr = direct_access_msrs[i].index;
		u32 read = test_bit(i, svm->shadow_msr_intercept.read);
		u32 write = test_bit(i, svm->shadow_msr_intercept.write);

		set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers, the msrpm_offsets table has overflowed. Just
	 * increase MSRPM_OFFSETS in that case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
	to_vmcb->save.dbgctl		= from_vmcb->save.dbgctl;
	to_vmcb->save.br_from		= from_vmcb->save.br_from;
	to_vmcb->save.br_to		= from_vmcb->save.br_to;
	to_vmcb->save.last_excp_from	= from_vmcb->save.last_excp_from;
	to_vmcb->save.last_excp_to	= from_vmcb->save.last_excp_to;

	vmcb_mark_dirty(to_vmcb, VMCB_LBR);
}

static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);

	/* Move the LBR MSRs to the vmcb02 so that the guest can see them. */
	if (is_guest_mode(vcpu))
		svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
}

static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);

	/*
	 * Move the LBR MSRs back to the vmcb01 to avoid copying them
	 * on nested guest entries.
	 */
	if (is_guest_mode(vcpu))
		svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
}

static int svm_get_lbr_msr(struct vcpu_svm *svm, u32 index)
{
	/*
	 * If LBR virtualization is disabled, the LBR MSRs are always kept
	 * in the vmcb01 to avoid copying them on nested guest entries.
	 *
	 * If nested, and LBR virtualization is enabled/disabled, the MSRs
	 * are moved between the vmcb01 and vmcb02 as needed.
	 */
	struct vmcb *vmcb =
		(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) ?
			svm->vmcb : svm->vmcb01.ptr;

	switch (index) {
	case MSR_IA32_DEBUGCTLMSR:
		return vmcb->save.dbgctl;
	case MSR_IA32_LASTBRANCHFROMIP:
		return vmcb->save.br_from;
	case MSR_IA32_LASTBRANCHTOIP:
		return vmcb->save.br_to;
	case MSR_IA32_LASTINTFROMIP:
		return vmcb->save.last_excp_from;
	case MSR_IA32_LASTINTTOIP:
		return vmcb->save.last_excp_to;
	default:
		KVM_BUG(false, svm->vcpu.kvm,
			"%s: Unknown MSR 0x%x", __func__, index);
		return 0;
	}
}

void svm_update_lbrv(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	bool enable_lbrv = svm_get_lbr_msr(svm, MSR_IA32_DEBUGCTLMSR) &
				DEBUGCTLMSR_LBR;

	bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
				      LBR_CTL_ENABLE_MASK);

	if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled))
		if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))
			enable_lbrv = true;

	if (enable_lbrv == current_enable_lbrv)
		return;

	if (enable_lbrv)
		svm_enable_lbrv(vcpu);
	else
		svm_disable_lbrv(vcpu);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}

static void grow_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	if (kvm_pause_in_guest(vcpu->kvm))
		return;

	control->pause_filter_count = __grow_ple_window(old,
							pause_filter_count,
							pause_filter_count_grow,
							pause_filter_count_max);

	if (control->pause_filter_count != old) {
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	if (kvm_pause_in_guest(vcpu->kvm))
		return;

	control->pause_filter_count =
			__shrink_ple_window(old,
					    pause_filter_count,
					    pause_filter_count_shrink,
					    pause_filter_count);
	if (control->pause_filter_count != old) {
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}

static void svm_hardware_unsetup(void)
{
	int cpu;

	sev_hardware_unsetup();

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
		     get_order(IOPM_SIZE));
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.tsc_offset;
}

static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->tsc_ratio_msr;
}

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
	svm->vmcb->control.tsc_offset = offset;
	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
{
	__svm_write_tsc_multiplier(multiplier);
}

/* Evaluate instruction intercepts that depend on guest CPUID features. */
static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
					      struct vcpu_svm *svm)
{
	/*
	 * Intercept INVPCID if shadow paging is enabled to sync/free shadow
	 * roots, or if INVPCID is disabled in the guest to inject #UD.
	 */
	if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
		if (!npt_enabled ||
		    !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID))
			svm_set_intercept(svm, INTERCEPT_INVPCID);
		else
			svm_clr_intercept(svm, INTERCEPT_INVPCID);
	}

	if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
		if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
			svm_clr_intercept(svm, INTERCEPT_RDTSCP);
		else
			svm_set_intercept(svm, INTERCEPT_RDTSCP);
	}
}

static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (guest_cpuid_is_intel(vcpu)) {
		/*
		 * We must intercept SYSENTER_EIP and SYSENTER_ESP
		 * accesses because the processor only stores 32 bits.
		 * For the same reason we cannot use virtual VMLOAD/VMSAVE.
		 */
		svm_set_intercept(svm, INTERCEPT_VMLOAD);
		svm_set_intercept(svm, INTERCEPT_VMSAVE);
		svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);

		svm->v_vmload_vmsave_enabled = false;
	} else {
		/*
		 * If hardware supports Virtual VMLOAD VMSAVE then enable it
		 * in the VMCB and clear intercepts to avoid #VMEXIT.
		 */
		if (vls) {
			svm_clr_intercept(svm, INTERCEPT_VMLOAD);
			svm_clr_intercept(svm, INTERCEPT_VMSAVE);
			svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
		}
		/* No need to intercept these MSRs */
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
	}
}

static void init_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb01.ptr;
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;

	svm_set_intercept(svm, INTERCEPT_CR0_READ);
	svm_set_intercept(svm, INTERCEPT_CR3_READ);
	svm_set_intercept(svm, INTERCEPT_CR4_READ);
	svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
	svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
	svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(vcpu))
		svm_set_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);
	/*
	 * Guest access to VMware backdoor ports could legitimately
	 * trigger #GP because of TSS I/O permission bitmap.
	 * We intercept those #GP and allow access to them anyway
	 * as VMware does. Don't intercept #GP for SEV guests as KVM can't
	 * decrypt guest memory to decode the faulting instruction.
	 */
	if (enable_vmware_backdoor && !sev_guest(vcpu->kvm))
		set_exception_intercept(svm, GP_VECTOR);

	svm_set_intercept(svm, INTERCEPT_INTR);
	svm_set_intercept(svm, INTERCEPT_NMI);

	if (intercept_smi)
		svm_set_intercept(svm, INTERCEPT_SMI);

	svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	svm_set_intercept(svm, INTERCEPT_RDPMC);
	svm_set_intercept(svm, INTERCEPT_CPUID);
	svm_set_intercept(svm, INTERCEPT_INVD);
	svm_set_intercept(svm, INTERCEPT_INVLPG);
	svm_set_intercept(svm, INTERCEPT_INVLPGA);
	svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
	svm_set_intercept(svm, INTERCEPT_MSR_PROT);
	svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
	svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
	svm_set_intercept(svm, INTERCEPT_VMRUN);
	svm_set_intercept(svm, INTERCEPT_VMMCALL);
	svm_set_intercept(svm, INTERCEPT_VMLOAD);
	svm_set_intercept(svm, INTERCEPT_VMSAVE);
	svm_set_intercept(svm, INTERCEPT_STGI);
	svm_set_intercept(svm, INTERCEPT_CLGI);
	svm_set_intercept(svm, INTERCEPT_SKINIT);
	svm_set_intercept(svm, INTERCEPT_WBINVD);
	svm_set_intercept(svm, INTERCEPT_XSETBV);
	svm_set_intercept(svm, INTERCEPT_RDPRU);
	svm_set_intercept(svm, INTERCEPT_RSM);

	if (!kvm_mwait_in_guest(vcpu->kvm)) {
		svm_set_intercept(svm, INTERCEPT_MONITOR);
		svm_set_intercept(svm, INTERCEPT_MWAIT);
	}

	if (!kvm_hlt_in_guest(vcpu->kvm))
		svm_set_intercept(svm, INTERCEPT_HLT);

	control->iopm_base_pa = __sme_set(iopm_base);
	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
			  SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
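	/*
	 * Together with the architectural reset value RIP = 0xfff0, the CS
	 * base/selector above make the first instruction fetch come from the
	 * conventional reset vector 0xfffffff0.
	 */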

	save->gdtr.base = 0;
	save->gdtr.limit = 0xffff;
	save->idtr.base = 0;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	if (npt_enabled) {
		/* Set up the VMCB for Nested Paging */
		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
		svm_clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		svm_clr_intercept(svm, INTERCEPT_CR3_READ);
		svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = vcpu->arch.pat;
		save->cr3 = 0;
	}
	svm->current_vmcb->asid_generation = 0;
	svm->asid = 0;

	svm->nested.vmcb12_gpa = INVALID_GPA;
	svm->nested.last_vmcb12_gpa = INVALID_GPA;

	if (!kvm_pause_in_guest(vcpu->kvm)) {
		control->pause_filter_count = pause_filter_count;
		if (pause_filter_thresh)
			control->pause_filter_thresh = pause_filter_thresh;
		svm_set_intercept(svm, INTERCEPT_PAUSE);
	} else {
		svm_clr_intercept(svm, INTERCEPT_PAUSE);
	}

	svm_recalc_instruction_intercepts(vcpu, svm);

	/*
	 * If the host supports V_SPEC_CTRL then disable the interception
	 * of MSR_IA32_SPEC_CTRL.
	 */
	if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);

	if (kvm_vcpu_apicv_active(vcpu))
		avic_init_vmcb(svm, vmcb);

	if (vnmi)
		svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;

	if (vgif) {
		svm_clr_intercept(svm, INTERCEPT_STGI);
		svm_clr_intercept(svm, INTERCEPT_CLGI);
		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
	}

	if (sev_guest(vcpu->kvm))
		sev_init_vmcb(svm);

	svm_hv_init_vmcb(vmcb);
	init_vmcb_after_set_cpuid(vcpu);

	vmcb_mark_all_dirty(vmcb);

	enable_gif(svm);
}

static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm_vcpu_init_msrpm(vcpu, svm->msrpm);

	svm_init_osvw(vcpu);
	vcpu->arch.microcode_version = 0x01000065;
	svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;

	svm->nmi_masked = false;
	svm->awaiting_iret_completion = false;

	if (sev_es_guest(vcpu->kvm))
		sev_es_vcpu_reset(svm);
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->spec_ctrl = 0;
	svm->virt_spec_ctrl = 0;

	init_vmcb(vcpu);

	if (!init_event)
		__svm_vcpu_reset(vcpu);
}

void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
{
	svm->current_vmcb = target_vmcb;
	svm->vmcb = target_vmcb->ptr;
}

static int svm_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;
	struct page *vmcb01_page;
	struct page *vmsa_page = NULL;
	int err;

	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
	svm = to_svm(vcpu);

	err = -ENOMEM;
	vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vmcb01_page)
		goto out;

	if (sev_es_guest(vcpu->kvm)) {
		/*
		 * SEV-ES guests require a separate VMSA page used to contain
		 * the encrypted register state of the guest.
		 */
		vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
		if (!vmsa_page)
			goto error_free_vmcb_page;

		/*
		 * SEV-ES guests maintain an encrypted version of their FPU
		 * state which is restored and saved on VMRUN and VMEXIT.
		 * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't
		 * do xsave/xrstor on it.
		 */
		fpstate_set_confidential(&vcpu->arch.guest_fpu);
	}

	err = avic_init_vcpu(svm);
	if (err)
		goto error_free_vmsa_page;

	svm->msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->msrpm) {
		err = -ENOMEM;
		goto error_free_vmsa_page;
	}

	svm->x2avic_msrs_intercepted = true;

	svm->vmcb01.ptr = page_address(vmcb01_page);
	svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
	svm_switch_vmcb(svm, &svm->vmcb01);

	if (vmsa_page)
		svm->sev_es.vmsa = page_address(vmsa_page);

	svm->guest_state_loaded = false;

	return 0;

error_free_vmsa_page:
	if (vmsa_page)
		__free_page(vmsa_page);
error_free_vmcb_page:
	__free_page(vmcb01_page);
out:
	return err;
}

static void svm_clear_current_vmcb(struct vmcb *vmcb)
{
	int i;

	for_each_online_cpu(i)
		cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL);
}

static void svm_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * The vmcb page can be recycled, causing a false negative in
	 * svm_vcpu_load(). So, ensure that no logical CPU has this
	 * vmcb page recorded as its current vmcb.
	 */
	svm_clear_current_vmcb(svm->vmcb);

	svm_leave_nested(vcpu);
	svm_free_nested(svm);

	sev_free_vcpu(vcpu);

	__free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
}

static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);

	if (sev_es_guest(vcpu->kvm))
		sev_es_unmap_ghcb(svm);

	if (svm->guest_state_loaded)
		return;

	/*
	 * Save additional host state that will be restored on VMEXIT (sev-es)
	 * or subsequent vmload of host save area.
	 */
	vmsave(sd->save_area_pa);
	if (sev_es_guest(vcpu->kvm)) {
		struct sev_es_save_area *hostsa;

		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);

		sev_es_prepare_switch_to_guest(hostsa);
	}

	if (tsc_scaling)
		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);

	if (likely(tsc_aux_uret_slot >= 0))
		kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);

	svm->guest_state_loaded = true;
}

static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->guest_state_loaded = false;
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);

	if (sd->current_vmcb != svm->vmcb) {
		sd->current_vmcb = svm->vmcb;

		if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
			indirect_branch_prediction_barrier();
	}
	if (kvm_vcpu_apicv_active(vcpu))
		avic_vcpu_load(vcpu, cpu);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_apicv_active(vcpu))
		avic_vcpu_put(vcpu);

	svm_prepare_host_switch(vcpu);

	++vcpu->stat.host_state_reload;
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long rflags = svm->vmcb->save.rflags;

	if (svm->nmi_singlestep) {
		/* Hide our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			rflags &= ~X86_EFLAGS_RF;
	}
	return rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (to_svm(vcpu)->nmi_singlestep)
		rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);

	/*
	 * Any change of EFLAGS.VM is accompanied by a reload of SS
	 * (caused by either a task switch or an inter-privilege IRET),
	 * so we do not need to update the CPL here.
	 */
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
{
	struct vmcb *vmcb = to_svm(vcpu)->vmcb;

	return sev_es_guest(vcpu->kvm)
		? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
		: kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	kvm_register_mark_available(vcpu, reg);

	switch (reg) {
	case VCPU_EXREG_PDPTR:
		/*
		 * When !npt_enabled, mmu->pdptrs[] is already available since
		 * it is always updated per SDM when moving to CRs.
		 */
		if (npt_enabled)
			load_pdptrs(vcpu, kvm_read_cr3(vcpu));
		break;
	default:
		KVM_BUG_ON(1, vcpu->kvm);
	}
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control;

	/*
	 * The following fields are ignored when AVIC is enabled.
	 */
	WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu));

	svm_set_intercept(svm, INTERCEPT_VINTR);

	/*
	 * Recalculating intercepts may have cleared the VINTR intercept. If
	 * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF
	 * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN.
	 * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as
	 * interrupts will never be unblocked while L2 is running.
	 */
	if (!svm_is_intercept(svm, INTERCEPT_VINTR))
		return;

	/*
	 * This is just a dummy VINTR to actually cause a vmexit to happen.
	 * Actual injection of virtual interrupts happens through EVENTINJ.
	 */
	control = &svm->vmcb->control;
	control->int_vector = 0x0;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	svm_clr_intercept(svm, INTERCEPT_VINTR);

	/* Drop int_ctl fields related to VINTR injection. */
	svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
	if (is_guest_mode(&svm->vcpu)) {
		svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;

		WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
			(svm->nested.ctl.int_ctl & V_TPR_MASK));

		svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
			V_IRQ_INJECTION_BITS_MASK;

		svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
	}

	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
	struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save01->fs;
	case VCPU_SREG_GS: return &save01->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save01->tr;
	case VCPU_SREG_LDTR: return &save01->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

	/*
	 * AMD CPUs circa 2014 track the G bit for all segments except CS.
	 * However, the SVM spec states that the G bit is not observed by the
	 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
	 * So let's synthesize a legal G bit for all segments, this helps
	 * running KVM nested. It also helps cross-vendor migration, because
	 * Intel's vmentry has a check on the 'G' bit.
	 */
	var->g = s->limit > 0xfffff;

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross-vendor migration purposes by treating segments that are
	 * "not present" as unusable.
	 */
	var->unusable = !var->present;

	switch (seg) {
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed.
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		/* This is symmetric with svm_set_segment() */
		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
		break;
	}
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	svm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
	vmcb_mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
	vmcb_mark_dirty(svm->vmcb, VMCB_DT);
}

static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * For guests that don't set guest_state_protected, the cr3 update is
	 * handled via kvm_mmu_load() while entering the guest. For guests
	 * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to
	 * the VMCB save area now, since the save area will become the initial
	 * contents of the VMSA, and future VMCB save area updates won't be
	 * seen.
	 */
	if (sev_es_guest(vcpu->kvm)) {
		svm->vmcb->save.cr3 = cr3;
		vmcb_mark_dirty(svm->vmcb, VMCB_CR);
	}
}

static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	return true;
}

void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 hcr0 = cr0;
	bool old_paging = is_paging(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	vcpu->arch.cr0 = cr0;

	if (!npt_enabled) {
		hcr0 |= X86_CR0_PG | X86_CR0_WP;
		if (old_paging != is_paging(vcpu))
			svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
	}

	/*
	 * Re-enable caching here because the QEMU BIOS
	 * does not do it; this results in some delay at
	 * reboot.
	 */
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);

	svm->vmcb->save.cr0 = hcr0;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);

	/*
	 * SEV-ES guests must always keep the CR intercepts cleared. CR
	 * tracking is done using the CR write traps.
	 */
	if (sev_es_guest(vcpu->kvm))
		return;

	if (hcr0 == cr0) {
		/* Selective CR0 write remains on. */
		svm_clr_intercept(svm, INTERCEPT_CR0_READ);
		svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		svm_set_intercept(svm, INTERCEPT_CR0_READ);
		svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}

static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	return true;
}

void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
	unsigned long old_cr4 = vcpu->arch.cr4;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		svm_flush_tlb_current(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled) {
		cr4 |= X86_CR4_PAE;

		if (!is_paging(vcpu))
			cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
	}
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
	vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);

	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
		kvm_update_cpuid_runtime(vcpu);
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1897 /*
1898 * This is always accurate, except if SYSRET returned to a segment
1899 * with SS.DPL != 3. Intel does not have this quirk, and always
1900 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1901 * would entail passing the CPL to userspace and back.
1902 */
1903 if (seg == VCPU_SREG_SS)
1904 /* This is symmetric with svm_get_segment() */
1905 svm->vmcb->save.cpl = (var->dpl & 3);
1906
1907 vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
1908}
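
/*
* Illustrative sketch, not used by the driver: the VMCB segment
* attribute packing done in svm_set_segment() above, shown as a round
* trip for the two-bit DPL field; the other flags are single bits at
* their SVM_SELECTOR_*_SHIFT positions. Helper names are made up for
* this example.
*/
static u16 __maybe_unused example_attrib_set_dpl(u16 attrib, unsigned int dpl)
{
attrib &= ~(3 << SVM_SELECTOR_DPL_SHIFT);
attrib |= (dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
return attrib;
}

static unsigned int __maybe_unused example_attrib_get_dpl(u16 attrib)
{
return (attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
}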
1909
1910static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
1911{
1912 struct vcpu_svm *svm = to_svm(vcpu);
1913
1914 clr_exception_intercept(svm, BP_VECTOR);
1915
1916 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1917 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1918 set_exception_intercept(svm, BP_VECTOR);
1919 }
1920}
1921
1922static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1923{
1924 if (sd->next_asid > sd->max_asid) {
1925 ++sd->asid_generation;
1926 sd->next_asid = sd->min_asid;
1927 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1928 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
1929 }
1930
1931 svm->current_vmcb->asid_generation = sd->asid_generation;
1932 svm->asid = sd->next_asid++;
1933}
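
/*
* Illustrative sketch, not used by the driver: the per-CPU ASID
* allocation policy of new_asid() above. ASIDs are handed out
* sequentially; once the pool is exhausted, the generation is bumped
* and a full TLB flush (TLB_CONTROL_FLUSH_ALL_ASID) is requested so
* recycled ASIDs cannot hit stale translations. Struct and helper
* names are made up for this example.
*/
struct example_asid_pool {
u32 min_asid, max_asid, next_asid;
u32 generation;
};

static u32 __maybe_unused example_alloc_asid(struct example_asid_pool *p,
bool *need_full_flush)
{
*need_full_flush = false;

if (p->next_asid > p->max_asid) {
p->generation++;
p->next_asid = p->min_asid;
*need_full_flush = true;
}

return p->next_asid++;
}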
1934
1935static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
1936{
1937 struct vmcb *vmcb = svm->vmcb;
1938
1939 if (svm->vcpu.arch.guest_state_protected)
1940 return;
1941
1942 if (unlikely(value != vmcb->save.dr6)) {
1943 vmcb->save.dr6 = value;
1944 vmcb_mark_dirty(vmcb, VMCB_DR);
1945 }
1946}
1947
1948static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1949{
1950 struct vcpu_svm *svm = to_svm(vcpu);
1951
1952 if (vcpu->arch.guest_state_protected)
1953 return;
1954
1955 get_debugreg(vcpu->arch.db[0], 0);
1956 get_debugreg(vcpu->arch.db[1], 1);
1957 get_debugreg(vcpu->arch.db[2], 2);
1958 get_debugreg(vcpu->arch.db[3], 3);
1959 /*
1960 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
1961 * because db_interception might need it. We can do it before vmentry.
1962 */
1963 vcpu->arch.dr6 = svm->vmcb->save.dr6;
1964 vcpu->arch.dr7 = svm->vmcb->save.dr7;
1965 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1966 set_dr_intercepts(svm);
1967}
1968
1969static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
1970{
1971 struct vcpu_svm *svm = to_svm(vcpu);
1972
1973 if (vcpu->arch.guest_state_protected)
1974 return;
1975
1976 svm->vmcb->save.dr7 = value;
1977 vmcb_mark_dirty(svm->vmcb, VMCB_DR);
1978}
1979
1980static int pf_interception(struct kvm_vcpu *vcpu)
1981{
1982 struct vcpu_svm *svm = to_svm(vcpu);
1983
1984 u64 fault_address = svm->vmcb->control.exit_info_2;
1985 u64 error_code = svm->vmcb->control.exit_info_1;
1986
1987 return kvm_handle_page_fault(vcpu, error_code, fault_address,
1988 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1989 svm->vmcb->control.insn_bytes : NULL,
1990 svm->vmcb->control.insn_len);
1991}
1992
1993static int npf_interception(struct kvm_vcpu *vcpu)
1994{
1995 struct vcpu_svm *svm = to_svm(vcpu);
1996
1997 u64 fault_address = svm->vmcb->control.exit_info_2;
1998 u64 error_code = svm->vmcb->control.exit_info_1;
1999
2000 trace_kvm_page_fault(vcpu, fault_address, error_code);
2001 return kvm_mmu_page_fault(vcpu, fault_address, error_code,
2002 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2003 svm->vmcb->control.insn_bytes : NULL,
2004 svm->vmcb->control.insn_len);
2005}
2006
2007static int db_interception(struct kvm_vcpu *vcpu)
2008{
2009 struct kvm_run *kvm_run = vcpu->run;
2010 struct vcpu_svm *svm = to_svm(vcpu);
2011
2012 if (!(vcpu->guest_debug &
2013 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
2014 !svm->nmi_singlestep) {
2015 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
2016 kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
2017 return 1;
2018 }
2019
2020 if (svm->nmi_singlestep) {
2021 disable_nmi_singlestep(svm);
2022 /* Make sure we check for pending NMIs upon entry */
2023 kvm_make_request(KVM_REQ_EVENT, vcpu);
2024 }
2025
2026 if (vcpu->guest_debug &
2027 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
2028 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2029 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
2030 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
2031 kvm_run->debug.arch.pc =
2032 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2033 kvm_run->debug.arch.exception = DB_VECTOR;
2034 return 0;
2035 }
2036
2037 return 1;
2038}
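
/*
* Illustrative note: DR6_ACTIVE_LOW is the set of DR6 bits that read as
* 1 when inactive, so the "dr6 ^ DR6_ACTIVE_LOW" conversion above turns
* the architectural DR6 value from the VMCB into the inverted payload
* format that kvm_queue_exception_p() expects.
*/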
2039
2040static int bp_interception(struct kvm_vcpu *vcpu)
2041{
2042 struct vcpu_svm *svm = to_svm(vcpu);
2043 struct kvm_run *kvm_run = vcpu->run;
2044
2045 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2046 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2047 kvm_run->debug.arch.exception = BP_VECTOR;
2048 return 0;
2049}
2050
2051static int ud_interception(struct kvm_vcpu *vcpu)
2052{
2053 return handle_ud(vcpu);
2054}
2055
2056static int ac_interception(struct kvm_vcpu *vcpu)
2057{
2058 kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
2059 return 1;
2060}
2061
2062static bool is_erratum_383(void)
2063{
2064 int err, i;
2065 u64 value;
2066
2067 if (!erratum_383_found)
2068 return false;
2069
2070 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2071 if (err)
2072 return false;
2073
2074 /* Bit 62 may or may not be set for this mce */
2075 value &= ~(1ULL << 62);
2076
2077 if (value != 0xb600000000010015ULL)
2078 return false;
2079
2080 /* Clear MCi_STATUS registers */
2081 for (i = 0; i < 6; ++i)
2082 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2083
2084 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2085 if (!err) {
2086 u32 low, high;
2087
2088 value &= ~(1ULL << 2);
2089 low = lower_32_bits(value);
2090 high = upper_32_bits(value);
2091
2092 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2093 }
2094
2095 /* Flush tlb to evict multi-match entries */
2096 __flush_tlb_all();
2097
2098 return true;
2099}
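
/*
* Illustrative sketch, not used by the driver: the signature test of
* is_erratum_383() above as a pure predicate. Bit 62 of MC0_STATUS
* (the overflow bit) may or may not be set when the erratum fires, so
* it is masked off before comparing against the known signature. The
* helper name is made up for this example.
*/
static bool __maybe_unused example_mc0_matches_erratum_383(u64 mc0_status)
{
return (mc0_status & ~(1ULL << 62)) == 0xb600000000010015ULL;
}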
2100
2101static void svm_handle_mce(struct kvm_vcpu *vcpu)
2102{
2103 if (is_erratum_383()) {
2104 /*
2105 * Erratum 383 triggered. Guest state is corrupt so kill the
2106 * guest.
2107 */
2108 pr_err("Guest triggered AMD Erratum 383\n");
2109
2110 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2111
2112 return;
2113 }
2114
2115 /*
2116 * On an #MC intercept the MCE handler is not called automatically in
2117 * the host. So do it by hand here.
2118 */
2119 kvm_machine_check();
2120}
2121
2122static int mc_interception(struct kvm_vcpu *vcpu)
2123{
2124 return 1;
2125}
2126
2127static int shutdown_interception(struct kvm_vcpu *vcpu)
2128{
2129 struct kvm_run *kvm_run = vcpu->run;
2130 struct vcpu_svm *svm = to_svm(vcpu);
2131
2132 /*
2133 * The VM save area has already been encrypted so it
2134 * cannot be reinitialized - just terminate.
2135 */
2136 if (sev_es_guest(vcpu->kvm))
2137 return -EINVAL;
2138
2139 /*
2140 * VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put
2141 * the VMCB in a known good state. Unfortunately, KVM doesn't have
2142 * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
2143 * userspace. From a platform perspective, INIT is acceptable behavior as
2144 * there exist bare metal platforms that automatically INIT the CPU
2145 * in response to shutdown.
2146 */
2147 clear_page(svm->vmcb);
2148 kvm_vcpu_reset(vcpu, true);
2149
2150 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2151 return 0;
2152}
2153
2154static int io_interception(struct kvm_vcpu *vcpu)
2155{
2156 struct vcpu_svm *svm = to_svm(vcpu);
2157 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2158 int size, in, string;
2159 unsigned port;
2160
2161 ++vcpu->stat.io_exits;
2162 string = (io_info & SVM_IOIO_STR_MASK) != 0;
2163 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2164 port = io_info >> 16;
2165 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2166
2167 if (string) {
2168 if (sev_es_guest(vcpu->kvm))
2169 return sev_es_string_io(svm, size, port, in);
2170 else
2171 return kvm_emulate_instruction(vcpu, 0);
2172 }
2173
2174 svm->next_rip = svm->vmcb->control.exit_info_2;
2175
2176 return kvm_fast_pio(vcpu, size, port, in);
2177}
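
/*
* Illustrative sketch, not used by the driver: decoding EXITINFO1 for
* an IOIO intercept as io_interception() above does it. The port
* number lives in bits 31:16; direction, string and operand size come
* from the SVM_IOIO_* fields. Struct and helper names are made up for
* this example.
*/
struct example_ioio {
unsigned int port;
int size;
bool in, string;
};

static struct example_ioio __maybe_unused example_decode_ioio(u32 io_info)
{
struct example_ioio io = {
.port = io_info >> 16,
.size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT,
.in = (io_info & SVM_IOIO_TYPE_MASK) != 0,
.string = (io_info & SVM_IOIO_STR_MASK) != 0,
};

return io;
}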
2178
2179static int nmi_interception(struct kvm_vcpu *vcpu)
2180{
2181 return 1;
2182}
2183
2184static int smi_interception(struct kvm_vcpu *vcpu)
2185{
2186 return 1;
2187}
2188
2189static int intr_interception(struct kvm_vcpu *vcpu)
2190{
2191 ++vcpu->stat.irq_exits;
2192 return 1;
2193}
2194
2195static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
2196{
2197 struct vcpu_svm *svm = to_svm(vcpu);
2198 struct vmcb *vmcb12;
2199 struct kvm_host_map map;
2200 int ret;
2201
2202 if (nested_svm_check_permissions(vcpu))
2203 return 1;
2204
2205 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
2206 if (ret) {
2207 if (ret == -EINVAL)
2208 kvm_inject_gp(vcpu, 0);
2209 return 1;
2210 }
2211
2212 vmcb12 = map.hva;
2213
2214 ret = kvm_skip_emulated_instruction(vcpu);
2215
2216 if (vmload) {
2217 svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
2218 svm->sysenter_eip_hi = 0;
2219 svm->sysenter_esp_hi = 0;
2220 } else {
2221 svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
2222 }
2223
2224 kvm_vcpu_unmap(vcpu, &map, true);
2225
2226 return ret;
2227}
2228
2229static int vmload_interception(struct kvm_vcpu *vcpu)
2230{
2231 return vmload_vmsave_interception(vcpu, true);
2232}
2233
2234static int vmsave_interception(struct kvm_vcpu *vcpu)
2235{
2236 return vmload_vmsave_interception(vcpu, false);
2237}
2238
2239static int vmrun_interception(struct kvm_vcpu *vcpu)
2240{
2241 if (nested_svm_check_permissions(vcpu))
2242 return 1;
2243
2244 return nested_svm_vmrun(vcpu);
2245}
2246
2247enum {
2248 NONE_SVM_INSTR,
2249 SVM_INSTR_VMRUN,
2250 SVM_INSTR_VMLOAD,
2251 SVM_INSTR_VMSAVE,
2252};
2253
2254 /* Return NONE_SVM_INSTR if it's not an SVM instruction, otherwise the decoded opcode */
2255static int svm_instr_opcode(struct kvm_vcpu *vcpu)
2256{
2257 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
2258
2259 if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
2260 return NONE_SVM_INSTR;
2261
2262 switch (ctxt->modrm) {
2263 case 0xd8: /* VMRUN */
2264 return SVM_INSTR_VMRUN;
2265 case 0xda: /* VMLOAD */
2266 return SVM_INSTR_VMLOAD;
2267 case 0xdb: /* VMSAVE */
2268 return SVM_INSTR_VMSAVE;
2269 default:
2270 break;
2271 }
2272
2273 return NONE_SVM_INSTR;
2274}
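
/*
* Illustrative note: per the AMD APM, the three instructions matched
* above share the two-byte 0F 01 opcode and differ only in the ModRM
* byte:
*
* VMRUN 0F 01 D8
* VMLOAD 0F 01 DA
* VMSAVE 0F 01 DB
*/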
2275
2276static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
2277{
2278 const int guest_mode_exit_codes[] = {
2279 [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
2280 [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
2281 [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
2282 };
2283 int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
2284 [SVM_INSTR_VMRUN] = vmrun_interception,
2285 [SVM_INSTR_VMLOAD] = vmload_interception,
2286 [SVM_INSTR_VMSAVE] = vmsave_interception,
2287 };
2288 struct vcpu_svm *svm = to_svm(vcpu);
2289 int ret;
2290
2291 if (is_guest_mode(vcpu)) {
2292 /* Returns '1' or -errno on failure, '0' on success. */
2293 ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
2294 if (ret)
2295 return ret;
2296 return 1;
2297 }
2298 return svm_instr_handlers[opcode](vcpu);
2299}
2300
2301/*
2302 * #GP handling code. Note that #GP can be triggered under the following two
2303 * cases:
2304 * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
2305 * some AMD CPUs when the EAX operand of these instructions is in a
2306 * reserved memory region (e.g. SMM memory on the host).
2307 * 2) VMware backdoor
2308 */
2309static int gp_interception(struct kvm_vcpu *vcpu)
2310{
2311 struct vcpu_svm *svm = to_svm(vcpu);
2312 u32 error_code = svm->vmcb->control.exit_info_1;
2313 int opcode;
2314
2315 /* Both #GP cases have zero error_code */
2316 if (error_code)
2317 goto reinject;
2318
2319 /* Decode the instruction for usage later */
2320 if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
2321 goto reinject;
2322
2323 opcode = svm_instr_opcode(vcpu);
2324
2325 if (opcode == NONE_SVM_INSTR) {
2326 if (!enable_vmware_backdoor)
2327 goto reinject;
2328
2329 /*
2330 * VMware backdoor emulation on #GP interception only handles
2331 * IN{S}, OUT{S}, and RDPMC.
2332 */
2333 if (!is_guest_mode(vcpu))
2334 return kvm_emulate_instruction(vcpu,
2335 EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
2336 } else {
2337 /* All SVM instructions expect page aligned RAX */
2338 if (svm->vmcb->save.rax & ~PAGE_MASK)
2339 goto reinject;
2340
2341 return emulate_svm_instr(vcpu, opcode);
2342 }
2343
2344reinject:
2345 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2346 return 1;
2347}
2348
2349void svm_set_gif(struct vcpu_svm *svm, bool value)
2350{
2351 if (value) {
2352 /*
2353 * If VGIF is enabled, the STGI intercept is only added to
2354 * detect the opening of the SMI/NMI window; remove it now.
2355 * Likewise, clear the VINTR intercept; we will set it
2356 * again while processing KVM_REQ_EVENT if needed.
2357 */
2358 if (vgif)
2359 svm_clr_intercept(svm, INTERCEPT_STGI);
2360 if (svm_is_intercept(svm, INTERCEPT_VINTR))
2361 svm_clear_vintr(svm);
2362
2363 enable_gif(svm);
2364 if (svm->vcpu.arch.smi_pending ||
2365 svm->vcpu.arch.nmi_pending ||
2366 kvm_cpu_has_injectable_intr(&svm->vcpu) ||
2367 kvm_apic_has_pending_init_or_sipi(&svm->vcpu))
2368 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2369 } else {
2370 disable_gif(svm);
2371
2372 /*
2373 * After a CLGI no interrupts should come. But if vGIF is
2374 * in use, we still rely on the VINTR intercept (rather than
2375 * STGI) to detect an open interrupt window.
2376 */
2377 if (!vgif)
2378 svm_clear_vintr(svm);
2379 }
2380}
2381
2382static int stgi_interception(struct kvm_vcpu *vcpu)
2383{
2384 int ret;
2385
2386 if (nested_svm_check_permissions(vcpu))
2387 return 1;
2388
2389 ret = kvm_skip_emulated_instruction(vcpu);
2390 svm_set_gif(to_svm(vcpu), true);
2391 return ret;
2392}
2393
2394static int clgi_interception(struct kvm_vcpu *vcpu)
2395{
2396 int ret;
2397
2398 if (nested_svm_check_permissions(vcpu))
2399 return 1;
2400
2401 ret = kvm_skip_emulated_instruction(vcpu);
2402 svm_set_gif(to_svm(vcpu), false);
2403 return ret;
2404}
2405
2406static int invlpga_interception(struct kvm_vcpu *vcpu)
2407{
2408 gva_t gva = kvm_rax_read(vcpu);
2409 u32 asid = kvm_rcx_read(vcpu);
2410
2411 /* FIXME: Handle an address size prefix. */
2412 if (!is_long_mode(vcpu))
2413 gva = (u32)gva;
2414
2415 trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);
2416
2417 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2418 kvm_mmu_invlpg(vcpu, gva);
2419
2420 return kvm_skip_emulated_instruction(vcpu);
2421}
2422
2423static int skinit_interception(struct kvm_vcpu *vcpu)
2424{
2425 trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));
2426
2427 kvm_queue_exception(vcpu, UD_VECTOR);
2428 return 1;
2429}
2430
2431static int task_switch_interception(struct kvm_vcpu *vcpu)
2432{
2433 struct vcpu_svm *svm = to_svm(vcpu);
2434 u16 tss_selector;
2435 int reason;
2436 int int_type = svm->vmcb->control.exit_int_info &
2437 SVM_EXITINTINFO_TYPE_MASK;
2438 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2439 uint32_t type =
2440 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2441 uint32_t idt_v =
2442 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2443 bool has_error_code = false;
2444 u32 error_code = 0;
2445
2446 tss_selector = (u16)svm->vmcb->control.exit_info_1;
2447
2448 if (svm->vmcb->control.exit_info_2 &
2449 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2450 reason = TASK_SWITCH_IRET;
2451 else if (svm->vmcb->control.exit_info_2 &
2452 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2453 reason = TASK_SWITCH_JMP;
2454 else if (idt_v)
2455 reason = TASK_SWITCH_GATE;
2456 else
2457 reason = TASK_SWITCH_CALL;
2458
2459 if (reason == TASK_SWITCH_GATE) {
2460 switch (type) {
2461 case SVM_EXITINTINFO_TYPE_NMI:
2462 vcpu->arch.nmi_injected = false;
2463 break;
2464 case SVM_EXITINTINFO_TYPE_EXEPT:
2465 if (svm->vmcb->control.exit_info_2 &
2466 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2467 has_error_code = true;
2468 error_code =
2469 (u32)svm->vmcb->control.exit_info_2;
2470 }
2471 kvm_clear_exception_queue(vcpu);
2472 break;
2473 case SVM_EXITINTINFO_TYPE_INTR:
2474 case SVM_EXITINTINFO_TYPE_SOFT:
2475 kvm_clear_interrupt_queue(vcpu);
2476 break;
2477 default:
2478 break;
2479 }
2480 }
2481
2482 if (reason != TASK_SWITCH_GATE ||
2483 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2484 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2485 (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
2486 if (!svm_skip_emulated_instruction(vcpu))
2487 return 0;
2488 }
2489
2490 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2491 int_vec = -1;
2492
2493 return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
2494 has_error_code, error_code);
2495}
2496
2497static void svm_clr_iret_intercept(struct vcpu_svm *svm)
2498{
2499 if (!sev_es_guest(svm->vcpu.kvm))
2500 svm_clr_intercept(svm, INTERCEPT_IRET);
2501}
2502
2503static void svm_set_iret_intercept(struct vcpu_svm *svm)
2504{
2505 if (!sev_es_guest(svm->vcpu.kvm))
2506 svm_set_intercept(svm, INTERCEPT_IRET);
2507}
2508
2509static int iret_interception(struct kvm_vcpu *vcpu)
2510{
2511 struct vcpu_svm *svm = to_svm(vcpu);
2512
2513 ++vcpu->stat.nmi_window_exits;
2514 svm->awaiting_iret_completion = true;
2515
2516 svm_clr_iret_intercept(svm);
2517 if (!sev_es_guest(vcpu->kvm))
2518 svm->nmi_iret_rip = kvm_rip_read(vcpu);
2519
2520 kvm_make_request(KVM_REQ_EVENT, vcpu);
2521 return 1;
2522}
2523
2524static int invlpg_interception(struct kvm_vcpu *vcpu)
2525{
2526 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2527 return kvm_emulate_instruction(vcpu, 0);
2528
2529 kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
2530 return kvm_skip_emulated_instruction(vcpu);
2531}
2532
2533static int emulate_on_interception(struct kvm_vcpu *vcpu)
2534{
2535 return kvm_emulate_instruction(vcpu, 0);
2536}
2537
2538static int rsm_interception(struct kvm_vcpu *vcpu)
2539{
2540 return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
2541}
2542
2543static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
2544 unsigned long val)
2545{
2546 struct vcpu_svm *svm = to_svm(vcpu);
2547 unsigned long cr0 = vcpu->arch.cr0;
2548 bool ret = false;
2549
2550 if (!is_guest_mode(vcpu) ||
2551 (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
2552 return false;
2553
2554 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2555 val &= ~SVM_CR0_SELECTIVE_MASK;
2556
2557 if (cr0 ^ val) {
2558 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2559 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2560 }
2561
2562 return ret;
2563}
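
/*
* Illustrative sketch, not used by the driver: the comparison made in
* check_selective_cr0_intercepted() above. SVM_CR0_SELECTIVE_MASK
* covers the CR0 bits the selective intercept deliberately ignores
* (per the APM, CR0.TS and CR0.MP), so after masking them out, any
* remaining difference is what makes the write "selective" and worth a
* nested #VMEXIT. The helper name is made up for this example.
*/
static bool __maybe_unused example_selective_cr0_changed(unsigned long old_cr0,
unsigned long new_cr0)
{
return ((old_cr0 ^ new_cr0) & ~SVM_CR0_SELECTIVE_MASK) != 0;
}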
2564
2565#define CR_VALID (1ULL << 63)
2566
2567static int cr_interception(struct kvm_vcpu *vcpu)
2568{
2569 struct vcpu_svm *svm = to_svm(vcpu);
2570 int reg, cr;
2571 unsigned long val;
2572 int err;
2573
2574 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2575 return emulate_on_interception(vcpu);
2576
2577 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2578 return emulate_on_interception(vcpu);
2579
2580 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2581 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2582 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2583 else
2584 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2585
2586 err = 0;
2587 if (cr >= 16) { /* mov to cr */
2588 cr -= 16;
2589 val = kvm_register_read(vcpu, reg);
2590 trace_kvm_cr_write(cr, val);
2591 switch (cr) {
2592 case 0:
2593 if (!check_selective_cr0_intercepted(vcpu, val))
2594 err = kvm_set_cr0(vcpu, val);
2595 else
2596 return 1;
2597
2598 break;
2599 case 3:
2600 err = kvm_set_cr3(vcpu, val);
2601 break;
2602 case 4:
2603 err = kvm_set_cr4(vcpu, val);
2604 break;
2605 case 8:
2606 err = kvm_set_cr8(vcpu, val);
2607 break;
2608 default:
2609 WARN(1, "unhandled write to CR%d", cr);
2610 kvm_queue_exception(vcpu, UD_VECTOR);
2611 return 1;
2612 }
2613 } else { /* mov from cr */
2614 switch (cr) {
2615 case 0:
2616 val = kvm_read_cr0(vcpu);
2617 break;
2618 case 2:
2619 val = vcpu->arch.cr2;
2620 break;
2621 case 3:
2622 val = kvm_read_cr3(vcpu);
2623 break;
2624 case 4:
2625 val = kvm_read_cr4(vcpu);
2626 break;
2627 case 8:
2628 val = kvm_get_cr8(vcpu);
2629 break;
2630 default:
2631 WARN(1, "unhandled read from CR%d", cr);
2632 kvm_queue_exception(vcpu, UD_VECTOR);
2633 return 1;
2634 }
2635 kvm_register_write(vcpu, reg, val);
2636 trace_kvm_cr_read(cr, val);
2637 }
2638 return kvm_complete_insn_gp(vcpu, err);
2639}
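
/*
* Illustrative note: the SVM exit codes for CR accesses are laid out
* contiguously, SVM_EXIT_READ_CR0..CR15 followed by
* SVM_EXIT_WRITE_CR0..CR15, so "exit_code - SVM_EXIT_READ_CR0" in
* cr_interception() yields 0-15 for reads and 16-31 for writes, which
* is what the "cr >= 16" test keys on.
*/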
2640
2641static int cr_trap(struct kvm_vcpu *vcpu)
2642{
2643 struct vcpu_svm *svm = to_svm(vcpu);
2644 unsigned long old_value, new_value;
2645 unsigned int cr;
2646 int ret = 0;
2647
2648 new_value = (unsigned long)svm->vmcb->control.exit_info_1;
2649
2650 cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
2651 switch (cr) {
2652 case 0:
2653 old_value = kvm_read_cr0(vcpu);
2654 svm_set_cr0(vcpu, new_value);
2655
2656 kvm_post_set_cr0(vcpu, old_value, new_value);
2657 break;
2658 case 4:
2659 old_value = kvm_read_cr4(vcpu);
2660 svm_set_cr4(vcpu, new_value);
2661
2662 kvm_post_set_cr4(vcpu, old_value, new_value);
2663 break;
2664 case 8:
2665 ret = kvm_set_cr8(vcpu, new_value);
2666 break;
2667 default:
2668 WARN(1, "unhandled CR%d write trap", cr);
2669 kvm_queue_exception(vcpu, UD_VECTOR);
2670 return 1;
2671 }
2672
2673 return kvm_complete_insn_gp(vcpu, ret);
2674}
2675
2676static int dr_interception(struct kvm_vcpu *vcpu)
2677{
2678 struct vcpu_svm *svm = to_svm(vcpu);
2679 int reg, dr;
2680 unsigned long val;
2681 int err = 0;
2682
2683 if (vcpu->guest_debug == 0) {
2684 /*
2685 * No more DR vmexits; force a reload of the debug registers
2686 * and reenter on this instruction. The next vmexit will
2687 * retrieve the full state of the debug registers.
2688 */
2689 clr_dr_intercepts(svm);
2690 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2691 return 1;
2692 }
2693
2694 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2695 return emulate_on_interception(vcpu);
2696
2697 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2698 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2699 if (dr >= 16) { /* mov to DRn */
2700 dr -= 16;
2701 val = kvm_register_read(vcpu, reg);
2702 err = kvm_set_dr(vcpu, dr, val);
2703 } else {
2704 kvm_get_dr(vcpu, dr, &val);
2705 kvm_register_write(vcpu, reg, val);
2706 }
2707
2708 return kvm_complete_insn_gp(vcpu, err);
2709}
2710
2711static int cr8_write_interception(struct kvm_vcpu *vcpu)
2712{
2713 int r;
2714
2715 u8 cr8_prev = kvm_get_cr8(vcpu);
2716 /* instruction emulation calls kvm_set_cr8() */
2717 r = cr_interception(vcpu);
2718 if (lapic_in_kernel(vcpu))
2719 return r;
2720 if (cr8_prev <= kvm_get_cr8(vcpu))
2721 return r;
2722 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
2723 return 0;
2724}
2725
2726static int efer_trap(struct kvm_vcpu *vcpu)
2727{
2728 struct msr_data msr_info;
2729 int ret;
2730
2731 /*
2732 * Clear the EFER_SVME bit from EFER. The SVM code always sets this
2733 * bit in svm_set_efer(), but __kvm_valid_efer() checks it against
2734 * whether the guest has X86_FEATURE_SVM - this avoids a failure if
2735 * the guest doesn't have X86_FEATURE_SVM.
2736 */
2737 msr_info.host_initiated = false;
2738 msr_info.index = MSR_EFER;
2739 msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
2740 ret = kvm_set_msr_common(vcpu, &msr_info);
2741
2742 return kvm_complete_insn_gp(vcpu, ret);
2743}
2744
2745static int svm_get_msr_feature(struct kvm_msr_entry *msr)
2746{
2747 msr->data = 0;
2748
2749 switch (msr->index) {
2750 case MSR_AMD64_DE_CFG:
2751 if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
2752 msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
2753 break;
2754 default:
2755 return KVM_MSR_RET_INVALID;
2756 }
2757
2758 return 0;
2759}
2760
2761static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2762{
2763 struct vcpu_svm *svm = to_svm(vcpu);
2764
2765 switch (msr_info->index) {
2766 case MSR_AMD64_TSC_RATIO:
2767 if (!msr_info->host_initiated && !svm->tsc_scaling_enabled)
2768 return 1;
2769 msr_info->data = svm->tsc_ratio_msr;
2770 break;
2771 case MSR_STAR:
2772 msr_info->data = svm->vmcb01.ptr->save.star;
2773 break;
2774#ifdef CONFIG_X86_64
2775 case MSR_LSTAR:
2776 msr_info->data = svm->vmcb01.ptr->save.lstar;
2777 break;
2778 case MSR_CSTAR:
2779 msr_info->data = svm->vmcb01.ptr->save.cstar;
2780 break;
2781 case MSR_KERNEL_GS_BASE:
2782 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
2783 break;
2784 case MSR_SYSCALL_MASK:
2785 msr_info->data = svm->vmcb01.ptr->save.sfmask;
2786 break;
2787#endif
2788 case MSR_IA32_SYSENTER_CS:
2789 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
2790 break;
2791 case MSR_IA32_SYSENTER_EIP:
2792 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
2793 if (guest_cpuid_is_intel(vcpu))
2794 msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
2795 break;
2796 case MSR_IA32_SYSENTER_ESP:
2797 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
2798 if (guest_cpuid_is_intel(vcpu))
2799 msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
2800 break;
2801 case MSR_TSC_AUX:
2802 msr_info->data = svm->tsc_aux;
2803 break;
2804 case MSR_IA32_DEBUGCTLMSR:
2805 case MSR_IA32_LASTBRANCHFROMIP:
2806 case MSR_IA32_LASTBRANCHTOIP:
2807 case MSR_IA32_LASTINTFROMIP:
2808 case MSR_IA32_LASTINTTOIP:
2809 msr_info->data = svm_get_lbr_msr(svm, msr_info->index);
2810 break;
2811 case MSR_VM_HSAVE_PA:
2812 msr_info->data = svm->nested.hsave_msr;
2813 break;
2814 case MSR_VM_CR:
2815 msr_info->data = svm->nested.vm_cr_msr;
2816 break;
2817 case MSR_IA32_SPEC_CTRL:
2818 if (!msr_info->host_initiated &&
2819 !guest_has_spec_ctrl_msr(vcpu))
2820 return 1;
2821
2822 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2823 msr_info->data = svm->vmcb->save.spec_ctrl;
2824 else
2825 msr_info->data = svm->spec_ctrl;
2826 break;
2827 case MSR_AMD64_VIRT_SPEC_CTRL:
2828 if (!msr_info->host_initiated &&
2829 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2830 return 1;
2831
2832 msr_info->data = svm->virt_spec_ctrl;
2833 break;
2834 case MSR_F15H_IC_CFG: {
2835
2836 int family, model;
2837
2838 family = guest_cpuid_family(vcpu);
2839 model = guest_cpuid_model(vcpu);
2840
2841 if (family < 0 || model < 0)
2842 return kvm_get_msr_common(vcpu, msr_info);
2843
2844 msr_info->data = 0;
2845
2846 if (family == 0x15 &&
2847 (model >= 0x2 && model < 0x20))
2848 msr_info->data = 0x1E;
2849 }
2850 break;
2851 case MSR_AMD64_DE_CFG:
2852 msr_info->data = svm->msr_decfg;
2853 break;
2854 default:
2855 return kvm_get_msr_common(vcpu, msr_info);
2856 }
2857 return 0;
2858}
2859
2860static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
2861{
2862 struct vcpu_svm *svm = to_svm(vcpu);
2863 if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
2864 return kvm_complete_insn_gp(vcpu, err);
2865
2866 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
2867 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
2868 X86_TRAP_GP |
2869 SVM_EVTINJ_TYPE_EXEPT |
2870 SVM_EVTINJ_VALID);
2871 return 1;
2872}
2873
2874static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2875{
2876 struct vcpu_svm *svm = to_svm(vcpu);
2877 int svm_dis, chg_mask;
2878
2879 if (data & ~SVM_VM_CR_VALID_MASK)
2880 return 1;
2881
2882 chg_mask = SVM_VM_CR_VALID_MASK;
2883
2884 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2885 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2886
2887 svm->nested.vm_cr_msr &= ~chg_mask;
2888 svm->nested.vm_cr_msr |= (data & chg_mask);
2889
2890 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2891
2892 /* check for svm_disable while efer.svme is set */
2893 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2894 return 1;
2895
2896 return 0;
2897}
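
/*
* Illustrative sketch, not used by the driver: the VM_CR lock
* semantics enforced by svm_set_vm_cr() above. Once SVMDIS is latched
* in the guest's VM_CR, both it and the LOCK bit become read-only,
* which this made-up helper expresses as a shrinking writable mask.
*/
static int __maybe_unused example_vm_cr_writable_mask(u64 cur_vm_cr)
{
int mask = SVM_VM_CR_VALID_MASK;

if (cur_vm_cr & SVM_VM_CR_SVM_DIS_MASK)
mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

return mask;
}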
2898
2899static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2900{
2901 struct vcpu_svm *svm = to_svm(vcpu);
2902 int ret = 0;
2903
2904 u32 ecx = msr->index;
2905 u64 data = msr->data;
2906 switch (ecx) {
2907 case MSR_AMD64_TSC_RATIO:
2908
2909 if (!svm->tsc_scaling_enabled) {
2910
2911 if (!msr->host_initiated)
2912 return 1;
2913 /*
2914 * In case TSC scaling is not enabled, always
2915 * leave this MSR at the default value.
2916 *
2917 * Due to a bug in QEMU 6.2.0, it tries to set
2918 * this MSR to 0 if TSC scaling is not enabled.
2919 * Ignore that value as well.
2920 */
2921 if (data != 0 && data != svm->tsc_ratio_msr)
2922 return 1;
2923 break;
2924 }
2925
2926 if (data & SVM_TSC_RATIO_RSVD)
2927 return 1;
2928
2929 svm->tsc_ratio_msr = data;
2930
2931 if (svm->tsc_scaling_enabled && is_guest_mode(vcpu))
2932 nested_svm_update_tsc_ratio_msr(vcpu);
2933
2934 break;
2935 case MSR_IA32_CR_PAT:
2936 ret = kvm_set_msr_common(vcpu, msr);
2937 if (ret)
2938 break;
2939
2940 svm->vmcb01.ptr->save.g_pat = data;
2941 if (is_guest_mode(vcpu))
2942 nested_vmcb02_compute_g_pat(svm);
2943 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
2944 break;
2945 case MSR_IA32_SPEC_CTRL:
2946 if (!msr->host_initiated &&
2947 !guest_has_spec_ctrl_msr(vcpu))
2948 return 1;
2949
2950 if (kvm_spec_ctrl_test_value(data))
2951 return 1;
2952
2953 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2954 svm->vmcb->save.spec_ctrl = data;
2955 else
2956 svm->spec_ctrl = data;
2957 if (!data)
2958 break;
2959
2960 /*
2961 * For non-nested:
2962 * When it's written (to non-zero) for the first time, pass
2963 * it through.
2964 *
2965 * For nested:
2966 * The handling of the MSR bitmap for L2 guests is done in
2967 * nested_svm_vmrun_msrpm.
2968 * We update the L1 MSR bit as well since it will end up
2969 * touching the MSR anyway now.
2970 */
2971 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
2972 break;
2973 case MSR_AMD64_VIRT_SPEC_CTRL:
2974 if (!msr->host_initiated &&
2975 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2976 return 1;
2977
2978 if (data & ~SPEC_CTRL_SSBD)
2979 return 1;
2980
2981 svm->virt_spec_ctrl = data;
2982 break;
2983 case MSR_STAR:
2984 svm->vmcb01.ptr->save.star = data;
2985 break;
2986#ifdef CONFIG_X86_64
2987 case MSR_LSTAR:
2988 svm->vmcb01.ptr->save.lstar = data;
2989 break;
2990 case MSR_CSTAR:
2991 svm->vmcb01.ptr->save.cstar = data;
2992 break;
2993 case MSR_KERNEL_GS_BASE:
2994 svm->vmcb01.ptr->save.kernel_gs_base = data;
2995 break;
2996 case MSR_SYSCALL_MASK:
2997 svm->vmcb01.ptr->save.sfmask = data;
2998 break;
2999#endif
3000 case MSR_IA32_SYSENTER_CS:
3001 svm->vmcb01.ptr->save.sysenter_cs = data;
3002 break;
3003 case MSR_IA32_SYSENTER_EIP:
3004 svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
3005 /*
3006 * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} MSRs
3007 * when we spoof an Intel vendor ID (for cross vendor migration).
3008 * In this case we use this intercept to track the high
3009 * 32-bit part of these MSRs to support Intel's
3010 * implementation of SYSENTER/SYSEXIT.
3011 */
3012 svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
3013 break;
3014 case MSR_IA32_SYSENTER_ESP:
3015 svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
3016 svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
3017 break;
3018 case MSR_TSC_AUX:
3019 /*
3020 * TSC_AUX is usually changed only during boot and never read
3021 * directly. Intercept TSC_AUX instead of exposing it to the
3022 * guest via direct_access_msrs, and switch it via user return.
3023 */
3024 preempt_disable();
3025 ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
3026 preempt_enable();
3027 if (ret)
3028 break;
3029
3030 svm->tsc_aux = data;
3031 break;
3032 case MSR_IA32_DEBUGCTLMSR:
3033 if (!lbrv) {
3034 kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3035 break;
3036 }
3037 if (data & DEBUGCTL_RESERVED_BITS)
3038 return 1;
3039
3040 if (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)
3041 svm->vmcb->save.dbgctl = data;
3042 else
3043 svm->vmcb01.ptr->save.dbgctl = data;
3044
3045 svm_update_lbrv(vcpu);
3046
3047 break;
3048 case MSR_VM_HSAVE_PA:
3049 /*
3050 * Old kernels did not validate the value written to
3051 * MSR_VM_HSAVE_PA. Allow KVM_SET_MSR to set an invalid
3052 * value to allow live migrating buggy or malicious guests
3053 * originating from those kernels.
3054 */
3055 if (!msr->host_initiated && !page_address_valid(vcpu, data))
3056 return 1;
3057
3058 svm->nested.hsave_msr = data & PAGE_MASK;
3059 break;
3060 case MSR_VM_CR:
3061 return svm_set_vm_cr(vcpu, data);
3062 case MSR_VM_IGNNE:
3063 kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3064 break;
3065 case MSR_AMD64_DE_CFG: {
3066 struct kvm_msr_entry msr_entry;
3067
3068 msr_entry.index = msr->index;
3069 if (svm_get_msr_feature(&msr_entry))
3070 return 1;
3071
3072 /* Check the supported bits */
3073 if (data & ~msr_entry.data)
3074 return 1;
3075
3076 /* Don't allow the guest to change a bit, #GP */
3077 if (!msr->host_initiated && (data ^ msr_entry.data))
3078 return 1;
3079
3080 svm->msr_decfg = data;
3081 break;
3082 }
3083 default:
3084 return kvm_set_msr_common(vcpu, msr);
3085 }
3086 return ret;
3087}
3088
3089static int msr_interception(struct kvm_vcpu *vcpu)
3090{
3091 if (to_svm(vcpu)->vmcb->control.exit_info_1)
3092 return kvm_emulate_wrmsr(vcpu);
3093 else
3094 return kvm_emulate_rdmsr(vcpu);
3095}
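
/*
* Illustrative note: for an MSR intercept, EXITINFO1 is 0 for RDMSR and
* 1 for WRMSR, which is all the dispatch above needs to tell the two
* apart.
*/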
3096
3097static int interrupt_window_interception(struct kvm_vcpu *vcpu)
3098{
3099 kvm_make_request(KVM_REQ_EVENT, vcpu);
3100 svm_clear_vintr(to_svm(vcpu));
3101
3102 /*
3103 * If not running nested, for AVIC, the only reason to end up here is
3104 * ExtINTs. In this case AVIC was temporarily disabled in order to
3105 * request the IRQ window, and we have to re-enable it.
3106 *
3107 * If running nested, still remove the VM wide AVIC inhibit to
3108 * support the case in which the interrupt window was requested when
3109 * the vCPU was not running nested.
3110 *
3111 * All vCPUs which are still running nested will keep their AVIC
3112 * inhibited due to the per-vCPU AVIC inhibition.
3113 */
3114 kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
3115
3116 ++vcpu->stat.irq_window_exits;
3117 return 1;
3118}
3119
3120static int pause_interception(struct kvm_vcpu *vcpu)
3121{
3122 bool in_kernel;
3123 /*
3124 * CPL is not made available for an SEV-ES guest, therefore
3125 * vcpu->arch.preempted_in_kernel can never be true. Just
3126 * set in_kernel to false as well.
3127 */
3128 in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;
3129
3130 grow_ple_window(vcpu);
3131
3132 kvm_vcpu_on_spin(vcpu, in_kernel);
3133 return kvm_skip_emulated_instruction(vcpu);
3134}
3135
3136static int invpcid_interception(struct kvm_vcpu *vcpu)
3137{
3138 struct vcpu_svm *svm = to_svm(vcpu);
3139 unsigned long type;
3140 gva_t gva;
3141
3142 if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
3143 kvm_queue_exception(vcpu, UD_VECTOR);
3144 return 1;
3145 }
3146
3147 /*
3148 * For an INVPCID intercept:
3149 * EXITINFO1 provides the linear address of the memory operand.
3150 * EXITINFO2 provides the contents of the register operand.
3151 */
3152 type = svm->vmcb->control.exit_info_2;
3153 gva = svm->vmcb->control.exit_info_1;
3154
3155 return kvm_handle_invpcid(vcpu, type, gva);
3156}
3157
3158static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
3159 [SVM_EXIT_READ_CR0] = cr_interception,
3160 [SVM_EXIT_READ_CR3] = cr_interception,
3161 [SVM_EXIT_READ_CR4] = cr_interception,
3162 [SVM_EXIT_READ_CR8] = cr_interception,
3163 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
3164 [SVM_EXIT_WRITE_CR0] = cr_interception,
3165 [SVM_EXIT_WRITE_CR3] = cr_interception,
3166 [SVM_EXIT_WRITE_CR4] = cr_interception,
3167 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
3168 [SVM_EXIT_READ_DR0] = dr_interception,
3169 [SVM_EXIT_READ_DR1] = dr_interception,
3170 [SVM_EXIT_READ_DR2] = dr_interception,
3171 [SVM_EXIT_READ_DR3] = dr_interception,
3172 [SVM_EXIT_READ_DR4] = dr_interception,
3173 [SVM_EXIT_READ_DR5] = dr_interception,
3174 [SVM_EXIT_READ_DR6] = dr_interception,
3175 [SVM_EXIT_READ_DR7] = dr_interception,
3176 [SVM_EXIT_WRITE_DR0] = dr_interception,
3177 [SVM_EXIT_WRITE_DR1] = dr_interception,
3178 [SVM_EXIT_WRITE_DR2] = dr_interception,
3179 [SVM_EXIT_WRITE_DR3] = dr_interception,
3180 [SVM_EXIT_WRITE_DR4] = dr_interception,
3181 [SVM_EXIT_WRITE_DR5] = dr_interception,
3182 [SVM_EXIT_WRITE_DR6] = dr_interception,
3183 [SVM_EXIT_WRITE_DR7] = dr_interception,
3184 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
3185 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
3186 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
3187 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
3188 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
3189 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
3190 [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception,
3191 [SVM_EXIT_INTR] = intr_interception,
3192 [SVM_EXIT_NMI] = nmi_interception,
3193 [SVM_EXIT_SMI] = smi_interception,
3194 [SVM_EXIT_VINTR] = interrupt_window_interception,
3195 [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc,
3196 [SVM_EXIT_CPUID] = kvm_emulate_cpuid,
3197 [SVM_EXIT_IRET] = iret_interception,
3198 [SVM_EXIT_INVD] = kvm_emulate_invd,
3199 [SVM_EXIT_PAUSE] = pause_interception,
3200 [SVM_EXIT_HLT] = kvm_emulate_halt,
3201 [SVM_EXIT_INVLPG] = invlpg_interception,
3202 [SVM_EXIT_INVLPGA] = invlpga_interception,
3203 [SVM_EXIT_IOIO] = io_interception,
3204 [SVM_EXIT_MSR] = msr_interception,
3205 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
3206 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
3207 [SVM_EXIT_VMRUN] = vmrun_interception,
3208 [SVM_EXIT_VMMCALL] = kvm_emulate_hypercall,
3209 [SVM_EXIT_VMLOAD] = vmload_interception,
3210 [SVM_EXIT_VMSAVE] = vmsave_interception,
3211 [SVM_EXIT_STGI] = stgi_interception,
3212 [SVM_EXIT_CLGI] = clgi_interception,
3213 [SVM_EXIT_SKINIT] = skinit_interception,
3214 [SVM_EXIT_RDTSCP] = kvm_handle_invalid_op,
3215 [SVM_EXIT_WBINVD] = kvm_emulate_wbinvd,
3216 [SVM_EXIT_MONITOR] = kvm_emulate_monitor,
3217 [SVM_EXIT_MWAIT] = kvm_emulate_mwait,
3218 [SVM_EXIT_XSETBV] = kvm_emulate_xsetbv,
3219 [SVM_EXIT_RDPRU] = kvm_handle_invalid_op,
3220 [SVM_EXIT_EFER_WRITE_TRAP] = efer_trap,
3221 [SVM_EXIT_CR0_WRITE_TRAP] = cr_trap,
3222 [SVM_EXIT_CR4_WRITE_TRAP] = cr_trap,
3223 [SVM_EXIT_CR8_WRITE_TRAP] = cr_trap,
3224 [SVM_EXIT_INVPCID] = invpcid_interception,
3225 [SVM_EXIT_NPF] = npf_interception,
3226 [SVM_EXIT_RSM] = rsm_interception,
3227 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
3228 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
3229 [SVM_EXIT_VMGEXIT] = sev_handle_vmgexit,
3230};
3231
3232static void dump_vmcb(struct kvm_vcpu *vcpu)
3233{
3234 struct vcpu_svm *svm = to_svm(vcpu);
3235 struct vmcb_control_area *control = &svm->vmcb->control;
3236 struct vmcb_save_area *save = &svm->vmcb->save;
3237 struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;
3238
3239 if (!dump_invalid_vmcb) {
3240 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
3241 return;
3242 }
3243
3244 pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
3245 svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
3246 pr_err("VMCB Control Area:\n");
3247 pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
3248 pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
3249 pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
3250 pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
3251 pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
3252 pr_err("%-20s%08x %08x\n", "intercepts:",
3253 control->intercepts[INTERCEPT_WORD3],
3254 control->intercepts[INTERCEPT_WORD4]);
3255 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
3256 pr_err("%-20s%d\n", "pause filter threshold:",
3257 control->pause_filter_thresh);
3258 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3259 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3260 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3261 pr_err("%-20s%d\n", "asid:", control->asid);
3262 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3263 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3264 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3265 pr_err("%-20s%08x\n", "int_state:", control->int_state);
3266 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
3267 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3268 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3269 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3270 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3271 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
3272 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
3273 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
3274 pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
3275 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3276 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
3277 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
3278 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3279 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
3280 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
3281 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
3282 pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
3283 pr_err("VMCB State Save Area:\n");
3284 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3285 "es:",
3286 save->es.selector, save->es.attrib,
3287 save->es.limit, save->es.base);
3288 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3289 "cs:",
3290 save->cs.selector, save->cs.attrib,
3291 save->cs.limit, save->cs.base);
3292 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3293 "ss:",
3294 save->ss.selector, save->ss.attrib,
3295 save->ss.limit, save->ss.base);
3296 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3297 "ds:",
3298 save->ds.selector, save->ds.attrib,
3299 save->ds.limit, save->ds.base);
3300 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3301 "fs:",
3302 save01->fs.selector, save01->fs.attrib,
3303 save01->fs.limit, save01->fs.base);
3304 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3305 "gs:",
3306 save01->gs.selector, save01->gs.attrib,
3307 save01->gs.limit, save01->gs.base);
3308 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3309 "gdtr:",
3310 save->gdtr.selector, save->gdtr.attrib,
3311 save->gdtr.limit, save->gdtr.base);
3312 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3313 "ldtr:",
3314 save01->ldtr.selector, save01->ldtr.attrib,
3315 save01->ldtr.limit, save01->ldtr.base);
3316 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3317 "idtr:",
3318 save->idtr.selector, save->idtr.attrib,
3319 save->idtr.limit, save->idtr.base);
3320 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3321 "tr:",
3322 save01->tr.selector, save01->tr.attrib,
3323 save01->tr.limit, save01->tr.base);
3324 pr_err("vmpl: %d cpl: %d efer: %016llx\n",
3325 save->vmpl, save->cpl, save->efer);
3326 pr_err("%-15s %016llx %-13s %016llx\n",
3327 "cr0:", save->cr0, "cr2:", save->cr2);
3328 pr_err("%-15s %016llx %-13s %016llx\n",
3329 "cr3:", save->cr3, "cr4:", save->cr4);
3330 pr_err("%-15s %016llx %-13s %016llx\n",
3331 "dr6:", save->dr6, "dr7:", save->dr7);
3332 pr_err("%-15s %016llx %-13s %016llx\n",
3333 "rip:", save->rip, "rflags:", save->rflags);
3334 pr_err("%-15s %016llx %-13s %016llx\n",
3335 "rsp:", save->rsp, "rax:", save->rax);
3336 pr_err("%-15s %016llx %-13s %016llx\n",
3337 "star:", save01->star, "lstar:", save01->lstar);
3338 pr_err("%-15s %016llx %-13s %016llx\n",
3339 "cstar:", save01->cstar, "sfmask:", save01->sfmask);
3340 pr_err("%-15s %016llx %-13s %016llx\n",
3341 "kernel_gs_base:", save01->kernel_gs_base,
3342 "sysenter_cs:", save01->sysenter_cs);
3343 pr_err("%-15s %016llx %-13s %016llx\n",
3344 "sysenter_esp:", save01->sysenter_esp,
3345 "sysenter_eip:", save01->sysenter_eip);
3346 pr_err("%-15s %016llx %-13s %016llx\n",
3347 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3348 pr_err("%-15s %016llx %-13s %016llx\n",
3349 "br_from:", save->br_from, "br_to:", save->br_to);
3350 pr_err("%-15s %016llx %-13s %016llx\n",
3351 "excp_from:", save->last_excp_from,
3352 "excp_to:", save->last_excp_to);
3353}
3354
3355static bool svm_check_exit_valid(u64 exit_code)
3356{
3357 return (exit_code < ARRAY_SIZE(svm_exit_handlers) &&
3358 svm_exit_handlers[exit_code]);
3359}
3360
3361static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
3362{
3363 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);
3364 dump_vmcb(vcpu);
3365 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3366 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
3367 vcpu->run->internal.ndata = 2;
3368 vcpu->run->internal.data[0] = exit_code;
3369 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
3370 return 0;
3371}
3372
3373int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
3374{
3375 if (!svm_check_exit_valid(exit_code))
3376 return svm_handle_invalid_exit(vcpu, exit_code);
3377
3378#ifdef CONFIG_RETPOLINE
3379 if (exit_code == SVM_EXIT_MSR)
3380 return msr_interception(vcpu);
3381 else if (exit_code == SVM_EXIT_VINTR)
3382 return interrupt_window_interception(vcpu);
3383 else if (exit_code == SVM_EXIT_INTR)
3384 return intr_interception(vcpu);
3385 else if (exit_code == SVM_EXIT_HLT)
3386 return kvm_emulate_halt(vcpu);
3387 else if (exit_code == SVM_EXIT_NPF)
3388 return npf_interception(vcpu);
3389#endif
3390 return svm_exit_handlers[exit_code](vcpu);
3391}
3392
3393static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
3394 u64 *info1, u64 *info2,
3395 u32 *intr_info, u32 *error_code)
3396{
3397 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3398
3399 *reason = control->exit_code;
3400 *info1 = control->exit_info_1;
3401 *info2 = control->exit_info_2;
3402 *intr_info = control->exit_int_info;
3403 if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3404 (*intr_info & SVM_EXITINTINFO_VALID_ERR))
3405 *error_code = control->exit_int_info_err;
3406 else
3407 *error_code = 0;
3408}
3409
3410static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
3411{
3412 struct vcpu_svm *svm = to_svm(vcpu);
3413 struct kvm_run *kvm_run = vcpu->run;
3414 u32 exit_code = svm->vmcb->control.exit_code;
3415
3416 /* SEV-ES guests must use the CR write traps to track CR registers. */
3417 if (!sev_es_guest(vcpu->kvm)) {
3418 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
3419 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3420 if (npt_enabled)
3421 vcpu->arch.cr3 = svm->vmcb->save.cr3;
3422 }
3423
3424 if (is_guest_mode(vcpu)) {
3425 int vmexit;
3426
3427 trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM);
3428
3429 vmexit = nested_svm_exit_special(svm);
3430
3431 if (vmexit == NESTED_EXIT_CONTINUE)
3432 vmexit = nested_svm_exit_handled(svm);
3433
3434 if (vmexit == NESTED_EXIT_DONE)
3435 return 1;
3436 }
3437
3438 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3439 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3440 kvm_run->fail_entry.hardware_entry_failure_reason
3441 = svm->vmcb->control.exit_code;
3442 kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
3443 dump_vmcb(vcpu);
3444 return 0;
3445 }
3446
3447 if (exit_fastpath != EXIT_FASTPATH_NONE)
3448 return 1;
3449
3450 return svm_invoke_exit_handler(vcpu, exit_code);
3451}
3452
3453static void pre_svm_run(struct kvm_vcpu *vcpu)
3454{
3455 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
3456 struct vcpu_svm *svm = to_svm(vcpu);
3457
3458 /*
3459 * If the previous vmrun of the vmcb occurred on a different physical
3460 * cpu, then mark the vmcb dirty and assign a new asid. Hardware's
3461 * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
3462 */
3463 if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
3464 svm->current_vmcb->asid_generation = 0;
3465 vmcb_mark_all_dirty(svm->vmcb);
3466 svm->current_vmcb->cpu = vcpu->cpu;
3467 }
3468
3469 if (sev_guest(vcpu->kvm))
3470 return pre_sev_run(svm, vcpu->cpu);
3471
3472 /* FIXME: handle wraparound of asid_generation */
3473 if (svm->current_vmcb->asid_generation != sd->asid_generation)
3474 new_asid(svm, sd);
3475}
3476
3477static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3478{
3479 struct vcpu_svm *svm = to_svm(vcpu);
3480
3481 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3482
3483 if (svm->nmi_l1_to_l2)
3484 return;
3485
3486 svm->nmi_masked = true;
3487 svm_set_iret_intercept(svm);
3488 ++vcpu->stat.nmi_injections;
3489}
3490
3491static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
3492{
3493 struct vcpu_svm *svm = to_svm(vcpu);
3494
3495 if (!is_vnmi_enabled(svm))
3496 return false;
3497
3498 return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
3499}
3500
3501static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
3502{
3503 struct vcpu_svm *svm = to_svm(vcpu);
3504
3505 if (!is_vnmi_enabled(svm))
3506 return false;
3507
3508 if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)
3509 return false;
3510
3511 svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK;
3512 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
3513
3514 /*
3515 * Because the pending NMI is serviced by hardware, KVM can't know when
3516 * the NMI is "injected", but for all intents and purposes, passing the
3517 * NMI off to hardware counts as injection.
3518 */
3519 ++vcpu->stat.nmi_injections;
3520
3521 return true;
3522}
3523
3524static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
3525{
3526 struct vcpu_svm *svm = to_svm(vcpu);
3527 u32 type;
3528
3529 if (vcpu->arch.interrupt.soft) {
3530 if (svm_update_soft_interrupt_rip(vcpu))
3531 return;
3532
3533 type = SVM_EVTINJ_TYPE_SOFT;
3534 } else {
3535 type = SVM_EVTINJ_TYPE_INTR;
3536 }
3537
3538 trace_kvm_inj_virq(vcpu->arch.interrupt.nr,
3539 vcpu->arch.interrupt.soft, reinjected);
3540 ++vcpu->stat.irq_injections;
3541
3542 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3543 SVM_EVTINJ_VALID | type;
3544}
3545
3546void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
3547 int trig_mode, int vector)
3548{
3549 /*
3550 * apic->apicv_active must be read after vcpu->mode.
3551 * Pairs with smp_store_release in vcpu_enter_guest.
3552 */
3553 bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
3554
3555 /* Note, this is called iff the local APIC is in-kernel. */
3556 if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
3557 /* Process the interrupt via kvm_check_and_inject_events(). */
3558 kvm_make_request(KVM_REQ_EVENT, vcpu);
3559 kvm_vcpu_kick(vcpu);
3560 return;
3561 }
3562
3563 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
3564 if (in_guest_mode) {
3565 /*
3566 * Signal the doorbell to tell hardware to inject the IRQ. If
3567 * the vCPU exits the guest before the doorbell chimes, hardware
3568 * will automatically process AVIC interrupts at the next VMRUN.
3569 */
3570 avic_ring_doorbell(vcpu);
3571 } else {
3572 /*
3573 * Wake the vCPU if it was blocking. KVM will then detect the
3574 * pending IRQ when checking if the vCPU has a wake event.
3575 */
3576 kvm_vcpu_wake_up(vcpu);
3577 }
3578}
3579
3580static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
3581 int trig_mode, int vector)
3582{
3583 kvm_lapic_set_irr(vector, apic);
3584
3585 /*
3586 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
3587 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
3588 * the read of guest_mode. This guarantees that either VMRUN will see
3589 * and process the new vIRR entry, or that svm_complete_interrupt_delivery
3590 * will signal the doorbell if the CPU has already entered the guest.
3591 */
3592 smp_mb__after_atomic();
3593 svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
3594}
3595
3596static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3597{
3598 struct vcpu_svm *svm = to_svm(vcpu);
3599
3600 /*
3601 * SEV-ES guests must always keep the CR intercepts cleared. CR
3602 * tracking is done using the CR write traps.
3603 */
3604 if (sev_es_guest(vcpu->kvm))
3605 return;
3606
3607 if (nested_svm_virtualize_tpr(vcpu))
3608 return;
3609
3610 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
3611
3612 if (irr == -1)
3613 return;
3614
3615 if (tpr >= irr)
3616 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
3617}
3618
3619static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3620{
3621 struct vcpu_svm *svm = to_svm(vcpu);
3622
3623 if (is_vnmi_enabled(svm))
3624 return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK;
3625 else
3626 return svm->nmi_masked;
3627}
3628
3629static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3630{
3631 struct vcpu_svm *svm = to_svm(vcpu);
3632
3633 if (is_vnmi_enabled(svm)) {
3634 if (masked)
3635 svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK;
3636 else
3637 svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
3638
3639 } else {
3640 svm->nmi_masked = masked;
3641 if (masked)
3642 svm_set_iret_intercept(svm);
3643 else
3644 svm_clr_iret_intercept(svm);
3645 }
3646}
3647
3648bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
3649{
3650 struct vcpu_svm *svm = to_svm(vcpu);
3651 struct vmcb *vmcb = svm->vmcb;
3652
3653 if (!gif_set(svm))
3654 return true;
3655
3656 if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3657 return false;
3658
3659 if (svm_get_nmi_mask(vcpu))
3660 return true;
3661
3662 return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK;
3663}
3664
3665static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3666{
3667 struct vcpu_svm *svm = to_svm(vcpu);
3668 if (svm->nested.nested_run_pending)
3669 return -EBUSY;
3670
3671 if (svm_nmi_blocked(vcpu))
3672 return 0;
3673
3674 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
3675 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3676 return -EBUSY;
3677 return 1;
3678}
3679
3680bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
3681{
3682 struct vcpu_svm *svm = to_svm(vcpu);
3683 struct vmcb *vmcb = svm->vmcb;
3684
3685 if (!gif_set(svm))
3686 return true;
3687
3688 if (is_guest_mode(vcpu)) {
3689 /* As long as interrupts are being delivered... */
3690 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
3691 ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
3692 : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
3693 return true;
3694
3695 /* ... vmexits aren't blocked by the interrupt shadow */
3696 if (nested_exit_on_intr(svm))
3697 return false;
3698 } else {
3699 if (!svm_get_if_flag(vcpu))
3700 return true;
3701 }
3702
3703 return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
3704}
3705
3706static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3707{
3708 struct vcpu_svm *svm = to_svm(vcpu);
3709
3710 if (svm->nested.nested_run_pending)
3711 return -EBUSY;
3712
3713 if (svm_interrupt_blocked(vcpu))
3714 return 0;
3715
3716 /*
3717 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
3718 * e.g. if the IRQ arrived asynchronously after checking nested events.
3719 */
3720 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
3721 return -EBUSY;
3722
3723 return 1;
3724}
3725
static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept.  The next time
	 * we get that intercept, this function will be called again and we'll
	 * then get the VINTR intercept.  However, if the vGIF feature is
	 * enabled, the STGI interception will not occur.  Enable the IRQ
	 * window under the assumption that the hardware will set the GIF.
	 */
	if (vgif || gif_set(svm)) {
		/*
		 * An IRQ window is not needed when AVIC is enabled, unless
		 * there is a pending ExtINT, which cannot be injected via
		 * AVIC.  In that case, KVM needs to temporarily disable AVIC
		 * and fall back to injecting the IRQ via V_IRQ.
		 *
		 * If running nested, AVIC is already locally inhibited on
		 * this vCPU, therefore there is no need to request the
		 * VM-wide AVIC inhibition.
		 */
		if (!is_guest_mode(vcpu))
			kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);

		svm_set_vintr(svm);
	}
}

static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * KVM should never request an NMI window when vNMI is enabled, as KVM
	 * allows at most one to-be-injected NMI and one pending NMI, i.e. if
	 * two NMIs arrive simultaneously, KVM will inject one and set
	 * V_NMI_PENDING for the other.  WARN, but continue with the standard
	 * single-step approach to try and salvage the pending NMI.
	 */
	WARN_ON_ONCE(is_vnmi_enabled(svm));

	if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
		return; /* IRET will cause a vm exit */

	if (!gif_set(svm)) {
		if (vgif)
			svm_set_intercept(svm, INTERCEPT_STGI);
		return; /* STGI will cause a vm exit */
	}

	/*
	 * Something prevents the NMI from being injected.  Single step over
	 * the possible problem (IRET or exception injection or interrupt
	 * shadow).
	 */
	svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
}

static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries.
	 * A TLB flush for the current ASID flushes both "host" and "guest" TLB
	 * entries, and thus is a superset of Hyper-V's fine grained flushing.
	 */
	kvm_hv_vcpu_purge_flush_tlb(vcpu);

	/*
	 * Flush only the current ASID even if the TLB flush was invoked via
	 * kvm_flush_remote_tlbs().  Although flushing remote TLBs requires all
	 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
	 * unconditionally does a TLB flush on both nested VM-Enter and nested
	 * VM-Exit (via kvm_mmu_reset_context()).
	 */
	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->current_vmcb->asid_generation--;
}

static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	hpa_t root_tdp = vcpu->arch.mmu->root.hpa;

	/*
	 * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
	 * flush the NPT mappings via hypercall as flushing the ASID only
	 * affects virtual to physical mappings, it does not invalidate guest
	 * physical to host physical mappings.
	 */
	if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
		hyperv_flush_guest_mapping(root_tdp);

	svm_flush_tlb_asid(vcpu);
}

static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
{
	/*
	 * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
	 * flushes should be routed to hv_flush_remote_tlbs() without requesting
	 * a "regular" remote flush.  Reaching this point means either there's
	 * a KVM bug or a prior hv_flush_remote_tlbs() call failed, both of
	 * which might be fatal to the guest.  Yell, but try to recover.
	 */
	if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
		hv_flush_remote_tlbs(vcpu->kvm);

	svm_flush_tlb_asid(vcpu);
}

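/* Flush TLB entries for a single guest virtual address in the current ASID. */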
static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	invlpga(gva, svm->vmcb->control.asid);
}

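/*
 * After VM-Exit, propagate the guest's V_TPR back into the emulated local
 * APIC's TPR, unless CR8 writes are intercepted (and thus already handled)
 * or the TPR is virtualized on behalf of L1.
 */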
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (nested_svm_virtualize_tpr(vcpu))
		return;

	if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;

		kvm_set_cr8(vcpu, cr8);
	}
}

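/*
 * Before VMRUN, mirror the local APIC's TPR into V_TPR; unnecessary when the
 * TPR is virtualized by L1 or handled in hardware by AVIC.
 */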
static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (nested_svm_virtualize_tpr(vcpu) ||
	    kvm_vcpu_apicv_active(vcpu))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
					int type)
{
	bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT);
	bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT);
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's
	 * associated with the original soft exception/interrupt.  next_rip is
	 * cleared on all exits that can occur while vectoring an event, so KVM
	 * needs to manually set next_rip for re-injection.  Unlike the !nrips
	 * case below, this needs to be done if and only if KVM is re-injecting
	 * the same event, i.e. if the event is a soft exception/interrupt,
	 * otherwise next_rip is unused on VMRUN.
	 */
	if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) &&
	    kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase))
		svm->vmcb->control.next_rip = svm->soft_int_next_rip;
	/*
	 * If NRIPS isn't enabled, KVM must manually advance RIP prior to
	 * injecting the soft exception/interrupt.  That advancement needs to
	 * be unwound if vectoring didn't complete.  Note, the new event may
	 * not be the injected event, e.g. if KVM injected an INTn, the INTn
	 * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will
	 * be the reported vectored event, but RIP still needs to be unwound.
	 */
	else if (!nrips && (is_soft || is_exception) &&
		 kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase))
		kvm_rip_write(vcpu, svm->soft_int_old_rip);
}

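/*
 * Process EXITINTINFO after a VM-Exit that interrupted event delivery, and
 * requeue the original event so it can be re-injected on the next VMRUN.
 */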
static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	bool nmi_l1_to_l2 = svm->nmi_l1_to_l2;
	bool soft_int_injected = svm->soft_int_injected;

	svm->nmi_l1_to_l2 = false;
	svm->soft_int_injected = false;

	/*
	 * If we've made progress since setting HF_IRET_MASK, we've
	 * executed an IRET and can allow NMI injection.
	 */
	if (svm->awaiting_iret_completion &&
	    (sev_es_guest(vcpu->kvm) ||
	     kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
		svm->awaiting_iret_completion = false;
		svm->nmi_masked = false;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	if (soft_int_injected)
		svm_complete_soft_interrupt(vcpu, vector, type);

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		vcpu->arch.nmi_injected = true;
		svm->nmi_l1_to_l2 = nmi_l1_to_l2;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/* Never re-inject a #VC exception. */
		if (vector == X86_TRAP_VC)
			break;

		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;

			kvm_requeue_exception_e(vcpu, vector, err);
		} else {
			kvm_requeue_exception(vcpu, vector);
		}
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(vcpu, vector, false);
		break;
	case SVM_EXITINTINFO_TYPE_SOFT:
		kvm_queue_interrupt(vcpu, vector, true);
		break;
	default:
		break;
	}
}

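/*
 * Cancel a previously requested injection by treating EVENTINJ as if the
 * event had been reported in EXITINTINFO, then requeueing it.
 */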
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	control->exit_int_info = control->event_inj;
	control->exit_int_info_err = control->event_inj_err;
	control->event_inj = 0;
	svm_complete_interrupts(vcpu);
}

static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	return 1;
}

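/*
 * Fastpath exit handling: WRMSR exits (exit_info_1 == 1) may be handled with
 * IRQs still disabled, without a full round trip through the exit handlers.
 */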
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
	    to_svm(vcpu)->vmcb->control.exit_info_1)
		return handle_fastpath_set_msr_irqoff(vcpu);

	return EXIT_FASTPATH_NONE;
}

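/*
 * Enter the guest via VMRUN.  This is noinstr code; tracing and other
 * instrumentation is forbidden between guest_state_enter_irqoff() and
 * guest_state_exit_irqoff().
 */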
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	guest_state_enter_irqoff();

	amd_clear_divider();

	if (sev_es_guest(vcpu->kvm))
		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
	else
		__svm_vcpu_run(svm, spec_ctrl_intercepted);

	guest_state_exit_irqoff();
}

static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);

	trace_kvm_entry(vcpu);

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	/*
	 * Disable singlestep if we're injecting an interrupt/exception.
	 * We don't want our modified rflags to be pushed on the stack where
	 * we might not be able to easily reset them if we disabled NMI
	 * singlestep later.
	 */
	if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
		/*
		 * Event injection happens before external interrupts cause a
		 * vmexit and interrupts are disabled here, so smp_send_reschedule
		 * is enough to force an immediate vmexit.
		 */
		disable_nmi_singlestep(svm);
		smp_send_reschedule(vcpu->cpu);
	}

	pre_svm_run(vcpu);

	sync_lapic_to_cr8(vcpu);

	if (unlikely(svm->asid != svm->vmcb->control.asid)) {
		svm->vmcb->control.asid = svm->asid;
		vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
	}
	svm->vmcb->save.cr2 = vcpu->arch.cr2;

	svm_hv_update_vp_id(svm->vmcb, vcpu);

	/*
	 * Run with all-zero DR6 unless needed, so that we can get the exact cause
	 * of a #DB.
	 */
	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
		svm_set_dr6(svm, vcpu->arch.dr6);
	else
		svm_set_dr6(svm, DR6_ACTIVE_LOW);

	clgi();
	kvm_load_guest_xsave_state(vcpu);

	kvm_wait_lapic_expire(vcpu);

	/*
	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
	 * it's non-zero.  Since vmentry is serialising on affected CPUs, there
	 * is no need to worry about the conditional branch over the wrmsr
	 * being speculatively taken.
	 */
	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
		x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);

	svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);

	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
		x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);

	if (!sev_es_guest(vcpu->kvm)) {
		vcpu->arch.cr2 = svm->vmcb->save.cr2;
		vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
		vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
		vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
	}
	vcpu->arch.regs_dirty = 0;

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);

	kvm_load_host_xsave_state(vcpu);
	stgi();

	/* Any pending NMI will happen here */

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_after_interrupt(vcpu);

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;
	if (is_guest_mode(vcpu)) {
		nested_sync_control_from_vmcb02(svm);

		/* Track VMRUNs that have made it past consistency checking. */
		if (svm->nested.nested_run_pending &&
		    svm->vmcb->control.exit_code != SVM_EXIT_ERR)
			++vcpu->stat.nested_run;

		svm->nested.nested_run_pending = 0;
	}

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	vmcb_mark_all_clean(svm->vmcb);

	/* If the exit was due to a #PF, check for an async page fault. */
	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
		vcpu->arch.apf.host_apf_flags =
			kvm_read_and_reset_apf_flags();

	vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;

	/*
	 * We need to handle MC intercepts here before the vcpu has a chance
	 * to change the physical cpu.
	 */
	if (unlikely(svm->vmcb->control.exit_code ==
		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
		svm_handle_mce(vcpu);

	trace_kvm_exit(vcpu, KVM_ISA_SVM);

	svm_complete_interrupts(vcpu);

	if (is_guest_mode(vcpu))
		return EXIT_FASTPATH_NONE;

	return svm_exit_handlers_fastpath(vcpu);
}

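/*
 * Install a new MMU root: the nested page table root (nCR3) when NPT is
 * enabled, otherwise the shadow-paging root goes in the guest's CR3.
 */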
static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			     int root_level)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long cr3;

	if (npt_enabled) {
		svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);

		hv_track_root_tdp(vcpu, root_hpa);

		cr3 = vcpu->arch.cr3;
	} else if (root_level >= PT64_ROOT_4LEVEL) {
		cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
	} else {
		/* PCID in the guest should be impossible with a 32-bit MMU. */
		WARN_ON_ONCE(kvm_get_active_pcid(vcpu));
		cr3 = root_hpa;
	}

	svm->vmcb->save.cr3 = cr3;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
}

static void svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/* Patch in the VMMCALL instruction (0f 01 d9). */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

/*
 * The kvm parameter can be NULL (module initialization, or invocation before
 * VM creation). Be sure to check the kvm parameter before using it.
 */
static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
{
	switch (index) {
	case MSR_IA32_MCG_EXT_CTL:
	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
		return false;
	case MSR_IA32_SMBASE:
		if (!IS_ENABLED(CONFIG_KVM_SMM))
			return false;
		/* SEV-ES guests do not support SMM, so report false */
		if (kvm && sev_es_guest(kvm))
			return false;
		break;
	default:
		break;
	}

	return true;
}

static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_cpuid_entry2 *best;

	vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
				    boot_cpu_has(X86_FEATURE_XSAVE) &&
				    boot_cpu_has(X86_FEATURE_XSAVES);

	/* Update nrips enabled cache */
	svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
			     guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);

	svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);
	svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);

	svm->v_vmload_vmsave_enabled = vls && guest_cpuid_has(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);

	svm->pause_filter_enabled = kvm_cpu_cap_has(X86_FEATURE_PAUSEFILTER) &&
				    guest_cpuid_has(vcpu, X86_FEATURE_PAUSEFILTER);

	svm->pause_threshold_enabled = kvm_cpu_cap_has(X86_FEATURE_PFTHRESHOLD) &&
				       guest_cpuid_has(vcpu, X86_FEATURE_PFTHRESHOLD);

	svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF);

	svm->vnmi_enabled = vnmi && guest_cpuid_has(vcpu, X86_FEATURE_VNMI);

	svm_recalc_instruction_intercepts(vcpu, svm);

	if (boot_cpu_has(X86_FEATURE_IBPB))
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0,
				     !!guest_has_pred_cmd_msr(vcpu));

	if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
				     !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));

	/* For SEV guests, the memory encryption bit is not reserved in CR3. */
	if (sev_guest(vcpu->kvm)) {
		best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
		if (best)
			vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
	}

	init_vmcb_after_set_cpuid(vcpu);
}

static bool svm_has_wbinvd_exit(void)
{
	return true;
}

#define PRE_EX(exit)  { .exit_code = (exit), \
			.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_MEMACCESS, }

static const struct __x86_intercept {
	u32 exit_code;
	enum x86_intercept_stage stage;
} x86_intercept_map[] = {
	[x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
	[x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
	[x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
	[x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
	[x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
	[x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
	[x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
	[x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
	[x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
	[x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
	[x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
	[x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
	[x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
	[x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
	[x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
	[x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
	[x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
	[x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
	[x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
	[x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
	[x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
	[x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
	[x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
	[x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
	[x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
	[x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
	[x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
	[x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
	[x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
	[x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
	[x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
	[x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
	[x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
	[x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
	[x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
	[x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV),
};

#undef PRE_EX
#undef POST_EX
#undef POST_MEM

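/*
 * Emulator hook: given an instruction being emulated on behalf of L2, decide
 * whether L1 intercepts it and, if so, synthesize the corresponding VM-Exit.
 */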
static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage,
			       struct x86_exception *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (stage != icpt_info.stage)
		goto out;

	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
		    info->intercept == x86_intercept_clts)
			break;

		if (!(vmcb12_is_intercept(&svm->nested.ctl,
					  INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:
		icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_MSR:
		if (info->intercept == x86_intercept_wrmsr)
			vmcb->control.exit_info_1 = 1;
		else
			vmcb->control.exit_info_1 = 0;
		break;
	case SVM_EXIT_PAUSE:
		/*
		 * We get this intercept for NOP, but PAUSE is REP NOP, so
		 * check for the REP prefix here.
		 */
		if (info->rep_prefix != REPE_PREFIX)
			goto out;
		break;
	case SVM_EXIT_IOIO: {
		u64 exit_info;
		u32 bytes;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info = ((info->src_val & 0xffff) << 16) |
				SVM_IOIO_TYPE_MASK;
			bytes = info->dst_bytes;
		} else {
			exit_info = (info->dst_val & 0xffff) << 16;
			bytes = info->src_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
	default:
		break;
	}

	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
	if (static_cpu_has(X86_FEATURE_NRIPS))
		vmcb->control.next_rip = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}

static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
		vcpu->arch.at_instruction_boundary = true;
}

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	if (!kvm_pause_in_guest(vcpu->kvm))
		shrink_ple_window(vcpu);
}

static void svm_setup_mce(struct kvm_vcpu *vcpu)
{
	/* [63:9] are reserved. */
	vcpu->arch.mcg_cap &= 0x1ff;
}

#ifdef CONFIG_KVM_SMM
bool svm_smi_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Per APM Vol.2 15.22.2 "Response to SMI" */
	if (!gif_set(svm))
		return true;

	return is_smm(vcpu);
}

static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->nested.nested_run_pending)
		return -EBUSY;

	if (svm_smi_blocked(vcpu))
		return 0;

	/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
		return -EBUSY;

	return 1;
}

static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_host_map map_save;
	int ret;

	if (!is_guest_mode(vcpu))
		return 0;

	/*
	 * The 32-bit SMRAM format doesn't preserve EFER and SVM state.
	 * Userspace is responsible for ensuring nested SVM and SMIs are
	 * mutually exclusive.
	 */
	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return 1;

	smram->smram64.svm_guest_flag = 1;
	smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
	if (ret)
		return ret;

	/*
	 * KVM uses VMCB01 to store L1 host state while L2 runs, but
	 * VMCB01 is going to be used during SMM and thus the state will
	 * be lost.  Temporarily save non-VMLOAD/VMSAVE state to the host
	 * save area pointed to by MSR_VM_HSAVE_PA.  The APM guarantees
	 * that the format of the area is identical to the guest save area,
	 * offset by 0x400 (which matches the offset of 'struct
	 * vmcb_save_area' within 'struct vmcb').  Note: the HSAVE area may
	 * also be used by the L1 hypervisor to save additional host context
	 * (e.g. KVM does that, see svm_prepare_switch_to_guest()), which
	 * must be preserved.
	 */
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
		return 1;

	BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);

	svm_copy_vmrun_state(map_save.hva + 0x400,
			     &svm->vmcb01.ptr->save);

	kvm_vcpu_unmap(vcpu, &map_save, true);
	return 0;
}

static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_host_map map, map_save;
	struct vmcb *vmcb12;
	int ret;

	const struct kvm_smram_state_64 *smram64 = &smram->smram64;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return 0;

	/* Non-zero if the SMI arrived while the vCPU was in guest mode. */
	if (!smram64->svm_guest_flag)
		return 0;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
		return 1;

	if (!(smram64->efer & EFER_SVME))
		return 1;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))
		return 1;

	ret = 1;
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
		goto unmap_map;

	if (svm_allocate_nested(svm))
		goto unmap_save;

	/*
	 * Restore L1 host state from the L1 HSAVE area, as VMCB01 was
	 * used during SMM (see svm_enter_smm()).
	 */
	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);

	/* Enter the nested guest now. */
	vmcb_mark_all_dirty(svm->vmcb01.ptr);

	vmcb12 = map.hva;
	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
	ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false);

	if (ret)
		goto unmap_save;

	svm->nested.nested_run_pending = 1;

unmap_save:
	kvm_vcpu_unmap(vcpu, &map_save, true);
unmap_map:
	kvm_vcpu_unmap(vcpu, &map, true);
	return ret;
}

static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!gif_set(svm)) {
		if (vgif)
			svm_set_intercept(svm, INTERCEPT_STGI);
		/* STGI will cause a vm exit */
	} else {
		/* We must be in SMM; RSM will cause a vmexit anyway. */
	}
}
#endif

static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
					void *insn, int insn_len)
{
	bool smep, smap, is_user;
	u64 error_code;

	/* Emulation is always possible when KVM has access to all guest state. */
	if (!sev_guest(vcpu->kvm))
		return true;

	/* #UD and #GP should never be intercepted for SEV guests. */
	WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
				  EMULTYPE_TRAP_UD_FORCED |
				  EMULTYPE_VMWARE_GP));

	/*
	 * Emulation is impossible for SEV-ES guests as KVM doesn't have access
	 * to guest register state.
	 */
	if (sev_es_guest(vcpu->kvm))
		return false;

	/*
	 * Emulation is possible if the instruction is already decoded, e.g.
	 * when completing I/O after returning from userspace.
	 */
	if (emul_type & EMULTYPE_NO_DECODE)
		return true;

	/*
	 * Emulation is possible for SEV guests if and only if a prefilled
	 * buffer containing the bytes of the intercepted instruction is
	 * available.  SEV guest memory is encrypted with a guest-specific key
	 * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
	 * decode garbage.
	 *
	 * Inject #UD if KVM reached this point without an instruction buffer.
	 * In practice, this path should never be hit by a well-behaved guest,
	 * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path
	 * is still theoretically reachable, e.g. via unaccelerated fault-like
	 * AVIC access, and needs to be handled by KVM to avoid putting the
	 * guest into an infinite loop.  Injecting #UD is somewhat arbitrary,
	 * but it's the least awful option given the lack of insight into the
	 * guest.
	 */
	if (unlikely(!insn)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return false;
	}

	/*
	 * Emulate for SEV guests if the insn buffer is not empty.  The buffer
	 * will be empty if the DecodeAssist microcode cannot fetch bytes for
	 * the faulting instruction because the code fetch itself faulted, e.g.
	 * the guest attempted to fetch from emulated MMIO or a guest page
	 * table used to translate CS:RIP resides in emulated MMIO.
	 */
	if (likely(insn_len))
		return true;

	/*
	 * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
	 *
	 * Errata:
	 * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1,
	 * it is possible that the CPU microcode implementing DecodeAssist will
	 * fail to read guest memory at CS:RIP and vmcb.GuestIntrBytes will
	 * incorrectly be '0'.  This happens because microcode reads CS:RIP
	 * using a _data_ load uop with CPL=0 privileges.  If the load hits an
	 * SMAP #PF, ucode gives up and does not fill the instruction bytes
	 * buffer.
	 *
	 * As above, KVM reaches this point iff the VM is an SEV guest, the CPU
	 * supports DecodeAssist, a #NPF was raised, KVM's page fault handler
	 * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the
	 * GuestIntrBytes field of the VMCB.
	 *
	 * This does _not_ mean that the erratum has been encountered, as the
	 * DecodeAssist will also fail if the load for CS:RIP hits a legitimate
	 * #PF, e.g. if the guest attempts to execute from emulated MMIO and
	 * encounters a reserved/not-present #PF.
	 *
	 * To hit the erratum, the following conditions must be true:
	 * 1. CR4.SMAP=1 (obviously).
	 * 2. CR4.SMEP=0 || CPL=3.  If SMEP=1 and CPL<3, the erratum cannot
	 *    have been hit as the guest would have encountered an SMEP
	 *    violation #PF, not a #NPF.
	 * 3. The #NPF is not due to a code fetch, in which case failure to
	 *    retrieve the instruction bytes is legitimate (see above).
	 *
	 * In addition, don't apply the erratum workaround if the #NPF occurred
	 * while translating guest page tables (see below).
	 */
	error_code = to_svm(vcpu)->vmcb->control.exit_info_1;
	if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
		goto resume_guest;

	smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
	smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
	is_user = svm_get_cpl(vcpu) == 3;
	if (smap && (!smep || is_user)) {
		pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");

		/*
		 * If the fault occurred in userspace, arbitrarily inject #GP
		 * to avoid killing the guest and to hopefully avoid confusing
		 * the guest kernel too much, e.g. injecting #PF would not be
		 * coherent with respect to the guest's page tables.  Request
		 * triple fault if the fault occurred in the kernel as there's
		 * no fault that KVM can inject without confusing the guest.
		 * In practice, the triple fault is moot as no sane SEV kernel
		 * will execute from user memory while also running with SMAP=1.
		 */
		if (is_user)
			kvm_inject_gp(vcpu, 0);
		else
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	}

resume_guest:
	/*
	 * If the erratum was not hit, simply resume the guest and let it fault
	 * again.  While awful, e.g. the vCPU may get stuck in an infinite loop
	 * if the fault is at CPL=0, it's the lesser of all evils.  Exiting to
	 * userspace will kill the guest, and letting the emulator read garbage
	 * will yield random behavior and potentially corrupt the guest.
	 *
	 * Simply resuming the guest is technically not a violation of the SEV
	 * architecture.  AMD's APM states that all code fetches and page table
	 * accesses for SEV guests are encrypted, regardless of the C-Bit.  The
	 * APM also states that encrypted accesses to MMIO are "ignored", but
	 * doesn't explicitly define "ignored", i.e. doing nothing and letting
	 * the guest spin is technically "ignoring" the access.
	 */
	return false;
}

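/* Per the APM, the INIT signal is held pending while GIF=0. */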
static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !gif_set(svm);
}

static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
	if (!sev_es_guest(vcpu->kvm))
		return kvm_vcpu_deliver_sipi_vector(vcpu, vector);

	sev_vcpu_deliver_sipi_vector(vcpu, vector);
}

static void svm_vm_destroy(struct kvm *kvm)
{
	avic_vm_destroy(kvm);
	sev_vm_destroy(kvm);
}

static int svm_vm_init(struct kvm *kvm)
{
	if (!pause_filter_count || !pause_filter_thresh)
		kvm->arch.pause_in_guest = true;

	if (enable_apicv) {
		int ret = avic_vm_init(kvm);

		if (ret)
			return ret;
	}

	return 0;
}

static struct kvm_x86_ops svm_x86_ops __initdata = {
	.name = KBUILD_MODNAME,

	.check_processor_compatibility = svm_check_processor_compat,

	.hardware_unsetup = svm_hardware_unsetup,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.has_emulated_msr = svm_has_emulated_msr,

	.vcpu_create = svm_vcpu_create,
	.vcpu_free = svm_vcpu_free,
	.vcpu_reset = svm_vcpu_reset,

	.vm_size = sizeof(struct kvm_svm),
	.vm_init = svm_vm_init,
	.vm_destroy = svm_vm_destroy,

	.prepare_switch_to_guest = svm_prepare_switch_to_guest,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = avic_vcpu_blocking,
	.vcpu_unblocking = avic_vcpu_unblocking,

	.update_exception_bitmap = svm_update_exception_bitmap,
	.get_msr_feature = svm_get_msr_feature,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
	.is_valid_cr0 = svm_is_valid_cr0,
	.set_cr0 = svm_set_cr0,
	.post_set_cr3 = sev_post_set_cr3,
	.is_valid_cr4 = svm_is_valid_cr4,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,
	.get_if_flag = svm_get_if_flag,

	.flush_tlb_all = svm_flush_tlb_all,
	.flush_tlb_current = svm_flush_tlb_current,
	.flush_tlb_gva = svm_flush_tlb_gva,
	.flush_tlb_guest = svm_flush_tlb_asid,

	.vcpu_pre_run = svm_vcpu_pre_run,
	.vcpu_run = svm_vcpu_run,
	.handle_exit = svm_handle_exit,
	.skip_emulated_instruction = svm_skip_emulated_instruction,
	.update_emulated_instruction = NULL,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.inject_irq = svm_inject_irq,
	.inject_nmi = svm_inject_nmi,
	.is_vnmi_pending = svm_is_vnmi_pending,
	.set_vnmi_pending = svm_set_vnmi_pending,
	.inject_exception = svm_inject_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = svm_enable_nmi_window,
	.enable_irq_window = svm_enable_irq_window,
	.update_cr8_intercept = svm_update_cr8_intercept,
	.set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
	.refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
	.apicv_post_state_restore = avic_apicv_post_state_restore,
	.required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,

	.get_exit_info = svm_get_exit_info,

	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
	.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
	.write_tsc_offset = svm_write_tsc_offset,
	.write_tsc_multiplier = svm_write_tsc_multiplier,

	.load_mmu_pgd = svm_load_mmu_pgd,

	.check_intercept = svm_check_intercept,
	.handle_exit_irqoff = svm_handle_exit_irqoff,

	.request_immediate_exit = __kvm_request_immediate_exit,

	.sched_in = svm_sched_in,

	.nested_ops = &svm_nested_ops,

	.deliver_interrupt = svm_deliver_interrupt,
	.pi_update_irte = avic_pi_update_irte,
	.setup_mce = svm_setup_mce,

#ifdef CONFIG_KVM_SMM
	.smi_allowed = svm_smi_allowed,
	.enter_smm = svm_enter_smm,
	.leave_smm = svm_leave_smm,
	.enable_smi_window = svm_enable_smi_window,
#endif

	.mem_enc_ioctl = sev_mem_enc_ioctl,
	.mem_enc_register_region = sev_mem_enc_register_region,
	.mem_enc_unregister_region = sev_mem_enc_unregister_region,
	.guest_memory_reclaimed = sev_guest_memory_reclaimed,

	.vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
	.vm_move_enc_context_from = sev_vm_move_enc_context_from,

	.can_emulate_instruction = svm_can_emulate_instruction,

	.apic_init_signal_blocked = svm_apic_init_signal_blocked,

	.msr_filter_changed = svm_msr_filter_changed,
	.complete_emulated_msr = svm_complete_emulated_msr,

	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
};

/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
static __init void svm_adjust_mmio_mask(void)
{
	unsigned int enc_bit, mask_bit;
	u64 msr, mask;

	/* If there is no memory encryption support, use existing mask */
	if (cpuid_eax(0x80000000) < 0x8000001f)
		return;

	/* If memory encryption is not enabled, use existing mask */
	rdmsrl(MSR_AMD64_SYSCFG, msr);
	if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
		return;

	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
	mask_bit = boot_cpu_data.x86_phys_bits;

	/* Increment the mask bit if it is the same as the encryption bit */
	if (enc_bit == mask_bit)
		mask_bit++;

	/*
	 * If the mask bit location is below 52, then some bits above the
	 * physical addressing limit will always be reserved, so use the
	 * rsvd_bits() function to generate the mask.  This mask, along with
	 * the present bit, will be used to generate a page fault with
	 * PFEC.RSVD = 1.
	 *
	 * If the mask bit location is 52 (or above), then clear the mask.
	 */
	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}

static __init void svm_set_cpu_caps(void)
{
	kvm_set_cpu_caps();

	kvm_caps.supported_perf_cap = 0;
	kvm_caps.supported_xss = 0;

	/* CPUID 0x80000001 and 0x8000000A (SVM features) */
	if (nested) {
		kvm_cpu_cap_set(X86_FEATURE_SVM);
		kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);

		if (nrips)
			kvm_cpu_cap_set(X86_FEATURE_NRIPS);

		if (npt_enabled)
			kvm_cpu_cap_set(X86_FEATURE_NPT);

		if (tsc_scaling)
			kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);

		if (vls)
			kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD);

		if (lbrv)
			kvm_cpu_cap_set(X86_FEATURE_LBRV);

		if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))
			kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER);

		if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
			kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);

		if (vgif)
			kvm_cpu_cap_set(X86_FEATURE_VGIF);

		if (vnmi)
			kvm_cpu_cap_set(X86_FEATURE_VNMI);

		/* Nested VMs can receive #VMEXIT instead of triggering #GP */
		kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
	}

	/* CPUID 0x80000008 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
	    boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

	if (enable_pmu) {
		/*
		 * Enumerate support for PERFCTR_CORE if and only if KVM has
		 * access to enough counters to virtualize "core" support,
		 * otherwise limit vPMU support to the legacy number of counters.
		 */
		if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
			kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
							  kvm_pmu_cap.num_counters_gp);
		else
			kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE);

		if (kvm_pmu_cap.version != 2 ||
		    !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
			kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2);
	}

	/* CPUID 0x8000001F (SME/SEV features) */
	sev_set_cpu_caps();
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;
	unsigned int order = get_order(IOPM_SIZE);

	/*
	 * NX is required for shadow paging and for NPT if the NX huge pages
	 * mitigation is enabled.
	 */
	if (!boot_cpu_has(X86_FEATURE_NX)) {
		pr_err_ratelimited("NX (Execute Disable) not supported\n");
		return -EOPNOTSUPP;
	}
	kvm_enable_efer_bits(EFER_NX);

	iopm_pages = alloc_pages(GFP_KERNEL, order);
	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
				     XFEATURE_MASK_BNDCSR);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (tsc_scaling) {
		if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
			tsc_scaling = false;
		} else {
			pr_info("TSC scaling supported\n");
			kvm_caps.has_tsc_control = true;
		}
	}
	kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
	kvm_caps.tsc_scaling_ratio_frac_bits = 32;

	tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);

	if (boot_cpu_has(X86_FEATURE_AUTOIBRS))
		kvm_enable_efer_bits(EFER_AUTOIBRS);

	/* Check for pause filtering support */
	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		pause_filter_count = 0;
		pause_filter_thresh = 0;
	} else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
		pause_filter_thresh = 0;
	}

	if (nested) {
		pr_info("Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	/*
	 * KVM's MMU doesn't support using 2-level paging for itself, and thus
	 * NPT isn't supported if the host is using 2-level paging since host
	 * CR4 is unchanged on VMRUN.
	 */
	if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
		npt_enabled = false;

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	/* Force VM NPT level equal to the host's paging level */
	kvm_configure_mmu(npt_enabled, get_npt_level(),
			  get_npt_level(), PG_LEVEL_1G);
	pr_info("Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

	/* Setup shadow_me_value and shadow_me_mask */
	kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);

	svm_adjust_mmio_mask();

	/*
	 * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
	 * may be modified by svm_adjust_mmio_mask()).
	 */
	sev_hardware_setup();

	svm_hv_hardware_setup();

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (nrips && !boot_cpu_has(X86_FEATURE_NRIPS))
		nrips = false;

	enable_apicv = avic = avic && avic_hardware_setup();

	if (!enable_apicv) {
		svm_x86_ops.vcpu_blocking = NULL;
		svm_x86_ops.vcpu_unblocking = NULL;
		svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
	} else if (!x2avic_enabled) {
		svm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization = true;
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
		svm_gp_erratum_intercept = false;

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI);
	if (vnmi)
		pr_info("Virtual NMI enabled\n");

	if (!vnmi) {
		svm_x86_ops.is_vnmi_pending = NULL;
		svm_x86_ops.set_vnmi_pending = NULL;
	}

	if (lbrv) {
		if (!boot_cpu_has(X86_FEATURE_LBRV))
			lbrv = false;
		else
			pr_info("LBR virtualization supported\n");
	}

	if (!enable_pmu)
		pr_info("PMU virtualization is disabled\n");

	svm_set_cpu_caps();

	/*
	 * It seems that on AMD processors the PTE's accessed bit is set by
	 * the CPU hardware before the NPF vmexit.  This is not expected
	 * behaviour and our tests fail because of it.  A workaround here is
	 * to disable support for GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT
	 * is enabled.  In this case userspace can query support via the
	 * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle it.
	 * If future AMD CPU models change the behaviour described above,
	 * this variable can be changed accordingly.
	 */
	allow_smaller_maxphyaddr = !npt_enabled;

	return 0;

err:
	svm_hardware_unsetup();
	return r;
}

static struct kvm_x86_init_ops svm_init_ops __initdata = {
	.hardware_setup = svm_hardware_setup,

	.runtime_ops = &svm_x86_ops,
	.pmu_ops = &amd_pmu_ops,
};

static int __init svm_init(void)
{
	int r;

	__unused_size_checks();

	if (!kvm_is_svm_supported())
		return -EOPNOTSUPP;

	r = kvm_x86_vendor_init(&svm_init_ops);
	if (r)
		return r;

	/*
	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
	 * exposed to userspace!
	 */
	r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm),
		     THIS_MODULE);
	if (r)
		goto err_kvm_init;

	return 0;

err_kvm_init:
	kvm_x86_vendor_exit();
	return r;
}

static void __exit svm_exit(void)
{
	kvm_exit();
	kvm_x86_vendor_exit();
}

module_init(svm_init)
module_exit(svm_exit)