Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: SVM: copy instruction bytes from VMCB

In case of a nested page fault or an intercepted #PF newer SVM
implementations provide a copy of the faulting instruction bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Authored by Andre Przywara
Committed by Avi Kivity
dc25e89e df4f3108

+26 -15
+1 -1
arch/x86/include/asm/kvm_emulate.h
@@ -265,7 +265,7 @@
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif
 
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
+5 -4
arch/x86/include/asm/kvm_host.h
@@ -634,13 +634,13 @@
 #define EMULTYPE_NO_DECODE	(1 << 0)
 #define EMULTYPE_TRAP_UD	(1 << 1)
 #define EMULTYPE_SKIP		(1 << 2)
-int x86_emulate_instruction(struct kvm_vcpu *vcpu,
-			unsigned long cr2, int emulation_type);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+			    int emulation_type, void *insn, int insn_len);
 
 static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 			int emulation_type)
 {
-	return x86_emulate_instruction(vcpu, 0, emulation_type);
+	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
 }
 
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
@@ -721,7 +721,8 @@
 
 int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 
 void kvm_enable_tdp(void);
+3 -1
arch/x86/include/asm/svm.h
@@ -83,7 +83,9 @@
 	u32 clean;
 	u32 reserved_5;
 	u64 next_rip;
-	u8 reserved_6[816];
+	u8 insn_len;
+	u8 insn_bytes[15];
+	u8 reserved_6[800];
 };
 
 
+5 -2
arch/x86/kvm/emulate.c
@@ -2610,7 +2610,7 @@
 }
 
 int
-x86_decode_insn(struct x86_emulate_ctxt *ctxt)
+x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 {
 	struct x86_emulate_ops *ops = ctxt->ops;
 	struct decode_cache *c = &ctxt->decode;
@@ -2621,7 +2621,10 @@
 	struct operand memop = { .type = OP_NONE };
 
 	c->eip = ctxt->eip;
-	c->fetch.start = c->fetch.end = c->eip;
+	c->fetch.start = c->eip;
+	c->fetch.end = c->fetch.start + insn_len;
+	if (insn_len > 0)
+		memcpy(c->fetch.data, insn, insn_len);
 	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
 
 	switch (mode) {
+3 -2
arch/x86/kvm/mmu.c
@@ -3330,7 +3330,8 @@
 	}
 }
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+		       void *insn, int insn_len)
 {
 	int r;
 	enum emulation_result er;
@@ -3349,7 +3348,7 @@
 	if (r)
 		goto out;
 
-	er = x86_emulate_instruction(vcpu, cr2, 0);
+	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
 
 	switch (er) {
 	case EMULATE_DONE:
+3 -1
arch/x86/kvm/svm.c
@@ -1527,7 +1527,9 @@
 		trace_kvm_page_fault(fault_address, error_code);
 		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
 			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
-		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
+		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+				       svm->vmcb->control.insn_bytes,
+				       svm->vmcb->control.insn_len);
 		break;
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
 		svm->apf_reason = 0;
+2 -2
arch/x86/kvm/vmx.c
@@ -3055,7 +3055,7 @@
 
 		if (kvm_event_needs_reinjection(vcpu))
 			kvm_mmu_unprotect_page_virt(vcpu, cr2);
-		return kvm_mmu_page_fault(vcpu, cr2, error_code);
+		return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
 	}
 
 	if (vmx->rmode.vm86_active &&
@@ -3502,7 +3502,7 @@
 
 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 	trace_kvm_page_fault(gpa, exit_qualification);
-	return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3);
+	return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
 }
 
 static u64 ept_rsvd_mask(u64 spte, int level)
+4 -2
arch/x86/kvm/x86.c
@@ -4365,7 +4365,9 @@
 
 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 			    unsigned long cr2,
-			    int emulation_type)
+			    int emulation_type,
+			    void *insn,
+			    int insn_len)
 {
 	int r;
 	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
@@ -4388,7 +4386,7 @@
 	vcpu->arch.emulate_ctxt.have_exception = false;
 	vcpu->arch.emulate_ctxt.perm_ok = false;
 
-	r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+	r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
 	if (r == X86EMUL_PROPAGATE_FAULT)
 		goto done;