Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86: move kvm_inject_gp up from kvm_set_dr to callers

Push the injection of #GP up to the callers, so that they can just use
kvm_complete_insn_gp. __kvm_set_dr is pretty much what the callers can use
together with kvm_complete_insn_gp, so rename it to kvm_set_dr and drop
the old kvm_set_dr wrapper.

This also allows nested VMX code, which really wanted to use __kvm_set_dr,
to use the right function.

While at it, remove the kvm_require_dr() check from the SVM interception.
The APM states:

All normal exception checks take precedence over the SVM intercepts.

which includes the CR4.DE=1 #UD.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

+20 -29
+5 -8
arch/x86/kvm/svm/svm.c
 ··· (folded context)
 {
 	int reg, dr;
 	unsigned long val;
+	int err = 0;
 
 	if (svm->vcpu.guest_debug == 0) {
 		/*
 ··· (folded context)
 
 	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
 	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
-
-	if (dr >= 16) { /* mov to DRn */
-		if (!kvm_require_dr(&svm->vcpu, dr - 16))
-			return 1;
+	if (dr >= 16) { /* mov to DRn */
+		dr -= 16;
 		val = kvm_register_read(&svm->vcpu, reg);
-		kvm_set_dr(&svm->vcpu, dr - 16, val);
+		err = kvm_set_dr(&svm->vcpu, dr, val);
 	} else {
-		if (!kvm_require_dr(&svm->vcpu, dr))
-			return 1;
 		kvm_get_dr(&svm->vcpu, dr, &val);
 		kvm_register_write(&svm->vcpu, reg, val);
 	}
 
-	return kvm_skip_emulated_instruction(&svm->vcpu);
+	return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
 static int cr8_write_interception(struct vcpu_svm *svm)
+10 -7
arch/x86/kvm/vmx/vmx.c
 ··· (folded context)
 {
 	unsigned long exit_qualification;
 	int dr, dr7, reg;
+	int err = 1;
 
 	exit_qualification = vmx_get_exit_qual(vcpu);
 	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
 ··· (folded context)
 	if (!kvm_require_dr(vcpu, dr))
 		return 1;
 
-	/* Do not handle if the CPL > 0, will trigger GP on re-entry */
-	if (!kvm_require_cpl(vcpu, 0))
-		return 1;
+	if (kvm_x86_ops.get_cpl(vcpu) > 0)
+		goto out;
+
 	dr7 = vmcs_readl(GUEST_DR7);
 	if (dr7 & DR7_GD) {
 		/*
 ··· (folded context)
 
 		kvm_get_dr(vcpu, dr, &val);
 		kvm_register_write(vcpu, reg, val);
-	} else
-		if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
-			return 1;
+		err = 0;
+	} else {
+		err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg));
+	}
 
-	return kvm_skip_emulated_instruction(vcpu);
+out:
+	return kvm_complete_insn_gp(vcpu, err);
 }
 
 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+5 -14
arch/x86/kvm/x86.c
 ··· (folded context)
 	return fixed;
 }
 
-static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 {
 	size_t size = ARRAY_SIZE(vcpu->arch.db);
 
 ··· (folded context)
 	case 4:
 	case 6:
 		if (!kvm_dr6_valid(val))
-			return -1; /* #GP */
+			return 1; /* #GP */
 		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
 		break;
 	case 5:
 	default: /* 7 */
 		if (!kvm_dr7_valid(val))
-			return -1; /* #GP */
+			return 1; /* #GP */
 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
 		kvm_update_dr7(vcpu);
 		break;
 	}
 
-	return 0;
-}
-
-int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
-{
-	if (__kvm_set_dr(vcpu, dr, val)) {
-		kvm_inject_gp(vcpu, 0);
-		return 1;
-	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_dr);
 
 ··· (folded context)
 			   unsigned long value)
 {
 
-	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
+	return kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
 }
 
 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
 
 ··· (folded context)
 	dt.address = dt.size = 0;
 	static_call(kvm_x86_set_idt)(vcpu, &dt);
 
-	__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
+	kvm_set_dr(vcpu, 7, DR7_FIXED_1);
 
 	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
 	cs.base = vcpu->arch.smbase;