Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvm-x86-vmx-6.19' of https://github.com/kvm-x86/linux into HEAD

KVM VMX changes for 6.19:

- Use the root role from kvm_mmu_page to construct EPTPs instead of the
current vCPU state, partly as worthwhile cleanup, but mostly to pave the
way for tracking per-root TLB flushes so that KVM can elide EPT flushes on
pCPU migration if KVM has flushed the root at least once.

- Add a few missing nested consistency checks.

- Rip out support for doing "early" consistency checks via hardware as the
functionality hasn't been used in years and is no longer useful in general,
and replace it with an off-by-default module param to detect missed
consistency checks (i.e. WARN if hardware finds a check that KVM does not).

- Fix a currently-benign bug where KVM would drop the guest's SPEC_CTRL[63:32]
on VM-Enter.

- Misc cleanups.

+134 -169
-10
arch/x86/kvm/mmu/mmu_internal.h
··· 39 39 #define INVALID_PAE_ROOT 0 40 40 #define IS_VALID_PAE_ROOT(x) (!!(x)) 41 41 42 - static inline hpa_t kvm_mmu_get_dummy_root(void) 43 - { 44 - return my_zero_pfn(0) << PAGE_SHIFT; 45 - } 46 - 47 - static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page) 48 - { 49 - return is_zero_pfn(shadow_page >> PAGE_SHIFT); 50 - } 51 - 52 42 typedef u64 __rcu *tdp_ptep_t; 53 43 54 44 struct kvm_mmu_page {
+10
arch/x86/kvm/mmu/spte.h
··· 246 246 */ 247 247 extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask; 248 248 249 + static inline hpa_t kvm_mmu_get_dummy_root(void) 250 + { 251 + return my_zero_pfn(0) << PAGE_SHIFT; 252 + } 253 + 254 + static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page) 255 + { 256 + return is_zero_pfn(shadow_page >> PAGE_SHIFT); 257 + } 258 + 249 259 static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page) 250 260 { 251 261 struct page *page = pfn_to_page((shadow_page) >> PAGE_SHIFT);
+60 -111
arch/x86/kvm/vmx/nested.c
··· 23 23 static bool __read_mostly enable_shadow_vmcs = 1; 24 24 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); 25 25 26 - static bool __read_mostly nested_early_check = 0; 27 - module_param(nested_early_check, bool, S_IRUGO); 26 + static bool __ro_after_init warn_on_missed_cc; 27 + module_param(warn_on_missed_cc, bool, 0444); 28 28 29 29 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK 30 30 ··· 555 555 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))) 556 556 return -EINVAL; 557 557 558 + if (CC(!nested_cpu_has_vid(vmcs12) && vmcs12->tpr_threshold >> 4)) 559 + return -EINVAL; 560 + 558 561 return 0; 559 562 } 560 563 ··· 764 761 vmcs12->vmcs_link_pointer, VMCS12_SIZE)) 765 762 return; 766 763 767 - kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), 764 + kvm_read_guest_cached(vcpu->kvm, ghc, get_shadow_vmcs12(vcpu), 768 765 VMCS12_SIZE); 769 766 } 770 767 ··· 783 780 vmcs12->vmcs_link_pointer, VMCS12_SIZE)) 784 781 return; 785 782 786 - kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), 783 + kvm_write_guest_cached(vcpu->kvm, ghc, get_shadow_vmcs12(vcpu), 787 784 VMCS12_SIZE); 788 785 } 789 786 ··· 2299 2296 return; 2300 2297 vmx->nested.vmcs02_initialized = true; 2301 2298 2302 - /* 2303 - * We don't care what the EPTP value is we just need to guarantee 2304 - * it's valid so we don't get a false positive when doing early 2305 - * consistency checks. 
2306 - */ 2307 - if (enable_ept && nested_early_check) 2308 - vmcs_write64(EPT_POINTER, 2309 - construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); 2310 - 2311 2299 if (vmx->ve_info) 2312 2300 vmcs_write64(VE_INFORMATION_ADDRESS, __pa(vmx->ve_info)); 2313 2301 ··· 2743 2749 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); 2744 2750 vcpu->arch.pat = vmcs12->guest_ia32_pat; 2745 2751 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 2746 - vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 2752 + vmcs_write64(GUEST_IA32_PAT, vcpu->arch.pat); 2747 2753 } 2748 2754 2749 2755 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( ··· 2955 2961 } 2956 2962 } 2957 2963 2964 + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING) && 2965 + CC(!vmcs12->tsc_multiplier)) 2966 + return -EINVAL; 2967 + 2958 2968 return 0; 2959 2969 } 2960 2970 ··· 3072 3074 if (guest_cpu_cap_has_evmcs(vcpu)) 3073 3075 return nested_evmcs_check_controls(vmcs12); 3074 3076 #endif 3077 + 3078 + return 0; 3079 + } 3080 + 3081 + static int nested_vmx_check_controls_late(struct kvm_vcpu *vcpu, 3082 + struct vmcs12 *vmcs12) 3083 + { 3084 + void *vapic = to_vmx(vcpu)->nested.virtual_apic_map.hva; 3085 + u32 vtpr = vapic ? (*(u32 *)(vapic + APIC_TASKPRI)) >> 4 : 0; 3086 + 3087 + /* 3088 + * Don't bother with the consistency checks if KVM isn't configured to 3089 + * WARN on missed consistency checks, as KVM needs to rely on hardware 3090 + * to fully detect an illegal vTPR vs. TRP Threshold combination due to 3091 + * the vTPR being writable by L1 at all times (it's an in-memory value, 3092 + * not a VMCS field). I.e. even if the check passes now, it might fail 3093 + * at the actual VM-Enter. 3094 + * 3095 + * Keying off the module param also allows treating an invalid vAPIC 3096 + * mapping as a consistency check failure without increasing the risk 3097 + * of breaking a "real" VM. 
3098 + */ 3099 + if (!warn_on_missed_cc) 3100 + return 0; 3101 + 3102 + if ((exec_controls_get(to_vmx(vcpu)) & CPU_BASED_TPR_SHADOW) && 3103 + nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW) && 3104 + !nested_cpu_has_vid(vmcs12) && 3105 + !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && 3106 + (CC(!vapic) || 3107 + CC((vmcs12->tpr_threshold & GENMASK(3, 0)) > (vtpr & GENMASK(3, 0))))) 3108 + return -EINVAL; 3075 3109 3076 3110 return 0; 3077 3111 } ··· 3359 3329 3360 3330 if (nested_check_guest_non_reg_state(vmcs12)) 3361 3331 return -EINVAL; 3362 - 3363 - return 0; 3364 - } 3365 - 3366 - static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) 3367 - { 3368 - struct vcpu_vmx *vmx = to_vmx(vcpu); 3369 - unsigned long cr3, cr4; 3370 - bool vm_fail; 3371 - 3372 - if (!nested_early_check) 3373 - return 0; 3374 - 3375 - if (vmx->msr_autoload.host.nr) 3376 - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 3377 - if (vmx->msr_autoload.guest.nr) 3378 - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 3379 - 3380 - preempt_disable(); 3381 - 3382 - vmx_prepare_switch_to_guest(vcpu); 3383 - 3384 - /* 3385 - * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, 3386 - * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to 3387 - * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. 3388 - * there is no need to preserve other bits or save/restore the field. 
3389 - */ 3390 - vmcs_writel(GUEST_RFLAGS, 0); 3391 - 3392 - cr3 = __get_current_cr3_fast(); 3393 - if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 3394 - vmcs_writel(HOST_CR3, cr3); 3395 - vmx->loaded_vmcs->host_state.cr3 = cr3; 3396 - } 3397 - 3398 - cr4 = cr4_read_shadow(); 3399 - if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 3400 - vmcs_writel(HOST_CR4, cr4); 3401 - vmx->loaded_vmcs->host_state.cr4 = cr4; 3402 - } 3403 - 3404 - vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 3405 - __vmx_vcpu_run_flags(vmx)); 3406 - 3407 - if (vmx->msr_autoload.host.nr) 3408 - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 3409 - if (vmx->msr_autoload.guest.nr) 3410 - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 3411 - 3412 - if (vm_fail) { 3413 - u32 error = vmcs_read32(VM_INSTRUCTION_ERROR); 3414 - 3415 - preempt_enable(); 3416 - 3417 - trace_kvm_nested_vmenter_failed( 3418 - "early hardware check VM-instruction error: ", error); 3419 - WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3420 - return 1; 3421 - } 3422 - 3423 - /* 3424 - * VMExit clears RFLAGS.IF and DR7, even on a consistency check. 3425 - */ 3426 - if (hw_breakpoint_active()) 3427 - set_debugreg(__this_cpu_read(cpu_dr7), 7); 3428 - local_irq_enable(); 3429 - preempt_enable(); 3430 - 3431 - /* 3432 - * A non-failing VMEntry means we somehow entered guest mode with 3433 - * an illegal RIP, and that's just the tip of the iceberg. There 3434 - * is no telling what memory has been modified or what state has 3435 - * been exposed to unknown code. Hitting this all but guarantees 3436 - * a (very critical) hardware issue. 
3437 - */ 3438 - WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & 3439 - VMX_EXIT_REASONS_FAILED_VMENTRY)); 3440 3332 3441 3333 return 0; 3442 3334 } ··· 3619 3667 &vmx->nested.pre_vmenter_ssp_tbl); 3620 3668 3621 3669 /* 3622 - * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and* 3623 - * nested early checks are disabled. In the event of a "late" VM-Fail, 3624 - * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its 3625 - * software model to the pre-VMEntry host state. When EPT is disabled, 3626 - * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes 3627 - * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing 3628 - * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to 3629 - * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested 3630 - * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is 3631 - * guaranteed to be overwritten with a shadow CR3 prior to re-entering 3632 - * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as 3633 - * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks 3634 - * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail 3635 - * path would need to manually save/restore vmcs01.GUEST_CR3. 3670 + * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled. In the 3671 + * event of a "late" VM-Fail, i.e. a VM-Fail detected by hardware but 3672 + * not KVM, KVM must unwind its software model to the pre-VM-Entry host 3673 + * state. When EPT is disabled, GUEST_CR3 holds KVM's shadow CR3, not 3674 + * L1's "real" CR3, which causes nested_vmx_restore_host_state() to 3675 + * corrupt vcpu->arch.cr3. Stuffing vmcs01.GUEST_CR3 results in the 3676 + * unwind naturally setting arch.cr3 to the correct value. Smashing 3677 + * vmcs01.GUEST_CR3 is safe because nested VM-Exits, and the unwind, 3678 + * reset KVM's MMU, i.e. 
vmcs01.GUEST_CR3 is guaranteed to be 3679 + * overwritten with a shadow CR3 prior to re-entering L1. 3636 3680 */ 3637 - if (!enable_ept && !nested_early_check) 3681 + if (!enable_ept) 3638 3682 vmcs_writel(GUEST_CR3, vcpu->arch.cr3); 3639 3683 3640 3684 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); ··· 3643 3695 return NVMX_VMENTRY_KVM_INTERNAL_ERROR; 3644 3696 } 3645 3697 3646 - if (nested_vmx_check_vmentry_hw(vcpu)) { 3698 + if (nested_vmx_check_controls_late(vcpu, vmcs12)) { 3647 3699 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3648 3700 return NVMX_VMENTRY_VMFAIL; 3649 3701 } ··· 5112 5164 /* 5113 5165 * The only expected VM-instruction error is "VM entry with 5114 5166 * invalid control field(s)." Anything else indicates a 5115 - * problem with L0. And we should never get here with a 5116 - * VMFail of any type if early consistency checks are enabled. 5167 + * problem with L0. 5117 5168 */ 5118 5169 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 5119 5170 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 5120 - WARN_ON_ONCE(nested_early_check); 5171 + 5172 + /* VM-Fail at VM-Entry means KVM missed a consistency check. */ 5173 + WARN_ON_ONCE(warn_on_missed_cc); 5121 5174 } 5122 5175 5123 5176 /*
+10 -20
arch/x86/kvm/vmx/tdx.c
··· 2706 2706 2707 2707 static int tdx_td_init(struct kvm *kvm, struct kvm_tdx_cmd *cmd) 2708 2708 { 2709 + struct kvm_tdx_init_vm __user *user_data = u64_to_user_ptr(cmd->data); 2709 2710 struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm); 2710 2711 struct kvm_tdx_init_vm *init_vm; 2711 2712 struct td_params *td_params = NULL; 2713 + u32 nr_user_entries; 2712 2714 int ret; 2713 2715 2714 2716 BUILD_BUG_ON(sizeof(*init_vm) != 256 + sizeof_field(struct kvm_tdx_init_vm, cpuid)); ··· 2722 2720 if (cmd->flags) 2723 2721 return -EINVAL; 2724 2722 2725 - init_vm = kmalloc(sizeof(*init_vm) + 2726 - sizeof(init_vm->cpuid.entries[0]) * KVM_MAX_CPUID_ENTRIES, 2727 - GFP_KERNEL); 2728 - if (!init_vm) 2729 - return -ENOMEM; 2723 + if (get_user(nr_user_entries, &user_data->cpuid.nent)) 2724 + return -EFAULT; 2730 2725 2731 - if (copy_from_user(init_vm, u64_to_user_ptr(cmd->data), sizeof(*init_vm))) { 2732 - ret = -EFAULT; 2733 - goto out; 2734 - } 2726 + if (nr_user_entries > KVM_MAX_CPUID_ENTRIES) 2727 + return -E2BIG; 2735 2728 2736 - if (init_vm->cpuid.nent > KVM_MAX_CPUID_ENTRIES) { 2737 - ret = -E2BIG; 2738 - goto out; 2739 - } 2740 - 2741 - if (copy_from_user(init_vm->cpuid.entries, 2742 - u64_to_user_ptr(cmd->data) + sizeof(*init_vm), 2743 - flex_array_size(init_vm, cpuid.entries, init_vm->cpuid.nent))) { 2744 - ret = -EFAULT; 2745 - goto out; 2746 - } 2729 + init_vm = memdup_user(user_data, 2730 + struct_size(user_data, cpuid.entries, nr_user_entries)); 2731 + if (IS_ERR(init_vm)) 2732 + return PTR_ERR(init_vm); 2747 2733 2748 2734 if (memchr_inv(init_vm->reserved, 0, sizeof(init_vm->reserved))) { 2749 2735 ret = -EINVAL;
+15 -5
arch/x86/kvm/vmx/vmenter.S
··· 116 116 * and vmentry. 117 117 */ 118 118 mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI 119 - movl VMX_spec_ctrl(%_ASM_DI), %edi 120 - movl PER_CPU_VAR(x86_spec_ctrl_current), %esi 121 - cmp %edi, %esi 119 + #ifdef CONFIG_X86_64 120 + mov VMX_spec_ctrl(%rdi), %rdx 121 + cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx 122 122 je .Lspec_ctrl_done 123 + movl %edx, %eax 124 + shr $32, %rdx 125 + #else 126 + mov VMX_spec_ctrl(%edi), %eax 127 + mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx 128 + xor %eax, %ecx 129 + mov VMX_spec_ctrl + 4(%edi), %edx 130 + mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edi 131 + xor %edx, %edi 132 + or %edi, %ecx 133 + je .Lspec_ctrl_done 134 + #endif 123 135 mov $MSR_IA32_SPEC_CTRL, %ecx 124 - xor %edx, %edx 125 - mov %edi, %eax 126 136 wrmsr 127 137 128 138 .Lspec_ctrl_done:
+39 -20
arch/x86/kvm/vmx/vmx.c
··· 862 862 loaded_vmcs->launched = 0; 863 863 } 864 864 865 - void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) 865 + static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) 866 866 { 867 867 int cpu = loaded_vmcs->cpu; 868 868 ··· 3329 3329 return to_vmx(vcpu)->vpid; 3330 3330 } 3331 3331 3332 + static u64 construct_eptp(hpa_t root_hpa) 3333 + { 3334 + u64 eptp = root_hpa | VMX_EPTP_MT_WB; 3335 + struct kvm_mmu_page *root; 3336 + 3337 + if (kvm_mmu_is_dummy_root(root_hpa)) 3338 + return eptp | VMX_EPTP_PWL_4; 3339 + 3340 + /* 3341 + * EPT roots should always have an associated MMU page. Return a "bad" 3342 + * EPTP to induce VM-Fail instead of continuing on in a unknown state. 3343 + */ 3344 + root = root_to_sp(root_hpa); 3345 + if (WARN_ON_ONCE(!root)) 3346 + return INVALID_PAGE; 3347 + 3348 + eptp |= (root->role.level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; 3349 + 3350 + if (enable_ept_ad_bits && !root->role.ad_disabled) 3351 + eptp |= VMX_EPTP_AD_ENABLE_BIT; 3352 + 3353 + return eptp; 3354 + } 3355 + 3356 + static void vmx_flush_tlb_ept_root(hpa_t root_hpa) 3357 + { 3358 + u64 eptp = construct_eptp(root_hpa); 3359 + 3360 + if (VALID_PAGE(eptp)) 3361 + ept_sync_context(eptp); 3362 + else 3363 + ept_sync_global(); 3364 + } 3365 + 3332 3366 void vmx_flush_tlb_current(struct kvm_vcpu *vcpu) 3333 3367 { 3334 3368 struct kvm_mmu *mmu = vcpu->arch.mmu; ··· 3373 3339 return; 3374 3340 3375 3341 if (enable_ept) 3376 - ept_sync_context(construct_eptp(vcpu, root_hpa, 3377 - mmu->root_role.level)); 3342 + vmx_flush_tlb_ept_root(root_hpa); 3378 3343 else 3379 3344 vpid_sync_context(vmx_get_current_vpid(vcpu)); 3380 3345 } ··· 3539 3506 return 4; 3540 3507 } 3541 3508 3542 - u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) 3543 - { 3544 - u64 eptp = VMX_EPTP_MT_WB; 3545 - 3546 - eptp |= (root_level == 5) ? 
VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; 3547 - 3548 - if (enable_ept_ad_bits && 3549 - (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) 3550 - eptp |= VMX_EPTP_AD_ENABLE_BIT; 3551 - eptp |= root_hpa; 3552 - 3553 - return eptp; 3554 - } 3555 - 3556 3509 void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) 3557 3510 { 3558 3511 struct kvm *kvm = vcpu->kvm; 3559 3512 bool update_guest_cr3 = true; 3560 3513 unsigned long guest_cr3; 3561 - u64 eptp; 3562 3514 3563 3515 if (enable_ept) { 3564 - eptp = construct_eptp(vcpu, root_hpa, root_level); 3565 - vmcs_write64(EPT_POINTER, eptp); 3516 + KVM_MMU_WARN_ON(root_to_sp(root_hpa) && 3517 + root_level != root_to_sp(root_hpa)->role.level); 3518 + vmcs_write64(EPT_POINTER, construct_eptp(root_hpa)); 3566 3519 3567 3520 hv_track_root_tdp(vcpu, root_hpa); 3568 3521
-2
arch/x86/kvm/vmx/vmx.h
··· 369 369 void ept_save_pdptrs(struct kvm_vcpu *vcpu); 370 370 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); 371 371 void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); 372 - u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level); 373 372 374 373 bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu); 375 374 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu); ··· 680 681 void free_vmcs(struct vmcs *vmcs); 681 682 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs); 682 683 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs); 683 - void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs); 684 684 685 685 static inline struct vmcs *alloc_vmcs(bool shadow) 686 686 {
-1
arch/x86/kvm/vmx/x86_ops.h
··· 73 73 void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt); 74 74 void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt); 75 75 void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt); 76 - void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val); 77 76 void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val); 78 77 void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu); 79 78 void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg);