Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.9 · 11377 lines · 329 kB
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "cpuid.h"
#include "lapic.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/hrtimer.h>
#include "kvm_cache_regs.h"
#include "x86.h"

#include <asm/cpu.h>
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
#include <asm/fpu/internal.h>
#include <asm/perf_event.h>
#include <asm/debugreg.h>
#include <asm/kexec.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>

#include "trace.h"
#include "pmu.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
	____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_VMX),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);

static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

static bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly vmm_exclusive = 1;
module_param(vmm_exclusive, bool, S_IRUGO);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);

static bool __read_mostly enable_apicv = 1;
module_param(enable_apicv, bool, S_IRUGO);

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and act as hypervisors for their own guests. If nested=0, guests may
 * not use VMX instructions.
 */
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);

static u64 __read_mostly host_xss;

static bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);
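/*
 * Illustrative note (not part of the original file): the knobs above are
 * ordinary module parameters, so on an Intel host they can be set at load
 * time, e.g. "modprobe kvm_intel nested=1 ept=1", and the read-only
 * permission (S_IRUGO/0444) exposes the active values under
 * /sys/module/kvm_intel/parameters/.
 */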
#define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL

/* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
static int __read_mostly cpu_preemption_timer_multi;
static bool __read_mostly enable_preemption_timer = 1;
#ifdef CONFIG_X86_64
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif

#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON						\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS				      \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_TSD)

#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled. Tests show this time is usually smaller than 128
 *             cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to
 *             execute in a PAUSE loop. Tests indicate that most spinlocks
 *             are held for less than 2^12 cycles.
 * Time is measured on a counter that runs at the same rate as the TSC;
 * refer to SDM volume 3b, sections 21.6.13 & 22.1.3.
 */
#define KVM_VMX_DEFAULT_PLE_GAP           128
#define KVM_VMX_DEFAULT_PLE_WINDOW        4096
#define KVM_VMX_DEFAULT_PLE_WINDOW_GROW   2
#define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX    \
		INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW

static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);

static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);

/* Default doubles per-vcpu window every exit. */
static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
module_param(ple_window_grow, int, S_IRUGO);

/* Default resets per-vcpu window every exit to ple_window. */
static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
module_param(ple_window_shrink, int, S_IRUGO);

/* Default is to compute the maximum so we can never overflow. */
static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
static int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, int, S_IRUGO);
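/*
 * Illustrative sketch (not part of the original file): a dynamic PLE
 * window policy of the kind configured above can treat a small "grow"
 * value as a multiplier and a large one as an increment, clamping
 * against the precomputed maximum so the int can never overflow.
 * The helper name is hypothetical.
 */
static inline int ple_window_grow_once(int val)
{
	if (ple_window_grow < 1)
		return ple_window;		/* growing disabled */

	val = min(val, ple_window_actual_max);	/* overflow clamp */

	if (ple_window_grow < ple_window)
		val *= ple_window_grow;		/* small value: multiplier */
	else
		val += ple_window_grow;		/* large value: increment */

	return val;
}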
extern const ulong vmx_return;

#define NR_AUTOLOAD_MSRS 8
#define VMCS02_POOL_SIZE 1

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
	struct vmcs *vmcs;
	struct vmcs *shadow_vmcs;
	int cpu;
	int launched;
	struct list_head loaded_vmcss_on_cpu_link;
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

/*
 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
 * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
 * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
 * More than one of these structures may exist, if L1 runs multiple L2 guests.
 * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
 * underlying hardware which will be used to run L2.
 * This structure is packed to ensure that its layout is identical across
 * machines (necessary for live migration).
 * If there are changes in this struct, VMCS12_REVISION must be changed.
 */
typedef u64 natural_width;
struct __packed vmcs12 {
	/* According to the Intel spec, a VMCS region must start with the
	 * following two fields. Then follow implementation-specific data.
	 */
	u32 revision_id;
	u32 abort;

	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
	u32 padding[7]; /* room for future expansion */

	u64 io_bitmap_a;
	u64 io_bitmap_b;
	u64 msr_bitmap;
	u64 vm_exit_msr_store_addr;
	u64 vm_exit_msr_load_addr;
	u64 vm_entry_msr_load_addr;
	u64 tsc_offset;
	u64 virtual_apic_page_addr;
	u64 apic_access_addr;
	u64 posted_intr_desc_addr;
	u64 ept_pointer;
	u64 eoi_exit_bitmap0;
	u64 eoi_exit_bitmap1;
	u64 eoi_exit_bitmap2;
	u64 eoi_exit_bitmap3;
	u64 xss_exit_bitmap;
	u64 guest_physical_address;
	u64 vmcs_link_pointer;
	u64 guest_ia32_debugctl;
	u64 guest_ia32_pat;
	u64 guest_ia32_efer;
	u64 guest_ia32_perf_global_ctrl;
	u64 guest_pdptr0;
	u64 guest_pdptr1;
	u64 guest_pdptr2;
	u64 guest_pdptr3;
	u64 guest_bndcfgs;
	u64 host_ia32_pat;
	u64 host_ia32_efer;
	u64 host_ia32_perf_global_ctrl;
	u64 padding64[8]; /* room for future expansion */
	/*
	 * To allow migration of L1 (complete with its L2 guests) between
	 * machines of different natural widths (32 or 64 bit), we cannot have
	 * unsigned long fields with no explicit size. We use u64 (aliased
	 * natural_width) instead. Luckily, x86 is little-endian.
	 */
	natural_width cr0_guest_host_mask;
	natural_width cr4_guest_host_mask;
	natural_width cr0_read_shadow;
	natural_width cr4_read_shadow;
	natural_width cr3_target_value0;
	natural_width cr3_target_value1;
	natural_width cr3_target_value2;
	natural_width cr3_target_value3;
	natural_width exit_qualification;
	natural_width guest_linear_address;
	natural_width guest_cr0;
	natural_width guest_cr3;
	natural_width guest_cr4;
	natural_width guest_es_base;
	natural_width guest_cs_base;
	natural_width guest_ss_base;
	natural_width guest_ds_base;
	natural_width guest_fs_base;
	natural_width guest_gs_base;
	natural_width guest_ldtr_base;
	natural_width guest_tr_base;
	natural_width guest_gdtr_base;
	natural_width guest_idtr_base;
	natural_width guest_dr7;
	natural_width guest_rsp;
	natural_width guest_rip;
	natural_width guest_rflags;
	natural_width guest_pending_dbg_exceptions;
	natural_width guest_sysenter_esp;
	natural_width guest_sysenter_eip;
	natural_width host_cr0;
	natural_width host_cr3;
	natural_width host_cr4;
	natural_width host_fs_base;
	natural_width host_gs_base;
	natural_width host_tr_base;
	natural_width host_gdtr_base;
	natural_width host_idtr_base;
	natural_width host_ia32_sysenter_esp;
	natural_width host_ia32_sysenter_eip;
	natural_width host_rsp;
	natural_width host_rip;
	natural_width paddingl[8]; /* room for future expansion */
	u32 pin_based_vm_exec_control;
	u32 cpu_based_vm_exec_control;
	u32 exception_bitmap;
	u32 page_fault_error_code_mask;
	u32 page_fault_error_code_match;
	u32 cr3_target_count;
	u32 vm_exit_controls;
	u32 vm_exit_msr_store_count;
	u32 vm_exit_msr_load_count;
	u32 vm_entry_controls;
	u32 vm_entry_msr_load_count;
	u32 vm_entry_intr_info_field;
	u32 vm_entry_exception_error_code;
	u32 vm_entry_instruction_len;
	u32 tpr_threshold;
	u32 secondary_vm_exec_control;
	u32 vm_instruction_error;
	u32 vm_exit_reason;
	u32 vm_exit_intr_info;
	u32 vm_exit_intr_error_code;
	u32 idt_vectoring_info_field;
	u32 idt_vectoring_error_code;
	u32 vm_exit_instruction_len;
	u32 vmx_instruction_info;
	u32 guest_es_limit;
	u32 guest_cs_limit;
	u32 guest_ss_limit;
	u32 guest_ds_limit;
	u32 guest_fs_limit;
	u32 guest_gs_limit;
	u32 guest_ldtr_limit;
	u32 guest_tr_limit;
	u32 guest_gdtr_limit;
	u32 guest_idtr_limit;
	u32 guest_es_ar_bytes;
	u32 guest_cs_ar_bytes;
	u32 guest_ss_ar_bytes;
	u32 guest_ds_ar_bytes;
	u32 guest_fs_ar_bytes;
	u32 guest_gs_ar_bytes;
	u32 guest_ldtr_ar_bytes;
	u32 guest_tr_ar_bytes;
	u32 guest_interruptibility_info;
	u32 guest_activity_state;
	u32 guest_sysenter_cs;
	u32 host_ia32_sysenter_cs;
	u32 vmx_preemption_timer_value;
	u32 padding32[7]; /* room for future expansion */
	u16 virtual_processor_id;
	u16 posted_intr_nv;
	u16 guest_es_selector;
	u16 guest_cs_selector;
	u16 guest_ss_selector;
	u16 guest_ds_selector;
	u16 guest_fs_selector;
	u16 guest_gs_selector;
	u16 guest_ldtr_selector;
	u16 guest_tr_selector;
	u16 guest_intr_status;
	u16 host_es_selector;
	u16 host_cs_selector;
	u16 host_ss_selector;
	u16 host_ds_selector;
	u16 host_fs_selector;
	u16 host_gs_selector;
	u16 host_tr_selector;
};

/*
 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
 */
#define VMCS12_REVISION 0x11e57ed0

/*
 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
 * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
 * the current implementation, 4K are reserved to avoid future complications.
 */
#define VMCS12_SIZE 0x1000
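/*
 * Illustrative sketch (not part of the original file): since VMPTRLD only
 * verifies the revision id, a compile-time assertion is a cheap way to
 * guarantee the emulated VMCS actually fits in the 4K region that
 * VMCS12_SIZE promises to L1. The helper name is hypothetical.
 */
static inline void vmcs12_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct vmcs12) > VMCS12_SIZE);
}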
/* Used to remember the last vmcs02 used for some recently used vmcs12s */
struct vmcs02_list {
	struct list_head list;
	gpa_t vmptr;
	struct loaded_vmcs vmcs02;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/* The host-usable pointer to the above */
	struct page *current_vmcs12_page;
	struct vmcs12 *current_vmcs12;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMXOFF, VMCLEAR, VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Indicates if the shadow vmcs must be updated with the
	 * data held by vmcs12
	 */
	bool sync_shadow_vmcs;

	/* vmcs02_list cache of VMCSs recently used to run L2 guests */
	struct list_head vmcs02_pool;
	int vmcs02_num;
	bool change_vmcs01_virtual_x2apic_mode;
	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;
	/*
	 * Guest pages referred to in vmcs02 with host-physical pointers, so
	 * we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct page *virtual_apic_page;
	struct page *pi_desc_page;
	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	unsigned long *msr_bitmap;

	struct hrtimer preemption_timer;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;

	u16 vpid02;
	u16 last_vpid;

	u32 nested_vmx_procbased_ctls_low;
	u32 nested_vmx_procbased_ctls_high;
	u32 nested_vmx_true_procbased_ctls_low;
	u32 nested_vmx_secondary_ctls_low;
	u32 nested_vmx_secondary_ctls_high;
	u32 nested_vmx_pinbased_ctls_low;
	u32 nested_vmx_pinbased_ctls_high;
	u32 nested_vmx_exit_ctls_low;
	u32 nested_vmx_exit_ctls_high;
	u32 nested_vmx_true_exit_ctls_low;
	u32 nested_vmx_entry_ctls_low;
	u32 nested_vmx_entry_ctls_high;
	u32 nested_vmx_true_entry_ctls_low;
	u32 nested_vmx_misc_low;
	u32 nested_vmx_misc_high;
	u32 nested_vmx_ept_caps;
	u32 nested_vmx_vpid_caps;
};
#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];     /* Posted interrupt requested */
	union {
		struct {
				/* bit 256 - Outstanding Notification */
			u16	on	: 1,
				/* bit 257 - Suppress Notification */
				sn	: 1,
				/* bit 271:258 - Reserved */
				rsvd_1	: 14;
				/* bit 279:272 - Notification Vector */
			u8	nv;
				/* bit 287:280 - Reserved */
			u8	rsvd_2;
				/* bit 319:288 - Notification Destination */
			u32	ndst;
		};
		u64 control;
	};
	u32 rsvd[6];
} __aligned(64);

static bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
	return clear_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
	return set_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
}
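/*
 * Illustrative sketch (not part of the original file): this is how the
 * accessors above combine when posting an interrupt. The vector is set in
 * the PIR, then the Outstanding Notification bit is set; the sender only
 * needs to fire the notification vector at the target CPU when ON makes a
 * 0 -> 1 transition. The helper name is hypothetical.
 */
static inline bool pi_post_interrupt(struct pi_desc *pi_desc, int vector)
{
	if (pi_test_and_set_pir(vector, pi_desc))
		return false;		/* vector already pending */
	/* test_and_set returns the old value: notify only on 0 -> 1 */
	return !pi_test_and_set_on(pi_desc);
}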
struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	unsigned long host_rsp;
	u8 fail;
	bool nmi_known_unmasked;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;
	struct shared_msr_entry *guest_msrs;
	int nmsrs;
	int save_nmsrs;
	unsigned long host_idt_base;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif
	u32 vm_entry_controls_shadow;
	u32 vm_exit_controls_shadow;
	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;
	bool __launched; /* temporary, used in vmx_vcpu_run */
	struct msr_autoload {
		unsigned nr;
		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
	} msr_autoload;
	struct {
		int loaded;
		u16 fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
		u16 ds_sel, es_sel;
#endif
		int gs_ldt_reload_needed;
		int fs_reload_needed;
		u64 msr_host_bndcfgs;
		unsigned long vmcs_host_cr4;	/* May not match real cr4 */
	} host_state;
	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	u32 exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	int ple_window;
	bool ple_window_dirty;

	/* Support for PML */
#define PML_ENTITY_NUM 512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	bool guest_pkru_valid;
	u32 guest_pkru;
	u32 host_pkru;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}

#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name)	[number] = VMCS12_OFFSET(name)
#define FIELD64(number, name)	[number] = VMCS12_OFFSET(name), \
				[number##_HIGH] = VMCS12_OFFSET(name)+4
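/*
 * Illustrative note (not part of the original file): FIELD() maps a single
 * VMCS field encoding to its offset in struct vmcs12, while FIELD64() emits
 * two table entries for one 64-bit field, pointing the "_HIGH" encoding at
 * the upper four bytes so that 32-bit hosts can access each half.
 */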
static unsigned long shadow_read_only_fields[] = {
	/*
	 * We do NOT shadow fields that are modified when L0
	 * traps and emulates any vmx instruction (e.g. VMPTRLD,
	 * VMXON...) executed by L1.
	 * For example, VM_INSTRUCTION_ERROR is read
	 * by L1 if a vmx instruction fails (part of the error path).
	 * Note the code assumes this logic. If for some reason
	 * we start shadowing these fields then we need to
	 * force a shadow sync when L0 emulates vmx instructions
	 * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
	 * by nested_vmx_failValid)
	 */
	VM_EXIT_REASON,
	VM_EXIT_INTR_INFO,
	VM_EXIT_INSTRUCTION_LEN,
	IDT_VECTORING_INFO_FIELD,
	IDT_VECTORING_ERROR_CODE,
	VM_EXIT_INTR_ERROR_CODE,
	EXIT_QUALIFICATION,
	GUEST_LINEAR_ADDRESS,
	GUEST_PHYSICAL_ADDRESS
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static unsigned long shadow_read_write_fields[] = {
	TPR_THRESHOLD,
	GUEST_RIP,
	GUEST_RSP,
	GUEST_CR0,
	GUEST_CR3,
	GUEST_CR4,
	GUEST_INTERRUPTIBILITY_INFO,
	GUEST_RFLAGS,
	GUEST_CS_SELECTOR,
	GUEST_CS_AR_BYTES,
	GUEST_CS_LIMIT,
	GUEST_CS_BASE,
	GUEST_ES_BASE,
	GUEST_BNDCFGS,
	CR0_GUEST_HOST_MASK,
	CR0_READ_SHADOW,
	CR4_READ_SHADOW,
	TSC_OFFSET,
	EXCEPTION_BITMAP,
	CPU_BASED_VM_EXEC_CONTROL,
	VM_ENTRY_EXCEPTION_ERROR_CODE,
	VM_ENTRY_INTR_INFO_FIELD,
	VM_ENTRY_INSTRUCTION_LEN,
	VM_ENTRY_EXCEPTION_ERROR_CODE,
	HOST_FS_BASE,
	HOST_GS_BASE,
	HOST_FS_SELECTOR,
	HOST_GS_SELECTOR
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);
static const unsigned short vmcs_field_to_offset_table[] = {
	FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
	FIELD(POSTED_INTR_NV, posted_intr_nv),
	FIELD(GUEST_ES_SELECTOR, guest_es_selector),
	FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
	FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
	FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
	FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
	FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
	FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
	FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
	FIELD(GUEST_INTR_STATUS, guest_intr_status),
	FIELD(HOST_ES_SELECTOR, host_es_selector),
	FIELD(HOST_CS_SELECTOR, host_cs_selector),
	FIELD(HOST_SS_SELECTOR, host_ss_selector),
	FIELD(HOST_DS_SELECTOR, host_ds_selector),
	FIELD(HOST_FS_SELECTOR, host_fs_selector),
	FIELD(HOST_GS_SELECTOR, host_gs_selector),
	FIELD(HOST_TR_SELECTOR, host_tr_selector),
	FIELD64(IO_BITMAP_A, io_bitmap_a),
	FIELD64(IO_BITMAP_B, io_bitmap_b),
	FIELD64(MSR_BITMAP, msr_bitmap),
	FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
	FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
	FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
	FIELD64(TSC_OFFSET, tsc_offset),
	FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
	FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
	FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
	FIELD64(EPT_POINTER, ept_pointer),
	FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
	FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
	FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
	FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
	FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
	FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
	FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
	FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
	FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
	FIELD64(GUEST_PDPTR0, guest_pdptr0),
	FIELD64(GUEST_PDPTR1, guest_pdptr1),
	FIELD64(GUEST_PDPTR2, guest_pdptr2),
	FIELD64(GUEST_PDPTR3, guest_pdptr3),
	FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
	FIELD64(HOST_IA32_PAT, host_ia32_pat),
	FIELD64(HOST_IA32_EFER, host_ia32_efer),
	FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
	FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
	FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
	FIELD(EXCEPTION_BITMAP, exception_bitmap),
	FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
	FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
	FIELD(CR3_TARGET_COUNT, cr3_target_count),
	FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
	FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
	FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
	FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
	FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
	FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
	FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
	FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
	FIELD(TPR_THRESHOLD, tpr_threshold),
	FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
	FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
	FIELD(VM_EXIT_REASON, vm_exit_reason),
	FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
	FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
	FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
	FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
	FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
	FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
	FIELD(GUEST_ES_LIMIT, guest_es_limit),
	FIELD(GUEST_CS_LIMIT, guest_cs_limit),
	FIELD(GUEST_SS_LIMIT, guest_ss_limit),
	FIELD(GUEST_DS_LIMIT, guest_ds_limit),
	FIELD(GUEST_FS_LIMIT, guest_fs_limit),
	FIELD(GUEST_GS_LIMIT, guest_gs_limit),
	FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
	FIELD(GUEST_TR_LIMIT, guest_tr_limit),
	FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
	FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
	FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
	FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
	FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
	FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
	FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
	FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
	FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
	FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
	FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
	FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
	FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
	FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
	FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
	FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
	FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
	FIELD(CR0_READ_SHADOW, cr0_read_shadow),
	FIELD(CR4_READ_SHADOW, cr4_read_shadow),
	FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
	FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
	FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
	FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
	FIELD(EXIT_QUALIFICATION, exit_qualification),
	FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
	FIELD(GUEST_CR0, guest_cr0),
	FIELD(GUEST_CR3, guest_cr3),
	FIELD(GUEST_CR4, guest_cr4),
	FIELD(GUEST_ES_BASE, guest_es_base),
	FIELD(GUEST_CS_BASE, guest_cs_base),
	FIELD(GUEST_SS_BASE, guest_ss_base),
	FIELD(GUEST_DS_BASE, guest_ds_base),
	FIELD(GUEST_FS_BASE, guest_fs_base),
	FIELD(GUEST_GS_BASE, guest_gs_base),
	FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
	FIELD(GUEST_TR_BASE, guest_tr_base),
	FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
	FIELD(GUEST_IDTR_BASE, guest_idtr_base),
	FIELD(GUEST_DR7, guest_dr7),
	FIELD(GUEST_RSP, guest_rsp),
	FIELD(GUEST_RIP, guest_rip),
	FIELD(GUEST_RFLAGS, guest_rflags),
	FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
	FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
	FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
	FIELD(HOST_CR0, host_cr0),
	FIELD(HOST_CR3, host_cr3),
	FIELD(HOST_CR4, host_cr4),
	FIELD(HOST_FS_BASE, host_fs_base),
	FIELD(HOST_GS_BASE, host_gs_base),
	FIELD(HOST_TR_BASE, host_tr_base),
	FIELD(HOST_GDTR_BASE, host_gdtr_base),
	FIELD(HOST_IDTR_BASE, host_idtr_base),
	FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
	FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
	FIELD(HOST_RSP, host_rsp),
	FIELD(HOST_RIP, host_rip),
};
static inline short vmcs_field_to_offset(unsigned long field)
{
	BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);

	if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
	    vmcs_field_to_offset_table[field] == 0)
		return -ENOENT;

	return vmcs_field_to_offset_table[field];
}

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_vmcs12;
}

static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
{
	struct page *page = kvm_vcpu_gfn_to_page(vcpu, addr >> PAGE_SHIFT);
	if (is_error_page(page))
		return NULL;

	return page;
}

static void nested_release_page(struct page *page)
{
	kvm_release_page_dirty(page);
}

static void nested_release_page_clean(struct page *page)
{
	kvm_release_page_clean(page);
}

static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static bool vmx_xsaves_supported(void);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
static int alloc_identity_pagetable(struct kvm *kvm);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);

/*
 * We maintain a per-CPU linked-list of vCPUs, so in wakeup_handler() we
 * can find which vCPU should be woken up.
 */
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
static unsigned long *vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
static unsigned long *vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;

static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static struct vmcs_config {
	int size;
	int order;
	u32 basic_cap;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

static u64 host_efer;

static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
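/*
 * Illustrative note (not part of the original file): the predicates below
 * parse the VM-exit interruption-information field, which encodes the
 * vector in bits 7:0, the event type in bits 10:8 and a valid bit in
 * bit 31; "type == hard exception && vector == N && valid" therefore
 * identifies exception N precisely.
 */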
static inline bool is_exception_n(u32 intr_info, u8 vector)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
}

static inline bool is_debug(u32 intr_info)
{
	return is_exception_n(intr_info, DB_VECTOR);
}

static inline bool is_breakpoint(u32 intr_info)
{
	return is_exception_n(intr_info, BP_VECTOR);
}

static inline bool is_page_fault(u32 intr_info)
{
	return is_exception_n(intr_info, PF_VECTOR);
}

static inline bool is_no_device(u32 intr_info)
{
	return is_exception_n(intr_info, NM_VECTOR);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return is_exception_n(intr_info, UD_VECTOR);
}

static inline bool is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline bool is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}

static inline bool cpu_has_vmx_tpr_shadow(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}

static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
{
	return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
}

static inline bool cpu_has_secondary_exec_ctrls(void)
{
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
}

static inline bool cpu_has_vmx_apic_register_virt(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_APIC_REGISTER_VIRT;
}

static inline bool cpu_has_vmx_virtual_intr_delivery(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
}
/*
 * Comment's format: document - errata name - stepping - processor name.
 * Adapted from
 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
 */
static u32 vmx_preemption_cpu_tfms[] = {
/* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
0x000206E6,
/* 323056.pdf - AAX65  - C2 - Xeon L3406 */
/* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
/* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
0x00020652,
/* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
0x00020655,
/* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
/* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
/*
 * 320767.pdf - AAP86  - B1 -
 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
 */
0x000106E5,
/* 321333.pdf - AAM126 - C0 - Xeon 3500 */
0x000106A0,
/* 321333.pdf - AAM126 - C1 - Xeon 3500 */
0x000106A1,
/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
0x000106A4,
/* 321333.pdf - AAM126 - D0 - Xeon 3500 */
/* 321324.pdf - AAK139 - D0 - Xeon 5500 */
/* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
0x000106A5,
};

static inline bool cpu_has_broken_vmx_preemption_timer(void)
{
	u32 eax = cpuid_eax(0x00000001), i;

	/* Clear the reserved bits */
	eax &= ~(0x3U << 14 | 0xfU << 28);
	for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
		if (eax == vmx_preemption_cpu_tfms[i])
			return true;

	return false;
}

static inline bool cpu_has_vmx_preemption_timer(void)
{
	return vmcs_config.pin_based_exec_ctrl &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline bool cpu_has_vmx_posted_intr(void)
{
	return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
		vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}

static inline bool cpu_has_vmx_apicv(void)
{
	return cpu_has_vmx_apic_register_virt() &&
		cpu_has_vmx_virtual_intr_delivery() &&
		cpu_has_vmx_posted_intr();
}

static inline bool cpu_has_vmx_flexpriority(void)
{
	return cpu_has_vmx_tpr_shadow() &&
		cpu_has_vmx_virtualize_apic_accesses();
}

static inline bool cpu_has_vmx_ept_execute_only(void)
{
	return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}

static inline bool cpu_has_vmx_ept_2m_page(void)
{
	return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_1g_page(void)
{
	return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_4levels(void)
{
	return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
}

static inline bool cpu_has_vmx_ept_ad_bits(void)
{
	return vmx_capability.ept & VMX_EPT_AD_BIT;
}

static inline bool cpu_has_vmx_invept_context(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invept_global(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}

static inline bool cpu_has_vmx_invvpid_single(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invvpid_global(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_ept(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
}
static inline bool cpu_has_vmx_unrestricted_guest(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
}

static inline bool cpu_has_vmx_ple(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}

static inline bool cpu_has_vmx_basic_inout(void)
{
	return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
}

static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
{
	return flexpriority_enabled && lapic_in_kernel(vcpu);
}

static inline bool cpu_has_vmx_vpid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID;
}

static inline bool cpu_has_vmx_rdtscp(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDTSCP;
}

static inline bool cpu_has_vmx_invpcid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_INVPCID;
}

static inline bool cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool cpu_has_vmx_wbinvd_exit(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_WBINVD_EXITING;
}

static inline bool cpu_has_vmx_shadow_vmcs(void)
{
	u64 vmx_msr;
	rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
	/* check if the cpu supports writing r/o exit information fields */
	if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
		return false;

	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_SHADOW_VMCS;
}

static inline bool cpu_has_vmx_pml(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
}

static inline bool cpu_has_vmx_tsc_scaling(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_TSC_SCALING;
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES) &&
		vmx_xsaves_supported();
}

static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}

static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}

static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}
static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}

static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}

static inline bool is_exception(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
}

static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
			      u32 exit_intr_info,
			      unsigned long exit_qualification);
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
			struct vmcs12 *vmcs12,
			u32 reason, unsigned long qualification);

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
			return i;
	return -1;
}

static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (__ex(ASM_VMX_INVVPID)
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
	    : : "a"(&operand), "c"(ext) : "cc", "memory");
}

static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
			/* CF==1 or ZF==1 --> rc = -1 */
			"; ja 1f ; ud2 ; 1:\n"
			: : "a" (&operand), "c" (ext) : "cc", "memory");
}

static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
	vmcs_clear(loaded_vmcs->vmcs);
	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
		vmcs_clear(loaded_vmcs->shadow_vmcs);
	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}

static void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
			: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
			: "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
		       vmcs, phys_addr);
}
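/*
 * Illustrative note (not part of the original file): VMX instructions
 * report failure through CF (VMfailInvalid) or ZF (VMfailValid) rather
 * than a return value. That is why vmcs_clear()/vmcs_load() above capture
 * "setna" (set if CF or ZF is set) into a u8, and why __invvpid()/__invept()
 * trap with ud2 when the "ja" branch (CF==0 && ZF==0, i.e. success) is
 * not taken.
 */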
#ifdef CONFIG_KEXEC_CORE
/*
 * This bitmap is used to indicate whether the vmclear
 * operation is enabled on each cpu. All disabled by
 * default.
 */
static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;

static inline void crash_enable_local_vmclear(int cpu)
{
	cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline void crash_disable_local_vmclear(int cpu)
{
	cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline int crash_local_vmclear_enabled(int cpu)
{
	return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	if (!crash_local_vmclear_enabled(cpu))
		return;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#else
static inline void crash_enable_local_vmclear(int cpu) { }
static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC_CORE */

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	crash_disable_local_vmclear(cpu);
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * We must ensure that the update of
	 * loaded_vmcs->loaded_vmcss_on_cpu_link is visible before
	 * loaded_vmcs->cpu is set to -1 in loaded_vmcs_init. Otherwise,
	 * another cpu could see cpu == -1 first and then add the vmcs to
	 * its per-cpu list before it has been deleted here.
	 */
	smp_wmb();

	loaded_vmcs_init(loaded_vmcs);
	crash_enable_local_vmclear(cpu);
}

static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}
width field"); 1562} 1563 1564static __always_inline void vmcs_check32(unsigned long field) 1565{ 1566 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, 1567 "32-bit accessor invalid for 16-bit field"); 1568 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, 1569 "32-bit accessor invalid for natural width field"); 1570} 1571 1572static __always_inline void vmcs_check64(unsigned long field) 1573{ 1574 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, 1575 "64-bit accessor invalid for 16-bit field"); 1576 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, 1577 "64-bit accessor invalid for 64-bit high field"); 1578 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, 1579 "64-bit accessor invalid for 32-bit field"); 1580 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, 1581 "64-bit accessor invalid for natural width field"); 1582} 1583 1584static __always_inline void vmcs_checkl(unsigned long field) 1585{ 1586 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, 1587 "Natural width accessor invalid for 16-bit field"); 1588 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000, 1589 "Natural width accessor invalid for 64-bit field"); 1590 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, 1591 "Natural width accessor invalid for 64-bit high field"); 1592 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, 1593 "Natural width accessor invalid for 32-bit field"); 1594} 1595 1596static __always_inline unsigned long __vmcs_readl(unsigned long field) 1597{ 1598 unsigned long value; 1599 1600 asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0") 1601 : "=a"(value) : "d"(field) : "cc"); 1602 return value; 1603} 1604 1605static __always_inline u16 vmcs_read16(unsigned long field) 1606{ 1607 vmcs_check16(field); 1608 return __vmcs_readl(field); 1609} 1610 1611static __always_inline u32 vmcs_read32(unsigned long field) 1612{ 1613 vmcs_check32(field); 1614 return __vmcs_readl(field); 1615} 1616 1617static __always_inline u64 vmcs_read64(unsigned long field) 1618{ 1619 vmcs_check64(field); 1620#ifdef CONFIG_X86_64 1621 return __vmcs_readl(field); 1622#else 1623 return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32); 1624#endif 1625} 1626 1627static __always_inline unsigned long vmcs_readl(unsigned long field) 1628{ 1629 vmcs_checkl(field); 1630 return __vmcs_readl(field); 1631} 1632 1633static noinline void vmwrite_error(unsigned long field, unsigned long value) 1634{ 1635 printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", 1636 field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); 1637 dump_stack(); 1638} 1639 1640static __always_inline void __vmcs_writel(unsigned long field, unsigned long value) 1641{ 1642 u8 error; 1643 1644 asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0" 1645 : "=q"(error) : "a"(value), "d"(field) : "cc"); 1646 if (unlikely(error)) 1647 vmwrite_error(field, value); 1648} 1649 1650static __always_inline void vmcs_write16(unsigned long field, u16 value) 1651{ 1652 vmcs_check16(field); 1653 __vmcs_writel(field, value); 1654} 1655 1656static __always_inline void vmcs_write32(unsigned long field, u32 value) 1657{ 1658 vmcs_check32(field); 1659 __vmcs_writel(field, value); 1660} 1661 1662static __always_inline void vmcs_write64(unsigned long field, u64 value) 1663{ 1664 vmcs_check64(field); 1665 __vmcs_writel(field, 
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	return __vmcs_readl(field);
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
		       : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	asm volatile ("");
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	__vmcs_writel(field, value);
}

static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	__vmcs_writel(field, __vmcs_readl(field) | mask);
}
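/*
 * Illustrative note (not part of the original file): the *_controls_shadow
 * helpers below cache VM_ENTRY_CONTROLS/VM_EXIT_CONTROLS in the vcpu so
 * that the setbit/clearbit paths can skip the VMWRITE entirely when the
 * value is unchanged; a VMWRITE is far more expensive than a compare
 * against the cached shadow.
 */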
static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}

static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
	ulong *p = &vmx->segment_cache.seg[seg].base;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
	return *p;
}

static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].limit;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
	return *p;
}

static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].ar;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
	return *p;
}

static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
	     (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR);
	if ((vcpu->guest_debug &
	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
		eb |= 1u << BP_VECTOR;
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (enable_ept)
		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
	if (vcpu->fpu_active)
		eb &= ~(1u << NM_VECTOR);

	/* When we are running a nested L2 guest and L1 specified for it a
	 * certain exception bitmap, we must trap the same exceptions and pass
	 * them to L1. When running L2, we will only handle the exceptions
	 * specified above if L1 did not want them.
	 */
	if (is_guest_mode(vcpu))
		eb |= get_vmcs12(vcpu)->exception_bitmap;

	vmcs_write32(EXCEPTION_BITMAP, eb);
}
static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit)
{
	vm_entry_controls_clearbit(vmx, entry);
	vm_exit_controls_clearbit(vmx, exit);
}

static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
			return;
		}
		break;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == m->nr)
		return;
	--m->nr;
	m->guest[i] = m->guest[m->nr];
	m->host[i] = m->host[m->nr];
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}

static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit,
		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
		u64 guest_val, u64 host_val)
{
	vmcs_write64(guest_val_vmcs, guest_val);
	vmcs_write64(host_val_vmcs, host_val);
	vm_entry_controls_setbit(vmx, entry);
	vm_exit_controls_setbit(vmx, exit);
}

static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER,
					GUEST_IA32_EFER,
					HOST_IA32_EFER,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
					GUEST_IA32_PERF_GLOBAL_CTRL,
					HOST_IA32_PERF_GLOBAL_CTRL,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_IA32_PEBS_ENABLE:
		/* PEBS needs a quiescent period after being disabled (to write
		 * a record). Disabling PEBS through VMX MSR swapping doesn't
		 * provide that period, so a CPU could write host's record into
		 * guest's memory.
		 */
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == NR_AUTOLOAD_MSRS) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	} else if (i == m->nr) {
		++m->nr;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
	}

	m->guest[i].index = msr;
	m->guest[i].value = guest_val;
	m->host[i].index = msr;
	m->host[i].value = host_val;
}
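/*
 * Minimal usage sketch for the helpers above (values made up): EFER
 * and PERF_GLOBAL_CTRL prefer the dedicated VM-entry/VM-exit "load"
 * controls when available; every other MSR lands in the autoload
 * arrays, which the CPU walks on each VM entry and VM exit:
 *
 *	add_atomic_switch_msr(vmx, MSR_IA32_XSS, guest_xss, host_xss);
 *	...
 *	clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
 *
 * The arrays hold at most NR_AUTOLOAD_MSRS entries, hence the
 * printk_once() above when no free slot is left.
 */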
static void reload_tss(void)
{
	/*
	 * VT restores TR but not its size, so it cannot be used as-is;
	 * mark the TSS descriptor available again and reload TR.
	 */
	struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
	struct desc_struct *descs;

	descs = (void *)gdt->address;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
}

static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
	u64 guest_efer = vmx->vcpu.arch.efer;
	u64 ignore_bits = 0;

	if (!enable_ept) {
		/*
		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
		 * host CPUID is more efficient than testing guest CPUID
		 * or CR4. Host SMEP is anyway a requirement for guest SMEP.
		 */
		if (boot_cpu_has(X86_FEATURE_SMEP))
			guest_efer |= EFER_NX;
		else if (!(guest_efer & EFER_NX))
			ignore_bits |= EFER_NX;
	}

	/*
	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
	 */
	ignore_bits |= EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif

	clear_atomic_switch_msr(vmx, MSR_EFER);

	/*
	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
	 * On CPUs that support "load IA32_EFER", always switch EFER
	 * atomically, since it's faster than switching it manually.
	 */
	if (cpu_has_load_ia32_efer ||
	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		if (guest_efer != host_efer)
			add_atomic_switch_msr(vmx, MSR_EFER,
					      guest_efer, host_efer);
		return false;
	} else {
		guest_efer &= ~ignore_bits;
		guest_efer |= host_efer & ignore_bits;

		vmx->guest_msrs[efer_offset].data = guest_efer;
		vmx->guest_msrs[efer_offset].mask = ~ignore_bits;

		return true;
	}
}

static unsigned long segment_base(u16 selector)
{
	struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (!(selector & ~3))
		return 0;

	table_base = gdt->address;

	if (selector & 4) { /* from ldt */
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~3))
			return 0;

		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = get_desc_base(d);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}

static inline unsigned long kvm_read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}
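/*
 * For reference, segment_base() above decodes the standard x86
 * selector layout: bits 1:0 are the RPL, bit 2 the table indicator
 * (0 = GDT, 1 = LDT) and bits 15:3 the descriptor index, so
 * "selector & ~7" is the byte offset of the 8-byte descriptor.  A
 * (made-up) selector 0x002b thus decodes as index 5, LDT, RPL 3.
 */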
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = kvm_read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	savesegment(fs, vmx->host_state.fs_sel);
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	savesegment(gs, vmx->host_state.gs_sel);
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	savesegment(ds, vmx->host_state.ds_sel);
	savesegment(es, vmx->host_state.es_sel);
#endif

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
	if (is_long_mode(&vmx->vcpu))
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (boot_cpu_has(X86_FEATURE_MPX))
		rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
	for (i = 0; i < vmx->save_nmsrs; ++i)
		kvm_set_shared_msr(vmx->guest_msrs[i].index,
				   vmx->guest_msrs[i].data,
				   vmx->guest_msrs[i].mask);
}

static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(vmx->host_state.gs_sel);
#else
		loadsegment(gs, vmx->host_state.gs_sel);
#endif
	}
	if (vmx->host_state.fs_reload_needed)
		loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
	if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
		loadsegment(ds, vmx->host_state.ds_sel);
		loadsegment(es, vmx->host_state.es_sel);
	}
#endif
	reload_tss();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
	if (vmx->host_state.msr_host_bndcfgs)
		wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
	/*
	 * If the FPU is not active (through the host task or
	 * the guest vcpu), then restore the cr0.TS bit.
	 */
	if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded)
		stts();
	load_gdt(this_cpu_ptr(&host_gdt));
}
static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	preempt_disable();
	__vmx_load_host_state(vmx);
	preempt_enable();
}

static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
	struct pi_desc old, new;
	unsigned int dest;

	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
	    !kvm_vcpu_apicv_active(vcpu))
		return;

	do {
		old.control = new.control = pi_desc->control;

		/*
		 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
		 * are two possible cases:
		 * 1. After running 'pre_block', a context switch
		 *    happened. In this case 'sn' was set in
		 *    vmx_vcpu_put(), so we need to clear it here.
		 * 2. After running 'pre_block', we were blocked and
		 *    woken up by another task; 'pi_post_block' will do
		 *    everything needed in that case. We cannot easily
		 *    distinguish the two cases here, so we clear 'sn'
		 *    in both of them; for case 2 the extra clear is
		 *    harmless.
		 */
		if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
			if (vcpu->cpu != cpu) {
				dest = cpu_physical_id(cpu);

				if (x2apic_enabled())
					new.ndst = dest;
				else
					new.ndst = (dest << 8) & 0xFF00;
			}

			/* set 'NV' to 'notification vector' */
			new.nv = POSTED_INTR_VECTOR;
		}

		/* Allow posting non-urgent interrupts */
		new.sn = 0;
	} while (cmpxchg(&pi_desc->control, old.control,
			new.control) != old.control);
}

static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}
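/*
 * The cmpxchg() loop in vmx_vcpu_pi_load() publishes the update
 * atomically: 'control' overlays the descriptor words that hold 'sn',
 * 'nv' and 'ndst', so a concurrent posted interrupt observes either
 * the complete old state or the complete new one.  Note the
 * destination format differs by APIC mode: x2APIC uses the full
 * 32-bit APIC ID, while xAPIC keeps it in bits 15:8 (hence the
 * "(dest << 8) & 0xFF00" above).
 */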
/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;

	if (!vmm_exclusive)
		kvm_cpu_vmxon(phys_addr);
	else if (!already_loaded)
		loaded_vmcs_clear(vmx->loaded_vmcs);

	if (!already_loaded) {
		local_irq_disable();
		crash_disable_local_vmclear(cpu);

		/*
		 * The read of loaded_vmcs->cpu must happen before
		 * fetching loaded_vmcs->loaded_vmcss_on_cpu_link.
		 * See the comments in __loaded_vmcs_clear().
		 */
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		crash_enable_local_vmclear(cpu);
		local_irq_enable();
	}

	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);
	}

	if (!already_loaded) {
		struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
		unsigned long sysenter_esp;

		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
		vmcs_writel(HOST_GDTR_BASE, gdt->address);     /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		vmx->loaded_vmcs->cpu = cpu;
	}

	/* Setup TSC multiplier */
	if (kvm_has_tsc_control &&
	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
		decache_tsc_multiplier(vmx);

	vmx_vcpu_pi_load(vcpu, cpu);
	vmx->host_pkru = read_pkru();
}

static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
	    !kvm_vcpu_apicv_active(vcpu))
		return;

	/* Set SN when the vCPU is preempted */
	if (vcpu->preempted)
		pi_set_sn(pi_desc);
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_vcpu_pi_put(vcpu);

	__vmx_load_host_state(to_vmx(vcpu));
	if (!vmm_exclusive) {
		__loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
		vcpu->cpu = -1;
		kvm_cpu_vmxoff();
	}
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	ulong cr0;

	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	cr0 = vmcs_readl(GUEST_CR0);
	cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
	cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
	vmcs_writel(GUEST_CR0, cr0);
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
	if (is_guest_mode(vcpu))
		vcpu->arch.cr0_guest_owned_bits &=
			~get_vmcs12(vcpu)->cr0_guest_host_mask;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
}

static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);

/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
 * its hypervisor (cr0_read_shadow).
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}
static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	/* Note that there is no vcpu->fpu_active = 0 here. The caller must
	 * set this *before* calling this function.
	 */
	vmx_decache_cr0_guest_bits(vcpu);
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = 0;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
	if (is_guest_mode(vcpu)) {
		/*
		 * L1's specified read shadow might not contain the TS bit,
		 * so now that we turned on shadowing of this bit, we need to
		 * set this bit of the shadow. Like in nested_vmx_run we need
		 * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
		 * up-to-date here because we just decached cr0.TS (and we'll
		 * only update vmcs12->guest_cr0 on nested exit).
		 */
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
		vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
			(vcpu->arch.cr0 & X86_CR0_TS);
		vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
	} else
		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
}
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags, save_rflags;

	if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
		rflags = vmcs_readl(GUEST_RFLAGS);
		if (to_vmx(vcpu)->rmode.vm86_active) {
			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
			save_rflags = to_vmx(vcpu)->rmode.save_rflags;
			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
		}
		to_vmx(vcpu)->rflags = rflags;
	}
	return to_vmx(vcpu)->rflags;
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
	to_vmx(vcpu)->rflags = rflags;
	if (to_vmx(vcpu)->rmode.vm86_active) {
		to_vmx(vcpu)->rmode.save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	vmcs_writel(GUEST_RFLAGS, rflags);
}

static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->guest_pkru;
}

static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	int ret = 0;

	if (interruptibility & GUEST_INTR_STATE_STI)
		ret |= KVM_X86_SHADOW_INT_STI;
	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
		ret |= KVM_X86_SHADOW_INT_MOV_SS;

	return ret;
}

static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	u32 interruptibility = interruptibility_old;

	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);

	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
		interruptibility |= GUEST_INTR_STATE_MOV_SS;
	else if (mask & KVM_X86_SHADOW_INT_STI)
		interruptibility |= GUEST_INTR_STATE_STI;

	if (interruptibility != interruptibility_old)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;

	rip = kvm_rip_read(vcpu);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	kvm_rip_write(vcpu, rip);

	/* skipping an emulated instruction also counts */
	vmx_set_interrupt_shadow(vcpu, 0);
}
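/*
 * Sketch of the interrupt-shadow contract (illustrative call, not
 * taken from this file): after emulating e.g. "mov ss, ax" the guest
 * must not take interrupts on the following instruction, which an
 * emulator would express as
 *
 *	vmx_set_interrupt_shadow(vcpu, KVM_X86_SHADOW_INT_MOV_SS);
 *
 * Conversely, skip_emulated_instruction() clears the shadow because
 * STI/MOV-SS blocking architecturally covers only the one instruction
 * that has now been skipped.
 */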
/*
 * KVM wants to inject into the guest page faults that it itself
 * received. This function checks whether, in a nested guest, such an
 * exception needs to be injected into L1 or can be delivered to L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (!(vmcs12->exception_bitmap & (1u << nr)))
		return 0;

	nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
			  vmcs_read32(VM_EXIT_INTR_INFO),
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}

static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (!reinject && is_guest_mode(vcpu) &&
	    nested_vmx_check_exception(vcpu, nr))
		return;

	if (has_error_code) {
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (kvm_exception_is_soft(nr))
			inc_eip = vcpu->arch.event_exit_inst_len;
		if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}

	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	} else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
}

static bool vmx_rdtscp_supported(void)
{
	return cpu_has_vmx_rdtscp();
}

static bool vmx_invpcid_supported(void)
{
	return cpu_has_vmx_invpcid() && enable_ept;
}

/*
 * Swap MSR entry in host/guest MSR entry array.
 */
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct shared_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
}

static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
{
	unsigned long *msr_bitmap;

	if (is_guest_mode(vcpu))
		msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
	else if (cpu_has_secondary_exec_ctrls() &&
		 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
		  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
		if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) {
			if (is_long_mode(vcpu))
				msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
			else
				msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
		} else {
			if (is_long_mode(vcpu))
				msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
			else
				msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
		}
	} else {
		if (is_long_mode(vcpu))
			msr_bitmap = vmx_msr_bitmap_longmode;
		else
			msr_bitmap = vmx_msr_bitmap_legacy;
	}

	vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
}
/*
 * Set up the vmcs to automatically save and restore system
 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs, index;

	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_TSC_AUX);
		if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	index = __find_msr_index(vmx, MSR_EFER);
	if (index >= 0 && update_transition_efer(vmx, index))
		move_msr_up(vmx, index, save_nmsrs++);

	vmx->save_nmsrs = save_nmsrs;

	if (cpu_has_vmx_msr_bitmap())
		vmx_set_msr_bitmap(&vmx->vcpu);
}

/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
 * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
 */
static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
{
	u64 host_tsc, tsc_offset;

	host_tsc = rdtsc();
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
}

/*
 * writes 'offset' into guest's timestamp counter offset register
 */
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	if (is_guest_mode(vcpu)) {
		/*
		 * We're here if L1 chose not to trap WRMSR to TSC. According
		 * to the spec, this should set L1's TSC; The offset that L1
		 * set for L2 remains unchanged, and still needs to be added
		 * to the newly set TSC to get L2's TSC.
		 */
		struct vmcs12 *vmcs12;
		/* recalculate vmcs02.TSC_OFFSET: */
		vmcs12 = get_vmcs12(vcpu);
		vmcs_write64(TSC_OFFSET, offset +
			(nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
			 vmcs12->tsc_offset : 0));
	} else {
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   vmcs_read64(TSC_OFFSET), offset);
		vmcs_write64(TSC_OFFSET, offset);
	}
}

static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
}

/*
 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
 * all guests if the "nested" module option is off, and can also be disabled
 * for a single guest by disabling its VMX cpuid bit.
 */
static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
{
	return nested && guest_cpuid_has_vmx(vcpu);
}
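/*
 * Worked example for the scaling formula in guest_read_tsc() above
 * (numbers made up): the multiplier is a fixed-point fraction with 48
 * fractional bits, so a guest clocked at half the host TSC frequency
 * would use
 *
 *	ratio  = 1ULL << 47;			i.e. 0.5
 *	guest  = (host_tsc * ratio) >> 48;	host_tsc / 2
 *	guest += tsc_offset;
 *
 * kvm_scale_tsc() hides the 128-bit intermediate product.
 */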
/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields are
 * valid during nested entry from L1 to L2.
 * Each of these control msrs has a low and high 32-bit half: A low bit is on
 * if the corresponding bit in the (32-bit) control field *must* be on, and a
 * bit in the high half is on if the corresponding bit in the control field
 * may be on. See also vmx_control_verify().
 */
static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
{
	/*
	 * Note that as a general rule, the high half of the MSRs (bits in
	 * the control fields which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of the MSRs (bits which must be 1) can
	 * be set to 0, meaning that L1 may turn off any of these bits. The
	 * reason is that if one of these bits is necessary, it will appear
	 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
	 * fields of vmcs01 and vmcs02, will turn these bits off - and
	 * nested_vmx_exit_handled() will not pass related exits to L1.
	 * These rules have exceptions below.
	 */

	/* pin-based controls */
	rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
	      vmx->nested.nested_vmx_pinbased_ctls_low,
	      vmx->nested.nested_vmx_pinbased_ctls_high);
	vmx->nested.nested_vmx_pinbased_ctls_low |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	vmx->nested.nested_vmx_pinbased_ctls_high &=
		PIN_BASED_EXT_INTR_MASK |
		PIN_BASED_NMI_EXITING |
		PIN_BASED_VIRTUAL_NMIS;
	vmx->nested.nested_vmx_pinbased_ctls_high |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		PIN_BASED_VMX_PREEMPTION_TIMER;
	if (kvm_vcpu_apicv_active(&vmx->vcpu))
		vmx->nested.nested_vmx_pinbased_ctls_high |=
			PIN_BASED_POSTED_INTR;

	/* exit controls */
	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
	      vmx->nested.nested_vmx_exit_ctls_low,
	      vmx->nested.nested_vmx_exit_ctls_high);
	vmx->nested.nested_vmx_exit_ctls_low =
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;

	vmx->nested.nested_vmx_exit_ctls_high &=
#ifdef CONFIG_X86_64
		VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
	vmx->nested.nested_vmx_exit_ctls_high |=
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

	if (kvm_mpx_supported())
		vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;

	/* We support free control of debug control saving. */
	vmx->nested.nested_vmx_true_exit_ctls_low =
		vmx->nested.nested_vmx_exit_ctls_low &
		~VM_EXIT_SAVE_DEBUG_CONTROLS;

	/* entry controls */
	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
	      vmx->nested.nested_vmx_entry_ctls_low,
	      vmx->nested.nested_vmx_entry_ctls_high);
	vmx->nested.nested_vmx_entry_ctls_low =
		VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
	vmx->nested.nested_vmx_entry_ctls_high &=
#ifdef CONFIG_X86_64
		VM_ENTRY_IA32E_MODE |
#endif
		VM_ENTRY_LOAD_IA32_PAT;
	vmx->nested.nested_vmx_entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
	if (kvm_mpx_supported())
		vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;

	/* We support free control of debug control loading. */
	vmx->nested.nested_vmx_true_entry_ctls_low =
		vmx->nested.nested_vmx_entry_ctls_low &
		~VM_ENTRY_LOAD_DEBUG_CONTROLS;

	/* cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
	      vmx->nested.nested_vmx_procbased_ctls_low,
	      vmx->nested.nested_vmx_procbased_ctls_high);
	vmx->nested.nested_vmx_procbased_ctls_low =
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	vmx->nested.nested_vmx_procbased_ctls_high &=
		CPU_BASED_VIRTUAL_INTR_PENDING |
		CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	vmx->nested.nested_vmx_procbased_ctls_high |=
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		CPU_BASED_USE_MSR_BITMAPS;

	/* We support free control of CR3 access interception. */
	vmx->nested.nested_vmx_true_procbased_ctls_low =
		vmx->nested.nested_vmx_procbased_ctls_low &
		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);

	/* secondary cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
	      vmx->nested.nested_vmx_secondary_ctls_low,
	      vmx->nested.nested_vmx_secondary_ctls_high);
	vmx->nested.nested_vmx_secondary_ctls_low = 0;
	vmx->nested.nested_vmx_secondary_ctls_high &=
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
		SECONDARY_EXEC_RDTSCP |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_ENABLE_VPID |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_WBINVD_EXITING |
		SECONDARY_EXEC_XSAVES;

	if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
		vmx->nested.nested_vmx_secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_EPT;
		vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
			VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
			VMX_EPT_INVEPT_BIT;
		if (cpu_has_vmx_ept_execute_only())
			vmx->nested.nested_vmx_ept_caps |=
				VMX_EPT_EXECUTE_ONLY_BIT;
		vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
		vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
			VMX_EPT_EXTENT_CONTEXT_BIT;
	} else
		vmx->nested.nested_vmx_ept_caps = 0;

	/*
	 * Old versions of KVM use the single-context version without
	 * checking for support, so declare that it is supported even
	 * though it is treated as global context. The alternative is
	 * not failing the single-context invvpid, and it is worse.
	 */
	if (enable_vpid)
		vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
			VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
			VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
	else
		vmx->nested.nested_vmx_vpid_caps = 0;

	if (enable_unrestricted_guest)
		vmx->nested.nested_vmx_secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	/* miscellaneous data */
	rdmsr(MSR_IA32_VMX_MISC,
	      vmx->nested.nested_vmx_misc_low,
	      vmx->nested.nested_vmx_misc_high);
	vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
	vmx->nested.nested_vmx_misc_low |=
		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
		VMX_MISC_ACTIVITY_HLT;
	vmx->nested.nested_vmx_misc_high = 0;
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	/*
	 * Bits that are 0 in 'high' must be 0 in 'control', and bits
	 * that are 1 in 'low' must be 1 in 'control'.
	 */
	return ((control & high) | low) == control;
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}
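/*
 * Worked example for vmx_control_verify() (values made up): with
 * low = 0x00000016 and high = 0xfff7ffff it accepts
 * control = 0x0401001e (all 'low' bits set, nothing outside 'high')
 * but rejects control = 0x04010018 (bits 1 and 2, required by 'low',
 * are clear) and control = 0x04090016 (bit 19, cleared in 'high', is
 * set).
 */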
/* Returns 0 on success, non-0 otherwise. */
static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		/*
		 * This MSR reports some information about VMX support. We
		 * should return information about the VMX we emulate for the
		 * guest, and the VMCS structure we give it - not about the
		 * VMX support of the underlying hardware.
		 */
		*pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS |
			((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
			(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
		if (cpu_has_vmx_basic_inout())
			*pdata |= VMX_BASIC_INOUT;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_pinbased_ctls_low,
			vmx->nested.nested_vmx_pinbased_ctls_high);
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_true_procbased_ctls_low,
			vmx->nested.nested_vmx_procbased_ctls_high);
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_procbased_ctls_low,
			vmx->nested.nested_vmx_procbased_ctls_high);
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_true_exit_ctls_low,
			vmx->nested.nested_vmx_exit_ctls_high);
		break;
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_exit_ctls_low,
			vmx->nested.nested_vmx_exit_ctls_high);
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_true_entry_ctls_low,
			vmx->nested.nested_vmx_entry_ctls_high);
		break;
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_entry_ctls_low,
			vmx->nested.nested_vmx_entry_ctls_high);
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_misc_low,
			vmx->nested.nested_vmx_misc_high);
		break;
	/*
	 * These MSRs specify bits which the guest must keep fixed (on or off)
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON	(X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON	X86_CR4_VMXE
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = VMXON_CR0_ALWAYSON;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = -1ULL;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = VMXON_CR4_ALWAYSON;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = -1ULL;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_secondary_ctls_low,
			vmx->nested.nested_vmx_secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = vmx->nested.nested_vmx_ept_caps |
			((u64)vmx->nested.nested_vmx_vpid_caps << 32);
		break;
	default:
		return 1;
	}

	return 0;
}

static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
						 uint64_t val)
{
	uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;

	return !(val & ~valid_bits);
}

/*
 * Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct shared_msr_entry *msr;

	switch (msr_info->index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		msr_info->data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		msr_info->data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(to_vmx(vcpu));
		msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
		break;
#endif
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_info);
	case MSR_IA32_TSC:
		msr_info->data = guest_read_tsc(vcpu);
		break;
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	case MSR_IA32_BNDCFGS:
		if (!kvm_mpx_supported())
			return 1;
		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
		break;
	case MSR_IA32_MCG_EXT_CTL:
		if (!msr_info->host_initiated &&
		    !(to_vmx(vcpu)->msr_ia32_feature_control &
		      FEATURE_CONTROL_LMCE))
			return 1;
		msr_info->data = vcpu->arch.mcg_ext_ctl;
		break;
	case MSR_IA32_FEATURE_CONTROL:
		msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!nested_vmx_allowed(vcpu))
			return 1;
		return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
	case MSR_IA32_XSS:
		if (!vmx_xsaves_supported())
			return 1;
		msr_info->data = vcpu->arch.ia32_xss;
		break;
	case MSR_TSC_AUX:
		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
			return 1;
		/* Otherwise falls through */
	default:
		msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
		if (msr) {
			msr_info->data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_info);
	}

	return 0;
}

static void vmx_leave_nested(struct kvm_vcpu *vcpu);
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr;
	int ret = 0;
	u32 msr_index = msr_info->index;
	u64 data = msr_info->data;

	switch (msr_index) {
	case MSR_EFER:
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_GS_BASE, data);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(vmx);
		vmx->msr_guest_kernel_gs_base = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_BNDCFGS:
		if (!kvm_mpx_supported())
			return 1;
		vmcs_write64(GUEST_BNDCFGS, data);
		break;
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, msr_info);
		break;
	case MSR_IA32_CR_PAT:
		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
				return 1;
			vmcs_write64(GUEST_IA32_PAT, data);
			vcpu->arch.pat = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
	case MSR_IA32_TSC_ADJUST:
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
	case MSR_IA32_MCG_EXT_CTL:
		if ((!msr_info->host_initiated &&
		     !(to_vmx(vcpu)->msr_ia32_feature_control &
		       FEATURE_CONTROL_LMCE)) ||
		    (data & ~MCG_EXT_CTL_LMCE_EN))
			return 1;
		vcpu->arch.mcg_ext_ctl = data;
		break;
	case MSR_IA32_FEATURE_CONTROL:
		if (!vmx_feature_control_msr_valid(vcpu, data) ||
		    (to_vmx(vcpu)->msr_ia32_feature_control &
		     FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
			return 1;
		vmx->msr_ia32_feature_control = data;
		if (msr_info->host_initiated && data == 0)
			vmx_leave_nested(vcpu);
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		return 1; /* they are read-only */
	case MSR_IA32_XSS:
		if (!vmx_xsaves_supported())
			return 1;
		/*
		 * As of Skylake the only supported bit is bit 8, and
		 * KVM does not support it.
		 */
		if (data != 0)
			return 1;
		vcpu->arch.ia32_xss = data;
		if (vcpu->arch.ia32_xss != host_xss)
			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
				vcpu->arch.ia32_xss, host_xss);
		else
			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
		break;
	case MSR_TSC_AUX:
		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
			return 1;
		/* Check reserved bit, higher 32 bits should be zero */
		if ((data >> 32) != 0)
			return 1;
		/* Otherwise falls through */
	default:
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			u64 old_msr_data = msr->data;
			msr->data = data;
			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
				preempt_disable();
				ret = kvm_set_shared_msr(msr->index, msr->data,
							 msr->mask);
				preempt_enable();
				if (ret)
					msr->data = old_msr_data;
			}
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_info);
	}

	return ret;
}

static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	case VCPU_EXREG_PDPTR:
		if (enable_ept)
			ept_save_pdptrs(vcpu);
		break;
	default:
		break;
	}
}

static __init int cpu_has_kvm_support(void)
{
	return cpu_has_vmx();
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	if (msr & FEATURE_CONTROL_LOCKED) {
		/* launched w/ TXT and VMX disabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
			&& tboot_enabled())
			return 1;
		/* launched w/o TXT and VMX only enabled w/ TXT */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
			&& (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
			&& !tboot_enabled()) {
			printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
				"activate TXT before enabling KVM\n");
			return 1;
		}
		/* launched w/o TXT and VMX disabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
			&& !tboot_enabled())
			return 1;
	}

	return 0;
}

static void kvm_cpu_vmxon(u64 addr)
{
	intel_pt_handle_vmx(1);

	asm volatile (ASM_VMX_VMXON_RAX
			: : "a"(&addr), "m"(addr)
			: "memory", "cc");
}
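/*
 * Background note for the sequencing below (a summary of the SDM
 * rules, not new behaviour): VMXON requires CR4.VMXE = 1 and an
 * IA32_FEATURE_CONTROL MSR that is locked with the matching VMX
 * enable bit set ("outside SMX", or "inside SMX" under tboot).
 * hardware_enable() therefore programs the MSR first, then sets
 * CR4.VMXE, and only then, in vmm_exclusive mode, executes VMXON.
 */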
static int hardware_enable(void)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old, test_bits;

	if (cr4_read_shadow() & X86_CR4_VMXE)
		return -EBUSY;

	INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
	INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
	spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));

	/*
	 * Now we can enable the vmclear operation in kdump
	 * since the loaded_vmcss_on_cpu list on this cpu
	 * has been initialized.
	 *
	 * Though the cpu is not in VMX operation yet, there is
	 * no problem in enabling the vmclear operation, because
	 * the loaded_vmcss_on_cpu list is still empty.
	 */
	crash_enable_local_vmclear(cpu);

	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);

	test_bits = FEATURE_CONTROL_LOCKED;
	test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
	if (tboot_enabled())
		test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;

	if ((old & test_bits) != test_bits) {
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
	}
	cr4_set_bits(X86_CR4_VMXE);

	if (vmm_exclusive) {
		kvm_cpu_vmxon(phys_addr);
		ept_sync_global();
	}

	native_store_gdt(this_cpu_ptr(&host_gdt));

	return 0;
}

static void vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v, *n;

	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
				 loaded_vmcss_on_cpu_link)
		__loaded_vmcs_clear(v);
}

/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
 * tricks.
 */
static void kvm_cpu_vmxoff(void)
{
	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");

	intel_pt_handle_vmx(0);
}

static void hardware_disable(void)
{
	if (vmm_exclusive) {
		vmclear_local_loaded_vmcss();
		kvm_cpu_vmxoff();
	}
	cr4_clear_bits(X86_CR4_VMXE);
}

static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}

static __init bool allow_1_setting(u32 msr, u32 ctl)
{
	u32 vmx_msr_low, vmx_msr_high;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);
	return vmx_msr_high & ctl;
}
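/*
 * Worked example for adjust_vmx_controls() above (MSR values made
 * up): if rdmsr() returns low = 0x00000016 and high = 0xfffbfffe,
 * then
 *
 *	ctl = min | opt;	desired bits
 *	ctl &= 0xfffbfffe;	drop bits that may not be 1
 *	ctl |= 0x00000016;	force bits that must be 1
 *
 * and -EIO is returned only if one of the required 'min' bits was
 * dropped by the high word, i.e. the CPU cannot provide it.
 */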
static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt, min2, opt2;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_CR3_LOAD_EXITING |
	      CPU_BASED_CR3_STORE_EXITING |
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING |
	      CPU_BASED_MWAIT_EXITING |
	      CPU_BASED_MONITOR_EXITING |
	      CPU_BASED_INVLPG_EXITING |
	      CPU_BASED_RDPMC_EXITING;

	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_USE_MSR_BITMAPS |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min2 = 0;
		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
			SECONDARY_EXEC_WBINVD_EXITING |
			SECONDARY_EXEC_ENABLE_VPID |
			SECONDARY_EXEC_ENABLE_EPT |
			SECONDARY_EXEC_UNRESTRICTED_GUEST |
			SECONDARY_EXEC_PAUSE_LOOP_EXITING |
			SECONDARY_EXEC_RDTSCP |
			SECONDARY_EXEC_ENABLE_INVPCID |
			SECONDARY_EXEC_APIC_REGISTER_VIRT |
			SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
			SECONDARY_EXEC_SHADOW_VMCS |
			SECONDARY_EXEC_XSAVES |
			SECONDARY_EXEC_ENABLE_PML |
			SECONDARY_EXEC_TSC_SCALING;
		if (adjust_vmx_controls(min2, opt2,
					MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
	      SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif

	if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_2nd_exec_control &= ~(
				SECONDARY_EXEC_APIC_REGISTER_VIRT |
				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
				SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);

	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
		/* CR3 accesses and invlpg don't need to cause VM Exits when EPT
		   enabled */
		_cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
					     CPU_BASED_CR3_STORE_EXITING |
					     CPU_BASED_INVLPG_EXITING);
		rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
		      vmx_capability.ept, vmx_capability.vpid);
	}

	min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
	      VM_EXIT_CLEAR_BNDCFGS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
	      PIN_BASED_VMX_PREEMPTION_TIMER;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	if (cpu_has_broken_vmx_preemption_timer())
		_pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
	if (!(_cpu_based_2nd_exec_control &
	      SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;

	min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
	opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_conf->size);
	vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl = _vmexit_control;
	vmcs_conf->vmentry_ctrl = _vmentry_control;

	cpu_has_load_ia32_efer =
		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
				VM_ENTRY_LOAD_IA32_EFER)
		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
				   VM_EXIT_LOAD_IA32_EFER);

	cpu_has_load_perf_global_ctrl =
		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
				VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
				   VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);

	/*
	 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
	 * but due to errata below it can't be used. Workaround is to use
	 * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
	 *
	 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
	 *
	 * AAK155           (model 26)
	 * AAP115           (model 30)
	 * AAT100           (model 37)
	 * BC86,AAY89,BD102 (model 44)
	 * BA97             (model 46)
	 *
	 */
	if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
		switch (boot_cpu_data.x86_model) {
		case 26:
		case 30:
		case 37:
		case 44:
		case 46:
			cpu_has_load_perf_global_ctrl = false;
			printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
					"does not work properly. Using workaround\n");
			break;
		default:
			break;
		}
	}

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		rdmsrl(MSR_IA32_XSS, host_xss);

	return 0;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

/*
 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
 */
static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
	if (!loaded_vmcs->vmcs)
		return;
	loaded_vmcs_clear(loaded_vmcs);
	free_vmcs(loaded_vmcs->vmcs);
	loaded_vmcs->vmcs = NULL;
	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		free_vmcs(per_cpu(vmxarea, cpu));
		per_cpu(vmxarea, cpu) = NULL;
	}
}
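/*
 * Background to the allocator above (per the SDM, not new code):
 * software must store the revision identifier from
 * IA32_VMX_BASIC[30:0] into the first word of a VMCS region before
 * passing it to VMPTRLD (or to VMXON, for the per-cpu vmxarea
 * regions), which is why alloc_vmcs_cpu() zeroes the page and then
 * stamps vmcs->revision_id.
 */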
static void init_vmcs_shadow_fields(void)
{
	int i, j;

	/* No checks for read only fields yet */

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		switch (shadow_read_write_fields[i]) {
		case GUEST_BNDCFGS:
			if (!kvm_mpx_supported())
				continue;
			break;
		default:
			break;
		}

		if (j < i)
			shadow_read_write_fields[j] =
				shadow_read_write_fields[i];
		j++;
	}
	max_shadow_read_write_fields = j;

	/* Shadowed fields that the guest accesses without a vmexit */
	for (i = 0; i < max_shadow_read_write_fields; i++) {
		clear_bit(shadow_read_write_fields[i],
			  vmx_vmwrite_bitmap);
		clear_bit(shadow_read_write_fields[i],
			  vmx_vmread_bitmap);
	}
	for (i = 0; i < max_shadow_read_only_fields; i++)
		clear_bit(shadow_read_only_fields[i],
			  vmx_vmread_bitmap);
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static bool emulation_required(struct kvm_vcpu *vcpu)
{
	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
}

static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
		struct kvm_segment *save)
{
	if (!emulate_invalid_guest_state) {
		/*
		 * CS and SS RPL should be equal during guest entry according
		 * to VMX spec, but in reality it is not always so. Since vcpu
		 * is in the middle of the transition from real mode to
		 * protected mode it is safe to assume that RPL 0 is a good
		 * default value.
		 */
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
			save->selector &= ~SEGMENT_RPL_MASK;
		save->dpl = save->selector & SEGMENT_RPL_MASK;
		save->s = 1;
	}
	vmx_set_segment(vcpu, save, seg);
}
3636 */ 3637 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 3638 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 3639 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 3640 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 3641 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 3642 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 3643 3644 vmx->rmode.vm86_active = 0; 3645 3646 vmx_segment_cache_clear(vmx); 3647 3648 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 3649 3650 flags = vmcs_readl(GUEST_RFLAGS); 3651 flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; 3652 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; 3653 vmcs_writel(GUEST_RFLAGS, flags); 3654 3655 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | 3656 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); 3657 3658 update_exception_bitmap(vcpu); 3659 3660 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 3661 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 3662 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 3663 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 3664 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 3665 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 3666} 3667 3668static void fix_rmode_seg(int seg, struct kvm_segment *save) 3669{ 3670 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 3671 struct kvm_segment var = *save; 3672 3673 var.dpl = 0x3; 3674 if (seg == VCPU_SREG_CS) 3675 var.type = 0x3; 3676 3677 if (!emulate_invalid_guest_state) { 3678 var.selector = var.base >> 4; 3679 var.base = var.base & 0xffff0; 3680 var.limit = 0xffff; 3681 var.g = 0; 3682 var.db = 0; 3683 var.present = 1; 3684 var.s = 1; 3685 var.l = 0; 3686 var.unusable = 0; 3687 var.type = 0x3; 3688 var.avl = 0; 3689 if (save->base & 0xf) 3690 printk_once(KERN_WARNING "kvm: segment base is not " 3691 "paragraph aligned when entering " 3692 "protected mode (seg=%d)", seg); 3693 } 3694 3695 vmcs_write16(sf->selector, var.selector); 3696 vmcs_write32(sf->base, var.base); 3697 vmcs_write32(sf->limit, var.limit); 3698 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); 3699} 3700 3701static void enter_rmode(struct kvm_vcpu *vcpu) 3702{ 3703 unsigned long flags; 3704 struct vcpu_vmx *vmx = to_vmx(vcpu); 3705 3706 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 3707 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 3708 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 3709 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 3710 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 3711 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 3712 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 3713 3714 vmx->rmode.vm86_active = 1; 3715 3716 /* 3717 * Very old userspace does not call KVM_SET_TSS_ADDR before entering 3718 * vcpu. Warn the user that an update is overdue. 
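 * (For illustration: without unrestricted-guest support, real mode is emulated through vm86 and needs a 3-page TSS region inside guest memory, see vmx_set_tss_addr(); userspace typically reserves it just below 4G, e.g. qemu issues ioctl(vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000).)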
3719 */ 3720 if (!vcpu->kvm->arch.tss_addr) 3721 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be " 3722 "called before entering vcpu\n"); 3723 3724 vmx_segment_cache_clear(vmx); 3725 3726 vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr); 3727 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); 3728 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 3729 3730 flags = vmcs_readl(GUEST_RFLAGS); 3731 vmx->rmode.save_rflags = flags; 3732 3733 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 3734 3735 vmcs_writel(GUEST_RFLAGS, flags); 3736 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); 3737 update_exception_bitmap(vcpu); 3738 3739 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 3740 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 3741 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 3742 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 3743 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 3744 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 3745 3746 kvm_mmu_reset_context(vcpu); 3747} 3748 3749static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) 3750{ 3751 struct vcpu_vmx *vmx = to_vmx(vcpu); 3752 struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); 3753 3754 if (!msr) 3755 return; 3756 3757 /* 3758 * Force kernel_gs_base reloading before EFER changes, as control 3759 * of this msr depends on is_long_mode(). 3760 */ 3761 vmx_load_host_state(to_vmx(vcpu)); 3762 vcpu->arch.efer = efer; 3763 if (efer & EFER_LMA) { 3764 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 3765 msr->data = efer; 3766 } else { 3767 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 3768 3769 msr->data = efer & ~EFER_LME; 3770 } 3771 setup_msrs(vmx); 3772} 3773 3774#ifdef CONFIG_X86_64 3775 3776static void enter_lmode(struct kvm_vcpu *vcpu) 3777{ 3778 u32 guest_tr_ar; 3779 3780 vmx_segment_cache_clear(to_vmx(vcpu)); 3781 3782 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); 3783 if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { 3784 pr_debug_ratelimited("%s: tss fixup for long mode. 
\n", 3785 __func__); 3786 vmcs_write32(GUEST_TR_AR_BYTES, 3787 (guest_tr_ar & ~VMX_AR_TYPE_MASK) 3788 | VMX_AR_TYPE_BUSY_64_TSS); 3789 } 3790 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); 3791} 3792 3793static void exit_lmode(struct kvm_vcpu *vcpu) 3794{ 3795 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 3796 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); 3797} 3798 3799#endif 3800 3801static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid) 3802{ 3803 vpid_sync_context(vpid); 3804 if (enable_ept) { 3805 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 3806 return; 3807 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); 3808 } 3809} 3810 3811static void vmx_flush_tlb(struct kvm_vcpu *vcpu) 3812{ 3813 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); 3814} 3815 3816static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) 3817{ 3818 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; 3819 3820 vcpu->arch.cr0 &= ~cr0_guest_owned_bits; 3821 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; 3822} 3823 3824static void vmx_decache_cr3(struct kvm_vcpu *vcpu) 3825{ 3826 if (enable_ept && is_paging(vcpu)) 3827 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 3828 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); 3829} 3830 3831static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) 3832{ 3833 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; 3834 3835 vcpu->arch.cr4 &= ~cr4_guest_owned_bits; 3836 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; 3837} 3838 3839static void ept_load_pdptrs(struct kvm_vcpu *vcpu) 3840{ 3841 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 3842 3843 if (!test_bit(VCPU_EXREG_PDPTR, 3844 (unsigned long *)&vcpu->arch.regs_dirty)) 3845 return; 3846 3847 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 3848 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); 3849 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); 3850 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); 3851 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); 3852 } 3853} 3854 3855static void ept_save_pdptrs(struct kvm_vcpu *vcpu) 3856{ 3857 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 3858 3859 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 3860 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); 3861 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 3862 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); 3863 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); 3864 } 3865 3866 __set_bit(VCPU_EXREG_PDPTR, 3867 (unsigned long *)&vcpu->arch.regs_avail); 3868 __set_bit(VCPU_EXREG_PDPTR, 3869 (unsigned long *)&vcpu->arch.regs_dirty); 3870} 3871 3872static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); 3873 3874static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, 3875 unsigned long cr0, 3876 struct kvm_vcpu *vcpu) 3877{ 3878 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) 3879 vmx_decache_cr3(vcpu); 3880 if (!(cr0 & X86_CR0_PG)) { 3881 /* From paging/starting to nonpaging */ 3882 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 3883 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | 3884 (CPU_BASED_CR3_LOAD_EXITING | 3885 CPU_BASED_CR3_STORE_EXITING)); 3886 vcpu->arch.cr0 = cr0; 3887 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); 3888 } else if (!is_paging(vcpu)) { 3889 /* From nonpaging to paging */ 3890 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 3891 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & 3892 ~(CPU_BASED_CR3_LOAD_EXITING | 3893 CPU_BASED_CR3_STORE_EXITING)); 3894 vcpu->arch.cr0 = cr0; 3895 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); 3896 } 3897 3898 if (!(cr0 
& X86_CR0_WP)) 3899 *hw_cr0 &= ~X86_CR0_WP; 3900} 3901 3902static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 3903{ 3904 struct vcpu_vmx *vmx = to_vmx(vcpu); 3905 unsigned long hw_cr0; 3906 3907 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK); 3908 if (enable_unrestricted_guest) 3909 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; 3910 else { 3911 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; 3912 3913 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) 3914 enter_pmode(vcpu); 3915 3916 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) 3917 enter_rmode(vcpu); 3918 } 3919 3920#ifdef CONFIG_X86_64 3921 if (vcpu->arch.efer & EFER_LME) { 3922 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) 3923 enter_lmode(vcpu); 3924 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) 3925 exit_lmode(vcpu); 3926 } 3927#endif 3928 3929 if (enable_ept) 3930 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); 3931 3932 if (!vcpu->fpu_active) 3933 hw_cr0 |= X86_CR0_TS | X86_CR0_MP; 3934 3935 vmcs_writel(CR0_READ_SHADOW, cr0); 3936 vmcs_writel(GUEST_CR0, hw_cr0); 3937 vcpu->arch.cr0 = cr0; 3938 3939 /* depends on vcpu->arch.cr0 to be set to a new value */ 3940 vmx->emulation_required = emulation_required(vcpu); 3941} 3942 3943static u64 construct_eptp(unsigned long root_hpa) 3944{ 3945 u64 eptp; 3946 3947 /* TODO write the value reading from MSR */ 3948 eptp = VMX_EPT_DEFAULT_MT | 3949 VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT; 3950 if (enable_ept_ad_bits) 3951 eptp |= VMX_EPT_AD_ENABLE_BIT; 3952 eptp |= (root_hpa & PAGE_MASK); 3953 3954 return eptp; 3955} 3956 3957static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 3958{ 3959 unsigned long guest_cr3; 3960 u64 eptp; 3961 3962 guest_cr3 = cr3; 3963 if (enable_ept) { 3964 eptp = construct_eptp(cr3); 3965 vmcs_write64(EPT_POINTER, eptp); 3966 if (is_paging(vcpu) || is_guest_mode(vcpu)) 3967 guest_cr3 = kvm_read_cr3(vcpu); 3968 else 3969 guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; 3970 ept_load_pdptrs(vcpu); 3971 } 3972 3973 vmx_flush_tlb(vcpu); 3974 vmcs_writel(GUEST_CR3, guest_cr3); 3975} 3976 3977static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 3978{ 3979 /* 3980 * Pass through host's Machine Check Enable value to hw_cr4, which 3981 * is in force while we are in guest mode. Do not let guests control 3982 * this bit, even if host CR4.MCE == 0. 3983 */ 3984 unsigned long hw_cr4 = 3985 (cr4_read_shadow() & X86_CR4_MCE) | 3986 (cr4 & ~X86_CR4_MCE) | 3987 (to_vmx(vcpu)->rmode.vm86_active ? 3988 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); 3989 3990 if (cr4 & X86_CR4_VMXE) { 3991 /* 3992 * To use VMXON (and later other VMX instructions), a guest 3993 * must first be able to turn on cr4.VMXE (see handle_vmon()). 3994 * So basically the check on whether to allow nested VMX 3995 * is here. 3996 */ 3997 if (!nested_vmx_allowed(vcpu)) 3998 return 1; 3999 } 4000 if (to_vmx(vcpu)->nested.vmxon && 4001 ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) 4002 return 1; 4003 4004 vcpu->arch.cr4 = cr4; 4005 if (enable_ept) { 4006 if (!is_paging(vcpu)) { 4007 hw_cr4 &= ~X86_CR4_PAE; 4008 hw_cr4 |= X86_CR4_PSE; 4009 } else if (!(cr4 & X86_CR4_PAE)) { 4010 hw_cr4 &= ~X86_CR4_PAE; 4011 } 4012 } 4013 4014 if (!enable_unrestricted_guest && !is_paging(vcpu)) 4015 /* 4016 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in 4017 * hardware. To emulate this behavior, SMEP/SMAP/PKU needs 4018 * to be manually disabled when guest switches to non-paging 4019 * mode. 
4020 * 4021 * If !enable_unrestricted_guest, the CPU is always running 4022 * with CR0.PG=1 and CR4 needs to be modified. 4023 * If enable_unrestricted_guest, the CPU automatically 4024 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. 4025 */ 4026 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); 4027 4028 vmcs_writel(CR4_READ_SHADOW, cr4); 4029 vmcs_writel(GUEST_CR4, hw_cr4); 4030 return 0; 4031} 4032 4033static void vmx_get_segment(struct kvm_vcpu *vcpu, 4034 struct kvm_segment *var, int seg) 4035{ 4036 struct vcpu_vmx *vmx = to_vmx(vcpu); 4037 u32 ar; 4038 4039 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 4040 *var = vmx->rmode.segs[seg]; 4041 if (seg == VCPU_SREG_TR 4042 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) 4043 return; 4044 var->base = vmx_read_guest_seg_base(vmx, seg); 4045 var->selector = vmx_read_guest_seg_selector(vmx, seg); 4046 return; 4047 } 4048 var->base = vmx_read_guest_seg_base(vmx, seg); 4049 var->limit = vmx_read_guest_seg_limit(vmx, seg); 4050 var->selector = vmx_read_guest_seg_selector(vmx, seg); 4051 ar = vmx_read_guest_seg_ar(vmx, seg); 4052 var->unusable = (ar >> 16) & 1; 4053 var->type = ar & 15; 4054 var->s = (ar >> 4) & 1; 4055 var->dpl = (ar >> 5) & 3; 4056 /* 4057 * Some userspaces do not preserve the unusable property. Since a usable 4058 * segment has to be present according to the VMX spec, we can use the 4059 * present property to work around this userspace bug by making unusable 4060 * segments always nonpresent. vmx_segment_access_rights() already marks 4061 * nonpresent segments as unusable. 4062 */ 4063 var->present = !var->unusable; 4064 var->avl = (ar >> 12) & 1; 4065 var->l = (ar >> 13) & 1; 4066 var->db = (ar >> 14) & 1; 4067 var->g = (ar >> 15) & 1; 4068} 4069 4070static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) 4071{ 4072 struct kvm_segment s; 4073 4074 if (to_vmx(vcpu)->rmode.vm86_active) { 4075 vmx_get_segment(vcpu, &s, seg); 4076 return s.base; 4077 } 4078 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); 4079} 4080 4081static int vmx_get_cpl(struct kvm_vcpu *vcpu) 4082{ 4083 struct vcpu_vmx *vmx = to_vmx(vcpu); 4084 4085 if (unlikely(vmx->rmode.vm86_active)) 4086 return 0; 4087 else { 4088 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); 4089 return VMX_AR_DPL(ar); 4090 } 4091} 4092 4093static u32 vmx_segment_access_rights(struct kvm_segment *var) 4094{ 4095 u32 ar; 4096 4097 if (var->unusable || !var->present) 4098 ar = 1 << 16; 4099 else { 4100 ar = var->type & 15; 4101 ar |= (var->s & 1) << 4; 4102 ar |= (var->dpl & 3) << 5; 4103 ar |= (var->present & 1) << 7; 4104 ar |= (var->avl & 1) << 12; 4105 ar |= (var->l & 1) << 13; 4106 ar |= (var->db & 1) << 14; 4107 ar |= (var->g & 1) << 15; 4108 } 4109 4110 return ar; 4111} 4112 4113static void vmx_set_segment(struct kvm_vcpu *vcpu, 4114 struct kvm_segment *var, int seg) 4115{ 4116 struct vcpu_vmx *vmx = to_vmx(vcpu); 4117 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 4118 4119 vmx_segment_cache_clear(vmx); 4120 4121 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 4122 vmx->rmode.segs[seg] = *var; 4123 if (seg == VCPU_SREG_TR) 4124 vmcs_write16(sf->selector, var->selector); 4125 else if (var->s) 4126 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); 4127 goto out; 4128 } 4129 4130 vmcs_writel(sf->base, var->base); 4131 vmcs_write32(sf->limit, var->limit); 4132 vmcs_write16(sf->selector, var->selector); 4133 4134 /* 4135 * Fix the "Accessed" bit in the AR field of segment registers for older 4136 * qemu binaries. 4137 * The IA32 architecture specifies that at processor reset the 4138 * "Accessed" bit in the AR field of segment registers is 1, but qemu 4139 * sets it to 0 in its userland code. This causes an invalid-guest-state 4140 * vmexit when "unrestricted guest" mode is turned on. 4141 * A fix for this cpu_reset setup issue is being pushed into the qemu 4142 * tree; newer qemu binaries with that fix will not need this 4143 * kvm hack. 4144 */ 4145 if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) 4146 var->type |= 0x1; /* Accessed */ 4147 4148 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); 4149 4150out: 4151 vmx->emulation_required = emulation_required(vcpu); 4152} 4153 4154static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 4155{ 4156 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); 4157 4158 *db = (ar >> 14) & 1; 4159 *l = (ar >> 13) & 1; 4160} 4161 4162static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 4163{ 4164 dt->size = vmcs_read32(GUEST_IDTR_LIMIT); 4165 dt->address = vmcs_readl(GUEST_IDTR_BASE); 4166} 4167 4168static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 4169{ 4170 vmcs_write32(GUEST_IDTR_LIMIT, dt->size); 4171 vmcs_writel(GUEST_IDTR_BASE, dt->address); 4172} 4173 4174static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 4175{ 4176 dt->size = vmcs_read32(GUEST_GDTR_LIMIT); 4177 dt->address = vmcs_readl(GUEST_GDTR_BASE); 4178} 4179 4180static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 4181{ 4182 vmcs_write32(GUEST_GDTR_LIMIT, dt->size); 4183 vmcs_writel(GUEST_GDTR_BASE, dt->address); 4184} 4185 4186static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) 4187{ 4188 struct kvm_segment var; 4189 u32 ar; 4190 4191 vmx_get_segment(vcpu, &var, seg); 4192 var.dpl = 0x3; 4193 if (seg == VCPU_SREG_CS) 4194 var.type = 0x3; 4195 ar = vmx_segment_access_rights(&var); 4196 4197 if (var.base != (var.selector << 4)) 4198 return false; 4199 if (var.limit != 0xffff) 4200 return false; 4201 if (ar != 0xf3) 4202 return false; 4203 4204 return true; 4205} 4206 4207static bool code_segment_valid(struct kvm_vcpu *vcpu) 4208{ 4209 struct kvm_segment cs; 4210 unsigned int cs_rpl; 4211 4212 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 4213 cs_rpl = cs.selector & SEGMENT_RPL_MASK; 4214 4215 if (cs.unusable) 4216 return false; 4217 if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) 4218 return false; 4219 if (!cs.s) 4220 return false; 4221 if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { 4222 if (cs.dpl > cs_rpl) 4223 return false; 4224 } else { 4225 if (cs.dpl != cs_rpl) 4226 return false; 4227 } 4228 if (!cs.present) 4229 return false; 4230 4231 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ 4232 return true; 4233} 4234 4235static bool stack_segment_valid(struct kvm_vcpu *vcpu) 4236{ 4237 struct kvm_segment ss; 4238 unsigned int ss_rpl; 4239 4240 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 4241 ss_rpl = ss.selector & SEGMENT_RPL_MASK; 4242 4243 if (ss.unusable) 4244 return true; 4245 if (ss.type != 3 && ss.type != 7) 4246 return false; 4247 if (!ss.s) 4248 return false; 4249 if (ss.dpl != ss_rpl) /* DPL != RPL */ 4250 return false; 4251 if (!ss.present) 4252 return false; 4253 4254 return true; 4255} 4256 4257static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) 4258{ 4259 struct kvm_segment var; 4260 unsigned int rpl; 4261 4262 vmx_get_segment(vcpu, &var, seg); 4263 rpl = var.selector & SEGMENT_RPL_MASK;
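/* For illustration: the RPL is the low two bits of the selector, e.g. selector 0x2b has RPL 3. The checks below follow the guest-state validity rules for DS/ES/FS/GS: the segment must be a code or data descriptor (S=1) and present, and unless its type marks a readable code segment it must satisfy DPL >= RPL. */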
4264 4265 if (var.unusable) 4266 return true; 4267 if (!var.s) 4268 return false; 4269 if (!var.present) 4270 return false; 4271 if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { 4272 if (var.dpl < rpl) /* DPL < RPL */ 4273 return false; 4274 } 4275 4276 /* TODO: Add other members to kvm_segment_field to allow checking for other access 4277 * rights flags 4278 */ 4279 return true; 4280} 4281 4282static bool tr_valid(struct kvm_vcpu *vcpu) 4283{ 4284 struct kvm_segment tr; 4285 4286 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); 4287 4288 if (tr.unusable) 4289 return false; 4290 if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 4291 return false; 4292 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ 4293 return false; 4294 if (!tr.present) 4295 return false; 4296 4297 return true; 4298} 4299 4300static bool ldtr_valid(struct kvm_vcpu *vcpu) 4301{ 4302 struct kvm_segment ldtr; 4303 4304 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); 4305 4306 if (ldtr.unusable) 4307 return true; 4308 if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 4309 return false; 4310 if (ldtr.type != 2) 4311 return false; 4312 if (!ldtr.present) 4313 return false; 4314 4315 return true; 4316} 4317 4318static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) 4319{ 4320 struct kvm_segment cs, ss; 4321 4322 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 4323 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 4324 4325 return ((cs.selector & SEGMENT_RPL_MASK) == 4326 (ss.selector & SEGMENT_RPL_MASK)); 4327} 4328 4329/* 4330 * Check if guest state is valid. Returns true if valid, false if 4331 * not. 4332 * We assume that registers are always usable 4333 */ 4334static bool guest_state_valid(struct kvm_vcpu *vcpu) 4335{ 4336 if (enable_unrestricted_guest) 4337 return true; 4338 4339 /* real mode guest state checks */ 4340 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { 4341 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) 4342 return false; 4343 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) 4344 return false; 4345 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) 4346 return false; 4347 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) 4348 return false; 4349 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) 4350 return false; 4351 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) 4352 return false; 4353 } else { 4354 /* protected mode guest state checks */ 4355 if (!cs_ss_rpl_check(vcpu)) 4356 return false; 4357 if (!code_segment_valid(vcpu)) 4358 return false; 4359 if (!stack_segment_valid(vcpu)) 4360 return false; 4361 if (!data_segment_valid(vcpu, VCPU_SREG_DS)) 4362 return false; 4363 if (!data_segment_valid(vcpu, VCPU_SREG_ES)) 4364 return false; 4365 if (!data_segment_valid(vcpu, VCPU_SREG_FS)) 4366 return false; 4367 if (!data_segment_valid(vcpu, VCPU_SREG_GS)) 4368 return false; 4369 if (!tr_valid(vcpu)) 4370 return false; 4371 if (!ldtr_valid(vcpu)) 4372 return false; 4373 } 4374 /* TODO: 4375 * - Add checks on RIP 4376 * - Add checks on RFLAGS 4377 */ 4378 4379 return true; 4380} 4381 4382static int init_rmode_tss(struct kvm *kvm) 4383{ 4384 gfn_t fn; 4385 u16 data = 0; 4386 int idx, r; 4387 4388 idx = srcu_read_lock(&kvm->srcu); 4389 fn = kvm->arch.tss_addr >> PAGE_SHIFT; 4390 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); 4391 if (r < 0) 4392 goto out; 4393 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; 4394 r = kvm_write_guest_page(kvm, fn++, &data, 4395 TSS_IOPB_BASE_OFFSET, sizeof(u16)); 4396 if (r < 0) 4397 goto out; 4398 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); 4399 if (r < 
0) 4400 goto out; 4401 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); 4402 if (r < 0) 4403 goto out; 4404 data = ~0; 4405 r = kvm_write_guest_page(kvm, fn, &data, 4406 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, 4407 sizeof(u8)); 4408out: 4409 srcu_read_unlock(&kvm->srcu, idx); 4410 return r; 4411} 4412 4413static int init_rmode_identity_map(struct kvm *kvm) 4414{ 4415 int i, idx, r = 0; 4416 kvm_pfn_t identity_map_pfn; 4417 u32 tmp; 4418 4419 if (!enable_ept) 4420 return 0; 4421 4422 /* Protect kvm->arch.ept_identity_pagetable_done. */ 4423 mutex_lock(&kvm->slots_lock); 4424 4425 if (likely(kvm->arch.ept_identity_pagetable_done)) 4426 goto out2; 4427 4428 identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; 4429 4430 r = alloc_identity_pagetable(kvm); 4431 if (r < 0) 4432 goto out2; 4433 4434 idx = srcu_read_lock(&kvm->srcu); 4435 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); 4436 if (r < 0) 4437 goto out; 4438 /* Set up identity-mapping pagetable for EPT in real mode */ 4439 for (i = 0; i < PT32_ENT_PER_PAGE; i++) { 4440 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | 4441 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); 4442 r = kvm_write_guest_page(kvm, identity_map_pfn, 4443 &tmp, i * sizeof(tmp), sizeof(tmp)); 4444 if (r < 0) 4445 goto out; 4446 } 4447 kvm->arch.ept_identity_pagetable_done = true; 4448 4449out: 4450 srcu_read_unlock(&kvm->srcu, idx); 4451 4452out2: 4453 mutex_unlock(&kvm->slots_lock); 4454 return r; 4455} 4456 4457static void seg_setup(int seg) 4458{ 4459 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 4460 unsigned int ar; 4461 4462 vmcs_write16(sf->selector, 0); 4463 vmcs_writel(sf->base, 0); 4464 vmcs_write32(sf->limit, 0xffff); 4465 ar = 0x93; 4466 if (seg == VCPU_SREG_CS) 4467 ar |= 0x08; /* code segment */ 4468 4469 vmcs_write32(sf->ar_bytes, ar); 4470} 4471 4472static int alloc_apic_access_page(struct kvm *kvm) 4473{ 4474 struct page *page; 4475 int r = 0; 4476 4477 mutex_lock(&kvm->slots_lock); 4478 if (kvm->arch.apic_access_page_done) 4479 goto out; 4480 r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 4481 APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); 4482 if (r) 4483 goto out; 4484 4485 page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 4486 if (is_error_page(page)) { 4487 r = -EFAULT; 4488 goto out; 4489 } 4490 4491 /* 4492 * Do not pin the page in memory, so that memory hot-unplug 4493 * is able to migrate it. 4494 */ 4495 put_page(page); 4496 kvm->arch.apic_access_page_done = true; 4497out: 4498 mutex_unlock(&kvm->slots_lock); 4499 return r; 4500} 4501 4502static int alloc_identity_pagetable(struct kvm *kvm) 4503{ 4504 /* Called with kvm->slots_lock held. 
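 * (This only reserves a one-page private memslot at kvm->arch.ept_identity_map_addr; init_rmode_identity_map() above fills that page with 4MB PSE entries so real-mode guests can run with EPT enabled.)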
*/ 4505 4506 int r = 0; 4507 4508 BUG_ON(kvm->arch.ept_identity_pagetable_done); 4509 4510 r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 4511 kvm->arch.ept_identity_map_addr, PAGE_SIZE); 4512 4513 return r; 4514} 4515 4516static int allocate_vpid(void) 4517{ 4518 int vpid; 4519 4520 if (!enable_vpid) 4521 return 0; 4522 spin_lock(&vmx_vpid_lock); 4523 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); 4524 if (vpid < VMX_NR_VPIDS) 4525 __set_bit(vpid, vmx_vpid_bitmap); 4526 else 4527 vpid = 0; 4528 spin_unlock(&vmx_vpid_lock); 4529 return vpid; 4530} 4531 4532static void free_vpid(int vpid) 4533{ 4534 if (!enable_vpid || vpid == 0) 4535 return; 4536 spin_lock(&vmx_vpid_lock); 4537 __clear_bit(vpid, vmx_vpid_bitmap); 4538 spin_unlock(&vmx_vpid_lock); 4539} 4540 4541#define MSR_TYPE_R 1 4542#define MSR_TYPE_W 2 4543static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, 4544 u32 msr, int type) 4545{ 4546 int f = sizeof(unsigned long); 4547 4548 if (!cpu_has_vmx_msr_bitmap()) 4549 return; 4550 4551 /* 4552 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals 4553 * have the write-low and read-high bitmap offsets the wrong way round. 4554 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 4555 */ 4556 if (msr <= 0x1fff) { 4557 if (type & MSR_TYPE_R) 4558 /* read-low */ 4559 __clear_bit(msr, msr_bitmap + 0x000 / f); 4560 4561 if (type & MSR_TYPE_W) 4562 /* write-low */ 4563 __clear_bit(msr, msr_bitmap + 0x800 / f); 4564 4565 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 4566 msr &= 0x1fff; 4567 if (type & MSR_TYPE_R) 4568 /* read-high */ 4569 __clear_bit(msr, msr_bitmap + 0x400 / f); 4570 4571 if (type & MSR_TYPE_W) 4572 /* write-high */ 4573 __clear_bit(msr, msr_bitmap + 0xc00 / f); 4574 4575 } 4576} 4577 4578static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, 4579 u32 msr, int type) 4580{ 4581 int f = sizeof(unsigned long); 4582 4583 if (!cpu_has_vmx_msr_bitmap()) 4584 return; 4585 4586 /* 4587 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals 4588 * have the write-low and read-high bitmap offsets the wrong way round. 4589 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 4590 */ 4591 if (msr <= 0x1fff) { 4592 if (type & MSR_TYPE_R) 4593 /* read-low */ 4594 __set_bit(msr, msr_bitmap + 0x000 / f); 4595 4596 if (type & MSR_TYPE_W) 4597 /* write-low */ 4598 __set_bit(msr, msr_bitmap + 0x800 / f); 4599 4600 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 4601 msr &= 0x1fff; 4602 if (type & MSR_TYPE_R) 4603 /* read-high */ 4604 __set_bit(msr, msr_bitmap + 0x400 / f); 4605 4606 if (type & MSR_TYPE_W) 4607 /* write-high */ 4608 __set_bit(msr, msr_bitmap + 0xc00 / f); 4609 4610 } 4611} 4612 4613/* 4614 * If a msr is allowed by L0, we should check whether it is allowed by L1. 4615 * The corresponding bit will be cleared unless both of L0 and L1 allow it. 4616 */ 4617static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, 4618 unsigned long *msr_bitmap_nested, 4619 u32 msr, int type) 4620{ 4621 int f = sizeof(unsigned long); 4622 4623 if (!cpu_has_vmx_msr_bitmap()) { 4624 WARN_ON(1); 4625 return; 4626 } 4627 4628 /* 4629 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals 4630 * have the write-low and read-high bitmap offsets the wrong way round. 4631 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
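 * (Worked example: each 1024-byte quadrant holds one bit per MSR for 0x2000 MSRs; read-low lives at offset 0x000, read-high at 0x400, write-low at 0x800 and write-high at 0xc00, so MSR_EFER (0xc0000080) maps to bit 0x80 of the two high quadrants.)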
4632 */ 4633 if (msr <= 0x1fff) { 4634 if (type & MSR_TYPE_R && 4635 !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) 4636 /* read-low */ 4637 __clear_bit(msr, msr_bitmap_nested + 0x000 / f); 4638 4639 if (type & MSR_TYPE_W && 4640 !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) 4641 /* write-low */ 4642 __clear_bit(msr, msr_bitmap_nested + 0x800 / f); 4643 4644 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 4645 msr &= 0x1fff; 4646 if (type & MSR_TYPE_R && 4647 !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) 4648 /* read-high */ 4649 __clear_bit(msr, msr_bitmap_nested + 0x400 / f); 4650 4651 if (type & MSR_TYPE_W && 4652 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) 4653 /* write-high */ 4654 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); 4655 4656 } 4657} 4658 4659static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) 4660{ 4661 if (!longmode_only) 4662 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, 4663 msr, MSR_TYPE_R | MSR_TYPE_W); 4664 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, 4665 msr, MSR_TYPE_R | MSR_TYPE_W); 4666} 4667 4668static void vmx_enable_intercept_msr_read_x2apic(u32 msr, bool apicv_active) 4669{ 4670 if (apicv_active) { 4671 __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, 4672 msr, MSR_TYPE_R); 4673 __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, 4674 msr, MSR_TYPE_R); 4675 } else { 4676 __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive, 4677 msr, MSR_TYPE_R); 4678 __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive, 4679 msr, MSR_TYPE_R); 4680 } 4681} 4682 4683static void vmx_disable_intercept_msr_read_x2apic(u32 msr, bool apicv_active) 4684{ 4685 if (apicv_active) { 4686 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, 4687 msr, MSR_TYPE_R); 4688 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, 4689 msr, MSR_TYPE_R); 4690 } else { 4691 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive, 4692 msr, MSR_TYPE_R); 4693 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive, 4694 msr, MSR_TYPE_R); 4695 } 4696} 4697 4698static void vmx_disable_intercept_msr_write_x2apic(u32 msr, bool apicv_active) 4699{ 4700 if (apicv_active) { 4701 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, 4702 msr, MSR_TYPE_W); 4703 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, 4704 msr, MSR_TYPE_W); 4705 } else { 4706 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive, 4707 msr, MSR_TYPE_W); 4708 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive, 4709 msr, MSR_TYPE_W); 4710 } 4711} 4712 4713static bool vmx_get_enable_apicv(void) 4714{ 4715 return enable_apicv; 4716} 4717 4718static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 4719{ 4720 struct vcpu_vmx *vmx = to_vmx(vcpu); 4721 int max_irr; 4722 void *vapic_page; 4723 u16 status; 4724 4725 if (vmx->nested.pi_desc && 4726 vmx->nested.pi_pending) { 4727 vmx->nested.pi_pending = false; 4728 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 4729 return 0; 4730 4731 max_irr = find_last_bit( 4732 (unsigned long *)vmx->nested.pi_desc->pir, 256); 4733 4734 if (max_irr == 256) 4735 return 0; 4736 4737 vapic_page = kmap(vmx->nested.virtual_apic_page); 4738 if (!vapic_page) { 4739 WARN_ON(1); 4740 return -ENOMEM; 4741 } 4742 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page); 4743 kunmap(vmx->nested.virtual_apic_page); 4744 4745 status = vmcs_read16(GUEST_INTR_STATUS); 
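/* For illustration: the low byte of GUEST_INTR_STATUS is RVI, the highest-priority requesting vector. With status = 0x0130 (SVI = 0x01, RVI = 0x30) and max_irr = 0x41, the update below yields status = 0x0141. */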
4746 if ((u8)max_irr > ((u8)status & 0xff)) { 4747 status &= ~0xff; 4748 status |= (u8)max_irr; 4749 vmcs_write16(GUEST_INTR_STATUS, status); 4750 } 4751 } 4752 return 0; 4753} 4754 4755static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu) 4756{ 4757#ifdef CONFIG_SMP 4758 if (vcpu->mode == IN_GUEST_MODE) { 4759 struct vcpu_vmx *vmx = to_vmx(vcpu); 4760 4761 /* 4762 * Currently we don't support urgent interrupts; 4763 * all interrupts are recognized as non-urgent 4764 * interrupts, so we cannot post interrupts when 4765 * 'SN' is set. 4766 * 4767 * If the vcpu is in guest mode, it means it is 4768 * running instead of being scheduled out and 4769 * waiting on the run queue, and that is currently 4770 * the only case in which 'SN' is set, so warn 4771 * if 'SN' is set. 4772 */ 4773 WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc)); 4774 4775 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), 4776 POSTED_INTR_VECTOR); 4777 return true; 4778 } 4779#endif 4780 return false; 4781} 4782 4783static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, 4784 int vector) 4785{ 4786 struct vcpu_vmx *vmx = to_vmx(vcpu); 4787 4788 if (is_guest_mode(vcpu) && 4789 vector == vmx->nested.posted_intr_nv) { 4790 /* the PIR and ON have been set by L1. */ 4791 kvm_vcpu_trigger_posted_interrupt(vcpu); 4792 /* 4793 * If a posted intr is not recognized by hardware, 4794 * we will complete the delivery on the next vmentry. 4795 */ 4796 vmx->nested.pi_pending = true; 4797 kvm_make_request(KVM_REQ_EVENT, vcpu); 4798 return 0; 4799 } 4800 return -1; 4801} 4802/* 4803 * Send an interrupt to a vcpu via posted interrupt: 4804 * 1. If the target vcpu is running (non-root mode), send a posted interrupt 4805 * notification to the vcpu and hardware will sync PIR to vIRR atomically. 4806 * 2. If the target vcpu isn't running (root mode), kick it to pick up the 4807 * interrupt from PIR on the next vmentry. 4808 */ 4809static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) 4810{ 4811 struct vcpu_vmx *vmx = to_vmx(vcpu); 4812 int r; 4813 4814 r = vmx_deliver_nested_posted_interrupt(vcpu, vector); 4815 if (!r) 4816 return; 4817 4818 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) 4819 return; 4820 4821 r = pi_test_and_set_on(&vmx->pi_desc); 4822 kvm_make_request(KVM_REQ_EVENT, vcpu); 4823 if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu)) 4824 kvm_vcpu_kick(vcpu); 4825} 4826 4827static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) 4828{ 4829 struct vcpu_vmx *vmx = to_vmx(vcpu); 4830 4831 if (!pi_test_and_clear_on(&vmx->pi_desc)) 4832 return; 4833 4834 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir); 4835} 4836 4837/* 4838 * Set up the vmcs's constant host-state fields, i.e., host-state fields that 4839 * will not change in the lifetime of the guest. 4840 * Note that host-state that does change is set elsewhere. E.g., host-state 4841 * that is set differently for each CPU is set in vmx_vcpu_load(), not here. 4842 */ 4843static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) 4844{ 4845 u32 low32, high32; 4846 unsigned long tmpl; 4847 struct desc_ptr dt; 4848 unsigned long cr4; 4849 4850 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ 4851 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ 4852 4853 /* Save the most likely value for this task's CR4 in the VMCS.
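 * (vmx_vcpu_run() re-reads cr4_read_shadow() on every entry and rewrites HOST_CR4 only when it differs from the value cached here, so the common case costs a single compare.)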
*/ 4854 cr4 = cr4_read_shadow(); 4855 vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ 4856 vmx->host_state.vmcs_host_cr4 = cr4; 4857 4858 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ 4859#ifdef CONFIG_X86_64 4860 /* 4861 * Load null selectors, so we can avoid reloading them in 4862 * __vmx_load_host_state(), in case userspace uses the null selectors 4863 * too (the expected case). 4864 */ 4865 vmcs_write16(HOST_DS_SELECTOR, 0); 4866 vmcs_write16(HOST_ES_SELECTOR, 0); 4867#else 4868 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4869 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4870#endif 4871 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4872 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ 4873 4874 native_store_idt(&dt); 4875 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ 4876 vmx->host_idt_base = dt.address; 4877 4878 vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */ 4879 4880 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); 4881 vmcs_write32(HOST_IA32_SYSENTER_CS, low32); 4882 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); 4883 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ 4884 4885 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { 4886 rdmsr(MSR_IA32_CR_PAT, low32, high32); 4887 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); 4888 } 4889} 4890 4891static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) 4892{ 4893 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; 4894 if (enable_ept) 4895 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; 4896 if (is_guest_mode(&vmx->vcpu)) 4897 vmx->vcpu.arch.cr4_guest_owned_bits &= 4898 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; 4899 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); 4900} 4901 4902static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) 4903{ 4904 u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; 4905 4906 if (!kvm_vcpu_apicv_active(&vmx->vcpu)) 4907 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; 4908 /* Enable the preemption timer dynamically */ 4909 pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 4910 return pin_based_exec_ctrl; 4911} 4912 4913static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) 4914{ 4915 struct vcpu_vmx *vmx = to_vmx(vcpu); 4916 4917 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); 4918 if (cpu_has_secondary_exec_ctrls()) { 4919 if (kvm_vcpu_apicv_active(vcpu)) 4920 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, 4921 SECONDARY_EXEC_APIC_REGISTER_VIRT | 4922 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4923 else 4924 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, 4925 SECONDARY_EXEC_APIC_REGISTER_VIRT | 4926 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4927 } 4928 4929 if (cpu_has_vmx_msr_bitmap()) 4930 vmx_set_msr_bitmap(vcpu); 4931} 4932 4933static u32 vmx_exec_control(struct vcpu_vmx *vmx) 4934{ 4935 u32 exec_control = vmcs_config.cpu_based_exec_ctrl; 4936 4937 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) 4938 exec_control &= ~CPU_BASED_MOV_DR_EXITING; 4939 4940 if (!cpu_need_tpr_shadow(&vmx->vcpu)) { 4941 exec_control &= ~CPU_BASED_TPR_SHADOW; 4942#ifdef CONFIG_X86_64 4943 exec_control |= CPU_BASED_CR8_STORE_EXITING | 4944 CPU_BASED_CR8_LOAD_EXITING; 4945#endif 4946 } 4947 if (!enable_ept) 4948 exec_control |= CPU_BASED_CR3_STORE_EXITING | 4949 CPU_BASED_CR3_LOAD_EXITING | 4950 CPU_BASED_INVLPG_EXITING; 4951 return exec_control; 4952} 4953 4954static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) 4955{ 4956 u32 exec_control = 
vmcs_config.cpu_based_2nd_exec_ctrl; 4957 if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu)) 4958 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 4959 if (vmx->vpid == 0) 4960 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; 4961 if (!enable_ept) { 4962 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; 4963 enable_unrestricted_guest = 0; 4964 /* Enabling INVPCID for non-EPT guests may cause a performance regression. */ 4965 exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; 4966 } 4967 if (!enable_unrestricted_guest) 4968 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 4969 if (!ple_gap) 4970 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; 4971 if (!kvm_vcpu_apicv_active(&vmx->vcpu)) 4972 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | 4973 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4974 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 4975 /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD 4976 (handle_vmptrld). 4977 We cannot enable shadow_vmcs here because we do not yet have 4978 a current VMCS12. 4979 */ 4980 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 4981 4982 if (!enable_pml) 4983 exec_control &= ~SECONDARY_EXEC_ENABLE_PML; 4984 4985 return exec_control; 4986} 4987 4988static void ept_set_mmio_spte_mask(void) 4989{ 4990 /* 4991 * EPT misconfigurations can be generated if the value of bits 2:0 4992 * of an EPT paging-structure entry is 110b (write/execute). 4993 * Also, the magic bits (0x3ull << 62) are set to quickly identify MMIO 4994 * sptes. 4995 */ 4996 kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull); 4997} 4998 4999#define VMX_XSS_EXIT_BITMAP 0 5000/* 5001 * Sets up the vmcs for emulated real mode. 5002 */ 5003static int vmx_vcpu_setup(struct vcpu_vmx *vmx) 5004{ 5005#ifdef CONFIG_X86_64 5006 unsigned long a; 5007#endif 5008 int i; 5009 5010 /* I/O */ 5011 vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a)); 5012 vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b)); 5013 5014 if (enable_shadow_vmcs) { 5015 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); 5016 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); 5017 } 5018 if (cpu_has_vmx_msr_bitmap()) 5019 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy)); 5020 5021 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ 5022 5023 /* Control */ 5024 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); 5025 vmx->hv_deadline_tsc = -1; 5026 5027 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); 5028 5029 if (cpu_has_secondary_exec_ctrls()) { 5030 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, 5031 vmx_secondary_exec_control(vmx)); 5032 } 5033 5034 if (kvm_vcpu_apicv_active(&vmx->vcpu)) { 5035 vmcs_write64(EOI_EXIT_BITMAP0, 0); 5036 vmcs_write64(EOI_EXIT_BITMAP1, 0); 5037 vmcs_write64(EOI_EXIT_BITMAP2, 0); 5038 vmcs_write64(EOI_EXIT_BITMAP3, 0); 5039 5040 vmcs_write16(GUEST_INTR_STATUS, 0); 5041 5042 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); 5043 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); 5044 } 5045 5046 if (ple_gap) { 5047 vmcs_write32(PLE_GAP, ple_gap); 5048 vmx->ple_window = ple_window; 5049 vmx->ple_window_dirty = true; 5050 } 5051 5052 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 5053 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 5054 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ 5055 5056 vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ 5057 vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ 5058 vmx_set_constant_host_state(vmx); 5059#ifdef CONFIG_X86_64 5060 rdmsrl(MSR_FS_BASE, a); 5061 vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ 5062
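/* Note: HOST_FS_BASE above and HOST_GS_BASE below are host-state fields that the CPU reloads on every VM-exit, so they are snapshotted here from the MSRs of the CPU on which the VMCS is being set up. */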
rdmsrl(MSR_GS_BASE, a); 5063 vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */ 5064#else 5065 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ 5066 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ 5067#endif 5068 5069 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); 5070 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 5071 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); 5072 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 5073 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); 5074 5075 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) 5076 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 5077 5078 for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { 5079 u32 index = vmx_msr_index[i]; 5080 u32 data_low, data_high; 5081 int j = vmx->nmsrs; 5082 5083 if (rdmsr_safe(index, &data_low, &data_high) < 0) 5084 continue; 5085 if (wrmsr_safe(index, data_low, data_high) < 0) 5086 continue; 5087 vmx->guest_msrs[j].index = i; 5088 vmx->guest_msrs[j].data = 0; 5089 vmx->guest_msrs[j].mask = -1ull; 5090 ++vmx->nmsrs; 5091 } 5092 5093 5094 vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); 5095 5096 /* 22.2.1, 20.8.1 */ 5097 vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl); 5098 5099 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); 5100 set_cr4_guest_host_mask(vmx); 5101 5102 if (vmx_xsaves_supported()) 5103 vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); 5104 5105 if (enable_pml) { 5106 ASSERT(vmx->pml_pg); 5107 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); 5108 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 5109 } 5110 5111 return 0; 5112} 5113 5114static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 5115{ 5116 struct vcpu_vmx *vmx = to_vmx(vcpu); 5117 struct msr_data apic_base_msr; 5118 u64 cr0; 5119 5120 vmx->rmode.vm86_active = 0; 5121 5122 vmx->soft_vnmi_blocked = 0; 5123 5124 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); 5125 kvm_set_cr8(vcpu, 0); 5126 5127 if (!init_event) { 5128 apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | 5129 MSR_IA32_APICBASE_ENABLE; 5130 if (kvm_vcpu_is_reset_bsp(vcpu)) 5131 apic_base_msr.data |= MSR_IA32_APICBASE_BSP; 5132 apic_base_msr.host_initiated = true; 5133 kvm_set_apic_base(vcpu, &apic_base_msr); 5134 } 5135 5136 vmx_segment_cache_clear(vmx); 5137 5138 seg_setup(VCPU_SREG_CS); 5139 vmcs_write16(GUEST_CS_SELECTOR, 0xf000); 5140 vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); 5141 5142 seg_setup(VCPU_SREG_DS); 5143 seg_setup(VCPU_SREG_ES); 5144 seg_setup(VCPU_SREG_FS); 5145 seg_setup(VCPU_SREG_GS); 5146 seg_setup(VCPU_SREG_SS); 5147 5148 vmcs_write16(GUEST_TR_SELECTOR, 0); 5149 vmcs_writel(GUEST_TR_BASE, 0); 5150 vmcs_write32(GUEST_TR_LIMIT, 0xffff); 5151 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 5152 5153 vmcs_write16(GUEST_LDTR_SELECTOR, 0); 5154 vmcs_writel(GUEST_LDTR_BASE, 0); 5155 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); 5156 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); 5157 5158 if (!init_event) { 5159 vmcs_write32(GUEST_SYSENTER_CS, 0); 5160 vmcs_writel(GUEST_SYSENTER_ESP, 0); 5161 vmcs_writel(GUEST_SYSENTER_EIP, 0); 5162 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 5163 } 5164 5165 vmcs_writel(GUEST_RFLAGS, 0x02); 5166 kvm_rip_write(vcpu, 0xfff0); 5167 5168 vmcs_writel(GUEST_GDTR_BASE, 0); 5169 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); 5170 5171 vmcs_writel(GUEST_IDTR_BASE, 0); 5172 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); 5173 5174 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); 5175 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); 5176 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); 5177 5178 setup_msrs(vmx); 5179 5180 
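/* For illustration: the reset state programmed above places the first instruction fetch at the architectural reset vector, CS.base 0xffff0000 + RIP 0xfff0 = 0xfffffff0. */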
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ 5181 5182 if (cpu_has_vmx_tpr_shadow() && !init_event) { 5183 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); 5184 if (cpu_need_tpr_shadow(vcpu)) 5185 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 5186 __pa(vcpu->arch.apic->regs)); 5187 vmcs_write32(TPR_THRESHOLD, 0); 5188 } 5189 5190 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 5191 5192 if (kvm_vcpu_apicv_active(vcpu)) 5193 memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); 5194 5195 if (vmx->vpid != 0) 5196 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 5197 5198 cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; 5199 vmx->vcpu.arch.cr0 = cr0; 5200 vmx_set_cr0(vcpu, cr0); /* enter rmode */ 5201 vmx_set_cr4(vcpu, 0); 5202 vmx_set_efer(vcpu, 0); 5203 vmx_fpu_activate(vcpu); 5204 update_exception_bitmap(vcpu); 5205 5206 vpid_sync_context(vmx->vpid); 5207} 5208 5209/* 5210 * In nested virtualization, check if L1 asked to exit on external interrupts. 5211 * For most existing hypervisors, this will always return true. 5212 */ 5213static bool nested_exit_on_intr(struct kvm_vcpu *vcpu) 5214{ 5215 return get_vmcs12(vcpu)->pin_based_vm_exec_control & 5216 PIN_BASED_EXT_INTR_MASK; 5217} 5218 5219/* 5220 * In nested virtualization, check if L1 has set 5221 * VM_EXIT_ACK_INTR_ON_EXIT 5222 */ 5223static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) 5224{ 5225 return get_vmcs12(vcpu)->vm_exit_controls & 5226 VM_EXIT_ACK_INTR_ON_EXIT; 5227} 5228 5229static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) 5230{ 5231 return get_vmcs12(vcpu)->pin_based_vm_exec_control & 5232 PIN_BASED_NMI_EXITING; 5233} 5234 5235static void enable_irq_window(struct kvm_vcpu *vcpu) 5236{ 5237 u32 cpu_based_vm_exec_control; 5238 5239 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 5240 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; 5241 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); 5242} 5243 5244static void enable_nmi_window(struct kvm_vcpu *vcpu) 5245{ 5246 u32 cpu_based_vm_exec_control; 5247 5248 if (!cpu_has_virtual_nmis() || 5249 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { 5250 enable_irq_window(vcpu); 5251 return; 5252 } 5253 5254 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 5255 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; 5256 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); 5257} 5258 5259static void vmx_inject_irq(struct kvm_vcpu *vcpu) 5260{ 5261 struct vcpu_vmx *vmx = to_vmx(vcpu); 5262 uint32_t intr; 5263 int irq = vcpu->arch.interrupt.nr; 5264 5265 trace_kvm_inj_virq(irq); 5266 5267 ++vcpu->stat.irq_injections; 5268 if (vmx->rmode.vm86_active) { 5269 int inc_eip = 0; 5270 if (vcpu->arch.interrupt.soft) 5271 inc_eip = vcpu->arch.event_exit_inst_len; 5272 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE) 5273 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5274 return; 5275 } 5276 intr = irq | INTR_INFO_VALID_MASK; 5277 if (vcpu->arch.interrupt.soft) { 5278 intr |= INTR_TYPE_SOFT_INTR; 5279 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 5280 vmx->vcpu.arch.event_exit_inst_len); 5281 } else 5282 intr |= INTR_TYPE_EXT_INTR; 5283 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); 5284} 5285 5286static void vmx_inject_nmi(struct kvm_vcpu *vcpu) 5287{ 5288 struct vcpu_vmx *vmx = to_vmx(vcpu); 5289 5290 if (!is_guest_mode(vcpu)) { 5291 if (!cpu_has_virtual_nmis()) { 5292 /* 5293 * Tracking the NMI-blocked state in software is built upon 5294 * finding the next open IRQ window. 
This, in turn, depends on 5295 * well-behaving guests: They have to keep IRQs disabled at 5296 * least as long as the NMI handler runs. Otherwise we may 5297 * cause NMI nesting, maybe breaking the guest. But as this is 5298 * highly unlikely, we can live with the residual risk. 5299 */ 5300 vmx->soft_vnmi_blocked = 1; 5301 vmx->vnmi_blocked_time = 0; 5302 } 5303 5304 ++vcpu->stat.nmi_injections; 5305 vmx->nmi_known_unmasked = false; 5306 } 5307 5308 if (vmx->rmode.vm86_active) { 5309 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) 5310 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5311 return; 5312 } 5313 5314 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 5315 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); 5316} 5317 5318static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) 5319{ 5320 if (!cpu_has_virtual_nmis()) 5321 return to_vmx(vcpu)->soft_vnmi_blocked; 5322 if (to_vmx(vcpu)->nmi_known_unmasked) 5323 return false; 5324 return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; 5325} 5326 5327static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) 5328{ 5329 struct vcpu_vmx *vmx = to_vmx(vcpu); 5330 5331 if (!cpu_has_virtual_nmis()) { 5332 if (vmx->soft_vnmi_blocked != masked) { 5333 vmx->soft_vnmi_blocked = masked; 5334 vmx->vnmi_blocked_time = 0; 5335 } 5336 } else { 5337 vmx->nmi_known_unmasked = !masked; 5338 if (masked) 5339 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 5340 GUEST_INTR_STATE_NMI); 5341 else 5342 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, 5343 GUEST_INTR_STATE_NMI); 5344 } 5345} 5346 5347static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) 5348{ 5349 if (to_vmx(vcpu)->nested.nested_run_pending) 5350 return 0; 5351 5352 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) 5353 return 0; 5354 5355 return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 5356 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI 5357 | GUEST_INTR_STATE_NMI)); 5358} 5359 5360static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) 5361{ 5362 return (!to_vmx(vcpu)->nested.nested_run_pending && 5363 vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && 5364 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 5365 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); 5366} 5367 5368static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) 5369{ 5370 int ret; 5371 5372 ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, 5373 PAGE_SIZE * 3); 5374 if (ret) 5375 return ret; 5376 kvm->arch.tss_addr = addr; 5377 return init_rmode_tss(kvm); 5378} 5379 5380static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) 5381{ 5382 switch (vec) { 5383 case BP_VECTOR: 5384 /* 5385 * Update instruction length as we may reinject the exception 5386 * from user space while in guest debugging mode. 
5387 */ 5388 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = 5389 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 5390 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 5391 return false; 5392 /* fall through */ 5393 case DB_VECTOR: 5394 if (vcpu->guest_debug & 5395 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 5396 return false; 5397 /* fall through */ 5398 case DE_VECTOR: 5399 case OF_VECTOR: 5400 case BR_VECTOR: 5401 case UD_VECTOR: 5402 case DF_VECTOR: 5403 case SS_VECTOR: 5404 case GP_VECTOR: 5405 case MF_VECTOR: 5406 return true; 5407 break; 5408 } 5409 return false; 5410} 5411 5412static int handle_rmode_exception(struct kvm_vcpu *vcpu, 5413 int vec, u32 err_code) 5414{ 5415 /* 5416 * An instruction with the address-size override prefix (opcode 0x67) 5417 * causes a #SS fault with error code 0 in VM86 mode. 5418 */ 5419 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { 5420 if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { 5421 if (vcpu->arch.halt_request) { 5422 vcpu->arch.halt_request = 0; 5423 return kvm_vcpu_halt(vcpu); 5424 } 5425 return 1; 5426 } 5427 return 0; 5428 } 5429 5430 /* 5431 * Forward all other exceptions that are valid in real mode. 5432 * FIXME: Breaks guest debugging in real mode, needs to be fixed with 5433 * the required debugging infrastructure rework. 5434 */ 5435 kvm_queue_exception(vcpu, vec); 5436 return 1; 5437} 5438 5439/* 5440 * Trigger a machine check on the host. We assume all the MSRs are already set up 5441 * by the CPU and that we still run on the same CPU as the MCE occurred on. 5442 * We pass a fake environment to the machine check handler because we want 5443 * the guest to always be treated like user space, no matter what context 5444 * it used internally. 5445 */ 5446static void kvm_machine_check(void) 5447{ 5448#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) 5449 struct pt_regs regs = { 5450 .cs = 3, /* Fake ring 3 no matter what the guest ran on */ 5451 .flags = X86_EFLAGS_IF, 5452 }; 5453 5454 do_machine_check(&regs, 0); 5455#endif 5456} 5457 5458static int handle_machine_check(struct kvm_vcpu *vcpu) 5459{ 5460 /* already handled by vcpu_run */ 5461 return 1; 5462} 5463 5464static int handle_exception(struct kvm_vcpu *vcpu) 5465{ 5466 struct vcpu_vmx *vmx = to_vmx(vcpu); 5467 struct kvm_run *kvm_run = vcpu->run; 5468 u32 intr_info, ex_no, error_code; 5469 unsigned long cr2, rip, dr6; 5470 u32 vect_info; 5471 enum emulation_result er; 5472 5473 vect_info = vmx->idt_vectoring_info; 5474 intr_info = vmx->exit_intr_info; 5475 5476 if (is_machine_check(intr_info)) 5477 return handle_machine_check(vcpu); 5478 5479 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) 5480 return 1; /* already handled by vmx_vcpu_run() */ 5481 5482 if (is_no_device(intr_info)) { 5483 vmx_fpu_activate(vcpu); 5484 return 1; 5485 } 5486 5487 if (is_invalid_opcode(intr_info)) { 5488 if (is_guest_mode(vcpu)) { 5489 kvm_queue_exception(vcpu, UD_VECTOR); 5490 return 1; 5491 } 5492 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); 5493 if (er != EMULATE_DONE) 5494 kvm_queue_exception(vcpu, UD_VECTOR); 5495 return 1; 5496 } 5497 5498 error_code = 0; 5499 if (intr_info & INTR_INFO_DELIVER_CODE_MASK) 5500 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 5501 5502 /* 5503 * A #PF with PFEC.RSVD = 1 indicates that the guest is accessing 5504 * MMIO; it is better to report an internal error. 5505 * See the comments in vmx_handle_exit.
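 * (KVM marks its MMIO shadow entries with reserved bits, so such a fault in the middle of event delivery means the guest touched MMIO while an event was being injected, which the emulator cannot replay faithfully.)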
5506 */ 5507 if ((vect_info & VECTORING_INFO_VALID_MASK) && 5508 !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { 5509 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 5510 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; 5511 vcpu->run->internal.ndata = 3; 5512 vcpu->run->internal.data[0] = vect_info; 5513 vcpu->run->internal.data[1] = intr_info; 5514 vcpu->run->internal.data[2] = error_code; 5515 return 0; 5516 } 5517 5518 if (is_page_fault(intr_info)) { 5519 /* EPT won't cause page fault directly */ 5520 BUG_ON(enable_ept); 5521 cr2 = vmcs_readl(EXIT_QUALIFICATION); 5522 trace_kvm_page_fault(cr2, error_code); 5523 5524 if (kvm_event_needs_reinjection(vcpu)) 5525 kvm_mmu_unprotect_page_virt(vcpu, cr2); 5526 return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0); 5527 } 5528 5529 ex_no = intr_info & INTR_INFO_VECTOR_MASK; 5530 5531 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) 5532 return handle_rmode_exception(vcpu, ex_no, error_code); 5533 5534 switch (ex_no) { 5535 case AC_VECTOR: 5536 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); 5537 return 1; 5538 case DB_VECTOR: 5539 dr6 = vmcs_readl(EXIT_QUALIFICATION); 5540 if (!(vcpu->guest_debug & 5541 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { 5542 vcpu->arch.dr6 &= ~15; 5543 vcpu->arch.dr6 |= dr6 | DR6_RTM; 5544 if (!(dr6 & ~DR6_RESERVED)) /* icebp */ 5545 skip_emulated_instruction(vcpu); 5546 5547 kvm_queue_exception(vcpu, DB_VECTOR); 5548 return 1; 5549 } 5550 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; 5551 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); 5552 /* fall through */ 5553 case BP_VECTOR: 5554 /* 5555 * Update instruction length as we may reinject #BP from 5556 * user space while in guest debugging mode. Reading it for 5557 * #DB as well causes no harm, it is not used in that case. 
5558 */ 5559 vmx->vcpu.arch.event_exit_inst_len = 5560 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 5561 kvm_run->exit_reason = KVM_EXIT_DEBUG; 5562 rip = kvm_rip_read(vcpu); 5563 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; 5564 kvm_run->debug.arch.exception = ex_no; 5565 break; 5566 default: 5567 kvm_run->exit_reason = KVM_EXIT_EXCEPTION; 5568 kvm_run->ex.exception = ex_no; 5569 kvm_run->ex.error_code = error_code; 5570 break; 5571 } 5572 return 0; 5573} 5574 5575static int handle_external_interrupt(struct kvm_vcpu *vcpu) 5576{ 5577 ++vcpu->stat.irq_exits; 5578 return 1; 5579} 5580 5581static int handle_triple_fault(struct kvm_vcpu *vcpu) 5582{ 5583 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 5584 return 0; 5585} 5586 5587static int handle_io(struct kvm_vcpu *vcpu) 5588{ 5589 unsigned long exit_qualification; 5590 int size, in, string; 5591 unsigned port; 5592 5593 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5594 string = (exit_qualification & 16) != 0; 5595 in = (exit_qualification & 8) != 0; 5596 5597 ++vcpu->stat.io_exits; 5598 5599 if (string || in) 5600 return emulate_instruction(vcpu, 0) == EMULATE_DONE; 5601 5602 port = exit_qualification >> 16; 5603 size = (exit_qualification & 7) + 1; 5604 skip_emulated_instruction(vcpu); 5605 5606 return kvm_fast_pio_out(vcpu, size, port); 5607} 5608 5609static void 5610vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) 5611{ 5612 /* 5613 * Patch in the VMCALL instruction: 5614 */ 5615 hypercall[0] = 0x0f; 5616 hypercall[1] = 0x01; 5617 hypercall[2] = 0xc1; 5618} 5619 5620static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) 5621{ 5622 unsigned long always_on = VMXON_CR0_ALWAYSON; 5623 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5624 5625 if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high & 5626 SECONDARY_EXEC_UNRESTRICTED_GUEST && 5627 nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) 5628 always_on &= ~(X86_CR0_PE | X86_CR0_PG); 5629 return (val & always_on) == always_on; 5630} 5631 5632/* called to set cr0 as appropriate for a mov-to-cr0 exit. */ 5633static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) 5634{ 5635 if (is_guest_mode(vcpu)) { 5636 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5637 unsigned long orig_val = val; 5638 5639 /* 5640 * We get here when L2 changed cr0 in a way that did not change 5641 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), 5642 * but did change L0 shadowed bits. So we first calculate the 5643 * effective cr0 value that L1 would like to write into the 5644 * hardware. It consists of the L2-owned bits from the new 5645 * value combined with the L1-owned bits from L1's guest_cr0. 
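	 * (In other words, the value handed to hardware below is
	 *  (val & ~cr0_guest_host_mask) | (guest_cr0 & cr0_guest_host_mask),
	 *  which is exactly the expression computed next.)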
5646 */ 5647 val = (val & ~vmcs12->cr0_guest_host_mask) | 5648 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); 5649 5650 if (!nested_cr0_valid(vcpu, val)) 5651 return 1; 5652 5653 if (kvm_set_cr0(vcpu, val)) 5654 return 1; 5655 vmcs_writel(CR0_READ_SHADOW, orig_val); 5656 return 0; 5657 } else { 5658 if (to_vmx(vcpu)->nested.vmxon && 5659 ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON)) 5660 return 1; 5661 return kvm_set_cr0(vcpu, val); 5662 } 5663} 5664 5665static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) 5666{ 5667 if (is_guest_mode(vcpu)) { 5668 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5669 unsigned long orig_val = val; 5670 5671 /* analogously to handle_set_cr0 */ 5672 val = (val & ~vmcs12->cr4_guest_host_mask) | 5673 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); 5674 if (kvm_set_cr4(vcpu, val)) 5675 return 1; 5676 vmcs_writel(CR4_READ_SHADOW, orig_val); 5677 return 0; 5678 } else 5679 return kvm_set_cr4(vcpu, val); 5680} 5681 5682/* called to set cr0 as appropriate for clts instruction exit. */ 5683static void handle_clts(struct kvm_vcpu *vcpu) 5684{ 5685 if (is_guest_mode(vcpu)) { 5686 /* 5687 * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS 5688 * but we did (!fpu_active). We need to keep GUEST_CR0.TS on, 5689 * just pretend it's off (also in arch.cr0 for fpu_activate). 5690 */ 5691 vmcs_writel(CR0_READ_SHADOW, 5692 vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS); 5693 vcpu->arch.cr0 &= ~X86_CR0_TS; 5694 } else 5695 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); 5696} 5697 5698static int handle_cr(struct kvm_vcpu *vcpu) 5699{ 5700 unsigned long exit_qualification, val; 5701 int cr; 5702 int reg; 5703 int err; 5704 5705 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5706 cr = exit_qualification & 15; 5707 reg = (exit_qualification >> 8) & 15; 5708 switch ((exit_qualification >> 4) & 3) { 5709 case 0: /* mov to cr */ 5710 val = kvm_register_readl(vcpu, reg); 5711 trace_kvm_cr_write(cr, val); 5712 switch (cr) { 5713 case 0: 5714 err = handle_set_cr0(vcpu, val); 5715 kvm_complete_insn_gp(vcpu, err); 5716 return 1; 5717 case 3: 5718 err = kvm_set_cr3(vcpu, val); 5719 kvm_complete_insn_gp(vcpu, err); 5720 return 1; 5721 case 4: 5722 err = handle_set_cr4(vcpu, val); 5723 kvm_complete_insn_gp(vcpu, err); 5724 return 1; 5725 case 8: { 5726 u8 cr8_prev = kvm_get_cr8(vcpu); 5727 u8 cr8 = (u8)val; 5728 err = kvm_set_cr8(vcpu, cr8); 5729 kvm_complete_insn_gp(vcpu, err); 5730 if (lapic_in_kernel(vcpu)) 5731 return 1; 5732 if (cr8_prev <= cr8) 5733 return 1; 5734 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; 5735 return 0; 5736 } 5737 } 5738 break; 5739 case 2: /* clts */ 5740 handle_clts(vcpu); 5741 trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); 5742 skip_emulated_instruction(vcpu); 5743 vmx_fpu_activate(vcpu); 5744 return 1; 5745 case 1: /*mov from cr*/ 5746 switch (cr) { 5747 case 3: 5748 val = kvm_read_cr3(vcpu); 5749 kvm_register_write(vcpu, reg, val); 5750 trace_kvm_cr_read(cr, val); 5751 skip_emulated_instruction(vcpu); 5752 return 1; 5753 case 8: 5754 val = kvm_get_cr8(vcpu); 5755 kvm_register_write(vcpu, reg, val); 5756 trace_kvm_cr_read(cr, val); 5757 skip_emulated_instruction(vcpu); 5758 return 1; 5759 } 5760 break; 5761 case 3: /* lmsw */ 5762 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 5763 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); 5764 kvm_lmsw(vcpu, val); 5765 5766 skip_emulated_instruction(vcpu); 5767 return 1; 5768 default: 5769 break; 5770 } 5771 vcpu->run->exit_reason = 0; 5772 vcpu_unimpl(vcpu, 
"unhandled control register: op %d cr %d\n", 5773 (int)(exit_qualification >> 4) & 3, cr); 5774 return 0; 5775} 5776 5777static int handle_dr(struct kvm_vcpu *vcpu) 5778{ 5779 unsigned long exit_qualification; 5780 int dr, dr7, reg; 5781 5782 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5783 dr = exit_qualification & DEBUG_REG_ACCESS_NUM; 5784 5785 /* First, if DR does not exist, trigger UD */ 5786 if (!kvm_require_dr(vcpu, dr)) 5787 return 1; 5788 5789 /* Do not handle if the CPL > 0, will trigger GP on re-entry */ 5790 if (!kvm_require_cpl(vcpu, 0)) 5791 return 1; 5792 dr7 = vmcs_readl(GUEST_DR7); 5793 if (dr7 & DR7_GD) { 5794 /* 5795 * As the vm-exit takes precedence over the debug trap, we 5796 * need to emulate the latter, either for the host or the 5797 * guest debugging itself. 5798 */ 5799 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 5800 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; 5801 vcpu->run->debug.arch.dr7 = dr7; 5802 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); 5803 vcpu->run->debug.arch.exception = DB_VECTOR; 5804 vcpu->run->exit_reason = KVM_EXIT_DEBUG; 5805 return 0; 5806 } else { 5807 vcpu->arch.dr6 &= ~15; 5808 vcpu->arch.dr6 |= DR6_BD | DR6_RTM; 5809 kvm_queue_exception(vcpu, DB_VECTOR); 5810 return 1; 5811 } 5812 } 5813 5814 if (vcpu->guest_debug == 0) { 5815 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, 5816 CPU_BASED_MOV_DR_EXITING); 5817 5818 /* 5819 * No more DR vmexits; force a reload of the debug registers 5820 * and reenter on this instruction. The next vmexit will 5821 * retrieve the full state of the debug registers. 5822 */ 5823 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; 5824 return 1; 5825 } 5826 5827 reg = DEBUG_REG_ACCESS_REG(exit_qualification); 5828 if (exit_qualification & TYPE_MOV_FROM_DR) { 5829 unsigned long val; 5830 5831 if (kvm_get_dr(vcpu, dr, &val)) 5832 return 1; 5833 kvm_register_write(vcpu, reg, val); 5834 } else 5835 if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) 5836 return 1; 5837 5838 skip_emulated_instruction(vcpu); 5839 return 1; 5840} 5841 5842static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) 5843{ 5844 return vcpu->arch.dr6; 5845} 5846 5847static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) 5848{ 5849} 5850 5851static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) 5852{ 5853 get_debugreg(vcpu->arch.db[0], 0); 5854 get_debugreg(vcpu->arch.db[1], 1); 5855 get_debugreg(vcpu->arch.db[2], 2); 5856 get_debugreg(vcpu->arch.db[3], 3); 5857 get_debugreg(vcpu->arch.dr6, 6); 5858 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); 5859 5860 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; 5861 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING); 5862} 5863 5864static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) 5865{ 5866 vmcs_writel(GUEST_DR7, val); 5867} 5868 5869static int handle_cpuid(struct kvm_vcpu *vcpu) 5870{ 5871 kvm_emulate_cpuid(vcpu); 5872 return 1; 5873} 5874 5875static int handle_rdmsr(struct kvm_vcpu *vcpu) 5876{ 5877 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; 5878 struct msr_data msr_info; 5879 5880 msr_info.index = ecx; 5881 msr_info.host_initiated = false; 5882 if (vmx_get_msr(vcpu, &msr_info)) { 5883 trace_kvm_msr_read_ex(ecx); 5884 kvm_inject_gp(vcpu, 0); 5885 return 1; 5886 } 5887 5888 trace_kvm_msr_read(ecx, msr_info.data); 5889 5890 /* FIXME: handling of bits 32:63 of rax, rdx */ 5891 vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; 5892 vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u; 5893 skip_emulated_instruction(vcpu); 5894 
return 1; 5895} 5896 5897static int handle_wrmsr(struct kvm_vcpu *vcpu) 5898{ 5899 struct msr_data msr; 5900 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; 5901 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) 5902 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); 5903 5904 msr.data = data; 5905 msr.index = ecx; 5906 msr.host_initiated = false; 5907 if (kvm_set_msr(vcpu, &msr) != 0) { 5908 trace_kvm_msr_write_ex(ecx, data); 5909 kvm_inject_gp(vcpu, 0); 5910 return 1; 5911 } 5912 5913 trace_kvm_msr_write(ecx, data); 5914 skip_emulated_instruction(vcpu); 5915 return 1; 5916} 5917 5918static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) 5919{ 5920 kvm_make_request(KVM_REQ_EVENT, vcpu); 5921 return 1; 5922} 5923 5924static int handle_interrupt_window(struct kvm_vcpu *vcpu) 5925{ 5926 u32 cpu_based_vm_exec_control; 5927 5928 /* clear pending irq */ 5929 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 5930 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; 5931 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); 5932 5933 kvm_make_request(KVM_REQ_EVENT, vcpu); 5934 5935 ++vcpu->stat.irq_window_exits; 5936 return 1; 5937} 5938 5939static int handle_halt(struct kvm_vcpu *vcpu) 5940{ 5941 return kvm_emulate_halt(vcpu); 5942} 5943 5944static int handle_vmcall(struct kvm_vcpu *vcpu) 5945{ 5946 return kvm_emulate_hypercall(vcpu); 5947} 5948 5949static int handle_invd(struct kvm_vcpu *vcpu) 5950{ 5951 return emulate_instruction(vcpu, 0) == EMULATE_DONE; 5952} 5953 5954static int handle_invlpg(struct kvm_vcpu *vcpu) 5955{ 5956 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5957 5958 kvm_mmu_invlpg(vcpu, exit_qualification); 5959 skip_emulated_instruction(vcpu); 5960 return 1; 5961} 5962 5963static int handle_rdpmc(struct kvm_vcpu *vcpu) 5964{ 5965 int err; 5966 5967 err = kvm_rdpmc(vcpu); 5968 kvm_complete_insn_gp(vcpu, err); 5969 5970 return 1; 5971} 5972 5973static int handle_wbinvd(struct kvm_vcpu *vcpu) 5974{ 5975 kvm_emulate_wbinvd(vcpu); 5976 return 1; 5977} 5978 5979static int handle_xsetbv(struct kvm_vcpu *vcpu) 5980{ 5981 u64 new_bv = kvm_read_edx_eax(vcpu); 5982 u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); 5983 5984 if (kvm_set_xcr(vcpu, index, new_bv) == 0) 5985 skip_emulated_instruction(vcpu); 5986 return 1; 5987} 5988 5989static int handle_xsaves(struct kvm_vcpu *vcpu) 5990{ 5991 skip_emulated_instruction(vcpu); 5992 WARN(1, "this should never happen\n"); 5993 return 1; 5994} 5995 5996static int handle_xrstors(struct kvm_vcpu *vcpu) 5997{ 5998 skip_emulated_instruction(vcpu); 5999 WARN(1, "this should never happen\n"); 6000 return 1; 6001} 6002 6003static int handle_apic_access(struct kvm_vcpu *vcpu) 6004{ 6005 if (likely(fasteoi)) { 6006 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 6007 int access_type, offset; 6008 6009 access_type = exit_qualification & APIC_ACCESS_TYPE; 6010 offset = exit_qualification & APIC_ACCESS_OFFSET; 6011 /* 6012 * Sane guest uses MOV to write EOI, with written value 6013 * not cared. So make a short-circuit here by avoiding 6014 * heavy instruction emulation. 
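	 * (The fast path below therefore only handles a linear-mode write to
	 *  the EOI register; every other APIC access still goes through the
	 *  full instruction emulator.)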
6015 */ 6016 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && 6017 (offset == APIC_EOI)) { 6018 kvm_lapic_set_eoi(vcpu); 6019 skip_emulated_instruction(vcpu); 6020 return 1; 6021 } 6022 } 6023 return emulate_instruction(vcpu, 0) == EMULATE_DONE; 6024} 6025 6026static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) 6027{ 6028 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 6029 int vector = exit_qualification & 0xff; 6030 6031 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ 6032 kvm_apic_set_eoi_accelerated(vcpu, vector); 6033 return 1; 6034} 6035 6036static int handle_apic_write(struct kvm_vcpu *vcpu) 6037{ 6038 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 6039 u32 offset = exit_qualification & 0xfff; 6040 6041 /* APIC-write VM exit is trap-like and thus no need to adjust IP */ 6042 kvm_apic_write_nodecode(vcpu, offset); 6043 return 1; 6044} 6045 6046static int handle_task_switch(struct kvm_vcpu *vcpu) 6047{ 6048 struct vcpu_vmx *vmx = to_vmx(vcpu); 6049 unsigned long exit_qualification; 6050 bool has_error_code = false; 6051 u32 error_code = 0; 6052 u16 tss_selector; 6053 int reason, type, idt_v, idt_index; 6054 6055 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); 6056 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); 6057 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); 6058 6059 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 6060 6061 reason = (u32)exit_qualification >> 30; 6062 if (reason == TASK_SWITCH_GATE && idt_v) { 6063 switch (type) { 6064 case INTR_TYPE_NMI_INTR: 6065 vcpu->arch.nmi_injected = false; 6066 vmx_set_nmi_mask(vcpu, true); 6067 break; 6068 case INTR_TYPE_EXT_INTR: 6069 case INTR_TYPE_SOFT_INTR: 6070 kvm_clear_interrupt_queue(vcpu); 6071 break; 6072 case INTR_TYPE_HARD_EXCEPTION: 6073 if (vmx->idt_vectoring_info & 6074 VECTORING_INFO_DELIVER_CODE_MASK) { 6075 has_error_code = true; 6076 error_code = 6077 vmcs_read32(IDT_VECTORING_ERROR_CODE); 6078 } 6079 /* fall through */ 6080 case INTR_TYPE_SOFT_EXCEPTION: 6081 kvm_clear_exception_queue(vcpu); 6082 break; 6083 default: 6084 break; 6085 } 6086 } 6087 tss_selector = exit_qualification; 6088 6089 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && 6090 type != INTR_TYPE_EXT_INTR && 6091 type != INTR_TYPE_NMI_INTR)) 6092 skip_emulated_instruction(vcpu); 6093 6094 if (kvm_task_switch(vcpu, tss_selector, 6095 type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason, 6096 has_error_code, error_code) == EMULATE_FAIL) { 6097 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 6098 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 6099 vcpu->run->internal.ndata = 0; 6100 return 0; 6101 } 6102 6103 /* 6104 * TODO: What about debug traps on tss switch? 6105 * Are we supposed to inject them and update dr6? 
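	 * (Architecturally, switching to a task whose TSS has the T flag set
	 *  raises #DB with DR6.BT after the switch; nothing on this path
	 *  appears to emulate that today.)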
6106 */ 6107 6108 return 1; 6109} 6110 6111static int handle_ept_violation(struct kvm_vcpu *vcpu) 6112{ 6113 unsigned long exit_qualification; 6114 gpa_t gpa; 6115 u32 error_code; 6116 int gla_validity; 6117 6118 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 6119 6120 gla_validity = (exit_qualification >> 7) & 0x3; 6121 if (gla_validity == 0x2) { 6122 printk(KERN_ERR "EPT: Handling EPT violation failed!\n"); 6123 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n", 6124 (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS), 6125 vmcs_readl(GUEST_LINEAR_ADDRESS)); 6126 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", 6127 (long unsigned int)exit_qualification); 6128 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; 6129 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION; 6130 return 0; 6131 } 6132 6133 /* 6134 * EPT violation happened while executing iret from NMI, 6135 * "blocked by NMI" bit has to be set before next VM entry. 6136 * There are errata that may cause this bit to not be set: 6137 * AAK134, BY25. 6138 */ 6139 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 6140 cpu_has_virtual_nmis() && 6141 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 6142 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); 6143 6144 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 6145 trace_kvm_page_fault(gpa, exit_qualification); 6146 6147 /* it is a read fault? */ 6148 error_code = (exit_qualification << 2) & PFERR_USER_MASK; 6149 /* it is a write fault? */ 6150 error_code |= exit_qualification & PFERR_WRITE_MASK; 6151 /* It is a fetch fault? */ 6152 error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK; 6153 /* ept page table is present? */ 6154 error_code |= (exit_qualification & 0x38) != 0; 6155 6156 vcpu->arch.exit_qualification = exit_qualification; 6157 6158 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); 6159} 6160 6161static int handle_ept_misconfig(struct kvm_vcpu *vcpu) 6162{ 6163 int ret; 6164 gpa_t gpa; 6165 6166 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 6167 if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { 6168 skip_emulated_instruction(vcpu); 6169 trace_kvm_fast_mmio(gpa); 6170 return 1; 6171 } 6172 6173 ret = handle_mmio_page_fault(vcpu, gpa, true); 6174 if (likely(ret == RET_MMIO_PF_EMULATE)) 6175 return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) == 6176 EMULATE_DONE; 6177 6178 if (unlikely(ret == RET_MMIO_PF_INVALID)) 6179 return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0); 6180 6181 if (unlikely(ret == RET_MMIO_PF_RETRY)) 6182 return 1; 6183 6184 /* It is the real ept misconfig */ 6185 WARN_ON(1); 6186 6187 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; 6188 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; 6189 6190 return 0; 6191} 6192 6193static int handle_nmi_window(struct kvm_vcpu *vcpu) 6194{ 6195 u32 cpu_based_vm_exec_control; 6196 6197 /* clear pending NMI */ 6198 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 6199 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; 6200 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); 6201 ++vcpu->stat.nmi_window_exits; 6202 kvm_make_request(KVM_REQ_EVENT, vcpu); 6203 6204 return 1; 6205} 6206 6207static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) 6208{ 6209 struct vcpu_vmx *vmx = to_vmx(vcpu); 6210 enum emulation_result err = EMULATE_DONE; 6211 int ret = 1; 6212 u32 cpu_exec_ctrl; 6213 bool intr_window_requested; 6214 unsigned count = 130; 6215 6216 cpu_exec_ctrl = 
vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 6217 intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; 6218 6219 while (vmx->emulation_required && count-- != 0) { 6220 if (intr_window_requested && vmx_interrupt_allowed(vcpu)) 6221 return handle_interrupt_window(&vmx->vcpu); 6222 6223 if (test_bit(KVM_REQ_EVENT, &vcpu->requests)) 6224 return 1; 6225 6226 err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); 6227 6228 if (err == EMULATE_USER_EXIT) { 6229 ++vcpu->stat.mmio_exits; 6230 ret = 0; 6231 goto out; 6232 } 6233 6234 if (err != EMULATE_DONE) { 6235 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 6236 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 6237 vcpu->run->internal.ndata = 0; 6238 return 0; 6239 } 6240 6241 if (vcpu->arch.halt_request) { 6242 vcpu->arch.halt_request = 0; 6243 ret = kvm_vcpu_halt(vcpu); 6244 goto out; 6245 } 6246 6247 if (signal_pending(current)) 6248 goto out; 6249 if (need_resched()) 6250 schedule(); 6251 } 6252 6253out: 6254 return ret; 6255} 6256 6257static int __grow_ple_window(int val) 6258{ 6259 if (ple_window_grow < 1) 6260 return ple_window; 6261 6262 val = min(val, ple_window_actual_max); 6263 6264 if (ple_window_grow < ple_window) 6265 val *= ple_window_grow; 6266 else 6267 val += ple_window_grow; 6268 6269 return val; 6270} 6271 6272static int __shrink_ple_window(int val, int modifier, int minimum) 6273{ 6274 if (modifier < 1) 6275 return ple_window; 6276 6277 if (modifier < ple_window) 6278 val /= modifier; 6279 else 6280 val -= modifier; 6281 6282 return max(val, minimum); 6283} 6284 6285static void grow_ple_window(struct kvm_vcpu *vcpu) 6286{ 6287 struct vcpu_vmx *vmx = to_vmx(vcpu); 6288 int old = vmx->ple_window; 6289 6290 vmx->ple_window = __grow_ple_window(old); 6291 6292 if (vmx->ple_window != old) 6293 vmx->ple_window_dirty = true; 6294 6295 trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); 6296} 6297 6298static void shrink_ple_window(struct kvm_vcpu *vcpu) 6299{ 6300 struct vcpu_vmx *vmx = to_vmx(vcpu); 6301 int old = vmx->ple_window; 6302 6303 vmx->ple_window = __shrink_ple_window(old, 6304 ple_window_shrink, ple_window); 6305 6306 if (vmx->ple_window != old) 6307 vmx->ple_window_dirty = true; 6308 6309 trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); 6310} 6311 6312/* 6313 * ple_window_actual_max is computed to be one grow_ple_window() below 6314 * ple_window_max. (See __grow_ple_window for the reason.) 6315 * This prevents overflows, because ple_window_max is int. 6316 * ple_window_max effectively rounded down to a multiple of ple_window_grow in 6317 * this process. 6318 * ple_window_max is also prevented from setting vmx->ple_window < ple_window. 6319 */ 6320static void update_ple_window_actual_max(void) 6321{ 6322 ple_window_actual_max = 6323 __shrink_ple_window(max(ple_window_max, ple_window), 6324 ple_window_grow, INT_MIN); 6325} 6326 6327/* 6328 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR. 
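 * Runs on the CPU that received the wakeup IPI: walk the list of vCPUs
 * blocked on this CPU and kick every one whose posted-interrupt
 * descriptor has its ON bit set, so it wakes up and handles the
 * pending posted interrupts.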
6329 */ 6330static void wakeup_handler(void) 6331{ 6332 struct kvm_vcpu *vcpu; 6333 int cpu = smp_processor_id(); 6334 6335 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); 6336 list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), 6337 blocked_vcpu_list) { 6338 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 6339 6340 if (pi_test_on(pi_desc) == 1) 6341 kvm_vcpu_kick(vcpu); 6342 } 6343 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); 6344} 6345 6346static __init int hardware_setup(void) 6347{ 6348 int r = -ENOMEM, i, msr; 6349 6350 rdmsrl_safe(MSR_EFER, &host_efer); 6351 6352 for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) 6353 kvm_define_shared_msr(i, vmx_msr_index[i]); 6354 6355 vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); 6356 if (!vmx_io_bitmap_a) 6357 return r; 6358 6359 vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL); 6360 if (!vmx_io_bitmap_b) 6361 goto out; 6362 6363 vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL); 6364 if (!vmx_msr_bitmap_legacy) 6365 goto out1; 6366 6367 vmx_msr_bitmap_legacy_x2apic = 6368 (unsigned long *)__get_free_page(GFP_KERNEL); 6369 if (!vmx_msr_bitmap_legacy_x2apic) 6370 goto out2; 6371 6372 vmx_msr_bitmap_legacy_x2apic_apicv_inactive = 6373 (unsigned long *)__get_free_page(GFP_KERNEL); 6374 if (!vmx_msr_bitmap_legacy_x2apic_apicv_inactive) 6375 goto out3; 6376 6377 vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL); 6378 if (!vmx_msr_bitmap_longmode) 6379 goto out4; 6380 6381 vmx_msr_bitmap_longmode_x2apic = 6382 (unsigned long *)__get_free_page(GFP_KERNEL); 6383 if (!vmx_msr_bitmap_longmode_x2apic) 6384 goto out5; 6385 6386 vmx_msr_bitmap_longmode_x2apic_apicv_inactive = 6387 (unsigned long *)__get_free_page(GFP_KERNEL); 6388 if (!vmx_msr_bitmap_longmode_x2apic_apicv_inactive) 6389 goto out6; 6390 6391 vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); 6392 if (!vmx_vmread_bitmap) 6393 goto out7; 6394 6395 vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); 6396 if (!vmx_vmwrite_bitmap) 6397 goto out8; 6398 6399 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); 6400 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); 6401 6402 /* 6403 * Allow direct access to the PC debug port (it is often used for I/O 6404 * delays, but the vmexits simply slow things down). 6405 */ 6406 memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); 6407 clear_bit(0x80, vmx_io_bitmap_a); 6408 6409 memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); 6410 6411 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); 6412 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); 6413 6414 if (setup_vmcs_config(&vmcs_config) < 0) { 6415 r = -EIO; 6416 goto out9; 6417 } 6418 6419 if (boot_cpu_has(X86_FEATURE_NX)) 6420 kvm_enable_efer_bits(EFER_NX); 6421 6422 if (!cpu_has_vmx_vpid()) 6423 enable_vpid = 0; 6424 if (!cpu_has_vmx_shadow_vmcs()) 6425 enable_shadow_vmcs = 0; 6426 if (enable_shadow_vmcs) 6427 init_vmcs_shadow_fields(); 6428 6429 if (!cpu_has_vmx_ept() || 6430 !cpu_has_vmx_ept_4levels()) { 6431 enable_ept = 0; 6432 enable_unrestricted_guest = 0; 6433 enable_ept_ad_bits = 0; 6434 } 6435 6436 if (!cpu_has_vmx_ept_ad_bits()) 6437 enable_ept_ad_bits = 0; 6438 6439 if (!cpu_has_vmx_unrestricted_guest()) 6440 enable_unrestricted_guest = 0; 6441 6442 if (!cpu_has_vmx_flexpriority()) 6443 flexpriority_enabled = 0; 6444 6445 /* 6446 * set_apic_access_page_addr() is used to reload apic access 6447 * page upon invalidation. No need to do anything if not 6448 * using the APIC_ACCESS_ADDR VMCS field. 
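 * (Clearing the hook below is what makes the common x86 code skip the
 * reload; it is expected to check for a NULL hook before calling it.)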
6449 */ 6450 if (!flexpriority_enabled) 6451 kvm_x86_ops->set_apic_access_page_addr = NULL; 6452 6453 if (!cpu_has_vmx_tpr_shadow()) 6454 kvm_x86_ops->update_cr8_intercept = NULL; 6455 6456 if (enable_ept && !cpu_has_vmx_ept_2m_page()) 6457 kvm_disable_largepages(); 6458 6459 if (!cpu_has_vmx_ple()) 6460 ple_gap = 0; 6461 6462 if (!cpu_has_vmx_apicv()) 6463 enable_apicv = 0; 6464 6465 if (cpu_has_vmx_tsc_scaling()) { 6466 kvm_has_tsc_control = true; 6467 kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; 6468 kvm_tsc_scaling_ratio_frac_bits = 48; 6469 } 6470 6471 vmx_disable_intercept_for_msr(MSR_FS_BASE, false); 6472 vmx_disable_intercept_for_msr(MSR_GS_BASE, false); 6473 vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); 6474 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); 6475 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); 6476 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); 6477 vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true); 6478 6479 memcpy(vmx_msr_bitmap_legacy_x2apic, 6480 vmx_msr_bitmap_legacy, PAGE_SIZE); 6481 memcpy(vmx_msr_bitmap_longmode_x2apic, 6482 vmx_msr_bitmap_longmode, PAGE_SIZE); 6483 memcpy(vmx_msr_bitmap_legacy_x2apic_apicv_inactive, 6484 vmx_msr_bitmap_legacy, PAGE_SIZE); 6485 memcpy(vmx_msr_bitmap_longmode_x2apic_apicv_inactive, 6486 vmx_msr_bitmap_longmode, PAGE_SIZE); 6487 6488 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ 6489 6490 /* 6491 * enable_apicv && kvm_vcpu_apicv_active() 6492 */ 6493 for (msr = 0x800; msr <= 0x8ff; msr++) 6494 vmx_disable_intercept_msr_read_x2apic(msr, true); 6495 6496 /* TMCCT */ 6497 vmx_enable_intercept_msr_read_x2apic(0x839, true); 6498 /* TPR */ 6499 vmx_disable_intercept_msr_write_x2apic(0x808, true); 6500 /* EOI */ 6501 vmx_disable_intercept_msr_write_x2apic(0x80b, true); 6502 /* SELF-IPI */ 6503 vmx_disable_intercept_msr_write_x2apic(0x83f, true); 6504 6505 /* 6506 * (enable_apicv && !kvm_vcpu_apicv_active()) || 6507 * !enable_apicv 6508 */ 6509 /* TPR */ 6510 vmx_disable_intercept_msr_read_x2apic(0x808, false); 6511 vmx_disable_intercept_msr_write_x2apic(0x808, false); 6512 6513 if (enable_ept) { 6514 kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK, 6515 (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull, 6516 (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull, 6517 0ull, VMX_EPT_EXECUTABLE_MASK, 6518 cpu_has_vmx_ept_execute_only() ? 6519 0ull : VMX_EPT_READABLE_MASK); 6520 ept_set_mmio_spte_mask(); 6521 kvm_enable_tdp(); 6522 } else 6523 kvm_disable_tdp(); 6524 6525 update_ple_window_actual_max(); 6526 6527 /* 6528 * Only enable PML when hardware supports PML feature, and both EPT 6529 * and EPT A/D bit features are enabled -- PML depends on them to work. 
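 * (With PML disabled the dirty-logging hooks below stay NULL, and the
 * common KVM code falls back to write-protection based dirty tracking.)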
6530 */ 6531 if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) 6532 enable_pml = 0; 6533 6534 if (!enable_pml) { 6535 kvm_x86_ops->slot_enable_log_dirty = NULL; 6536 kvm_x86_ops->slot_disable_log_dirty = NULL; 6537 kvm_x86_ops->flush_log_dirty = NULL; 6538 kvm_x86_ops->enable_log_dirty_pt_masked = NULL; 6539 } 6540 6541 if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) { 6542 u64 vmx_msr; 6543 6544 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); 6545 cpu_preemption_timer_multi = 6546 vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; 6547 } else { 6548 kvm_x86_ops->set_hv_timer = NULL; 6549 kvm_x86_ops->cancel_hv_timer = NULL; 6550 } 6551 6552 kvm_set_posted_intr_wakeup_handler(wakeup_handler); 6553 6554 kvm_mce_cap_supported |= MCG_LMCE_P; 6555 6556 return alloc_kvm_area(); 6557 6558out9: 6559 free_page((unsigned long)vmx_vmwrite_bitmap); 6560out8: 6561 free_page((unsigned long)vmx_vmread_bitmap); 6562out7: 6563 free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive); 6564out6: 6565 free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic); 6566out5: 6567 free_page((unsigned long)vmx_msr_bitmap_longmode); 6568out4: 6569 free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive); 6570out3: 6571 free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic); 6572out2: 6573 free_page((unsigned long)vmx_msr_bitmap_legacy); 6574out1: 6575 free_page((unsigned long)vmx_io_bitmap_b); 6576out: 6577 free_page((unsigned long)vmx_io_bitmap_a); 6578 6579 return r; 6580} 6581 6582static __exit void hardware_unsetup(void) 6583{ 6584 free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic); 6585 free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive); 6586 free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic); 6587 free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive); 6588 free_page((unsigned long)vmx_msr_bitmap_legacy); 6589 free_page((unsigned long)vmx_msr_bitmap_longmode); 6590 free_page((unsigned long)vmx_io_bitmap_b); 6591 free_page((unsigned long)vmx_io_bitmap_a); 6592 free_page((unsigned long)vmx_vmwrite_bitmap); 6593 free_page((unsigned long)vmx_vmread_bitmap); 6594 6595 free_kvm_area(); 6596} 6597 6598/* 6599 * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE 6600 * exiting, so only get here on cpu with PAUSE-Loop-Exiting. 6601 */ 6602static int handle_pause(struct kvm_vcpu *vcpu) 6603{ 6604 if (ple_gap) 6605 grow_ple_window(vcpu); 6606 6607 skip_emulated_instruction(vcpu); 6608 kvm_vcpu_on_spin(vcpu); 6609 6610 return 1; 6611} 6612 6613static int handle_nop(struct kvm_vcpu *vcpu) 6614{ 6615 skip_emulated_instruction(vcpu); 6616 return 1; 6617} 6618 6619static int handle_mwait(struct kvm_vcpu *vcpu) 6620{ 6621 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); 6622 return handle_nop(vcpu); 6623} 6624 6625static int handle_monitor_trap(struct kvm_vcpu *vcpu) 6626{ 6627 return 1; 6628} 6629 6630static int handle_monitor(struct kvm_vcpu *vcpu) 6631{ 6632 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); 6633 return handle_nop(vcpu); 6634} 6635 6636/* 6637 * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12. 
6638 * We could reuse a single VMCS for all the L2 guests, but we also want the 6639 * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this 6640 * allows keeping them loaded on the processor, and in the future will allow 6641 * optimizations where prepare_vmcs02 doesn't need to set all the fields on 6642 * every entry if they never change. 6643 * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE 6644 * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first. 6645 * 6646 * The following functions allocate and free a vmcs02 in this pool. 6647 */ 6648 6649/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */ 6650static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) 6651{ 6652 struct vmcs02_list *item; 6653 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) 6654 if (item->vmptr == vmx->nested.current_vmptr) { 6655 list_move(&item->list, &vmx->nested.vmcs02_pool); 6656 return &item->vmcs02; 6657 } 6658 6659 if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { 6660 /* Recycle the least recently used VMCS. */ 6661 item = list_last_entry(&vmx->nested.vmcs02_pool, 6662 struct vmcs02_list, list); 6663 item->vmptr = vmx->nested.current_vmptr; 6664 list_move(&item->list, &vmx->nested.vmcs02_pool); 6665 return &item->vmcs02; 6666 } 6667 6668 /* Create a new VMCS */ 6669 item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL); 6670 if (!item) 6671 return NULL; 6672 item->vmcs02.vmcs = alloc_vmcs(); 6673 item->vmcs02.shadow_vmcs = NULL; 6674 if (!item->vmcs02.vmcs) { 6675 kfree(item); 6676 return NULL; 6677 } 6678 loaded_vmcs_init(&item->vmcs02); 6679 item->vmptr = vmx->nested.current_vmptr; 6680 list_add(&(item->list), &(vmx->nested.vmcs02_pool)); 6681 vmx->nested.vmcs02_num++; 6682 return &item->vmcs02; 6683} 6684 6685/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */ 6686static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) 6687{ 6688 struct vmcs02_list *item; 6689 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) 6690 if (item->vmptr == vmptr) { 6691 free_loaded_vmcs(&item->vmcs02); 6692 list_del(&item->list); 6693 kfree(item); 6694 vmx->nested.vmcs02_num--; 6695 return; 6696 } 6697} 6698 6699/* 6700 * Free all VMCSs saved for this vcpu, except the one pointed by 6701 * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs 6702 * must be &vmx->vmcs01. 6703 */ 6704static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) 6705{ 6706 struct vmcs02_list *item, *n; 6707 6708 WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01); 6709 list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { 6710 /* 6711 * Something will leak if the above WARN triggers. Better than 6712 * a use-after-free. 6713 */ 6714 if (vmx->loaded_vmcs == &item->vmcs02) 6715 continue; 6716 6717 free_loaded_vmcs(&item->vmcs02); 6718 list_del(&item->list); 6719 kfree(item); 6720 vmx->nested.vmcs02_num--; 6721 } 6722} 6723 6724/* 6725 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), 6726 * set the success or error code of an emulated VMX instruction, as specified 6727 * by Vol 2B, VMX Instruction Reference, "Conventions". 
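 * Briefly: VMsucceed clears all six arithmetic flags; VMfailInvalid sets
 * only CF (there is no current VMCS to store an error number in);
 * VMfailValid sets only ZF and stores the error number in the current
 * vmcs12's VM_INSTRUCTION_ERROR field.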
6728 */
6729 static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
6730 {
6731	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
6732			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
6733			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
6734 }
6735
6736 static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
6737 {
6738	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
6739			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
6740			    X86_EFLAGS_SF | X86_EFLAGS_OF))
6741			| X86_EFLAGS_CF);
6742 }
6743
6744 static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
6745					u32 vm_instruction_error)
6746 {
6747	if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
6748		/*
6749		 * failValid writes the error number to the current VMCS, which
6750		 * can't be done when there isn't a current VMCS.
6751		 */
6752		nested_vmx_failInvalid(vcpu);
6753		return;
6754	}
6755	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
6756			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
6757			    X86_EFLAGS_SF | X86_EFLAGS_OF))
6758			| X86_EFLAGS_ZF);
6759	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
6760	/*
6761	 * We don't need to force a shadow sync because
6762	 * VM_INSTRUCTION_ERROR is not shadowed.
6763	 */
6764 }
6765
6766 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
6767 {
6768	/* TODO: should not simply reset the guest here. */
6769	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
6770	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
6771 }
6772
6773 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
6774 {
6775	struct vcpu_vmx *vmx =
6776		container_of(timer, struct vcpu_vmx, nested.preemption_timer);
6777
6778	vmx->nested.preemption_timer_expired = true;
6779	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
6780	kvm_vcpu_kick(&vmx->vcpu);
6781
6782	return HRTIMER_NORESTART;
6783 }
6784
6785 /*
6786 * Decode the memory-address operand of a vmx instruction, as recorded on an
6787 * exit caused by such an instruction (run by a guest hypervisor).
6788 * On success, returns 0. When the operand is invalid, returns 1 and queues
6789 * a #UD, #GP(0) or #SS(0) exception.
6790 */
6791 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
6792				 unsigned long exit_qualification,
6793				 u32 vmx_instruction_info, bool wr, gva_t *ret)
6794 {
6795	gva_t off;
6796	bool exn;
6797	struct kvm_segment s;
6798
6799	/*
6800	 * According to Vol. 3B, "Information for VM Exits Due to Instruction
6801	 * Execution", on an exit, vmx_instruction_info holds most of the
6802	 * addressing components of the operand. Only the displacement part
6803	 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
6804	 * For how an actual address is calculated from all these components,
6805	 * refer to Vol. 1, "Operand Addressing".
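	 *
	 * Bit layout of vmx_instruction_info as decoded below:
	 *   1:0    scaling
	 *   9:7    address size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit)
	 *   10     1 = register operand, 0 = memory operand
	 *   17:15  segment register
	 *   21:18  index register (bit 22 set = no index)
	 *   26:23  base register (bit 27 set = no base)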
6806 */ 6807 int scaling = vmx_instruction_info & 3; 6808 int addr_size = (vmx_instruction_info >> 7) & 7; 6809 bool is_reg = vmx_instruction_info & (1u << 10); 6810 int seg_reg = (vmx_instruction_info >> 15) & 7; 6811 int index_reg = (vmx_instruction_info >> 18) & 0xf; 6812 bool index_is_valid = !(vmx_instruction_info & (1u << 22)); 6813 int base_reg = (vmx_instruction_info >> 23) & 0xf; 6814 bool base_is_valid = !(vmx_instruction_info & (1u << 27)); 6815 6816 if (is_reg) { 6817 kvm_queue_exception(vcpu, UD_VECTOR); 6818 return 1; 6819 } 6820 6821 /* Addr = segment_base + offset */ 6822 /* offset = base + [index * scale] + displacement */ 6823 off = exit_qualification; /* holds the displacement */ 6824 if (base_is_valid) 6825 off += kvm_register_read(vcpu, base_reg); 6826 if (index_is_valid) 6827 off += kvm_register_read(vcpu, index_reg)<<scaling; 6828 vmx_get_segment(vcpu, &s, seg_reg); 6829 *ret = s.base + off; 6830 6831 if (addr_size == 1) /* 32 bit */ 6832 *ret &= 0xffffffff; 6833 6834 /* Checks for #GP/#SS exceptions. */ 6835 exn = false; 6836 if (is_long_mode(vcpu)) { 6837 /* Long mode: #GP(0)/#SS(0) if the memory address is in a 6838 * non-canonical form. This is the only check on the memory 6839 * destination for long mode! 6840 */ 6841 exn = is_noncanonical_address(*ret); 6842 } else if (is_protmode(vcpu)) { 6843 /* Protected mode: apply checks for segment validity in the 6844 * following order: 6845 * - segment type check (#GP(0) may be thrown) 6846 * - usability check (#GP(0)/#SS(0)) 6847 * - limit check (#GP(0)/#SS(0)) 6848 */ 6849 if (wr) 6850 /* #GP(0) if the destination operand is located in a 6851 * read-only data segment or any code segment. 6852 */ 6853 exn = ((s.type & 0xa) == 0 || (s.type & 8)); 6854 else 6855 /* #GP(0) if the source operand is located in an 6856 * execute-only code segment 6857 */ 6858 exn = ((s.type & 0xa) == 8); 6859 if (exn) { 6860 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 6861 return 1; 6862 } 6863 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 6864 */ 6865 exn = (s.unusable != 0); 6866 /* Protected mode: #GP(0)/#SS(0) if the memory 6867 * operand is outside the segment limit. 6868 */ 6869 exn = exn || (off + sizeof(u64) > s.limit); 6870 } 6871 if (exn) { 6872 kvm_queue_exception_e(vcpu, 6873 seg_reg == VCPU_SREG_SS ? 
6874						SS_VECTOR : GP_VECTOR,
6875			0);
6876		return 1;
6877	}
6878
6879	return 0;
6880 }
6881
6882 /*
6883 * This function performs the various checks on the given vmptr:
6884 * - it must be 4KB aligned
6885 * - no bits beyond the physical address width may be set
6886 * Returns 0 on success or else 1.
6887 * (Intel SDM Section 30.3)
6888 */
6889 static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
6890				 gpa_t *vmpointer)
6891 {
6892	gva_t gva;
6893	gpa_t vmptr;
6894	struct x86_exception e;
6895	struct page *page;
6896	struct vcpu_vmx *vmx = to_vmx(vcpu);
6897	int maxphyaddr = cpuid_maxphyaddr(vcpu);
6898
6899	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
6900			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
6901		return 1;
6902
6903	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
6904				sizeof(vmptr), &e)) {
6905		kvm_inject_page_fault(vcpu, &e);
6906		return 1;
6907	}
6908
6909	switch (exit_reason) {
6910	case EXIT_REASON_VMON:
6911		/*
6912		 * SDM 3: 24.11.5
6913		 * The first 4 bytes of the VMXON region contain the
6914		 * supported VMCS revision identifier.
6915		 *
6916		 * Note: IA32_VMX_BASIC[48] (which would replace the
6917		 * physical address width with 32) will never be 1
6918		 * for the nested case.
6919		 *
6920		 */
6921		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6922			nested_vmx_failInvalid(vcpu);
6923			skip_emulated_instruction(vcpu);
6924			return 1;
6925		}
6926
6927		page = nested_get_page(vcpu, vmptr);
6928		if (page == NULL ||
6929		    *(u32 *)kmap(page) != VMCS12_REVISION) {
6930			nested_vmx_failInvalid(vcpu);
6931			kunmap(page);
6932			skip_emulated_instruction(vcpu);
6933			return 1;
6934		}
6935		kunmap(page);
6936		vmx->nested.vmxon_ptr = vmptr;
6937		break;
6938	case EXIT_REASON_VMCLEAR:
6939		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6940			nested_vmx_failValid(vcpu,
6941					     VMXERR_VMCLEAR_INVALID_ADDRESS);
6942			skip_emulated_instruction(vcpu);
6943			return 1;
6944		}
6945
6946		if (vmptr == vmx->nested.vmxon_ptr) {
6947			nested_vmx_failValid(vcpu,
6948					     VMXERR_VMCLEAR_VMXON_POINTER);
6949			skip_emulated_instruction(vcpu);
6950			return 1;
6951		}
6952		break;
6953	case EXIT_REASON_VMPTRLD:
6954		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6955			nested_vmx_failValid(vcpu,
6956					     VMXERR_VMPTRLD_INVALID_ADDRESS);
6957			skip_emulated_instruction(vcpu);
6958			return 1;
6959		}
6960
6961		if (vmptr == vmx->nested.vmxon_ptr) {
6962			nested_vmx_failValid(vcpu,
6963					     VMXERR_VMPTRLD_VMXON_POINTER);
6964			skip_emulated_instruction(vcpu);
6965			return 1;
6966		}
6967		break;
6968	default:
6969		return 1; /* shouldn't happen */
6970	}
6971
6972	if (vmpointer)
6973		*vmpointer = vmptr;
6974	return 0;
6975 }
6976
6977 /*
6978 * Emulate the VMXON instruction.
6979 * We validate the argument to VMXON (the so-called "VMXON pointer") in
6980 * nested_vmx_check_vmptr() above, but beyond that we only remember that
6981 * VMX is active, since we do not currently need to store anything in the
6982 * guest-allocated memory region itself. VMCLEAR and VMPTRLD do verify that
6983 * their argument is different from the VMXON pointer, as the spec requires.
6984 */ 6985static int handle_vmon(struct kvm_vcpu *vcpu) 6986{ 6987 struct kvm_segment cs; 6988 struct vcpu_vmx *vmx = to_vmx(vcpu); 6989 struct vmcs *shadow_vmcs; 6990 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED 6991 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; 6992 6993 /* The Intel VMX Instruction Reference lists a bunch of bits that 6994 * are prerequisite to running VMXON, most notably cr4.VMXE must be 6995 * set to 1 (see vmx_set_cr4() for when we allow the guest to set this). 6996 * Otherwise, we should fail with #UD. We test these now: 6997 */ 6998 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) || 6999 !kvm_read_cr0_bits(vcpu, X86_CR0_PE) || 7000 (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { 7001 kvm_queue_exception(vcpu, UD_VECTOR); 7002 return 1; 7003 } 7004 7005 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 7006 if (is_long_mode(vcpu) && !cs.l) { 7007 kvm_queue_exception(vcpu, UD_VECTOR); 7008 return 1; 7009 } 7010 7011 if (vmx_get_cpl(vcpu)) { 7012 kvm_inject_gp(vcpu, 0); 7013 return 1; 7014 } 7015 7016 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL)) 7017 return 1; 7018 7019 if (vmx->nested.vmxon) { 7020 nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); 7021 skip_emulated_instruction(vcpu); 7022 return 1; 7023 } 7024 7025 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) 7026 != VMXON_NEEDED_FEATURES) { 7027 kvm_inject_gp(vcpu, 0); 7028 return 1; 7029 } 7030 7031 if (cpu_has_vmx_msr_bitmap()) { 7032 vmx->nested.msr_bitmap = 7033 (unsigned long *)__get_free_page(GFP_KERNEL); 7034 if (!vmx->nested.msr_bitmap) 7035 goto out_msr_bitmap; 7036 } 7037 7038 vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); 7039 if (!vmx->nested.cached_vmcs12) 7040 goto out_cached_vmcs12; 7041 7042 if (enable_shadow_vmcs) { 7043 shadow_vmcs = alloc_vmcs(); 7044 if (!shadow_vmcs) 7045 goto out_shadow_vmcs; 7046 /* mark vmcs as shadow */ 7047 shadow_vmcs->revision_id |= (1u << 31); 7048 /* init shadow vmcs */ 7049 vmcs_clear(shadow_vmcs); 7050 vmx->vmcs01.shadow_vmcs = shadow_vmcs; 7051 } 7052 7053 INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); 7054 vmx->nested.vmcs02_num = 0; 7055 7056 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, 7057 HRTIMER_MODE_REL_PINNED); 7058 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 7059 7060 vmx->nested.vmxon = true; 7061 7062 skip_emulated_instruction(vcpu); 7063 nested_vmx_succeed(vcpu); 7064 return 1; 7065 7066out_shadow_vmcs: 7067 kfree(vmx->nested.cached_vmcs12); 7068 7069out_cached_vmcs12: 7070 free_page((unsigned long)vmx->nested.msr_bitmap); 7071 7072out_msr_bitmap: 7073 return -ENOMEM; 7074} 7075 7076/* 7077 * Intel's VMX Instruction Reference specifies a common set of prerequisites 7078 * for running VMX instructions (except VMXON, whose prerequisites are 7079 * slightly different). It also specifies what exception to inject otherwise. 
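 * In short: #UD if the vCPU is not in VMX operation or is in virtual-8086
 * or compatibility mode, #GP(0) if it is not running at CPL 0. Returns 1
 * if the checks pass, 0 if an exception was queued.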
7080 */ 7081static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) 7082{ 7083 struct kvm_segment cs; 7084 struct vcpu_vmx *vmx = to_vmx(vcpu); 7085 7086 if (!vmx->nested.vmxon) { 7087 kvm_queue_exception(vcpu, UD_VECTOR); 7088 return 0; 7089 } 7090 7091 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 7092 if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) || 7093 (is_long_mode(vcpu) && !cs.l)) { 7094 kvm_queue_exception(vcpu, UD_VECTOR); 7095 return 0; 7096 } 7097 7098 if (vmx_get_cpl(vcpu)) { 7099 kvm_inject_gp(vcpu, 0); 7100 return 0; 7101 } 7102 7103 return 1; 7104} 7105 7106static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) 7107{ 7108 if (vmx->nested.current_vmptr == -1ull) 7109 return; 7110 7111 /* current_vmptr and current_vmcs12 are always set/reset together */ 7112 if (WARN_ON(vmx->nested.current_vmcs12 == NULL)) 7113 return; 7114 7115 if (enable_shadow_vmcs) { 7116 /* copy to memory all shadowed fields in case 7117 they were modified */ 7118 copy_shadow_to_vmcs12(vmx); 7119 vmx->nested.sync_shadow_vmcs = false; 7120 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, 7121 SECONDARY_EXEC_SHADOW_VMCS); 7122 vmcs_write64(VMCS_LINK_POINTER, -1ull); 7123 } 7124 vmx->nested.posted_intr_nv = -1; 7125 7126 /* Flush VMCS12 to guest memory */ 7127 memcpy(vmx->nested.current_vmcs12, vmx->nested.cached_vmcs12, 7128 VMCS12_SIZE); 7129 7130 kunmap(vmx->nested.current_vmcs12_page); 7131 nested_release_page(vmx->nested.current_vmcs12_page); 7132 vmx->nested.current_vmptr = -1ull; 7133 vmx->nested.current_vmcs12 = NULL; 7134} 7135 7136/* 7137 * Free whatever needs to be freed from vmx->nested when L1 goes down, or 7138 * just stops using VMX. 7139 */ 7140static void free_nested(struct vcpu_vmx *vmx) 7141{ 7142 if (!vmx->nested.vmxon) 7143 return; 7144 7145 vmx->nested.vmxon = false; 7146 free_vpid(vmx->nested.vpid02); 7147 nested_release_vmcs12(vmx); 7148 if (vmx->nested.msr_bitmap) { 7149 free_page((unsigned long)vmx->nested.msr_bitmap); 7150 vmx->nested.msr_bitmap = NULL; 7151 } 7152 if (enable_shadow_vmcs) { 7153 vmcs_clear(vmx->vmcs01.shadow_vmcs); 7154 free_vmcs(vmx->vmcs01.shadow_vmcs); 7155 vmx->vmcs01.shadow_vmcs = NULL; 7156 } 7157 kfree(vmx->nested.cached_vmcs12); 7158 /* Unpin physical memory we referred to in current vmcs02 */ 7159 if (vmx->nested.apic_access_page) { 7160 nested_release_page(vmx->nested.apic_access_page); 7161 vmx->nested.apic_access_page = NULL; 7162 } 7163 if (vmx->nested.virtual_apic_page) { 7164 nested_release_page(vmx->nested.virtual_apic_page); 7165 vmx->nested.virtual_apic_page = NULL; 7166 } 7167 if (vmx->nested.pi_desc_page) { 7168 kunmap(vmx->nested.pi_desc_page); 7169 nested_release_page(vmx->nested.pi_desc_page); 7170 vmx->nested.pi_desc_page = NULL; 7171 vmx->nested.pi_desc = NULL; 7172 } 7173 7174 nested_free_all_saved_vmcss(vmx); 7175} 7176 7177/* Emulate the VMXOFF instruction */ 7178static int handle_vmoff(struct kvm_vcpu *vcpu) 7179{ 7180 if (!nested_vmx_check_permission(vcpu)) 7181 return 1; 7182 free_nested(to_vmx(vcpu)); 7183 skip_emulated_instruction(vcpu); 7184 nested_vmx_succeed(vcpu); 7185 return 1; 7186} 7187 7188/* Emulate the VMCLEAR instruction */ 7189static int handle_vmclear(struct kvm_vcpu *vcpu) 7190{ 7191 struct vcpu_vmx *vmx = to_vmx(vcpu); 7192 gpa_t vmptr; 7193 struct vmcs12 *vmcs12; 7194 struct page *page; 7195 7196 if (!nested_vmx_check_permission(vcpu)) 7197 return 1; 7198 7199 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) 7200 return 1; 7201 7202 if (vmptr == vmx->nested.current_vmptr) 7203 
nested_release_vmcs12(vmx); 7204 7205 page = nested_get_page(vcpu, vmptr); 7206 if (page == NULL) { 7207 /* 7208 * For accurate processor emulation, VMCLEAR beyond available 7209 * physical memory should do nothing at all. However, it is 7210 * possible that a nested vmx bug, not a guest hypervisor bug, 7211 * resulted in this case, so let's shut down before doing any 7212 * more damage: 7213 */ 7214 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 7215 return 1; 7216 } 7217 vmcs12 = kmap(page); 7218 vmcs12->launch_state = 0; 7219 kunmap(page); 7220 nested_release_page(page); 7221 7222 nested_free_vmcs02(vmx, vmptr); 7223 7224 skip_emulated_instruction(vcpu); 7225 nested_vmx_succeed(vcpu); 7226 return 1; 7227} 7228 7229static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); 7230 7231/* Emulate the VMLAUNCH instruction */ 7232static int handle_vmlaunch(struct kvm_vcpu *vcpu) 7233{ 7234 return nested_vmx_run(vcpu, true); 7235} 7236 7237/* Emulate the VMRESUME instruction */ 7238static int handle_vmresume(struct kvm_vcpu *vcpu) 7239{ 7240 7241 return nested_vmx_run(vcpu, false); 7242} 7243 7244enum vmcs_field_type { 7245 VMCS_FIELD_TYPE_U16 = 0, 7246 VMCS_FIELD_TYPE_U64 = 1, 7247 VMCS_FIELD_TYPE_U32 = 2, 7248 VMCS_FIELD_TYPE_NATURAL_WIDTH = 3 7249}; 7250 7251static inline int vmcs_field_type(unsigned long field) 7252{ 7253 if (0x1 & field) /* the *_HIGH fields are all 32 bit */ 7254 return VMCS_FIELD_TYPE_U32; 7255 return (field >> 13) & 0x3 ; 7256} 7257 7258static inline int vmcs_field_readonly(unsigned long field) 7259{ 7260 return (((field >> 10) & 0x3) == 1); 7261} 7262 7263/* 7264 * Read a vmcs12 field. Since these can have varying lengths and we return 7265 * one type, we chose the biggest type (u64) and zero-extend the return value 7266 * to that size. Note that the caller, handle_vmread, might need to use only 7267 * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of 7268 * 64-bit fields are to be returned). 
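 * The field's width is recovered from the encoding itself: bits 14:13 of
 * the field number give the type (see vmcs_field_type() above), so the
 * caller does not have to pass it in.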
7269 */ 7270static inline int vmcs12_read_any(struct kvm_vcpu *vcpu, 7271 unsigned long field, u64 *ret) 7272{ 7273 short offset = vmcs_field_to_offset(field); 7274 char *p; 7275 7276 if (offset < 0) 7277 return offset; 7278 7279 p = ((char *)(get_vmcs12(vcpu))) + offset; 7280 7281 switch (vmcs_field_type(field)) { 7282 case VMCS_FIELD_TYPE_NATURAL_WIDTH: 7283 *ret = *((natural_width *)p); 7284 return 0; 7285 case VMCS_FIELD_TYPE_U16: 7286 *ret = *((u16 *)p); 7287 return 0; 7288 case VMCS_FIELD_TYPE_U32: 7289 *ret = *((u32 *)p); 7290 return 0; 7291 case VMCS_FIELD_TYPE_U64: 7292 *ret = *((u64 *)p); 7293 return 0; 7294 default: 7295 WARN_ON(1); 7296 return -ENOENT; 7297 } 7298} 7299 7300 7301static inline int vmcs12_write_any(struct kvm_vcpu *vcpu, 7302 unsigned long field, u64 field_value){ 7303 short offset = vmcs_field_to_offset(field); 7304 char *p = ((char *) get_vmcs12(vcpu)) + offset; 7305 if (offset < 0) 7306 return offset; 7307 7308 switch (vmcs_field_type(field)) { 7309 case VMCS_FIELD_TYPE_U16: 7310 *(u16 *)p = field_value; 7311 return 0; 7312 case VMCS_FIELD_TYPE_U32: 7313 *(u32 *)p = field_value; 7314 return 0; 7315 case VMCS_FIELD_TYPE_U64: 7316 *(u64 *)p = field_value; 7317 return 0; 7318 case VMCS_FIELD_TYPE_NATURAL_WIDTH: 7319 *(natural_width *)p = field_value; 7320 return 0; 7321 default: 7322 WARN_ON(1); 7323 return -ENOENT; 7324 } 7325 7326} 7327 7328static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) 7329{ 7330 int i; 7331 unsigned long field; 7332 u64 field_value; 7333 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 7334 const unsigned long *fields = shadow_read_write_fields; 7335 const int num_fields = max_shadow_read_write_fields; 7336 7337 preempt_disable(); 7338 7339 vmcs_load(shadow_vmcs); 7340 7341 for (i = 0; i < num_fields; i++) { 7342 field = fields[i]; 7343 switch (vmcs_field_type(field)) { 7344 case VMCS_FIELD_TYPE_U16: 7345 field_value = vmcs_read16(field); 7346 break; 7347 case VMCS_FIELD_TYPE_U32: 7348 field_value = vmcs_read32(field); 7349 break; 7350 case VMCS_FIELD_TYPE_U64: 7351 field_value = vmcs_read64(field); 7352 break; 7353 case VMCS_FIELD_TYPE_NATURAL_WIDTH: 7354 field_value = vmcs_readl(field); 7355 break; 7356 default: 7357 WARN_ON(1); 7358 continue; 7359 } 7360 vmcs12_write_any(&vmx->vcpu, field, field_value); 7361 } 7362 7363 vmcs_clear(shadow_vmcs); 7364 vmcs_load(vmx->loaded_vmcs->vmcs); 7365 7366 preempt_enable(); 7367} 7368 7369static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) 7370{ 7371 const unsigned long *fields[] = { 7372 shadow_read_write_fields, 7373 shadow_read_only_fields 7374 }; 7375 const int max_fields[] = { 7376 max_shadow_read_write_fields, 7377 max_shadow_read_only_fields 7378 }; 7379 int i, q; 7380 unsigned long field; 7381 u64 field_value = 0; 7382 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 7383 7384 vmcs_load(shadow_vmcs); 7385 7386 for (q = 0; q < ARRAY_SIZE(fields); q++) { 7387 for (i = 0; i < max_fields[q]; i++) { 7388 field = fields[q][i]; 7389 vmcs12_read_any(&vmx->vcpu, field, &field_value); 7390 7391 switch (vmcs_field_type(field)) { 7392 case VMCS_FIELD_TYPE_U16: 7393 vmcs_write16(field, (u16)field_value); 7394 break; 7395 case VMCS_FIELD_TYPE_U32: 7396 vmcs_write32(field, (u32)field_value); 7397 break; 7398 case VMCS_FIELD_TYPE_U64: 7399 vmcs_write64(field, (u64)field_value); 7400 break; 7401 case VMCS_FIELD_TYPE_NATURAL_WIDTH: 7402 vmcs_writel(field, (long)field_value); 7403 break; 7404 default: 7405 WARN_ON(1); 7406 break; 7407 } 7408 } 7409 } 7410 7411 vmcs_clear(shadow_vmcs); 7412 
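	/* Deactivate the shadow VMCS before switching back to the working VMCS. */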
vmcs_load(vmx->loaded_vmcs->vmcs); 7413} 7414 7415/* 7416 * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was 7417 * used before) all generate the same failure when it is missing. 7418 */ 7419static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu) 7420{ 7421 struct vcpu_vmx *vmx = to_vmx(vcpu); 7422 if (vmx->nested.current_vmptr == -1ull) { 7423 nested_vmx_failInvalid(vcpu); 7424 skip_emulated_instruction(vcpu); 7425 return 0; 7426 } 7427 return 1; 7428} 7429 7430static int handle_vmread(struct kvm_vcpu *vcpu) 7431{ 7432 unsigned long field; 7433 u64 field_value; 7434 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 7435 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 7436 gva_t gva = 0; 7437 7438 if (!nested_vmx_check_permission(vcpu) || 7439 !nested_vmx_check_vmcs12(vcpu)) 7440 return 1; 7441 7442 /* Decode instruction info and find the field to read */ 7443 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 7444 /* Read the field, zero-extended to a u64 field_value */ 7445 if (vmcs12_read_any(vcpu, field, &field_value) < 0) { 7446 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 7447 skip_emulated_instruction(vcpu); 7448 return 1; 7449 } 7450 /* 7451 * Now copy part of this value to register or memory, as requested. 7452 * Note that the number of bits actually copied is 32 or 64 depending 7453 * on the guest's mode (32 or 64 bit), not on the given field's length. 7454 */ 7455 if (vmx_instruction_info & (1u << 10)) { 7456 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), 7457 field_value); 7458 } else { 7459 if (get_vmx_mem_address(vcpu, exit_qualification, 7460 vmx_instruction_info, true, &gva)) 7461 return 1; 7462 /* _system ok, as nested_vmx_check_permission verified cpl=0 */ 7463 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, 7464 &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL); 7465 } 7466 7467 nested_vmx_succeed(vcpu); 7468 skip_emulated_instruction(vcpu); 7469 return 1; 7470} 7471 7472 7473static int handle_vmwrite(struct kvm_vcpu *vcpu) 7474{ 7475 unsigned long field; 7476 gva_t gva; 7477 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 7478 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 7479 /* The value to write might be 32 or 64 bits, depending on L1's long 7480 * mode, and eventually we need to write that into a field of several 7481 * possible lengths. The code below first zero-extends the value to 64 7482 * bit (field_value), and then copies only the appropriate number of 7483 * bits into the vmcs12 field. 7484 */ 7485 u64 field_value = 0; 7486 struct x86_exception e; 7487 7488 if (!nested_vmx_check_permission(vcpu) || 7489 !nested_vmx_check_vmcs12(vcpu)) 7490 return 1; 7491 7492 if (vmx_instruction_info & (1u << 10)) 7493 field_value = kvm_register_readl(vcpu, 7494 (((vmx_instruction_info) >> 3) & 0xf)); 7495 else { 7496 if (get_vmx_mem_address(vcpu, exit_qualification, 7497 vmx_instruction_info, false, &gva)) 7498 return 1; 7499 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, 7500 &field_value, (is_64_bit_mode(vcpu) ? 
8 : 4), &e)) { 7501 kvm_inject_page_fault(vcpu, &e); 7502 return 1; 7503 } 7504 } 7505 7506 7507 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 7508 if (vmcs_field_readonly(field)) { 7509 nested_vmx_failValid(vcpu, 7510 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); 7511 skip_emulated_instruction(vcpu); 7512 return 1; 7513 } 7514 7515 if (vmcs12_write_any(vcpu, field, field_value) < 0) { 7516 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 7517 skip_emulated_instruction(vcpu); 7518 return 1; 7519 } 7520 7521 nested_vmx_succeed(vcpu); 7522 skip_emulated_instruction(vcpu); 7523 return 1; 7524} 7525 7526/* Emulate the VMPTRLD instruction */ 7527static int handle_vmptrld(struct kvm_vcpu *vcpu) 7528{ 7529 struct vcpu_vmx *vmx = to_vmx(vcpu); 7530 gpa_t vmptr; 7531 7532 if (!nested_vmx_check_permission(vcpu)) 7533 return 1; 7534 7535 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr)) 7536 return 1; 7537 7538 if (vmx->nested.current_vmptr != vmptr) { 7539 struct vmcs12 *new_vmcs12; 7540 struct page *page; 7541 page = nested_get_page(vcpu, vmptr); 7542 if (page == NULL) { 7543 nested_vmx_failInvalid(vcpu); 7544 skip_emulated_instruction(vcpu); 7545 return 1; 7546 } 7547 new_vmcs12 = kmap(page); 7548 if (new_vmcs12->revision_id != VMCS12_REVISION) { 7549 kunmap(page); 7550 nested_release_page_clean(page); 7551 nested_vmx_failValid(vcpu, 7552 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 7553 skip_emulated_instruction(vcpu); 7554 return 1; 7555 } 7556 7557 nested_release_vmcs12(vmx); 7558 vmx->nested.current_vmptr = vmptr; 7559 vmx->nested.current_vmcs12 = new_vmcs12; 7560 vmx->nested.current_vmcs12_page = page; 7561 /* 7562 * Load VMCS12 from guest memory since it is not already 7563 * cached. 7564 */ 7565 memcpy(vmx->nested.cached_vmcs12, 7566 vmx->nested.current_vmcs12, VMCS12_SIZE); 7567 7568 if (enable_shadow_vmcs) { 7569 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, 7570 SECONDARY_EXEC_SHADOW_VMCS); 7571 vmcs_write64(VMCS_LINK_POINTER, 7572 __pa(vmx->vmcs01.shadow_vmcs)); 7573 vmx->nested.sync_shadow_vmcs = true; 7574 } 7575 } 7576 7577 nested_vmx_succeed(vcpu); 7578 skip_emulated_instruction(vcpu); 7579 return 1; 7580} 7581 7582/* Emulate the VMPTRST instruction */ 7583static int handle_vmptrst(struct kvm_vcpu *vcpu) 7584{ 7585 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 7586 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 7587 gva_t vmcs_gva; 7588 struct x86_exception e; 7589 7590 if (!nested_vmx_check_permission(vcpu)) 7591 return 1; 7592 7593 if (get_vmx_mem_address(vcpu, exit_qualification, 7594 vmx_instruction_info, true, &vmcs_gva)) 7595 return 1; 7596 /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */ 7597 if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, 7598 (void *)&to_vmx(vcpu)->nested.current_vmptr, 7599 sizeof(u64), &e)) { 7600 kvm_inject_page_fault(vcpu, &e); 7601 return 1; 7602 } 7603 nested_vmx_succeed(vcpu); 7604 skip_emulated_instruction(vcpu); 7605 return 1; 7606} 7607 7608/* Emulate the INVEPT instruction */ 7609static int handle_invept(struct kvm_vcpu *vcpu) 7610{ 7611 struct vcpu_vmx *vmx = to_vmx(vcpu); 7612 u32 vmx_instruction_info, types; 7613 unsigned long type; 7614 gva_t gva; 7615 struct x86_exception e; 7616 struct { 7617 u64 eptp, gpa; 7618 } operand; 7619 7620 if (!(vmx->nested.nested_vmx_secondary_ctls_high & 7621 SECONDARY_EXEC_ENABLE_EPT) || 7622 !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) { 7623 kvm_queue_exception(vcpu, 
UD_VECTOR); 7624 return 1; 7625 } 7626 7627 if (!nested_vmx_check_permission(vcpu)) 7628 return 1; 7629 7630 if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { 7631 kvm_queue_exception(vcpu, UD_VECTOR); 7632 return 1; 7633 } 7634 7635 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 7636 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 7637 7638 types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 7639 7640 if (type >= 32 || !(types & (1 << type))) { 7641 nested_vmx_failValid(vcpu, 7642 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 7643 skip_emulated_instruction(vcpu); 7644 return 1; 7645 } 7646 7647 /* According to the Intel VMX instruction reference, the memory 7648 * operand is read even if it isn't needed (e.g., for type==global) 7649 */ 7650 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 7651 vmx_instruction_info, false, &gva)) 7652 return 1; 7653 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, 7654 sizeof(operand), &e)) { 7655 kvm_inject_page_fault(vcpu, &e); 7656 return 1; 7657 } 7658 7659 switch (type) { 7660 case VMX_EPT_EXTENT_GLOBAL: 7661 /* 7662 * TODO: track mappings and invalidate 7663 * single context requests appropriately 7664 */ 7665 case VMX_EPT_EXTENT_CONTEXT: 7666 kvm_mmu_sync_roots(vcpu); 7667 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 7668 nested_vmx_succeed(vcpu); 7669 break; 7670 default: 7671 BUG_ON(1); 7672 break; 7673 } 7674 7675 skip_emulated_instruction(vcpu); 7676 return 1; 7677} 7678 7679static int handle_invvpid(struct kvm_vcpu *vcpu) 7680{ 7681 struct vcpu_vmx *vmx = to_vmx(vcpu); 7682 u32 vmx_instruction_info; 7683 unsigned long type, types; 7684 gva_t gva; 7685 struct x86_exception e; 7686 int vpid; 7687 7688 if (!(vmx->nested.nested_vmx_secondary_ctls_high & 7689 SECONDARY_EXEC_ENABLE_VPID) || 7690 !(vmx->nested.nested_vmx_vpid_caps & VMX_VPID_INVVPID_BIT)) { 7691 kvm_queue_exception(vcpu, UD_VECTOR); 7692 return 1; 7693 } 7694 7695 if (!nested_vmx_check_permission(vcpu)) 7696 return 1; 7697 7698 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 7699 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 7700 7701 types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7; 7702 7703 if (type >= 32 || !(types & (1 << type))) { 7704 nested_vmx_failValid(vcpu, 7705 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 7706 skip_emulated_instruction(vcpu); 7707 return 1; 7708 } 7709 7710 /* according to the intel vmx instruction reference, the memory 7711 * operand is read even if it isn't needed (e.g., for type==global) 7712 */ 7713 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 7714 vmx_instruction_info, false, &gva)) 7715 return 1; 7716 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid, 7717 sizeof(u32), &e)) { 7718 kvm_inject_page_fault(vcpu, &e); 7719 return 1; 7720 } 7721 7722 switch (type) { 7723 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 7724 /* 7725 * Old versions of KVM use the single-context version so we 7726 * have to support it; just treat it the same as all-context. 
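 * (Treating single-context as all-context over-invalidates, but * over-invalidation is always safe; it only costs extra TLB refills.)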
7727 */ 7728 case VMX_VPID_EXTENT_ALL_CONTEXT: 7729 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); 7730 nested_vmx_succeed(vcpu); 7731 break; 7732 default: 7733 /* Trap individual address invalidation invvpid calls */ 7734 BUG_ON(1); 7735 break; 7736 } 7737 7738 skip_emulated_instruction(vcpu); 7739 return 1; 7740} 7741 7742static int handle_pml_full(struct kvm_vcpu *vcpu) 7743{ 7744 unsigned long exit_qualification; 7745 7746 trace_kvm_pml_full(vcpu->vcpu_id); 7747 7748 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 7749 7750 /* 7751 * If the PML buffer was filled while executing iret from an NMI, the 7752 * "blocked by NMI" bit has to be set before the next VM entry. 7753 */ 7754 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 7755 cpu_has_virtual_nmis() && 7756 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 7757 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 7758 GUEST_INTR_STATE_NMI); 7759 7760 /* 7761 * PML buffer already flushed at beginning of VMEXIT. Nothing to do 7762 * here, and there's no userspace involvement needed for PML. 7763 */ 7764 return 1; 7765} 7766 7767static int handle_preemption_timer(struct kvm_vcpu *vcpu) 7768{ 7769 kvm_lapic_expired_hv_timer(vcpu); 7770 return 1; 7771} 7772 7773/* 7774 * The exit handlers return 1 if the exit was handled fully and guest execution 7775 * may resume. Otherwise they set the kvm_run parameter to indicate what needs 7776 * to be done to userspace and return 0. 7777 */ 7778static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { 7779 [EXIT_REASON_EXCEPTION_NMI] = handle_exception, 7780 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, 7781 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, 7782 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, 7783 [EXIT_REASON_IO_INSTRUCTION] = handle_io, 7784 [EXIT_REASON_CR_ACCESS] = handle_cr, 7785 [EXIT_REASON_DR_ACCESS] = handle_dr, 7786 [EXIT_REASON_CPUID] = handle_cpuid, 7787 [EXIT_REASON_MSR_READ] = handle_rdmsr, 7788 [EXIT_REASON_MSR_WRITE] = handle_wrmsr, 7789 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, 7790 [EXIT_REASON_HLT] = handle_halt, 7791 [EXIT_REASON_INVD] = handle_invd, 7792 [EXIT_REASON_INVLPG] = handle_invlpg, 7793 [EXIT_REASON_RDPMC] = handle_rdpmc, 7794 [EXIT_REASON_VMCALL] = handle_vmcall, 7795 [EXIT_REASON_VMCLEAR] = handle_vmclear, 7796 [EXIT_REASON_VMLAUNCH] = handle_vmlaunch, 7797 [EXIT_REASON_VMPTRLD] = handle_vmptrld, 7798 [EXIT_REASON_VMPTRST] = handle_vmptrst, 7799 [EXIT_REASON_VMREAD] = handle_vmread, 7800 [EXIT_REASON_VMRESUME] = handle_vmresume, 7801 [EXIT_REASON_VMWRITE] = handle_vmwrite, 7802 [EXIT_REASON_VMOFF] = handle_vmoff, 7803 [EXIT_REASON_VMON] = handle_vmon, 7804 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, 7805 [EXIT_REASON_APIC_ACCESS] = handle_apic_access, 7806 [EXIT_REASON_APIC_WRITE] = handle_apic_write, 7807 [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, 7808 [EXIT_REASON_WBINVD] = handle_wbinvd, 7809 [EXIT_REASON_XSETBV] = handle_xsetbv, 7810 [EXIT_REASON_TASK_SWITCH] = handle_task_switch, 7811 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, 7812 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, 7813 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, 7814 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, 7815 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, 7816 [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, 7817 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, 7818 [EXIT_REASON_INVEPT] = handle_invept, 7819 [EXIT_REASON_INVVPID] = handle_invvpid, 7820 
[EXIT_REASON_XSAVES] = handle_xsaves, 7821 [EXIT_REASON_XRSTORS] = handle_xrstors, 7822 [EXIT_REASON_PML_FULL] = handle_pml_full, 7823 [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, 7824}; 7825 7826static const int kvm_vmx_max_exit_handlers = 7827 ARRAY_SIZE(kvm_vmx_exit_handlers); 7828 7829static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, 7830 struct vmcs12 *vmcs12) 7831{ 7832 unsigned long exit_qualification; 7833 gpa_t bitmap, last_bitmap; 7834 unsigned int port; 7835 int size; 7836 u8 b; 7837 7838 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 7839 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); 7840 7841 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 7842 7843 port = exit_qualification >> 16; 7844 size = (exit_qualification & 7) + 1; 7845 7846 last_bitmap = (gpa_t)-1; 7847 b = -1; 7848 7849 while (size > 0) { 7850 if (port < 0x8000) 7851 bitmap = vmcs12->io_bitmap_a; 7852 else if (port < 0x10000) 7853 bitmap = vmcs12->io_bitmap_b; 7854 else 7855 return true; 7856 bitmap += (port & 0x7fff) / 8; 7857 7858 if (last_bitmap != bitmap) 7859 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) 7860 return true; 7861 if (b & (1 << (port & 7))) 7862 return true; 7863 7864 port++; 7865 size--; 7866 last_bitmap = bitmap; 7867 } 7868 7869 return false; 7870} 7871 7872/* 7873 * Return 1 if we should exit from L2 to L1 to handle an MSR access, 7874 * rather than handle it ourselves in L0. I.e., check whether L1 asked to 7875 * intercept the current event (the read or write of a specific MSR) via 7876 * its MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 7877 */ 7878static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, 7879 struct vmcs12 *vmcs12, u32 exit_reason) 7880{ 7881 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; 7882 gpa_t bitmap; 7883 7884 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 7885 return true; 7886 7887 /* 7888 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, 7889 * for the four combinations of read/write and low/high MSR numbers. 7890 * First we need to figure out which of the four to use: 7891 */ 7892 bitmap = vmcs12->msr_bitmap; 7893 if (exit_reason == EXIT_REASON_MSR_WRITE) 7894 bitmap += 2048; 7895 if (msr_index >= 0xc0000000) { 7896 msr_index -= 0xc0000000; 7897 bitmap += 1024; 7898 } 7899 7900 /* Then read the msr_index'th bit from this bitmap: */ 7901 if (msr_index < 1024*8) { 7902 unsigned char b; 7903 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) 7904 return true; 7905 return 1 & (b >> (msr_index & 7)); 7906 } else 7907 return true; /* let L1 handle the wrong parameter */ 7908} 7909 7910/* 7911 * Return 1 if we should exit from L2 to L1 to handle a CR access exit, 7912 * rather than handle it ourselves in L0. I.e., check if L1 wanted to 7913 * intercept (via guest_host_mask etc.) the current event. 
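 * For example, a "mov to cr0" in L2 needs an exit to L1 only if the new * value differs from cr0_read_shadow in a bit that L1 owns through * cr0_guest_host_mask; see the cr0 case below.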
7914 */ 7915static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, 7916 struct vmcs12 *vmcs12) 7917{ 7918 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 7919 int cr = exit_qualification & 15; 7920 int reg = (exit_qualification >> 8) & 15; 7921 unsigned long val = kvm_register_readl(vcpu, reg); 7922 7923 switch ((exit_qualification >> 4) & 3) { 7924 case 0: /* mov to cr */ 7925 switch (cr) { 7926 case 0: 7927 if (vmcs12->cr0_guest_host_mask & 7928 (val ^ vmcs12->cr0_read_shadow)) 7929 return true; 7930 break; 7931 case 3: 7932 if ((vmcs12->cr3_target_count >= 1 && 7933 vmcs12->cr3_target_value0 == val) || 7934 (vmcs12->cr3_target_count >= 2 && 7935 vmcs12->cr3_target_value1 == val) || 7936 (vmcs12->cr3_target_count >= 3 && 7937 vmcs12->cr3_target_value2 == val) || 7938 (vmcs12->cr3_target_count >= 4 && 7939 vmcs12->cr3_target_value3 == val)) 7940 return false; 7941 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) 7942 return true; 7943 break; 7944 case 4: 7945 if (vmcs12->cr4_guest_host_mask & 7946 (vmcs12->cr4_read_shadow ^ val)) 7947 return true; 7948 break; 7949 case 8: 7950 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) 7951 return true; 7952 break; 7953 } 7954 break; 7955 case 2: /* clts */ 7956 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && 7957 (vmcs12->cr0_read_shadow & X86_CR0_TS)) 7958 return true; 7959 break; 7960 case 1: /* mov from cr */ 7961 switch (cr) { 7962 case 3: 7963 if (vmcs12->cpu_based_vm_exec_control & 7964 CPU_BASED_CR3_STORE_EXITING) 7965 return true; 7966 break; 7967 case 8: 7968 if (vmcs12->cpu_based_vm_exec_control & 7969 CPU_BASED_CR8_STORE_EXITING) 7970 return true; 7971 break; 7972 } 7973 break; 7974 case 3: /* lmsw */ 7975 /* 7976 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 7977 * cr0. Other attempted changes are ignored, with no exit. 7978 */ 7979 if (vmcs12->cr0_guest_host_mask & 0xe & 7980 (val ^ vmcs12->cr0_read_shadow)) 7981 return true; 7982 if ((vmcs12->cr0_guest_host_mask & 0x1) && 7983 !(vmcs12->cr0_read_shadow & 0x1) && 7984 (val & 0x1)) 7985 return true; 7986 break; 7987 } 7988 return false; 7989} 7990 7991/* 7992 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we 7993 * should handle it ourselves in L0 (and then continue L2). Only call this 7994 * when in is_guest_mode (L2). 
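 * When this returns true, vmx_handle_exit() reflects the exit to L1 by * calling nested_vmx_vmexit() instead of running an L0 handler.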
7995 */ 7996static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) 7997{ 7998 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 7999 struct vcpu_vmx *vmx = to_vmx(vcpu); 8000 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 8001 u32 exit_reason = vmx->exit_reason; 8002 8003 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, 8004 vmcs_readl(EXIT_QUALIFICATION), 8005 vmx->idt_vectoring_info, 8006 intr_info, 8007 vmcs_read32(VM_EXIT_INTR_ERROR_CODE), 8008 KVM_ISA_VMX); 8009 8010 if (vmx->nested.nested_run_pending) 8011 return false; 8012 8013 if (unlikely(vmx->fail)) { 8014 pr_info_ratelimited("%s failed vm entry %x\n", __func__, 8015 vmcs_read32(VM_INSTRUCTION_ERROR)); 8016 return true; 8017 } 8018 8019 switch (exit_reason) { 8020 case EXIT_REASON_EXCEPTION_NMI: 8021 if (!is_exception(intr_info)) 8022 return false; 8023 else if (is_page_fault(intr_info)) 8024 return enable_ept; 8025 else if (is_no_device(intr_info) && 8026 !(vmcs12->guest_cr0 & X86_CR0_TS)) 8027 return false; 8028 else if (is_debug(intr_info) && 8029 vcpu->guest_debug & 8030 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 8031 return false; 8032 else if (is_breakpoint(intr_info) && 8033 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 8034 return false; 8035 return vmcs12->exception_bitmap & 8036 (1u << (intr_info & INTR_INFO_VECTOR_MASK)); 8037 case EXIT_REASON_EXTERNAL_INTERRUPT: 8038 return false; 8039 case EXIT_REASON_TRIPLE_FAULT: 8040 return true; 8041 case EXIT_REASON_PENDING_INTERRUPT: 8042 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); 8043 case EXIT_REASON_NMI_WINDOW: 8044 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); 8045 case EXIT_REASON_TASK_SWITCH: 8046 return true; 8047 case EXIT_REASON_CPUID: 8048 if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa) 8049 return false; 8050 return true; 8051 case EXIT_REASON_HLT: 8052 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); 8053 case EXIT_REASON_INVD: 8054 return true; 8055 case EXIT_REASON_INVLPG: 8056 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 8057 case EXIT_REASON_RDPMC: 8058 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); 8059 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: 8060 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); 8061 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: 8062 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: 8063 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: 8064 case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: 8065 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 8066 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: 8067 /* 8068 * VMX instructions trap unconditionally. This allows L1 to 8069 * emulate them for its L2 guest, i.e., allows 3-level nesting! 
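 * (L0 just reflects these exits to L1; L1's own handler then emulates * the VMX instruction on behalf of L2, exactly as L0 does for L1.)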
8070 */ 8071 return true; 8072 case EXIT_REASON_CR_ACCESS: 8073 return nested_vmx_exit_handled_cr(vcpu, vmcs12); 8074 case EXIT_REASON_DR_ACCESS: 8075 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); 8076 case EXIT_REASON_IO_INSTRUCTION: 8077 return nested_vmx_exit_handled_io(vcpu, vmcs12); 8078 case EXIT_REASON_MSR_READ: 8079 case EXIT_REASON_MSR_WRITE: 8080 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); 8081 case EXIT_REASON_INVALID_STATE: 8082 return true; 8083 case EXIT_REASON_MWAIT_INSTRUCTION: 8084 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); 8085 case EXIT_REASON_MONITOR_TRAP_FLAG: 8086 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG); 8087 case EXIT_REASON_MONITOR_INSTRUCTION: 8088 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); 8089 case EXIT_REASON_PAUSE_INSTRUCTION: 8090 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || 8091 nested_cpu_has2(vmcs12, 8092 SECONDARY_EXEC_PAUSE_LOOP_EXITING); 8093 case EXIT_REASON_MCE_DURING_VMENTRY: 8094 return false; 8095 case EXIT_REASON_TPR_BELOW_THRESHOLD: 8096 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); 8097 case EXIT_REASON_APIC_ACCESS: 8098 return nested_cpu_has2(vmcs12, 8099 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); 8100 case EXIT_REASON_APIC_WRITE: 8101 case EXIT_REASON_EOI_INDUCED: 8102 /* apic_write and eoi_induced should exit unconditionally. */ 8103 return true; 8104 case EXIT_REASON_EPT_VIOLATION: 8105 /* 8106 * L0 always deals with the EPT violation. If nested EPT is 8107 * used, and the nested mmu code discovers that the address is 8108 * missing in the guest EPT table (EPT12), the EPT violation 8109 * will be injected with nested_ept_inject_page_fault() 8110 */ 8111 return false; 8112 case EXIT_REASON_EPT_MISCONFIG: 8113 /* 8114 * L2 never directly uses L1's EPT, but rather L0's own EPT 8115 * table (shadow on EPT) or a merged EPT table that L0 built 8116 * (EPT on EPT). So any problems with the structure of the 8117 * table are L0's fault. 8118 */ 8119 return false; 8120 case EXIT_REASON_WBINVD: 8121 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); 8122 case EXIT_REASON_XSETBV: 8123 return true; 8124 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: 8125 /* 8126 * This should never happen, since it is not possible to 8127 * set XSS to a non-zero value---neither in L1 nor in L2. 8128 * If it were, XSS would have to be checked against 8129 * the XSS exit bitmap in vmcs12. 
8130 */ 8131 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); 8132 case EXIT_REASON_PREEMPTION_TIMER: 8133 return false; 8134 default: 8135 return true; 8136 } 8137} 8138 8139static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) 8140{ 8141 *info1 = vmcs_readl(EXIT_QUALIFICATION); 8142 *info2 = vmcs_read32(VM_EXIT_INTR_INFO); 8143} 8144 8145static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) 8146{ 8147 if (vmx->pml_pg) { 8148 __free_page(vmx->pml_pg); 8149 vmx->pml_pg = NULL; 8150 } 8151} 8152 8153static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) 8154{ 8155 struct vcpu_vmx *vmx = to_vmx(vcpu); 8156 u64 *pml_buf; 8157 u16 pml_idx; 8158 8159 pml_idx = vmcs_read16(GUEST_PML_INDEX); 8160 8161 /* Do nothing if PML buffer is empty */ 8162 if (pml_idx == (PML_ENTITY_NUM - 1)) 8163 return; 8164 8165 /* PML index always points to next available PML buffer entity */ 8166 if (pml_idx >= PML_ENTITY_NUM) 8167 pml_idx = 0; 8168 else 8169 pml_idx++; 8170 8171 pml_buf = page_address(vmx->pml_pg); 8172 for (; pml_idx < PML_ENTITY_NUM; pml_idx++) { 8173 u64 gpa; 8174 8175 gpa = pml_buf[pml_idx]; 8176 WARN_ON(gpa & (PAGE_SIZE - 1)); 8177 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); 8178 } 8179 8180 /* reset PML index */ 8181 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 8182} 8183 8184/* 8185 * Flush all vcpus' PML buffers and propagate the logged GPAs into dirty_bitmap. 8186 * Called before reporting dirty_bitmap to userspace. 8187 */ 8188static void kvm_flush_pml_buffers(struct kvm *kvm) 8189{ 8190 int i; 8191 struct kvm_vcpu *vcpu; 8192 /* 8193 * We only need to kick the vcpus out of guest mode here, as the PML 8194 * buffer is flushed at the beginning of every VMEXIT, so only vcpus 8195 * currently running in guest mode can have unflushed GPAs in their PML 8196 * buffer. 
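 * The kick forces an exit, and vmx_handle_exit() calls * vmx_flush_pml_buffer() before it does anything else.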
8197 */ 8198 kvm_for_each_vcpu(i, vcpu, kvm) 8199 kvm_vcpu_kick(vcpu); 8200} 8201 8202static void vmx_dump_sel(char *name, uint32_t sel) 8203{ 8204 pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", 8205 name, vmcs_read32(sel), 8206 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), 8207 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), 8208 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); 8209} 8210 8211static void vmx_dump_dtsel(char *name, uint32_t limit) 8212{ 8213 pr_err("%s limit=0x%08x, base=0x%016lx\n", 8214 name, vmcs_read32(limit), 8215 vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); 8216} 8217 8218static void dump_vmcs(void) 8219{ 8220 u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); 8221 u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); 8222 u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 8223 u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); 8224 u32 secondary_exec_control = 0; 8225 unsigned long cr4 = vmcs_readl(GUEST_CR4); 8226 u64 efer = vmcs_read64(GUEST_IA32_EFER); 8227 int i, n; 8228 8229 if (cpu_has_secondary_exec_ctrls()) 8230 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); 8231 8232 pr_err("*** Guest State ***\n"); 8233 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 8234 vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), 8235 vmcs_readl(CR0_GUEST_HOST_MASK)); 8236 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 8237 cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); 8238 pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); 8239 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) && 8240 (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA)) 8241 { 8242 pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", 8243 vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1)); 8244 pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", 8245 vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3)); 8246 } 8247 pr_err("RSP = 0x%016lx RIP = 0x%016lx\n", 8248 vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP)); 8249 pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", 8250 vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7)); 8251 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 8252 vmcs_readl(GUEST_SYSENTER_ESP), 8253 vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); 8254 vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); 8255 vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); 8256 vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); 8257 vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); 8258 vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); 8259 vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); 8260 vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); 8261 vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); 8262 vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); 8263 vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); 8264 if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) || 8265 (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER))) 8266 pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", 8267 efer, vmcs_read64(GUEST_IA32_PAT)); 8268 pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", 8269 vmcs_read64(GUEST_IA32_DEBUGCTL), 8270 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); 8271 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) 8272 pr_err("PerfGlobCtl = 0x%016llx\n", 8273 vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); 8274 if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) 8275 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); 8276 pr_err("Interruptibility = %08x ActivityState = %08x\n", 8277 
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), 8278 vmcs_read32(GUEST_ACTIVITY_STATE)); 8279 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 8280 pr_err("InterruptStatus = %04x\n", 8281 vmcs_read16(GUEST_INTR_STATUS)); 8282 8283 pr_err("*** Host State ***\n"); 8284 pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", 8285 vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); 8286 pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", 8287 vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), 8288 vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), 8289 vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), 8290 vmcs_read16(HOST_TR_SELECTOR)); 8291 pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", 8292 vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), 8293 vmcs_readl(HOST_TR_BASE)); 8294 pr_err("GDTBase=%016lx IDTBase=%016lx\n", 8295 vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); 8296 pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", 8297 vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), 8298 vmcs_readl(HOST_CR4)); 8299 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 8300 vmcs_readl(HOST_IA32_SYSENTER_ESP), 8301 vmcs_read32(HOST_IA32_SYSENTER_CS), 8302 vmcs_readl(HOST_IA32_SYSENTER_EIP)); 8303 if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER)) 8304 pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", 8305 vmcs_read64(HOST_IA32_EFER), 8306 vmcs_read64(HOST_IA32_PAT)); 8307 if (vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 8308 pr_err("PerfGlobCtl = 0x%016llx\n", 8309 vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); 8310 8311 pr_err("*** Control State ***\n"); 8312 pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", 8313 pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); 8314 pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); 8315 pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", 8316 vmcs_read32(EXCEPTION_BITMAP), 8317 vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), 8318 vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); 8319 pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", 8320 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 8321 vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), 8322 vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); 8323 pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", 8324 vmcs_read32(VM_EXIT_INTR_INFO), 8325 vmcs_read32(VM_EXIT_INTR_ERROR_CODE), 8326 vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); 8327 pr_err(" reason=%08x qualification=%016lx\n", 8328 vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION)); 8329 pr_err("IDTVectoring: info=%08x errcode=%08x\n", 8330 vmcs_read32(IDT_VECTORING_INFO_FIELD), 8331 vmcs_read32(IDT_VECTORING_ERROR_CODE)); 8332 pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET)); 8333 if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING) 8334 pr_err("TSC Multiplier = 0x%016llx\n", 8335 vmcs_read64(TSC_MULTIPLIER)); 8336 if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) 8337 pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD)); 8338 if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR) 8339 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV)); 8340 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)) 8341 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER)); 8342 n = vmcs_read32(CR3_TARGET_COUNT); 8343 for (i = 0; i + 1 < n; i += 4) 8344 pr_err("CR3 target%u=%016lx target%u=%016lx\n", 8345 i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2), 8346 i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2)); 8347 if (i < n) 8348 pr_err("CR3 
target%u=%016lx\n", 8349 i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2)); 8350 if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) 8351 pr_err("PLE Gap=%08x Window=%08x\n", 8352 vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW)); 8353 if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID) 8354 pr_err("Virtual processor ID = 0x%04x\n", 8355 vmcs_read16(VIRTUAL_PROCESSOR_ID)); 8356} 8357 8358/* 8359 * The guest has exited. See if we can fix it or if we need userspace 8360 * assistance. 8361 */ 8362static int vmx_handle_exit(struct kvm_vcpu *vcpu) 8363{ 8364 struct vcpu_vmx *vmx = to_vmx(vcpu); 8365 u32 exit_reason = vmx->exit_reason; 8366 u32 vectoring_info = vmx->idt_vectoring_info; 8367 8368 trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX); 8369 8370 /* 8371 * Flush logged GPAs from the PML buffer so that dirty_bitmap is up to 8372 * date. Another benefit: in kvm_vm_ioctl_get_dirty_log, before 8373 * querying dirty_bitmap, we only need to kick all vcpus out of guest 8374 * mode, since once a vcpu is back in root mode its PML buffer has 8375 * already been flushed. 8376 */ 8377 if (enable_pml) 8378 vmx_flush_pml_buffer(vcpu); 8379 8380 /* If guest state is invalid, start emulating */ 8381 if (vmx->emulation_required) 8382 return handle_invalid_guest_state(vcpu); 8383 8384 if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) { 8385 nested_vmx_vmexit(vcpu, exit_reason, 8386 vmcs_read32(VM_EXIT_INTR_INFO), 8387 vmcs_readl(EXIT_QUALIFICATION)); 8388 return 1; 8389 } 8390 8391 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { 8392 dump_vmcs(); 8393 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; 8394 vcpu->run->fail_entry.hardware_entry_failure_reason 8395 = exit_reason; 8396 return 0; 8397 } 8398 8399 if (unlikely(vmx->fail)) { 8400 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; 8401 vcpu->run->fail_entry.hardware_entry_failure_reason 8402 = vmcs_read32(VM_INSTRUCTION_ERROR); 8403 return 0; 8404 } 8405 8406 /* 8407 * Note: 8408 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a 8409 * delivery event, since that indicates the guest is accessing MMIO. 8410 * The vm-exit would be triggered again after returning to the guest, 8411 * causing an infinite loop. 8412 */ 8413 if ((vectoring_info & VECTORING_INFO_VALID_MASK) && 8414 (exit_reason != EXIT_REASON_EXCEPTION_NMI && 8415 exit_reason != EXIT_REASON_EPT_VIOLATION && 8416 exit_reason != EXIT_REASON_PML_FULL && 8417 exit_reason != EXIT_REASON_TASK_SWITCH)) { 8418 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 8419 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; 8420 vcpu->run->internal.ndata = 2; 8421 vcpu->run->internal.data[0] = vectoring_info; 8422 vcpu->run->internal.data[1] = exit_reason; 8423 return 0; 8424 } 8425 8426 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked && 8427 !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis( 8428 get_vmcs12(vcpu))))) { 8429 if (vmx_interrupt_allowed(vcpu)) { 8430 vmx->soft_vnmi_blocked = 0; 8431 } else if (vmx->vnmi_blocked_time > 1000000000LL && 8432 vcpu->arch.nmi_pending) { 8433 /* 8434 * This CPU doesn't let us find the end of an 8435 * NMI-blocked window if the guest runs with IRQs 8436 * disabled. So we pull the trigger after 1 s of 8437 * futile waiting, but inform the user about it. 
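 * (vnmi_blocked_time accumulates in vmx_recover_nmi_blocking(), based * on the entry timestamp taken in vmx_vcpu_run().)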
8438 */ 8439 printk(KERN_WARNING "%s: Breaking out of NMI-blocked " 8440 "state on VCPU %d after 1 s timeout\n", 8441 __func__, vcpu->vcpu_id); 8442 vmx->soft_vnmi_blocked = 0; 8443 } 8444 } 8445 8446 if (exit_reason < kvm_vmx_max_exit_handlers 8447 && kvm_vmx_exit_handlers[exit_reason]) 8448 return kvm_vmx_exit_handlers[exit_reason](vcpu); 8449 else { 8450 WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason); 8451 kvm_queue_exception(vcpu, UD_VECTOR); 8452 return 1; 8453 } 8454} 8455 8456static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) 8457{ 8458 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 8459 8460 if (is_guest_mode(vcpu) && 8461 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) 8462 return; 8463 8464 if (irr == -1 || tpr < irr) { 8465 vmcs_write32(TPR_THRESHOLD, 0); 8466 return; 8467 } 8468 8469 vmcs_write32(TPR_THRESHOLD, irr); 8470} 8471 8472static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) 8473{ 8474 u32 sec_exec_control; 8475 8476 /* Postpone execution until vmcs01 is the current VMCS. */ 8477 if (is_guest_mode(vcpu)) { 8478 to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true; 8479 return; 8480 } 8481 8482 if (!cpu_has_vmx_virtualize_x2apic_mode()) 8483 return; 8484 8485 if (!cpu_need_tpr_shadow(vcpu)) 8486 return; 8487 8488 sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); 8489 8490 if (set) { 8491 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 8492 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 8493 } else { 8494 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 8495 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 8496 } 8497 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); 8498 8499 vmx_set_msr_bitmap(vcpu); 8500} 8501 8502static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) 8503{ 8504 struct vcpu_vmx *vmx = to_vmx(vcpu); 8505 8506 /* 8507 * Currently we do not handle the nested case where L2 has an 8508 * APIC access page of its own; that page is still pinned. 8509 * Hence, we skip the case where the VCPU is in guest mode _and_ 8510 * L1 prepared an APIC access page for L2. 8511 * 8512 * For the case where L1 and L2 share the same APIC access page 8513 * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear 8514 * in the vmcs12), this function will only update either the vmcs01 8515 * or the vmcs02. If the former, the vmcs02 will be updated by 8516 * prepare_vmcs02. If the latter, the vmcs01 will be updated in 8517 * the next L2->L1 exit. 
8518 */ 8519 if (!is_guest_mode(vcpu) || 8520 !nested_cpu_has2(get_vmcs12(&vmx->vcpu), 8521 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 8522 vmcs_write64(APIC_ACCESS_ADDR, hpa); 8523} 8524 8525static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) 8526{ 8527 u16 status; 8528 u8 old; 8529 8530 if (max_isr == -1) 8531 max_isr = 0; 8532 8533 status = vmcs_read16(GUEST_INTR_STATUS); 8534 old = status >> 8; 8535 if (max_isr != old) { 8536 status &= 0xff; 8537 status |= max_isr << 8; 8538 vmcs_write16(GUEST_INTR_STATUS, status); 8539 } 8540} 8541 8542static void vmx_set_rvi(int vector) 8543{ 8544 u16 status; 8545 u8 old; 8546 8547 if (vector == -1) 8548 vector = 0; 8549 8550 status = vmcs_read16(GUEST_INTR_STATUS); 8551 old = (u8)status & 0xff; 8552 if ((u8)vector != old) { 8553 status &= ~0xff; 8554 status |= (u8)vector; 8555 vmcs_write16(GUEST_INTR_STATUS, status); 8556 } 8557} 8558 8559static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) 8560{ 8561 if (!is_guest_mode(vcpu)) { 8562 vmx_set_rvi(max_irr); 8563 return; 8564 } 8565 8566 if (max_irr == -1) 8567 return; 8568 8569 /* 8570 * In guest mode. If a vmexit is needed, vmx_check_nested_events 8571 * handles it. 8572 */ 8573 if (nested_exit_on_intr(vcpu)) 8574 return; 8575 8576 /* 8577 * Else, fall back to pre-APICv interrupt injection since L2 8578 * is run without virtual interrupt delivery. 8579 */ 8580 if (!kvm_event_needs_reinjection(vcpu) && 8581 vmx_interrupt_allowed(vcpu)) { 8582 kvm_queue_interrupt(vcpu, max_irr, false); 8583 vmx_inject_irq(vcpu); 8584 } 8585} 8586 8587static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) 8588{ 8589 if (!kvm_vcpu_apicv_active(vcpu)) 8590 return; 8591 8592 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); 8593 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); 8594 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); 8595 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); 8596} 8597 8598static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) 8599{ 8600 u32 exit_intr_info; 8601 8602 if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY 8603 || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)) 8604 return; 8605 8606 vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 8607 exit_intr_info = vmx->exit_intr_info; 8608 8609 /* Handle machine checks before interrupts are enabled */ 8610 if (is_machine_check(exit_intr_info)) 8611 kvm_machine_check(); 8612 8613 /* We need to handle NMIs before interrupts are enabled */ 8614 if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && 8615 (exit_intr_info & INTR_INFO_VALID_MASK)) { 8616 kvm_before_handle_nmi(&vmx->vcpu); 8617 asm("int $2"); 8618 kvm_after_handle_nmi(&vmx->vcpu); 8619 } 8620} 8621 8622static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) 8623{ 8624 u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 8625 register void *__sp asm(_ASM_SP); 8626 8627 /* 8628 * If external interrupt exists, IF bit is set in rflags/eflags on the 8629 * interrupt stack frame, and interrupt will be enabled on a return 8630 * from interrupt handler. 
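 * The asm below hand-builds exactly that frame (SS:RSP on x86-64, then * EFLAGS and CS; the call pushes RIP) and calls straight into the IDT * entry for the vector.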
8631 */ 8632 if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) 8633 == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { 8634 unsigned int vector; 8635 unsigned long entry; 8636 gate_desc *desc; 8637 struct vcpu_vmx *vmx = to_vmx(vcpu); 8638#ifdef CONFIG_X86_64 8639 unsigned long tmp; 8640#endif 8641 8642 vector = exit_intr_info & INTR_INFO_VECTOR_MASK; 8643 desc = (gate_desc *)vmx->host_idt_base + vector; 8644 entry = gate_offset(*desc); 8645 asm volatile( 8646#ifdef CONFIG_X86_64 8647 "mov %%" _ASM_SP ", %[sp]\n\t" 8648 "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t" 8649 "push $%c[ss]\n\t" 8650 "push %[sp]\n\t" 8651#endif 8652 "pushf\n\t" 8653 __ASM_SIZE(push) " $%c[cs]\n\t" 8654 "call *%[entry]\n\t" 8655 : 8656#ifdef CONFIG_X86_64 8657 [sp]"=&r"(tmp), 8658#endif 8659 "+r"(__sp) 8660 : 8661 [entry]"r"(entry), 8662 [ss]"i"(__KERNEL_DS), 8663 [cs]"i"(__KERNEL_CS) 8664 ); 8665 } 8666} 8667 8668static bool vmx_has_high_real_mode_segbase(void) 8669{ 8670 return enable_unrestricted_guest || emulate_invalid_guest_state; 8671} 8672 8673static bool vmx_mpx_supported(void) 8674{ 8675 return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) && 8676 (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS); 8677} 8678 8679static bool vmx_xsaves_supported(void) 8680{ 8681 return vmcs_config.cpu_based_2nd_exec_ctrl & 8682 SECONDARY_EXEC_XSAVES; 8683} 8684 8685static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) 8686{ 8687 u32 exit_intr_info; 8688 bool unblock_nmi; 8689 u8 vector; 8690 bool idtv_info_valid; 8691 8692 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; 8693 8694 if (cpu_has_virtual_nmis()) { 8695 if (vmx->nmi_known_unmasked) 8696 return; 8697 /* 8698 * Can't use vmx->exit_intr_info since we're not sure what 8699 * the exit reason is. 8700 */ 8701 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 8702 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; 8703 vector = exit_intr_info & INTR_INFO_VECTOR_MASK; 8704 /* 8705 * SDM 3: 27.7.1.2 (September 2008) 8706 * Re-set bit "block by NMI" before VM entry if vmexit caused by 8707 * a guest IRET fault. 8708 * SDM 3: 23.2.2 (September 2008) 8709 * Bit 12 is undefined in any of the following cases: 8710 * If the VM exit sets the valid bit in the IDT-vectoring 8711 * information field. 8712 * If the VM exit is due to a double fault. 
8713 */ 8714 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && 8715 vector != DF_VECTOR && !idtv_info_valid) 8716 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 8717 GUEST_INTR_STATE_NMI); 8718 else 8719 vmx->nmi_known_unmasked = 8720 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) 8721 & GUEST_INTR_STATE_NMI); 8722 } else if (unlikely(vmx->soft_vnmi_blocked)) 8723 vmx->vnmi_blocked_time += 8724 ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); 8725} 8726 8727static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, 8728 u32 idt_vectoring_info, 8729 int instr_len_field, 8730 int error_code_field) 8731{ 8732 u8 vector; 8733 int type; 8734 bool idtv_info_valid; 8735 8736 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; 8737 8738 vcpu->arch.nmi_injected = false; 8739 kvm_clear_exception_queue(vcpu); 8740 kvm_clear_interrupt_queue(vcpu); 8741 8742 if (!idtv_info_valid) 8743 return; 8744 8745 kvm_make_request(KVM_REQ_EVENT, vcpu); 8746 8747 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; 8748 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; 8749 8750 switch (type) { 8751 case INTR_TYPE_NMI_INTR: 8752 vcpu->arch.nmi_injected = true; 8753 /* 8754 * SDM 3: 27.7.1.2 (September 2008) 8755 * Clear bit "block by NMI" before VM entry if a NMI 8756 * delivery faulted. 8757 */ 8758 vmx_set_nmi_mask(vcpu, false); 8759 break; 8760 case INTR_TYPE_SOFT_EXCEPTION: 8761 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 8762 /* fall through */ 8763 case INTR_TYPE_HARD_EXCEPTION: 8764 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { 8765 u32 err = vmcs_read32(error_code_field); 8766 kvm_requeue_exception_e(vcpu, vector, err); 8767 } else 8768 kvm_requeue_exception(vcpu, vector); 8769 break; 8770 case INTR_TYPE_SOFT_INTR: 8771 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 8772 /* fall through */ 8773 case INTR_TYPE_EXT_INTR: 8774 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); 8775 break; 8776 default: 8777 break; 8778 } 8779} 8780 8781static void vmx_complete_interrupts(struct vcpu_vmx *vmx) 8782{ 8783 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, 8784 VM_EXIT_INSTRUCTION_LEN, 8785 IDT_VECTORING_ERROR_CODE); 8786} 8787 8788static void vmx_cancel_injection(struct kvm_vcpu *vcpu) 8789{ 8790 __vmx_complete_interrupts(vcpu, 8791 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 8792 VM_ENTRY_INSTRUCTION_LEN, 8793 VM_ENTRY_EXCEPTION_ERROR_CODE); 8794 8795 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 8796} 8797 8798static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) 8799{ 8800 int i, nr_msrs; 8801 struct perf_guest_switch_msr *msrs; 8802 8803 msrs = perf_guest_get_msrs(&nr_msrs); 8804 8805 if (!msrs) 8806 return; 8807 8808 for (i = 0; i < nr_msrs; i++) 8809 if (msrs[i].host == msrs[i].guest) 8810 clear_atomic_switch_msr(vmx, msrs[i].msr); 8811 else 8812 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, 8813 msrs[i].host); 8814} 8815 8816void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) 8817{ 8818 struct vcpu_vmx *vmx = to_vmx(vcpu); 8819 u64 tscl; 8820 u32 delta_tsc; 8821 8822 if (vmx->hv_deadline_tsc == -1) 8823 return; 8824 8825 tscl = rdtsc(); 8826 if (vmx->hv_deadline_tsc > tscl) 8827 /* sure to be 32 bit only because checked on set_hv_timer */ 8828 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> 8829 cpu_preemption_timer_multi); 8830 else 8831 delta_tsc = 0; 8832 8833 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); 8834} 8835 8836static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) 8837{ 8838 
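	/* Note: the caller (vcpu_enter_guest() in x86.c) is expected to have disabled interrupts already; everything below up to the VM entry runs in that window. */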
struct vcpu_vmx *vmx = to_vmx(vcpu); 8839 unsigned long debugctlmsr, cr4; 8840 8841 /* Record the guest's net vcpu time for enforced NMI injections. */ 8842 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) 8843 vmx->entry_time = ktime_get(); 8844 8845 /* Don't enter VMX if guest state is invalid; let the exit handler 8846 start emulation until we arrive back at a valid state */ 8847 if (vmx->emulation_required) 8848 return; 8849 8850 if (vmx->ple_window_dirty) { 8851 vmx->ple_window_dirty = false; 8852 vmcs_write32(PLE_WINDOW, vmx->ple_window); 8853 } 8854 8855 if (vmx->nested.sync_shadow_vmcs) { 8856 copy_vmcs12_to_shadow(vmx); 8857 vmx->nested.sync_shadow_vmcs = false; 8858 } 8859 8860 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) 8861 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); 8862 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) 8863 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); 8864 8865 cr4 = cr4_read_shadow(); 8866 if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { 8867 vmcs_writel(HOST_CR4, cr4); 8868 vmx->host_state.vmcs_host_cr4 = cr4; 8869 } 8870 8871 /* When single-stepping over STI and MOV SS, we must clear the 8872 * corresponding interruptibility bits in the guest state. Otherwise 8873 * vmentry fails as it then expects bit 14 (BS) in the pending debug 8874 * exceptions field to be set, but that's not correct for the guest 8875 * debugging case. */ 8876 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 8877 vmx_set_interrupt_shadow(vcpu, 0); 8878 8879 if (vmx->guest_pkru_valid) 8880 __write_pkru(vmx->guest_pkru); 8881 8882 atomic_switch_perf_msrs(vmx); 8883 debugctlmsr = get_debugctlmsr(); 8884 8885 vmx_arm_hv_timer(vcpu); 8886 8887 vmx->__launched = vmx->loaded_vmcs->launched; 8888 asm( 8889 /* Store host registers */ 8890 "push %%" _ASM_DX "; push %%" _ASM_BP ";" 8891 "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */ 8892 "push %%" _ASM_CX " \n\t" 8893 "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t" 8894 "je 1f \n\t" 8895 "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t" 8896 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" 8897 "1: \n\t" 8898 /* Reload cr2 if changed */ 8899 "mov %c[cr2](%0), %%" _ASM_AX " \n\t" 8900 "mov %%cr2, %%" _ASM_DX " \n\t" 8901 "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t" 8902 "je 2f \n\t" 8903 "mov %%" _ASM_AX", %%cr2 \n\t" 8904 "2: \n\t" 8905 /* Check if vmlaunch or vmresume is needed */ 8906 "cmpl $0, %c[launched](%0) \n\t" 8907 /* Load guest registers. Don't clobber flags. 
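 * RCX is deliberately loaded last, below: %0 (the vmx pointer) lives in * rcx/ecx, so loading it earlier would lose the base register for the * remaining operands.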
*/ 8908 "mov %c[rax](%0), %%" _ASM_AX " \n\t" 8909 "mov %c[rbx](%0), %%" _ASM_BX " \n\t" 8910 "mov %c[rdx](%0), %%" _ASM_DX " \n\t" 8911 "mov %c[rsi](%0), %%" _ASM_SI " \n\t" 8912 "mov %c[rdi](%0), %%" _ASM_DI " \n\t" 8913 "mov %c[rbp](%0), %%" _ASM_BP " \n\t" 8914#ifdef CONFIG_X86_64 8915 "mov %c[r8](%0), %%r8 \n\t" 8916 "mov %c[r9](%0), %%r9 \n\t" 8917 "mov %c[r10](%0), %%r10 \n\t" 8918 "mov %c[r11](%0), %%r11 \n\t" 8919 "mov %c[r12](%0), %%r12 \n\t" 8920 "mov %c[r13](%0), %%r13 \n\t" 8921 "mov %c[r14](%0), %%r14 \n\t" 8922 "mov %c[r15](%0), %%r15 \n\t" 8923#endif 8924 "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */ 8925 8926 /* Enter guest mode */ 8927 "jne 1f \n\t" 8928 __ex(ASM_VMX_VMLAUNCH) "\n\t" 8929 "jmp 2f \n\t" 8930 "1: " __ex(ASM_VMX_VMRESUME) "\n\t" 8931 "2: " 8932 /* Save guest registers, load host registers, keep flags */ 8933 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" 8934 "pop %0 \n\t" 8935 "mov %%" _ASM_AX ", %c[rax](%0) \n\t" 8936 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" 8937 __ASM_SIZE(pop) " %c[rcx](%0) \n\t" 8938 "mov %%" _ASM_DX ", %c[rdx](%0) \n\t" 8939 "mov %%" _ASM_SI ", %c[rsi](%0) \n\t" 8940 "mov %%" _ASM_DI ", %c[rdi](%0) \n\t" 8941 "mov %%" _ASM_BP ", %c[rbp](%0) \n\t" 8942#ifdef CONFIG_X86_64 8943 "mov %%r8, %c[r8](%0) \n\t" 8944 "mov %%r9, %c[r9](%0) \n\t" 8945 "mov %%r10, %c[r10](%0) \n\t" 8946 "mov %%r11, %c[r11](%0) \n\t" 8947 "mov %%r12, %c[r12](%0) \n\t" 8948 "mov %%r13, %c[r13](%0) \n\t" 8949 "mov %%r14, %c[r14](%0) \n\t" 8950 "mov %%r15, %c[r15](%0) \n\t" 8951#endif 8952 "mov %%cr2, %%" _ASM_AX " \n\t" 8953 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" 8954 8955 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" 8956 "setbe %c[fail](%0) \n\t" 8957 ".pushsection .rodata \n\t" 8958 ".global vmx_return \n\t" 8959 "vmx_return: " _ASM_PTR " 2b \n\t" 8960 ".popsection" 8961 : : "c"(vmx), "d"((unsigned long)HOST_RSP), 8962 [launched]"i"(offsetof(struct vcpu_vmx, __launched)), 8963 [fail]"i"(offsetof(struct vcpu_vmx, fail)), 8964 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), 8965 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), 8966 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), 8967 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), 8968 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), 8969 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), 8970 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), 8971 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), 8972#ifdef CONFIG_X86_64 8973 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), 8974 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), 8975 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), 8976 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), 8977 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), 8978 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), 8979 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), 8980 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), 8981#endif 8982 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), 8983 [wordsize]"i"(sizeof(ulong)) 8984 : "cc", "memory" 8985#ifdef CONFIG_X86_64 8986 , "rax", "rbx", "rdi", "rsi" 8987 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 8988#else 8989 , "eax", "ebx", "edi", "esi" 8990#endif 8991 ); 8992 8993 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. 
Restore it if needed */ 8994 if (debugctlmsr) 8995 update_debugctlmsr(debugctlmsr); 8996 8997#ifndef CONFIG_X86_64 8998 /* 8999 * The sysexit path does not restore ds/es, so we must set them to 9000 * a reasonable value ourselves. 9001 * 9002 * We can't defer this to vmx_load_host_state() since that function 9003 * may be executed in interrupt context, which saves and restores 9004 * segments around it, nullifying its effect. 9005 */ 9006 loadsegment(ds, __USER_DS); 9007 loadsegment(es, __USER_DS); 9008#endif 9009 9010 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) 9011 | (1 << VCPU_EXREG_RFLAGS) 9012 | (1 << VCPU_EXREG_PDPTR) 9013 | (1 << VCPU_EXREG_SEGMENTS) 9014 | (1 << VCPU_EXREG_CR3)); 9015 vcpu->arch.regs_dirty = 0; 9016 9017 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 9018 9019 vmx->loaded_vmcs->launched = 1; 9020 9021 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); 9022 9023 /* 9024 * eager fpu is enabled if PKEY is supported and CR4 is switched back 9025 * on in the host, so it is safe to read the guest PKRU from the 9026 * current XSAVE. 9027 */ 9028 if (boot_cpu_has(X86_FEATURE_OSPKE)) { 9029 vmx->guest_pkru = __read_pkru(); 9030 if (vmx->guest_pkru != vmx->host_pkru) { 9031 vmx->guest_pkru_valid = true; 9032 __write_pkru(vmx->host_pkru); 9033 } else 9034 vmx->guest_pkru_valid = false; 9035 } 9036 9037 /* 9038 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if 9039 * we did not inject a still-pending event to L1 now because of 9040 * nested_run_pending, we need to re-enable this bit. 9041 */ 9042 if (vmx->nested.nested_run_pending) 9043 kvm_make_request(KVM_REQ_EVENT, vcpu); 9044 9045 vmx->nested.nested_run_pending = 0; 9046 9047 vmx_complete_atomic_exit(vmx); 9048 vmx_recover_nmi_blocking(vmx); 9049 vmx_complete_interrupts(vmx); 9050} 9051 9052static void vmx_load_vmcs01(struct kvm_vcpu *vcpu) 9053{ 9054 struct vcpu_vmx *vmx = to_vmx(vcpu); 9055 int cpu; 9056 9057 if (vmx->loaded_vmcs == &vmx->vmcs01) 9058 return; 9059 9060 cpu = get_cpu(); 9061 vmx->loaded_vmcs = &vmx->vmcs01; 9062 vmx_vcpu_put(vcpu); 9063 vmx_vcpu_load(vcpu, cpu); 9064 vcpu->cpu = cpu; 9065 put_cpu(); 9066} 9067 9068/* 9069 * Ensure that the current vmcs of the logical processor is the 9070 * vmcs01 of the vcpu before calling free_nested(). 
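 * (Rationale: free_nested() frees the vmcs02 and shadow VMCS structures; * freeing a VMCS that is still current on this CPU would leave the * processor with a dangling current-VMCS pointer.)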
9071 */ 9072static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu) 9073{ 9074 struct vcpu_vmx *vmx = to_vmx(vcpu); 9075 int r; 9076 9077 r = vcpu_load(vcpu); 9078 BUG_ON(r); 9079 vmx_load_vmcs01(vcpu); 9080 free_nested(vmx); 9081 vcpu_put(vcpu); 9082} 9083 9084static void vmx_free_vcpu(struct kvm_vcpu *vcpu) 9085{ 9086 struct vcpu_vmx *vmx = to_vmx(vcpu); 9087 9088 if (enable_pml) 9089 vmx_destroy_pml_buffer(vmx); 9090 free_vpid(vmx->vpid); 9091 leave_guest_mode(vcpu); 9092 vmx_free_vcpu_nested(vcpu); 9093 free_loaded_vmcs(vmx->loaded_vmcs); 9094 kfree(vmx->guest_msrs); 9095 kvm_vcpu_uninit(vcpu); 9096 kmem_cache_free(kvm_vcpu_cache, vmx); 9097} 9098 9099static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) 9100{ 9101 int err; 9102 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); 9103 int cpu; 9104 9105 if (!vmx) 9106 return ERR_PTR(-ENOMEM); 9107 9108 vmx->vpid = allocate_vpid(); 9109 9110 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); 9111 if (err) 9112 goto free_vcpu; 9113 9114 err = -ENOMEM; 9115 9116 /* 9117 * If PML is turned on, failure to enable PML just results in failure 9118 * of creating the vcpu, therefore we can simplify PML logic (by 9119 * avoiding dealing with cases such as enabling PML partially on vcpus 9120 * for the guest, etc.). 9121 */ 9122 if (enable_pml) { 9123 vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO); 9124 if (!vmx->pml_pg) 9125 goto uninit_vcpu; 9126 } 9127 9128 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); 9129 BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) 9130 > PAGE_SIZE); 9131 9132 if (!vmx->guest_msrs) 9133 goto free_pml; 9134 9135 vmx->loaded_vmcs = &vmx->vmcs01; 9136 vmx->loaded_vmcs->vmcs = alloc_vmcs(); 9137 vmx->loaded_vmcs->shadow_vmcs = NULL; 9138 if (!vmx->loaded_vmcs->vmcs) 9139 goto free_msrs; 9140 if (!vmm_exclusive) 9141 kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id()))); 9142 loaded_vmcs_init(vmx->loaded_vmcs); 9143 if (!vmm_exclusive) 9144 kvm_cpu_vmxoff(); 9145 9146 cpu = get_cpu(); 9147 vmx_vcpu_load(&vmx->vcpu, cpu); 9148 vmx->vcpu.cpu = cpu; 9149 err = vmx_vcpu_setup(vmx); 9150 vmx_vcpu_put(&vmx->vcpu); 9151 put_cpu(); 9152 if (err) 9153 goto free_vmcs; 9154 if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { 9155 err = alloc_apic_access_page(kvm); 9156 if (err) 9157 goto free_vmcs; 9158 } 9159 9160 if (enable_ept) { 9161 if (!kvm->arch.ept_identity_map_addr) 9162 kvm->arch.ept_identity_map_addr = 9163 VMX_EPT_IDENTITY_PAGETABLE_ADDR; 9164 err = init_rmode_identity_map(kvm); 9165 if (err) 9166 goto free_vmcs; 9167 } 9168 9169 if (nested) { 9170 nested_vmx_setup_ctls_msrs(vmx); 9171 vmx->nested.vpid02 = allocate_vpid(); 9172 } 9173 9174 vmx->nested.posted_intr_nv = -1; 9175 vmx->nested.current_vmptr = -1ull; 9176 vmx->nested.current_vmcs12 = NULL; 9177 9178 vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; 9179 9180 return &vmx->vcpu; 9181 9182free_vmcs: 9183 free_vpid(vmx->nested.vpid02); 9184 free_loaded_vmcs(vmx->loaded_vmcs); 9185free_msrs: 9186 kfree(vmx->guest_msrs); 9187free_pml: 9188 vmx_destroy_pml_buffer(vmx); 9189uninit_vcpu: 9190 kvm_vcpu_uninit(&vmx->vcpu); 9191free_vcpu: 9192 free_vpid(vmx->vpid); 9193 kmem_cache_free(kvm_vcpu_cache, vmx); 9194 return ERR_PTR(err); 9195} 9196 9197static void __init vmx_check_processor_compat(void *rtn) 9198{ 9199 struct vmcs_config vmcs_conf; 9200 9201 *(int *)rtn = 0; 9202 if (setup_vmcs_config(&vmcs_conf) < 0) 9203 *(int *)rtn = -EIO; 9204 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct 
vmcs_config)) != 0) { 9205 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", 9206 smp_processor_id()); 9207 *(int *)rtn = -EIO; 9208 } 9209} 9210 9211static int get_ept_level(void) 9212{ 9213 return VMX_EPT_DEFAULT_GAW + 1; 9214} 9215 9216static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) 9217{ 9218 u8 cache; 9219 u64 ipat = 0; 9220 9221 /* For the VT-d and EPT combination: 9222 * 1. MMIO: always map as UC 9223 * 2. EPT with VT-d: 9224 * a. VT-d without the snooping control feature: can't guarantee the 9225 * result, try to trust the guest. 9226 * b. VT-d with the snooping control feature: the VT-d engine's 9227 * snooping control can guarantee cache correctness. Just set it 9228 * to WB to stay consistent with the host, i.e. the same as item 3. 9229 * 3. EPT without VT-d: always map as WB and set IPAT=1 to stay 9230 * consistent with the host MTRR 9231 */ 9232 if (is_mmio) { 9233 cache = MTRR_TYPE_UNCACHABLE; 9234 goto exit; 9235 } 9236 9237 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { 9238 ipat = VMX_EPT_IPAT_BIT; 9239 cache = MTRR_TYPE_WRBACK; 9240 goto exit; 9241 } 9242 9243 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { 9244 ipat = VMX_EPT_IPAT_BIT; 9245 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 9246 cache = MTRR_TYPE_WRBACK; 9247 else 9248 cache = MTRR_TYPE_UNCACHABLE; 9249 goto exit; 9250 } 9251 9252 cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); 9253 9254exit: 9255 return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat; 9256} 9257 9258static int vmx_get_lpage_level(void) 9259{ 9260 if (enable_ept && !cpu_has_vmx_ept_1g_page()) 9261 return PT_DIRECTORY_LEVEL; 9262 else 9263 /* Shadow paging, and EPT with 1GB support, can use 1GB pages */ 9264 return PT_PDPE_LEVEL; 9265} 9266 9267static void vmcs_set_secondary_exec_control(u32 new_ctl) 9268{ 9269 /* 9270 * These bits in the secondary execution controls field 9271 * are dynamic, the others are mostly based on the hypervisor 9272 * architecture and the guest's CPUID. Do not touch the 9273 * dynamic bits. 
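 * (The dynamic ones are toggled at run time, e.g. handle_vmptrld() sets * SECONDARY_EXEC_SHADOW_VMCS and vmx_set_virtual_x2apic_mode() flips * the two APIC virtualization bits.)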

static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx);

	if (vmx_rdtscp_supported()) {
		bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu);
		if (!rdtscp_enabled)
			secondary_exec_ctl &= ~SECONDARY_EXEC_RDTSCP;

		if (nested) {
			if (rdtscp_enabled)
				vmx->nested.nested_vmx_secondary_ctls_high |=
					SECONDARY_EXEC_RDTSCP;
			else
				vmx->nested.nested_vmx_secondary_ctls_high &=
					~SECONDARY_EXEC_RDTSCP;
		}
	}

	/* Exposing INVPCID only when PCID is exposed */
	best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
	if (vmx_invpcid_supported() &&
	    (!best || !(best->ebx & bit(X86_FEATURE_INVPCID)) ||
	    !guest_cpuid_has_pcid(vcpu))) {
		secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID;

		if (best)
			best->ebx &= ~bit(X86_FEATURE_INVPCID);
	}

	if (cpu_has_secondary_exec_ctrls())
		vmcs_set_secondary_exec_control(secondary_exec_ctl);

	if (nested_vmx_allowed(vcpu))
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
			FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
	else
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
			~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
}

static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
	if (func == 1 && nested)
		entry->ecx |= bit(X86_FEATURE_VMX);
}

static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
		struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 exit_reason;

	if (fault->error_code & PFERR_RSVD_MASK)
		exit_reason = EXIT_REASON_EPT_MISCONFIG;
	else
		exit_reason = EXIT_REASON_EPT_VIOLATION;
	nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

/* Callbacks for nested_ept_init_mmu_context: */

static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
{
	/* return the page table to be shadowed - in our case, EPT12 */
	return get_vmcs12(vcpu)->ept_pointer;
}

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));
	kvm_init_shadow_ept_mmu(vcpu,
			to_vmx(vcpu)->nested.nested_vmx_ept_caps &
			VMX_EPT_EXECUTE_ONLY_BIT);
	vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
	vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
	vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}

static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}
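
/*
 * "inequality ^ bit" above implements the SDM's PFEC_MASK/PFEC_MATCH rule:
 * when EB.PF is set, a #PF is reflected to L1 only if the masked error code
 * equals PFEC_MATCH; when EB.PF is clear, only if it differs.
 */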

static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
		struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code))
		nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
				  vmcs_read32(VM_EXIT_INTR_INFO),
				  vmcs_readl(EXIT_QUALIFICATION));
	else
		kvm_inject_page_fault(vcpu, fault);
}

static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
					struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int maxphyaddr = cpuid_maxphyaddr(vcpu);

	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
		if (!PAGE_ALIGNED(vmcs12->apic_access_addr) ||
		    vmcs12->apic_access_addr >> maxphyaddr)
			return false;

		/*
		 * Translate L1 physical address to host physical
		 * address for vmcs02. Keep the page pinned, so this
		 * physical address remains valid. We keep a reference
		 * to it so we can release it later.
		 */
		if (vmx->nested.apic_access_page) /* shouldn't happen */
			nested_release_page(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page =
			nested_get_page(vcpu, vmcs12->apic_access_addr);
	}

	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
		if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr) ||
		    vmcs12->virtual_apic_page_addr >> maxphyaddr)
			return false;

		if (vmx->nested.virtual_apic_page) /* shouldn't happen */
			nested_release_page(vmx->nested.virtual_apic_page);
		vmx->nested.virtual_apic_page =
			nested_get_page(vcpu, vmcs12->virtual_apic_page_addr);

		/*
		 * Failing the vm entry is _not_ what the processor does
		 * but it's basically the only possibility we have.
		 * We could still enter the guest if CR8 load exits are
		 * enabled, CR8 store exits are enabled, and virtualize APIC
		 * access is disabled; in this case the processor would never
		 * use the TPR shadow and we could simply clear the bit from
		 * the execution control. But such a configuration is useless,
		 * so let's keep the code simple.
		 */
		if (!vmx->nested.virtual_apic_page)
			return false;
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64) ||
		    vmcs12->posted_intr_desc_addr >> maxphyaddr)
			return false;

		if (vmx->nested.pi_desc_page) { /* shouldn't happen */
			kunmap(vmx->nested.pi_desc_page);
			nested_release_page(vmx->nested.pi_desc_page);
		}
		vmx->nested.pi_desc_page =
			nested_get_page(vcpu, vmcs12->posted_intr_desc_addr);
		if (!vmx->nested.pi_desc_page)
			return false;

		vmx->nested.pi_desc =
			(struct pi_desc *)kmap(vmx->nested.pi_desc_page);
		if (!vmx->nested.pi_desc) {
			nested_release_page_clean(vmx->nested.pi_desc_page);
			return false;
		}
		vmx->nested.pi_desc =
			(struct pi_desc *)((void *)vmx->nested.pi_desc +
			(unsigned long)(vmcs12->posted_intr_desc_addr &
			(PAGE_SIZE - 1)));
	}

	return true;
}
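
/*
 * The VMX-preemption timer is emulated below with an hrtimer: the vmcs12
 * value counts in units of (TSC >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE),
 * so it is shifted back to TSC ticks and converted to nanoseconds via the
 * vCPU's virtual_tsc_khz.
 */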
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
{
	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vcpu->arch.virtual_tsc_khz == 0)
		return;

	/* Make sure short timeouts reliably trigger an immediate vmexit.
	 * hrtimer_start does not guarantee this. */
	if (preemption_timeout <= 1) {
		vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
		return;
	}

	preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
	preemption_timeout *= 1000000;
	do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
	hrtimer_start(&vmx->nested.preemption_timer,
		      ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	int maxphyaddr;
	u64 addr;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) {
		WARN_ON(1);
		return -EINVAL;
	}
	maxphyaddr = cpuid_maxphyaddr(vcpu);

	if (!PAGE_ALIGNED(vmcs12->msr_bitmap) ||
	    ((addr + PAGE_SIZE) >> maxphyaddr))
		return -EINVAL;

	return 0;
}

/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	int msr;
	struct page *page;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;

	/* This shortcut is ok because we support only x2APIC MSRs so far. */
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
		return false;

	page = nested_get_page(vcpu, vmcs12->msr_bitmap);
	if (!page) {
		WARN_ON(1);
		return false;
	}
	msr_bitmap_l1 = (unsigned long *)kmap(page);
	if (!msr_bitmap_l1) {
		nested_release_page_clean(page);
		WARN_ON(1);
		return false;
	}

	memset(msr_bitmap_l0, 0xff, PAGE_SIZE);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12))
			for (msr = 0x800; msr <= 0x8ff; msr++)
				nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					msr, MSR_TYPE_R);

		nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				APIC_BASE_MSR + (APIC_TASKPRI >> 4),
				MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				APIC_BASE_MSR + (APIC_EOI >> 4),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
				MSR_TYPE_W);
		}
	}
	kunmap(page);
	nested_release_page_clean(page);

	return true;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (nested_cpu_has_vid(vmcs12) &&
	   !nested_exit_on_intr(vcpu))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	   (!nested_cpu_has_vid(vmcs12) ||
	    !nested_exit_intr_ack_set(vcpu) ||
	    vmcs12->posted_intr_nv & 0xff00))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       unsigned long count_field,
				       unsigned long addr_field)
{
	int maxphyaddr;
	u64 count, addr;

	if (vmcs12_read_any(vcpu, count_field, &count) ||
	    vmcs12_read_any(vcpu, addr_field, &addr)) {
		WARN_ON(1);
		return -EINVAL;
	}
	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
		pr_debug_ratelimited(
			"nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
			addr_field, maxphyaddr, count, addr);
		return -EINVAL;
	}
	return 0;
}

static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (vmcs12->vm_exit_msr_load_count == 0 &&
	    vmcs12->vm_exit_msr_store_count == 0 &&
	    vmcs12->vm_entry_msr_load_count == 0)
		return 0; /* Fast path */
	if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
					VM_EXIT_MSR_LOAD_ADDR) ||
	    nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
					VM_EXIT_MSR_STORE_ADDR) ||
	    nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
					VM_ENTRY_MSR_LOAD_ADDR))
		return -EINVAL;
	return 0;
}

static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
		return -EINVAL;
	if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
	    e->index == MSR_IA32_UCODE_REV)
		return -EINVAL;
	if (e->reserved != 0)
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (e->index == MSR_FS_BASE ||
	    e->index == MSR_GS_BASE ||
	    e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}
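
/*
 * Each entry in a VM-entry/VM-exit MSR area is a 16-byte vmx_msr_entry
 * (32-bit index, 32-bit reserved field that must be zero, 64-bit value).
 * The checks above reject entries with a disallowed index or nonzero
 * reserved bits before the lists are walked at entry/exit below.
 */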

/*
 * Load the guest's or host's MSRs at nested entry/exit. Returns 0 on
 * success, and the (1-based) index of the failing entry on failure.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	struct msr_data msr;

	msr.host_initiated = false;
	for (i = 0; i < count; i++) {
		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		msr.index = e.index;
		msr.data = e.value;
		if (kvm_set_msr(vcpu, &msr)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	return i + 1;
}

static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;

	for (i = 0; i < count; i++) {
		struct msr_data msr_info;
		if (kvm_vcpu_read_guest(vcpu,
					gpa + i * sizeof(e),
					&e, 2 * sizeof(u32))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			return -EINVAL;
		}
		if (nested_vmx_store_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			return -EINVAL;
		}
		msr_info.host_initiated = false;
		msr_info.index = e.index;
		if (kvm_get_msr(vcpu, &msr_info)) {
			pr_debug_ratelimited(
				"%s cannot read MSR (%u, 0x%x)\n",
				__func__, i, e.index);
			return -EINVAL;
		}
		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
					 offsetof(struct vmx_msr_entry, value),
					 &msr_info.data, sizeof(msr_info.data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, msr_info.data);
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
 * guest in a way that satisfies both L1's requests and our own needs. In
 * addition to modifying the active vmcs (which is vmcs02), this function
 * also has necessary side effects, like setting various vcpu->arch fields.
 */
static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exec_control;

	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
	vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
	vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
	vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
	vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
	vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
	vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
	vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
	vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
	vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
	vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
	vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
	vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
	vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
	vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
	vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
	vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
	vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
	vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
	vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
	vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
	vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
	vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
	vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
	vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
	vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
	vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
	vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
	vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
	vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
	vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
	vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
	vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
	vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
	vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
		kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
	} else {
		kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
	}
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		vmcs12->vm_entry_intr_info_field);
	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
		vmcs12->vm_entry_exception_error_code);
	vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
		vmcs12->vm_entry_instruction_len);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
		vmcs12->guest_interruptibility_info);
	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
	vmx_set_rflags(vcpu, vmcs12->guest_rflags);
	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
		vmcs12->guest_pending_dbg_exceptions);
	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);

	if (nested_cpu_has_xsaves(vmcs12))
		vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
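
	/*
	 * Everything above came straight from vmcs12 (L1's wishes for L2);
	 * from here on, control fields are merged with L0's own requirements
	 * rather than copied verbatim.
	 */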
	exec_control = vmcs12->pin_based_vm_exec_control;

	/* Preemption timer setting is only taken from vmcs01. */
	exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
	exec_control |= vmcs_config.pin_based_exec_ctrl;
	if (vmx->hv_deadline_tsc == -1)
		exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;

	/* Posted interrupts setting is only taken from vmcs12. */
	if (nested_cpu_has_posted_intr(vmcs12)) {
		/*
		 * Note that we use L0's vector here and in
		 * vmx_deliver_nested_posted_interrupt.
		 */
		vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
		vmx->nested.pi_pending = false;
		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
		vmcs_write64(POSTED_INTR_DESC_ADDR,
			page_to_phys(vmx->nested.pi_desc_page) +
			(unsigned long)(vmcs12->posted_intr_desc_addr &
			(PAGE_SIZE - 1)));
	} else
		exec_control &= ~PIN_BASED_POSTED_INTR;

	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);

	vmx->nested.preemption_timer_expired = false;
	if (nested_cpu_has_preemption_timer(vmcs12))
		vmx_start_preemption_timer(vcpu);

	/*
	 * Whether page-faults are trapped is determined by a combination of
	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
	 * If enable_ept, L0 doesn't care about page faults and we should
	 * set all of these to L1's desires. However, if !enable_ept, L0 does
	 * care about (at least some) page faults, and because it is not easy
	 * (if at all possible?) to merge L0 and L1's desires, we simply ask
	 * to exit on each and every L2 page fault. This is done by setting
	 * MASK=MATCH=0 and (see below) EB.PF=1.
	 * Note that below we don't need special code to set EB.PF beyond the
	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
	 *
	 * A problem with this approach (when !enable_ept) is that L1 may be
	 * injected with more page faults than it asked for. This could have
	 * caused problems, but in practice existing hypervisors don't care.
	 * To fix this, we will need to emulate the PFEC checking (on the L1
	 * page tables), using walk_addr(), when injecting PFs to L1.
	 */
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
		enable_ept ? vmcs12->page_fault_error_code_mask : 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
		enable_ept ? vmcs12->page_fault_error_code_match : 0);

	if (cpu_has_secondary_exec_ctrls()) {
		exec_control = vmx_secondary_exec_control(vmx);

		/* Take the following fields only from vmcs12 */
		exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
				  SECONDARY_EXEC_RDTSCP |
				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
				  SECONDARY_EXEC_APIC_REGISTER_VIRT);
		if (nested_cpu_has(vmcs12,
				CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
			exec_control |= vmcs12->secondary_vm_exec_control;

		if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
			/*
			 * If translation failed, no matter: This feature asks
			 * to exit when accessing the given address, and if it
			 * can never be accessed, this feature won't do
			 * anything anyway.
			 */
9931 */ 9932 if (!vmx->nested.apic_access_page) 9933 exec_control &= 9934 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 9935 else 9936 vmcs_write64(APIC_ACCESS_ADDR, 9937 page_to_phys(vmx->nested.apic_access_page)); 9938 } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) && 9939 cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { 9940 exec_control |= 9941 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 9942 kvm_vcpu_reload_apic_access_page(vcpu); 9943 } 9944 9945 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) { 9946 vmcs_write64(EOI_EXIT_BITMAP0, 9947 vmcs12->eoi_exit_bitmap0); 9948 vmcs_write64(EOI_EXIT_BITMAP1, 9949 vmcs12->eoi_exit_bitmap1); 9950 vmcs_write64(EOI_EXIT_BITMAP2, 9951 vmcs12->eoi_exit_bitmap2); 9952 vmcs_write64(EOI_EXIT_BITMAP3, 9953 vmcs12->eoi_exit_bitmap3); 9954 vmcs_write16(GUEST_INTR_STATUS, 9955 vmcs12->guest_intr_status); 9956 } 9957 9958 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); 9959 } 9960 9961 9962 /* 9963 * Set host-state according to L0's settings (vmcs12 is irrelevant here) 9964 * Some constant fields are set here by vmx_set_constant_host_state(). 9965 * Other fields are different per CPU, and will be set later when 9966 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called. 9967 */ 9968 vmx_set_constant_host_state(vmx); 9969 9970 /* 9971 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before 9972 * entry, but only if the current (host) sp changed from the value 9973 * we wrote last (vmx->host_rsp). This cache is no longer relevant 9974 * if we switch vmcs, and rather than hold a separate cache per vmcs, 9975 * here we just force the write to happen on entry. 9976 */ 9977 vmx->host_rsp = 0; 9978 9979 exec_control = vmx_exec_control(vmx); /* L0's desires */ 9980 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; 9981 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; 9982 exec_control &= ~CPU_BASED_TPR_SHADOW; 9983 exec_control |= vmcs12->cpu_based_vm_exec_control; 9984 9985 if (exec_control & CPU_BASED_TPR_SHADOW) { 9986 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 9987 page_to_phys(vmx->nested.virtual_apic_page)); 9988 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 9989 } 9990 9991 if (cpu_has_vmx_msr_bitmap() && 9992 exec_control & CPU_BASED_USE_MSR_BITMAPS && 9993 nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) 9994 ; /* MSR_BITMAP will be set by following vmx_set_efer. */ 9995 else 9996 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; 9997 9998 /* 9999 * Merging of IO bitmap not currently supported. 10000 * Rather, exit every time. 10001 */ 10002 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 10003 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 10004 10005 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); 10006 10007 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the 10008 * bitwise-or of what L1 wants to trap for L2, and what we want to 10009 * trap. Note that CR0.TS also needs updating - we do this later. 10010 */ 10011 update_exception_bitmap(vcpu); 10012 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; 10013 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); 10014 10015 /* L2->L1 exit controls are emulated - the hardware exit is to L0 so 10016 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER 10017 * bits are further modified by vmx_set_efer() below. 10018 */ 10019 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); 10020 10021 /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are 10022 * emulated by vmx_set_efer(), below. 
	vm_entry_controls_init(vmx,
		(vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
			~VM_ENTRY_IA32E_MODE) |
		(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
		vcpu->arch.pat = vmcs12->guest_ia32_pat;
	} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);

	set_cr4_guest_host_mask(vmx);

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
		vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);

	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
		vmcs_write64(TSC_OFFSET,
			vcpu->arch.tsc_offset + vmcs12->tsc_offset);
	else
		vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

	if (enable_vpid) {
		/*
		 * There is no direct mapping between vpid02 and vpid12:
		 * vpid02 is per-vCPU, owned by L0 and reused, while vpid12
		 * is changed with one INVVPID during nested vmentry. vpid12
		 * is allocated by L1 for L2, so it does not influence the
		 * global bitmap (for vpid01 and vpid02 allocation) even if
		 * L1 spawns a lot of nested vCPUs.
		 */
		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
			if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
				vmx->nested.last_vpid = vmcs12->virtual_processor_id;
				__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
			}
		} else {
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
			vmx_flush_tlb(vcpu);
		}
	}

	if (nested_cpu_has_ept(vmcs12)) {
		kvm_mmu_unload(vcpu);
		nested_ept_init_mmu_context(vcpu);
	}

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
		vcpu->arch.efer = vmcs12->guest_ia32_efer;
	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
	/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
	 * TS bit (for lazy fpu) and bits which we consider mandatory enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; it's not enough to take
	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may have
	 * more bits than L1 expected.
	 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	/* shadow page tables on either EPT or shadow page tables */
	kvm_set_cr3(vcpu, vmcs12->guest_cr3);
	kvm_mmu_reset_context(vcpu);

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;

	/*
	 * L1 may access the L2's PDPTR, so save them to construct vmcs12
	 */
	if (enable_ept) {
		vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
		vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
		vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
		vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
	}

	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
}
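
/*
 * In broad strokes, a nested entry below proceeds as: validate vmcs12
 * against the advertised VMX capabilities, pin the guest pages it
 * references, switch the loaded VMCS to vmcs02, merge state with
 * prepare_vmcs02(), and process the VM-entry MSR-load list; failures after
 * the VMCS switch are reported back to L1 as a failed entry rather than as
 * a KVM error.
 */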

/*
 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
 * for running an L2 nested guest.
 */
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
	struct vmcs12 *vmcs12;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;
	struct loaded_vmcs *vmcs02;
	bool ia32e;
	u32 msr_entry_idx;

	if (!nested_vmx_check_permission(vcpu) ||
	    !nested_vmx_check_vmcs12(vcpu))
		return 1;

	skip_emulated_instruction(vcpu);
	vmcs12 = get_vmcs12(vcpu);

	if (enable_shadow_vmcs)
		copy_shadow_to_vmcs12(vmx);

	/*
	 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, and acting appropriately
	 * when they fail: as the SDM explains, some conditions should cause
	 * the instruction to fail, while others will cause the instruction to
	 * seem to succeed, but return an EXIT_REASON_INVALID_STATE.
	 * To speed up the normal (success) code path, we should avoid checking
	 * for misconfigurations which will anyway be caught by the processor
	 * when using the merged vmcs02.
	 */
	if (vmcs12->launch_state == launch) {
		nested_vmx_failValid(vcpu,
			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
		return 1;
	}

	if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) {
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) {
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
				vmx->nested.nested_vmx_true_procbased_ctls_low,
				vmx->nested.nested_vmx_procbased_ctls_high) ||
	    !vmx_control_verify(vmcs12->secondary_vm_exec_control,
				vmx->nested.nested_vmx_secondary_ctls_low,
				vmx->nested.nested_vmx_secondary_ctls_high) ||
	    !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
				vmx->nested.nested_vmx_pinbased_ctls_low,
				vmx->nested.nested_vmx_pinbased_ctls_high) ||
	    !vmx_control_verify(vmcs12->vm_exit_controls,
				vmx->nested.nested_vmx_true_exit_ctls_low,
				vmx->nested.nested_vmx_exit_ctls_high) ||
	    !vmx_control_verify(vmcs12->vm_entry_controls,
				vmx->nested.nested_vmx_true_entry_ctls_low,
				vmx->nested.nested_vmx_entry_ctls_high))
	{
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
	    ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
		nested_vmx_failValid(vcpu,
			VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
		return 1;
	}

	if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
	    ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
		nested_vmx_entry_failure(vcpu, vmcs12,
			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
		return 1;
	}
	if (vmcs12->vmcs_link_pointer != -1ull) {
		nested_vmx_entry_failure(vcpu, vmcs12,
			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
		return 1;
	}

	/*
	 * If the load IA32_EFER VM-entry control is 1, the following checks
	 * are performed on the field for the IA32_EFER MSR:
	 * - Bits reserved in the IA32_EFER MSR must be 0.
	 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
	 *   the IA-32e mode guest VM-entry control. It must also be identical
	 *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
	 *   CR0.PG) is 1.
	 */
	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
		if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
		    ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
		    ((vmcs12->guest_cr0 & X86_CR0_PG) &&
		     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
			nested_vmx_entry_failure(vcpu, vmcs12,
				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
			return 1;
		}
	}

	/*
	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
	 * the values of the LMA and LME bits in the field must each be that of
	 * the host address-space size VM-exit control.
	 */
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
		ia32e = (vmcs12->vm_exit_controls &
			 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
		if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
			nested_vmx_entry_failure(vcpu, vmcs12,
				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
			return 1;
		}
	}

	/*
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */

	vmcs02 = nested_get_current_vmcs02(vmx);
	if (!vmcs02)
		return -ENOMEM;

	enter_guest_mode(vcpu);

	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);

	cpu = get_cpu();
	vmx->loaded_vmcs = vmcs02;
	vmx_vcpu_put(vcpu);
	vmx_vcpu_load(vcpu, cpu);
	vcpu->cpu = cpu;
	put_cpu();

	vmx_segment_cache_clear(vmx);

	prepare_vmcs02(vcpu, vmcs12);

	msr_entry_idx = nested_vmx_load_msr(vcpu,
					    vmcs12->vm_entry_msr_load_addr,
					    vmcs12->vm_entry_msr_load_count);
	if (msr_entry_idx) {
		leave_guest_mode(vcpu);
		vmx_load_vmcs01(vcpu);
		nested_vmx_entry_failure(vcpu, vmcs12,
				EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
		return 1;
	}

	vmcs12->launch_state = 1;

	if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
		return kvm_vcpu_halt(vcpu);

	vmx->nested.nested_run_pending = 1;

	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
	 * returned as far as L1 is concerned. It will only return (and set
	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
	 */
	return 1;
}

/*
 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
 * This function returns the new value we should put in vmcs12.guest_cr0.
 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
 *    available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
 *    didn't trap the bit, because if L1 did, so would L0).
 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
 *    been modified by L2, and L1 knows it. So just leave the old value of
 *    the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
 *    isn't relevant, because if L0 traps this bit it can set it to anything.
 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
 *    changed these bits, and therefore they need to be updated, but L0
 *    didn't necessarily allow them to be changed in GUEST_CR0 - and rather
 *    put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
 */
static inline unsigned long
vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
	/*3*/	(vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
			vcpu->arch.cr0_guest_owned_bits));
}

static inline unsigned long
vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
	/*3*/	(vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
			vcpu->arch.cr4_guest_owned_bits));
}
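
/*
 * Put differently: for each CR0/CR4 bit, exactly one of three sources wins
 * above - vmcs02's guest value if nobody trapped the bit, vmcs12's old
 * value if L1 trapped it, and vmcs02's read shadow if only L0 trapped it.
 */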

static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	u32 idt_vectoring;
	unsigned int nr;

	if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) {
		nr = vcpu->arch.exception.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (kvm_exception_is_soft(nr)) {
			vmcs12->vm_exit_instruction_len =
				vcpu->arch.event_exit_inst_len;
			idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
		} else
			idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;

		if (vcpu->arch.exception.has_error_code) {
			idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
			vmcs12->idt_vectoring_error_code =
				vcpu->arch.exception.error_code;
		}

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	} else if (vcpu->arch.nmi_injected) {
		vmcs12->idt_vectoring_info_field =
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
	} else if (vcpu->arch.interrupt.pending) {
		nr = vcpu->arch.interrupt.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (vcpu->arch.interrupt.soft) {
			idt_vectoring |= INTR_TYPE_SOFT_INTR;
			vmcs12->vm_entry_instruction_len =
				vcpu->arch.event_exit_inst_len;
		} else
			idt_vectoring |= INTR_TYPE_EXT_INTR;

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	}
}

static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
	    vmx->nested.preemption_timer_expired) {
		if (vmx->nested.nested_run_pending)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
		return 0;
	}

	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
		if (vmx->nested.nested_run_pending ||
		    vcpu->arch.interrupt.pending)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  NMI_VECTOR | INTR_TYPE_NMI_INTR |
				  INTR_INFO_VALID_MASK, 0);
		/*
		 * The NMI-triggered VM exit counts as injection:
		 * clear this one and block further NMIs.
		 */
		vcpu->arch.nmi_pending = 0;
		vmx_set_nmi_mask(vcpu, true);
		return 0;
	}

	if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
	    nested_exit_on_intr(vcpu)) {
		if (vmx->nested.nested_run_pending)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
		return 0;
	}

	return vmx_complete_nested_posted_interrupt(vcpu);
}

static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
	ktime_t remaining =
		hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
	u64 value;

	if (ktime_to_ns(remaining) <= 0)
		return 0;

	value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
	do_div(value, 1000000);
	return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
}

/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have been changed by the L2 guest or the exit - i.e., the guest-state
 * and exit-information fields only. Other fields are modified by L1 with
 * VMWRITE, which already writes to vmcs12 directly.
 */
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			   u32 exit_reason, u32 exit_intr_info,
			   unsigned long exit_qualification)
{
	/* update guest state fields: */
	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);

	vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);

	vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
	vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
	vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
	vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
	vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
	vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
	vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
	vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
	vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
	vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
	vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
	vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
	vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
	vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
	vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
	vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
	vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
	vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
	vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
	vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
	vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
	vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
	vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
	vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
	vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
	vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
	vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
	vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
	vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
	vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
	vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
	vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
	vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
	vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);

	vmcs12->guest_interruptibility_info =
		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	vmcs12->guest_pending_dbg_exceptions =
		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
		vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
	else
		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;

	if (nested_cpu_has_preemption_timer(vmcs12)) {
		if (vmcs12->vm_exit_controls &
		    VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
			vmcs12->vmx_preemption_timer_value =
				vmx_get_preemption_timer_value(vcpu);
		hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
	}

	/*
	 * In some cases (usually, nested EPT), L2 is allowed to change its
	 * own CR3 without exiting. If it has changed it, we must keep it.
	 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
	 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
	 *
	 * Additionally, restore L2's PDPTR to vmcs12.
	 */
	if (enable_ept) {
		vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
		vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
		vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
		vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
		vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
	}

	if (nested_cpu_has_ept(vmcs12))
		vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);

	if (nested_cpu_has_vid(vmcs12))
		vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);

	vmcs12->vm_entry_controls =
		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
		kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
		vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	}

	/* TODO: These cannot have changed unless we have MSR bitmaps and
	 * the relevant bit asks not to trap the change */
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
		vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
		vmcs12->guest_ia32_efer = vcpu->arch.efer;
	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
	if (kvm_mpx_supported())
		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
	if (nested_cpu_has_xsaves(vmcs12))
		vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);

	/* update exit information fields: */

	vmcs12->vm_exit_reason = exit_reason;
	vmcs12->exit_qualification = exit_qualification;

	vmcs12->vm_exit_intr_info = exit_intr_info;
	if ((vmcs12->vm_exit_intr_info &
	     (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
	    (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK))
		vmcs12->vm_exit_intr_error_code =
			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	vmcs12->idt_vectoring_info_field = 0;
	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;

		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
		vmcs12_save_pending_event(vcpu, vmcs12);
	}

	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
	 */
	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);
}

/*
 * Part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
 * Failures During or After Loading Guest State").
 * This function should be called when the active VMCS is L1's (vmcs01).
 */
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12)
{
	struct kvm_segment seg;

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vcpu->arch.efer = vmcs12->host_ia32_efer;
	else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
	vmx_set_efer(vcpu, vcpu->arch.efer);

	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
	vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
	/*
	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
	 * actually changed, because it depends on the current state of
	 * fpu_active (which may have changed).
	 * Note that vmx_set_cr0 refers to efer set above.
	 */
	vmx_set_cr0(vcpu, vmcs12->host_cr0);
	/*
	 * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
	 * to apply the same changes to L1's vmcs. We just set cr0 correctly,
	 * but we also need to update cr0_guest_host_mask and exception_bitmap.
	 */
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

	/*
	 * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01
	 * (KVM doesn't change it); no reason to call set_cr4_guest_host_mask().
	 */
	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	kvm_set_cr4(vcpu, vmcs12->host_cr4);

	nested_ept_uninit_mmu_context(vcpu);

	kvm_set_cr3(vcpu, vmcs12->host_cr3);
	kvm_mmu_reset_context(vcpu);

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;

	if (enable_vpid) {
		/*
		 * Trivially support vpid by letting L2s share their parent
		 * L1's vpid. TODO: move to a more elaborate solution, giving
		 * each L2 its own vpid and exposing the vpid feature to L1.
		 */
		vmx_flush_tlb(vcpu);
	}

	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);

	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
		vmcs_write64(GUEST_BNDCFGS, 0);

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
		vcpu->arch.pat = vmcs12->host_ia32_pat;
	}
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
			vmcs12->host_ia32_perf_global_ctrl);

	/* Set L1 segment info according to Intel SDM
	   27.5.2 Loading Host Segment and Descriptor-Table Registers */
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.selector = vmcs12->host_cs_selector,
		.type = 11,
		.present = 1,
		.s = 1,
		.g = 1
	};
	if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		seg.l = 1;
	else
		seg.db = 1;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.type = 3,
		.present = 1,
		.s = 1,
		.db = 1,
		.g = 1
	};
	seg.selector = vmcs12->host_ds_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
	seg.selector = vmcs12->host_es_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
	seg.selector = vmcs12->host_ss_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
	seg.selector = vmcs12->host_fs_selector;
	seg.base = vmcs12->host_fs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
	seg.selector = vmcs12->host_gs_selector;
	seg.base = vmcs12->host_gs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
	seg = (struct kvm_segment) {
		.base = vmcs12->host_tr_base,
		.limit = 0x67,
		.selector = vmcs12->host_tr_selector,
		.type = 11,
		.present = 1
	};
	vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);

	kvm_set_dr(vcpu, 7, 0x400);
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	if (cpu_has_vmx_msr_bitmap())
		vmx_set_msr_bitmap(vcpu);

	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
				vmcs12->vm_exit_msr_load_count))
		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}
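
/*
 * A failure while loading the VM-exit MSR list above can't be reported to
 * L1 as a VM-instruction error; nested_vmx_abort() raises a VMX abort
 * instead, mirroring how hardware reports host-state MSR-load failures.
 */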

/*
 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
 * and modify vmcs12 to make it see what it would expect to see there if
 * L2 was its real guest. Must only be called when in L2 (is_guest_mode()).
 */
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
			      u32 exit_intr_info,
			      unsigned long exit_qualification)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/* trying to cancel vmlaunch/vmresume is a bug */
	WARN_ON_ONCE(vmx->nested.nested_run_pending);

	leave_guest_mode(vcpu);
	prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
		       exit_qualification);

	if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
				 vmcs12->vm_exit_msr_store_count))
		nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);

	vmx_load_vmcs01(vcpu);

	if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
	    && nested_exit_intr_ack_set(vcpu)) {
		int irq = kvm_cpu_get_interrupt(vcpu);
		WARN_ON(irq < 0);
		vmcs12->vm_exit_intr_info = irq |
			INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
	}

	trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
				       vmcs12->exit_qualification,
				       vmcs12->idt_vectoring_info_field,
				       vmcs12->vm_exit_intr_info,
				       vmcs12->vm_exit_intr_error_code,
				       KVM_ISA_VMX);

	vm_entry_controls_reset_shadow(vmx);
	vm_exit_controls_reset_shadow(vmx);
	vmx_segment_cache_clear(vmx);

	/* if no vmcs02 cache requested, remove the one we used */
	if (VMCS02_POOL_SIZE == 0)
		nested_free_vmcs02(vmx, vmx->nested.current_vmptr);

	load_vmcs12_host_state(vcpu, vmcs12);

	/* Update any VMCS fields that might have changed while L2 ran */
	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
	if (vmx->hv_deadline_tsc == -1)
		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
				PIN_BASED_VMX_PREEMPTION_TIMER);
	else
		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
			      PIN_BASED_VMX_PREEMPTION_TIMER);
	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

	if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
		vmx_set_virtual_x2apic_mode(vcpu,
				vcpu->arch.apic_base & X2APIC_ENABLE);
	}

	/* This is needed for same reason as it was needed in prepare_vmcs02 */
	vmx->host_rsp = 0;

	/* Unpin physical memory we referred to in vmcs02 */
	if (vmx->nested.apic_access_page) {
		nested_release_page(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	if (vmx->nested.virtual_apic_page) {
		nested_release_page(vmx->nested.virtual_apic_page);
		vmx->nested.virtual_apic_page = NULL;
	}
	if (vmx->nested.pi_desc_page) {
		kunmap(vmx->nested.pi_desc_page);
		nested_release_page(vmx->nested.pi_desc_page);
		vmx->nested.pi_desc_page = NULL;
		vmx->nested.pi_desc = NULL;
	}

	/*
	 * While L2 was running, the mmu_notifier may have forced a reload of
	 * the APIC access page's hpa for the L2 vmcs; it needs to be reloaded
	 * for L1 before entering L1.
	 */
	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

	/*
	 * Exiting from L2 to L1, we're now back to L1 which thinks it just
	 * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
	 * success or failure flag accordingly.
	 */
	 */
	if (unlikely(vmx->fail)) {
		vmx->fail = 0;
		nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
	} else
		nested_vmx_succeed(vcpu);
	if (enable_shadow_vmcs)
		vmx->nested.sync_shadow_vmcs = true;

	/* In case we halted in L2 */
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
static void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu))
		nested_vmx_vmexit(vcpu, -1, 0, 0);
	free_nested(to_vmx(vcpu));
}

/*
 * L1's failure to enter L2 is a subset of a normal exit, as explained in
 * 23.7 "VM-entry failures during or after loading guest state" (this also
 * lists the acceptable exit-reason and exit-qualification parameters).
 * It should only be called before L2 has actually succeeded in running,
 * and when vmcs01 is current (it doesn't leave_guest_mode() or switch
 * VMCSs).
 */
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12,
				     u32 reason, unsigned long qualification)
{
	load_vmcs12_host_state(vcpu, vmcs12);
	vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
	vmcs12->exit_qualification = qualification;
	nested_vmx_succeed(vcpu);
	if (enable_shadow_vmcs)
		to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
}

static int vmx_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage)
{
	/* VMX does not intercept any emulated instruction here */
	return X86EMUL_CONTINUE;
}

#ifdef CONFIG_X86_64
/* (a << shift) / divisor; returns 1 on overflow, otherwise 0 */
static inline int u64_shl_div_u64(u64 a, unsigned int shift,
				  u64 divisor, u64 *result)
{
	u64 low = a << shift, high = a >> (64 - shift);

	/* Avoid overflow (#DE) on divq: the quotient must fit in 64 bits */
	if (high >= divisor)
		return 1;

	/* low holds the quotient; high holds the remainder, which is discarded */
	asm("divq %2\n\t" : "=a" (low), "=d" (high) :
	    "rm" (divisor), "0" (low), "1" (high));
	*result = low;

	return 0;
}
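/*
 * Program the VMX preemption timer so that it fires at the guest's TSC
 * deadline.  With TSC scaling, guest cycles are host cycles scaled by
 * tsc_scaling_ratio / 2^frac_bits, so the guest-cycle delta is converted
 * back to host cycles as (delta << frac_bits) / ratio via
 * u64_shl_div_u64().  For example, a guest running at twice the host
 * TSC frequency (ratio == 2 << frac_bits) needs only half as many host
 * cycles to cover the same delta.
 */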
static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 tscl = rdtsc();
	u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
	u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;

	/* Convert to a host delta tsc if tsc scaling is enabled */
	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
			u64_shl_div_u64(delta_tsc,
				kvm_tsc_scaling_ratio_frac_bits,
				vcpu->arch.tsc_scaling_ratio,
				&delta_tsc))
		return -ERANGE;

	/*
	 * If delta_tsc doesn't fit in 32 bits after the timer-multiplier
	 * shift, we can't use the preemption timer.
	 * It's possible that it would fit on later vmentries, but checking
	 * on every vmentry is costly, so we just use an hrtimer.
	 */
	if (delta_tsc >> (cpu_preemption_timer_multi + 32))
		return -ERANGE;

	vmx->hv_deadline_tsc = tscl + delta_tsc;
	vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
			PIN_BASED_VMX_PREEMPTION_TIMER);
	return 0;
}

static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->hv_deadline_tsc = -1;
	vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
			PIN_BASED_VMX_PREEMPTION_TIMER);
}
#endif

static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	if (ple_gap)
		shrink_ple_window(vcpu);
}
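/*
 * Dirty-logging hooks.  With PML (Page Modification Logging) the CPU
 * records dirtied GPAs into a per-vCPU log buffer, so enabling dirty
 * logging for a slot only needs to clear the dirty bits of leaf SPTEs
 * (and write-protect large pages so they are split on the next write);
 * the PML buffers are flushed into the dirty bitmap before it is
 * reported to userspace.  When PML is disabled, hardware_setup() clears
 * these hooks and dirty logging falls back to write protection.
 */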
static void vmx_slot_enable_log_dirty(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
	kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
}

static void vmx_slot_disable_log_dirty(struct kvm *kvm,
				       struct kvm_memory_slot *slot)
{
	kvm_mmu_slot_set_dirty(kvm, slot);
}

static void vmx_flush_log_dirty(struct kvm *kvm)
{
	kvm_flush_pml_buffers(kvm);
}

static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
					   struct kvm_memory_slot *memslot,
					   gfn_t offset, unsigned long mask)
{
	kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
}

/*
 * This routine does the following things for a vCPU that is about to
 * block, when VT-d posted interrupts are enabled:
 * - Store the vCPU on the wakeup list, so that when an interrupt
 *   arrives we can find the right vCPU to wake up.
 * - Change the posted-interrupt descriptor as follows:
 *      'NDST' <-- vcpu->pre_pcpu
 *      'NV'   <-- POSTED_INTR_WAKEUP_VECTOR
 * - If 'ON' is set during this process (i.e. at least one interrupt
 *   has been posted for this vCPU), the vCPU must not block;
 *   return 1 in that case, otherwise return 0.
 */
static int pi_pre_block(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	unsigned int dest;
	struct pi_desc old, new;
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
	    !kvm_vcpu_apicv_active(vcpu))
		return 0;

	vcpu->pre_pcpu = vcpu->cpu;
	spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
			  vcpu->pre_pcpu), flags);
	list_add_tail(&vcpu->blocked_vcpu_list,
		      &per_cpu(blocked_vcpu_on_cpu,
		      vcpu->pre_pcpu));
	spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
			       vcpu->pre_pcpu), flags);

	do {
		old.control = new.control = pi_desc->control;

		/*
		 * We must not block the vCPU if an interrupt is
		 * already posted for it.
		 */
		if (pi_test_on(pi_desc) == 1) {
			spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
					  vcpu->pre_pcpu), flags);
			list_del(&vcpu->blocked_vcpu_list);
			spin_unlock_irqrestore(
					&per_cpu(blocked_vcpu_on_cpu_lock,
					vcpu->pre_pcpu), flags);
			vcpu->pre_pcpu = -1;

			return 1;
		}

		WARN((pi_desc->sn == 1),
		     "Warning: SN field of posted-interrupts "
		     "is set before blocking\n");

		/*
		 * Since the vCPU can be preempted during this process,
		 * vcpu->cpu could differ from pre_pcpu; use pre_pcpu as
		 * the destination of the wakeup notification event so
		 * that, if an interrupt arrives while the vCPU is
		 * blocked, the wakeup handler can find the right vCPU
		 * to wake up.
		 */
		dest = cpu_physical_id(vcpu->pre_pcpu);

		if (x2apic_enabled())
			new.ndst = dest;
		else
			new.ndst = (dest << 8) & 0xFF00; /* xAPIC ID in bits 15:8 */

		/* Set 'NV' to the wakeup vector */
		new.nv = POSTED_INTR_WAKEUP_VECTOR;
	} while (cmpxchg(&pi_desc->control, old.control,
			 new.control) != old.control);

	return 0;
}

static int vmx_pre_block(struct kvm_vcpu *vcpu)
{
	if (pi_pre_block(vcpu))
		return 1;

	if (kvm_lapic_hv_timer_in_use(vcpu))
		kvm_lapic_switch_to_sw_timer(vcpu);

	return 0;
}

static void pi_post_block(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
	struct pi_desc old, new;
	unsigned int dest;
	unsigned long flags;

	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
	    !kvm_vcpu_apicv_active(vcpu))
		return;

	do {
		old.control = new.control = pi_desc->control;

		dest = cpu_physical_id(vcpu->cpu);

		if (x2apic_enabled())
			new.ndst = dest;
		else
			new.ndst = (dest << 8) & 0xFF00; /* xAPIC ID in bits 15:8 */

		/* Allow posting non-urgent interrupts */
		new.sn = 0;

		/* Set 'NV' back to the regular notification vector */
		new.nv = POSTED_INTR_VECTOR;
	} while (cmpxchg(&pi_desc->control, old.control,
			 new.control) != old.control);

	if (vcpu->pre_pcpu != -1) {
		spin_lock_irqsave(
			&per_cpu(blocked_vcpu_on_cpu_lock,
			vcpu->pre_pcpu), flags);
		list_del(&vcpu->blocked_vcpu_list);
		spin_unlock_irqrestore(
			&per_cpu(blocked_vcpu_on_cpu_lock,
			vcpu->pre_pcpu), flags);
		vcpu->pre_pcpu = -1;
	}
}
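/*
 * Counterpart of vmx_pre_block(): switch the APIC timer back to the
 * VMX preemption timer if one is available, and restore the
 * posted-interrupt descriptor so normal notifications reach the vCPU
 * again.
 */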
static void vmx_post_block(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->set_hv_timer)
		kvm_lapic_switch_to_hv_timer(vcpu);

	pi_post_block(vcpu);
}

/*
 * vmx_update_pi_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: kvm
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 * Returns 0 on success, < 0 on failure.
 */
static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu;
	struct vcpu_data vcpu_info;
	int idx, ret = -EINVAL;

	if (!kvm_arch_has_assigned_device(kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
	    !kvm_vcpu_apicv_active(kvm->vcpus[0]))
		return 0;

	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	BUG_ON(guest_irq >= irq_rt->nr_rt_entries);

	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
		if (e->type != KVM_IRQ_ROUTING_MSI)
			continue;
		/*
		 * VT-d PI cannot post multicast/broadcast interrupts to
		 * a vCPU, so we still use interrupt remapping for those
		 * kinds of interrupts.
		 *
		 * For lowest-priority interrupts, we only support those
		 * with a single CPU as the destination, e.g. the user
		 * configures the interrupt via /proc/irq or irqbalance
		 * makes it single-CPU.
		 *
		 * Full lowest-priority interrupt support will be added
		 * later.
		 */
		kvm_set_msi_irq(kvm, e, &irq);
		if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
			/*
			 * Make sure the IRTE is in remapped mode if
			 * we don't handle it in posted mode.
			 */
			ret = irq_set_vcpu_affinity(host_irq, NULL);
			if (ret < 0) {
				printk(KERN_INFO
				       "failed to fall back to remapped mode, irq: %u\n",
				       host_irq);
				goto out;
			}

			continue;
		}

		vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
		vcpu_info.vector = irq.vector;

		trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi,
				vcpu_info.vector, vcpu_info.pi_desc_addr, set);

		if (set)
			ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
		else {
			/* Suppress notification events before unposting */
			pi_set_sn(vcpu_to_pi_desc(vcpu));
			ret = irq_set_vcpu_affinity(host_irq, NULL);
			pi_clear_sn(vcpu_to_pi_desc(vcpu));
		}

		if (ret < 0) {
			printk(KERN_INFO "%s: failed to update PI IRTE\n",
			       __func__);
			goto out;
		}
	}

	ret = 0;
out:
	srcu_read_unlock(&kvm->irq_srcu, idx);
	return ret;
}

static void vmx_setup_mce(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.mcg_cap & MCG_LMCE_P)
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
			FEATURE_CONTROL_LMCE;
	else
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
			~FEATURE_CONTROL_LMCE;
}

static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.check_processor_compatibility = vmx_check_processor_compat,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,
	.cpu_has_accelerated_tpr = report_flexpriority,
	.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,
	.vcpu_reset = vmx_vcpu_reset,

	.prepare_guest_switch = vmx_save_host_state,
	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,

	.update_bp_intercept = update_exception_bitmap,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cpl = vmx_get_cpl,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
	.decache_cr3 = vmx_decache_cr3,
	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
	.set_efer = vmx_set_efer,
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.get_dr6 = vmx_get_dr6,
	.set_dr6 = vmx_set_dr6,
	.set_dr7 = vmx_set_dr7,
	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
	.cache_reg = vmx_cache_reg,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.get_pkru = vmx_get_pkru,

	.fpu_activate = vmx_fpu_activate,
	.fpu_deactivate = vmx_fpu_deactivate,

	.tlb_flush = vmx_flush_tlb,

	.run = vmx_vcpu_run,
	.handle_exit = vmx_handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = vmx_set_interrupt_shadow,
	.get_interrupt_shadow = vmx_get_interrupt_shadow,
	.patch_hypercall = vmx_patch_hypercall,
	.set_irq = vmx_inject_irq,
	.set_nmi = vmx_inject_nmi,
	.queue_exception = vmx_queue_exception,
	.cancel_injection = vmx_cancel_injection,
	.interrupt_allowed = vmx_interrupt_allowed,
	.nmi_allowed = vmx_nmi_allowed,
	.get_nmi_mask = vmx_get_nmi_mask,
	.set_nmi_mask = vmx_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
	.get_enable_apicv = vmx_get_enable_apicv,
	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
	.load_eoi_exitmap = vmx_load_eoi_exitmap,
	.hwapic_irr_update = vmx_hwapic_irr_update,
	.hwapic_isr_update = vmx_hwapic_isr_update,
	.sync_pir_to_irr = vmx_sync_pir_to_irr,
	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,

	.set_tss_addr = vmx_set_tss_addr,
	.get_tdp_level = get_ept_level,
	.get_mt_mask = vmx_get_mt_mask,

	.get_exit_info = vmx_get_exit_info,

	.get_lpage_level = vmx_get_lpage_level,

	.cpuid_update = vmx_cpuid_update,

	.rdtscp_supported = vmx_rdtscp_supported,
	.invpcid_supported = vmx_invpcid_supported,

	.set_supported_cpuid = vmx_set_supported_cpuid,

	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,

	.write_tsc_offset = vmx_write_tsc_offset,

	.set_tdp_cr3 = vmx_set_cr3,

	.check_intercept = vmx_check_intercept,
	.handle_external_intr = vmx_handle_external_intr,
	.mpx_supported = vmx_mpx_supported,
	.xsaves_supported = vmx_xsaves_supported,

	.check_nested_events = vmx_check_nested_events,

	.sched_in = vmx_sched_in,

	.slot_enable_log_dirty = vmx_slot_enable_log_dirty,
	.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
	.flush_log_dirty = vmx_flush_log_dirty,
	.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,

	.pre_block = vmx_pre_block,
	.post_block = vmx_post_block,

	.pmu_ops = &intel_pmu_ops,

	.update_pi_irte = vmx_update_pi_irte,

#ifdef CONFIG_X86_64
	.set_hv_timer = vmx_set_hv_timer,
	.cancel_hv_timer = vmx_cancel_hv_timer,
#endif

	.setup_mce = vmx_setup_mce,
};
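/*
 * Module entry point: register this VMX implementation with the KVM
 * core.  The size and alignment of struct vcpu_vmx are passed so the
 * core can size its vCPU allocations for us.
 */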
static int __init vmx_init(void)
{
	int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
			 __alignof__(struct vcpu_vmx), THIS_MODULE);
	if (r)
		return r;

#ifdef CONFIG_KEXEC_CORE
	/*
	 * Let the crash/kexec path VMCLEAR any loaded VMCSs, so their
	 * cached state is flushed to memory before the crash kernel
	 * takes over.
	 */
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   crash_vmclear_local_loaded_vmcss);
#endif

	return 0;
}

static void __exit vmx_exit(void)
{
#ifdef CONFIG_KEXEC_CORE
	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
	synchronize_rcu();
#endif

	kvm_exit();
}

module_init(vmx_init)
module_exit(vmx_exit)