/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <asm/desc.h>

#include "kvm_svm.h"
#include "x86_emulate.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DB_VECTOR 1
#define UD_VECTOR 6
#define GP_VECTOR 13

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)
#define CR4_DE_MASK (1UL << 3)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define KVM_EFER_LMA (1 << 10)
#define KVM_EFER_LME (1 << 8)

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

unsigned long iopm_base;
unsigned long msrpm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}

static unsigned get_addr_size(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
	u16 cs_attrib;

	if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM))
		return 2;

	cs_attrib = sa->cs.attrib;

	return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 :
		(cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2;
}

static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}

static inline void clgi(void)
{
	asm volatile (SVM_CLGI);
}

static inline void stgi(void)
{
	asm volatile (SVM_STGI);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	vcpu->svm->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!(efer & KVM_EFER_LMA))
		efer &= ~KVM_EFER_LME;

	vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->shadow_efer = efer;
}

static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_VALID_ERR |
					     SVM_EVTINJ_TYPE_EXEPT |
					     GP_VECTOR;
	vcpu->svm->vmcb->control.event_inj_err = error_code;
}

static void inject_ud(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_TYPE_EXEPT |
					     UD_VECTOR;
}

static int is_page_fault(uint32_t info)
{
	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	if (!vcpu->svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
		return;
	}
	if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > 15) {
		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
		       __FUNCTION__,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->svm->next_rip);
	}

	vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
	vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->interrupt_window_open = 1;
}

static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}
	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (svm_data) {
		uint64_t efer;

		wrmsrl(MSR_VM_HSAVE_PA, 0);
		rdmsrl(MSR_EFER, efer);
		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
		__free_page(svm_data->save_area);
		kfree(svm_data);
	}
}

static void svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *svm_data;
	uint64_t efer;
#ifdef CONFIG_X86_64
	struct desc_ptr gdt_descr;
#else
	struct Xgt_desc_struct gdt_descr;
#endif
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;
	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;

}

static int set_msr_interception(u32 *msrpm, unsigned msr,
				int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return 1;
		}
	}
	printk(KERN_DEBUG "%s: not found 0x%x\n", __FUNCTION__, msr);
	return 0;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	struct page *msrpm_pages;
	void *msrpm_va;
	int r;

	kvm_emulator_want_group7_invlpg();

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;
	memset(page_address(iopm_pages), 0xff,
	       PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;


	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);

	r = -ENOMEM;
	if (!msrpm_pages)
		goto err_1;

	msrpm_va = page_address(msrpm_pages);
	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err_2;
	}
	return 0;

err_2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
	msrpm_base = 0;
err_1:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = msrpm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static int svm_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

static void init_vmcb(struct vmcb *vmcb)
{
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;

	control->intercept_cr_read = INTERCEPT_CR0_MASK |
				     INTERCEPT_CR3_MASK |
				     INTERCEPT_CR4_MASK;

	control->intercept_cr_write = INTERCEPT_CR0_MASK |
				      INTERCEPT_CR3_MASK |
				      INTERCEPT_CR4_MASK;

	control->intercept_dr_read = INTERCEPT_DR0_MASK |
				     INTERCEPT_DR1_MASK |
				     INTERCEPT_DR2_MASK |
				     INTERCEPT_DR3_MASK;

	control->intercept_dr_write = INTERCEPT_DR0_MASK |
				      INTERCEPT_DR1_MASK |
				      INTERCEPT_DR2_MASK |
				      INTERCEPT_DR3_MASK |
				      INTERCEPT_DR5_MASK |
				      INTERCEPT_DR7_MASK;

	control->intercept_exceptions = 1 << PF_VECTOR;


	control->intercept = (1ULL << INTERCEPT_INTR) |
			     (1ULL << INTERCEPT_NMI) |
			     (1ULL << INTERCEPT_SMI) |
			     /*
			      * selective cr0 intercept bug?
			      *	0:   0f 22 d8		mov    %eax,%cr3
			      *	3:   0f 20 c0		mov    %cr0,%eax
			      *	6:   0d 00 00 00 80	or     $0x80000000,%eax
			      *	b:   0f 22 c0		mov    %eax,%cr0
			      * set cr3 ->interception
			      * get cr0 ->interception
			      * set cr0 -> no interception
			      */
			     /* (1ULL << INTERCEPT_SELECTIVE_CR0) | */
			     (1ULL << INTERCEPT_CPUID) |
			     (1ULL << INTERCEPT_HLT) |
			     (1ULL << INTERCEPT_INVLPGA) |
			     (1ULL << INTERCEPT_IOIO_PROT) |
			     (1ULL << INTERCEPT_MSR_PROT) |
			     (1ULL << INTERCEPT_TASK_SWITCH) |
			     (1ULL << INTERCEPT_SHUTDOWN) |
			     (1ULL << INTERCEPT_VMRUN) |
			     (1ULL << INTERCEPT_VMMCALL) |
			     (1ULL << INTERCEPT_VMLOAD) |
			     (1ULL << INTERCEPT_VMSAVE) |
			     (1ULL << INTERCEPT_STGI) |
			     (1ULL << INTERCEPT_CLGI) |
			     (1ULL << INTERCEPT_SKINIT) |
			     (1ULL << INTERCEPT_MONITOR) |
			     (1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = msrpm_base;
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
			  SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;

	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;

	/*
	 * The cr0 value at cpu init should be 0x60000010; we enable the cpu
	 * cache by default.  The orderly way is to enable the cache in the
	 * bios.
	 */
	save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK;
	save->cr4 = CR4_PAE_MASK;
	/* rdx = ?? */
}

static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int r;

	r = -ENOMEM;
	vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL);
	if (!vcpu->svm)
		goto out1;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto out2;

	vcpu->svm->vmcb = page_address(page);
	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
	vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	vcpu->svm->asid_generation = 0;
	memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
	init_vmcb(vcpu->svm->vmcb);

	fx_init(vcpu);
	vcpu->fpu_active = 1;
	vcpu->apic_base = 0xfee00000 |
			/*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
			MSR_IA32_APICBASE_ENABLE;

	return 0;

out2:
	kfree(vcpu->svm);
out1:
	return r;
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->svm)
		return;
	if (vcpu->svm->vmcb)
		__free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
	kfree(vcpu->svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu, i;

	cpu = get_cpu();
	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->host_tsc - tsc_this;
		vcpu->svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);

	rdtscll(vcpu->host_tsc);
	put_cpu();
}

static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}

static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
	vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
	vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp;
	vcpu->rip = vcpu->svm->vmcb->save.rip;
}

static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
	vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
	vcpu->svm->vmcb->save.rip = vcpu->rip;
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return vcpu->svm->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	vcpu->svm->vmcb->save.rflags = rflags;
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &vcpu->svm->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}

static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct vmcb_seg *s = svm_seg(vcpu, VCPU_SREG_CS);

	*db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	*l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vcpu->svm->vmcb->save.idtr.limit;
	dt->base = vcpu->svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vcpu->svm->vmcb->save.idtr.limit = dt->limit;
	vcpu->svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vcpu->svm->vmcb->save.gdtr.limit;
	dt->base = vcpu->svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vcpu->svm->vmcb->save.gdtr.limit = dt->limit;
	vcpu->svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK)) {
			vcpu->shadow_efer &= ~KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
		}
	}
#endif
	if ((vcpu->cr0 & CR0_TS_MASK) && !(cr0 & CR0_TS_MASK)) {
		vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->cr0 = cr0;
	cr0 |= CR0_PG_MASK | CR0_WP_MASK;
	cr0 &= ~(CR0_CD_MASK | CR0_NW_MASK);
	vcpu->svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vcpu->cr4 = cr4;
	vcpu->svm->vmcb->save.cr4 = cr4 | CR4_PAE_MASK;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		vcpu->svm->vmcb->save.cpl
			= (vcpu->svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;

}

/* FIXME:

	vcpu->svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	vcpu->svm->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);

*/

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
#endif
}

static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	vcpu->cpu = svm_data->cpu;
	vcpu->svm->asid_generation = svm_data->asid_generation;
	vcpu->svm->vmcb->control.asid = svm_data->next_asid++;
}

static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	invlpga(address, vcpu->svm->vmcb->control.asid); /* is needed? */
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	return vcpu->svm->db_regs[dr];
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	*exception = 0;

	if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) {
		vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		vcpu->svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->cr4 & CR4_DE_MASK) {
			*exception = UD_VECTOR;
			return;
		}
		/* fall through */
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
	u64 fault_address;
	u32 error_code;
	enum emulation_result er;
	int r;

	if (is_external_interrupt(exit_int_info))
		push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	spin_lock(&vcpu->kvm->lock);

	fault_address = vcpu->svm->vmcb->control.exit_info_2;
	error_code = vcpu->svm->vmcb->control.exit_info_1;
	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
	if (r < 0) {
		spin_unlock(&vcpu->kvm->lock);
		return r;
	}
	if (!r) {
		spin_unlock(&vcpu->kvm->lock);
		return 1;
	}
	er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
	spin_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		kvm_run->exit_reason = KVM_EXIT_MMIO;
		return 0;
	case EMULATE_FAIL:
		vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
		break;
	default:
		BUG();
	}

	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(vcpu->cr0 & CR0_TS_MASK))
		vcpu->svm->vmcb->save.cr0 &= ~CR0_TS_MASK;
	vcpu->fpu_active = 1;

	return 1;
}

static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
	init_vmcb(vcpu->svm->vmcb);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int io_get_override(struct kvm_vcpu *vcpu,
			   struct vmcb_seg **seg,
			   int *addr_override)
{
	u8 inst[MAX_INST_SIZE];
	unsigned ins_length;
	gva_t rip;
	int i;

	rip = vcpu->svm->vmcb->save.rip;
	ins_length = vcpu->svm->next_rip - rip;
	rip += vcpu->svm->vmcb->save.cs.base;

	if (ins_length > MAX_INST_SIZE)
		printk(KERN_DEBUG
		       "%s: inst length err, cs base 0x%llx rip 0x%llx "
		       "next rip 0x%llx ins_length %u\n",
		       __FUNCTION__,
		       vcpu->svm->vmcb->save.cs.base,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->svm->vmcb->control.exit_info_2,
		       ins_length);

	if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
		/* #PF */
		return 0;

	*addr_override = 0;
	*seg = NULL;
	for (i = 0; i < ins_length; i++)
		switch (inst[i]) {
		case 0xf0:
		case 0xf2:
		case 0xf3:
		case 0x66:
			continue;
		case 0x67:
			*addr_override = 1;
			continue;
		case 0x2e:
			*seg = &vcpu->svm->vmcb->save.cs;
			continue;
		case 0x36:
			*seg = &vcpu->svm->vmcb->save.ss;
			continue;
		case 0x3e:
			*seg = &vcpu->svm->vmcb->save.ds;
			continue;
		case 0x26:
			*seg = &vcpu->svm->vmcb->save.es;
			continue;
		case 0x64:
			*seg = &vcpu->svm->vmcb->save.fs;
			continue;
		case 0x65:
			*seg = &vcpu->svm->vmcb->save.gs;
			continue;
		default:
			return 1;
		}
	printk(KERN_DEBUG "%s: unexpected\n", __FUNCTION__);
	return 0;
}

static unsigned long io_address(struct kvm_vcpu *vcpu, int ins, gva_t *address)
{
	unsigned long addr_mask;
	unsigned long *reg;
	struct vmcb_seg *seg;
	int addr_override;
	struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save;
	u16 cs_attrib = save_area->cs.attrib;
	unsigned addr_size = get_addr_size(vcpu);

	if (!io_get_override(vcpu, &seg, &addr_override))
		return 0;

	if (addr_override)
		addr_size = (addr_size == 2) ? 4 : (addr_size >> 1);

	if (ins) {
		reg = &vcpu->regs[VCPU_REGS_RDI];
		seg = &vcpu->svm->vmcb->save.es;
	} else {
		reg = &vcpu->regs[VCPU_REGS_RSI];
		seg = (seg) ? seg : &vcpu->svm->vmcb->save.ds;
	}

	addr_mask = ~0ULL >> (64 - (addr_size * 8));

	if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
	    !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
		*address = (*reg & addr_mask);
		return addr_mask;
	}

	if (!(seg->attrib & SVM_SELECTOR_P_SHIFT)) {
		svm_inject_gp(vcpu, 0);
		return 0;
	}

	*address = (*reg & addr_mask) + seg->base;
	return addr_mask;
}

static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 io_info = vcpu->svm->vmcb->control.exit_info_1; /* address size bug? */
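	/*
	 * Decode the IOIO exit info: the CPU packs the direction, port
	 * number, operand size and string/rep attributes into exit_info_1.
	 */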
	int size, down, in, string, rep;
	unsigned port;
	unsigned long count;
	gva_t address = 0;

	++vcpu->stat.io_exits;

	vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	count = 1;
	down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	if (string) {
		unsigned addr_mask;

		addr_mask = io_address(vcpu, in, &address);
		if (!addr_mask) {
			printk(KERN_DEBUG "%s: get io address failed\n",
			       __FUNCTION__);
			return 1;
		}

		if (rep)
			count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
	}
	return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
			     address, rep, port);
}

static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
	skip_emulated_instruction(vcpu);
	if (vcpu->irq_summary)
		return 1;

	kvm_run->exit_reason = KVM_EXIT_HLT;
	++vcpu->stat.halt_exits;
	return 0;
}

static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 3;
	skip_emulated_instruction(vcpu);
	return kvm_hypercall(vcpu, kvm_run);
}

static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	inject_ud(vcpu);
	return 1;
}

static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	printk(KERN_DEBUG "%s: task switch is unsupported\n", __FUNCTION__);
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
	kvm_emulate_cpuid(vcpu);
	return 1;
}

static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
	return 1;
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = vcpu->svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = vcpu->svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = vcpu->svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = vcpu->svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = vcpu->svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = vcpu->svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = vcpu->svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = vcpu->svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = vcpu->svm->vmcb->save.sysenter_esp;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(vcpu, ecx, &data))
		svm_inject_gp(vcpu, 0);
	else {
		vcpu->svm->vmcb->save.rax = data & 0xffffffff;
		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
		vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
		skip_emulated_instruction(vcpu);
	}
	return 1;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		vcpu->svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		vcpu->svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		vcpu->svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		vcpu->svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		vcpu->svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		vcpu->svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vcpu->svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		vcpu->svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		vcpu->svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data = (vcpu->svm->vmcb->save.rax & -1u)
		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
	if (svm_set_msr(vcpu, ecx, data))
		svm_inject_gp(vcpu, 0);
	else
		skip_emulated_instruction(vcpu);
	return 1;
}

static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (vcpu->svm->vmcb->control.exit_info_1)
		return wrmsr_interception(vcpu, kvm_run);
	else
		return rdmsr_interception(vcpu, kvm_run);
}

static int interrupt_window_interception(struct kvm_vcpu *vcpu,
					 struct kvm_run *kvm_run)
{
	/*
	 * If userspace is waiting to inject interrupts, exit as soon as
	 * possible.
	 */
	if (kvm_run->request_interrupt_window &&
	    !vcpu->irq_summary) {
		++vcpu->stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]	= emulate_on_interception,
	[SVM_EXIT_READ_CR3]	= emulate_on_interception,
	[SVM_EXIT_READ_CR4]	= emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0]	= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]	= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]	= emulate_on_interception,
	[SVM_EXIT_READ_DR0]	= emulate_on_interception,
	[SVM_EXIT_READ_DR1]	= emulate_on_interception,
	[SVM_EXIT_READ_DR2]	= emulate_on_interception,
	[SVM_EXIT_READ_DR3]	= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]	= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]	= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]	= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]	= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]	= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]	= emulate_on_interception,
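	/* exception, interrupt and instruction intercepts */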
	[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
	[SVM_EXIT_INTR]		= nop_on_interception,
	[SVM_EXIT_NMI]		= nop_on_interception,
	[SVM_EXIT_SMI]		= nop_on_interception,
	[SVM_EXIT_INIT]		= nop_on_interception,
	[SVM_EXIT_VINTR]	= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
	[SVM_EXIT_CPUID]	= cpuid_interception,
	[SVM_EXIT_HLT]		= halt_interception,
	[SVM_EXIT_INVLPG]	= emulate_on_interception,
	[SVM_EXIT_INVLPGA]	= invalid_op_interception,
	[SVM_EXIT_IOIO]		= io_interception,
	[SVM_EXIT_MSR]		= msr_interception,
	[SVM_EXIT_TASK_SWITCH]	= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]	= shutdown_interception,
	[SVM_EXIT_VMRUN]	= invalid_op_interception,
	[SVM_EXIT_VMMCALL]	= vmmcall_interception,
	[SVM_EXIT_VMLOAD]	= invalid_op_interception,
	[SVM_EXIT_VMSAVE]	= invalid_op_interception,
	[SVM_EXIT_STGI]		= invalid_op_interception,
	[SVM_EXIT_CLGI]		= invalid_op_interception,
	[SVM_EXIT_SKINIT]	= invalid_op_interception,
	[SVM_EXIT_MONITOR]	= invalid_op_interception,
	[SVM_EXIT_MWAIT]	= invalid_op_interception,
};


static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 exit_code = vcpu->svm->vmcb->control.exit_code;

	if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || svm_exit_handlers[exit_code] == 0) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](vcpu, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (vcpu->cpu != cpu ||
	    vcpu->svm->asid_generation != svm_data->asid_generation)
		new_asid(vcpu, svm_data);
}


static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control;

	control = &vcpu->svm->vmcb->control;
	control->int_vector = pop_irq(vcpu);
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void kvm_reput_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;

	if (control->int_ctl & V_IRQ_MASK) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(vcpu, control->int_vector);
	}

	vcpu->interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;

	vcpu->interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));

	if (vcpu->interrupt_window_open && vcpu->irq_summary)
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		kvm_do_inject_irq(vcpu);

	/*
	 * Interrupts blocked.  Wait for unblock.
	 */
	if (!vcpu->interrupt_window_open &&
	    (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
		control->intercept |= 1ULL << INTERCEPT_VINTR;
	} else
		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
						  vcpu->irq_summary == 0);
	kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = vcpu->cr8;
	kvm_run->apic_base = vcpu->apic_base;
}

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
}

static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}

static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;
	int r;

again:
	if (!vcpu->mmio_read_completed)
		do_interrupt_requests(vcpu, kvm_run);

	clgi();

	pre_svm_run(vcpu);

	save_host_msrs(vcpu);
	fs_selector = read_fs();
	gs_selector = read_gs();
	ldt_selector = read_ldt();
	vcpu->svm->host_cr2 = kvm_read_cr2();
	vcpu->svm->host_dr6 = read_dr6();
	vcpu->svm->host_dr7 = read_dr7();
	vcpu->svm->vmcb->save.cr2 = vcpu->cr2;

	if (vcpu->svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(vcpu->svm->host_db_regs);
		load_db_regs(vcpu->svm->db_regs);
	}

	if (vcpu->fpu_active) {
		fx_save(vcpu->host_fx_image);
		fx_restore(vcpu->guest_fx_image);
	}

	asm volatile (
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
#else
		"push %%ebx; push %%ecx; push %%edx;"
		"push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef CONFIG_X86_64
		"mov %c[rbx](%[vcpu]), %%rbx \n\t"
		"mov %c[rcx](%[vcpu]), %%rcx \n\t"
		"mov %c[rdx](%[vcpu]), %%rdx \n\t"
		"mov %c[rsi](%[vcpu]), %%rsi \n\t"
		"mov %c[rdi](%[vcpu]), %%rdi \n\t"
		"mov %c[rbp](%[vcpu]), %%rbp \n\t"
		"mov %c[r8](%[vcpu]),  %%r8  \n\t"
		"mov %c[r9](%[vcpu]),  %%r9  \n\t"
		"mov %c[r10](%[vcpu]), %%r10 \n\t"
		"mov %c[r11](%[vcpu]), %%r11 \n\t"
		"mov %c[r12](%[vcpu]), %%r12 \n\t"
		"mov %c[r13](%[vcpu]), %%r13 \n\t"
		"mov %c[r14](%[vcpu]), %%r14 \n\t"
		"mov %c[r15](%[vcpu]), %%r15 \n\t"
#else
		"mov %c[rbx](%[vcpu]), %%ebx \n\t"
		"mov %c[rcx](%[vcpu]), %%ecx \n\t"
		"mov %c[rdx](%[vcpu]), %%edx \n\t"
		"mov %c[rsi](%[vcpu]), %%esi \n\t"
		"mov %c[rdi](%[vcpu]), %%edi \n\t"
		"mov %c[rbp](%[vcpu]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[svm](%[vcpu]), %%rax \n\t"
		"mov %c[vmcb](%%rax), %%rax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%rax \n\t"
#else
		/* Enter guest mode */
		"push %%eax \n\t"
		"mov %c[svm](%[vcpu]), %%eax \n\t"
		"mov %c[vmcb](%%eax), %%eax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%eax \n\t"
#endif

		/* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[vcpu]) \n\t"
		"mov %%rcx, %c[rcx](%[vcpu]) \n\t"
		"mov %%rdx, %c[rdx](%[vcpu]) \n\t"
		"mov %%rsi, %c[rsi](%[vcpu]) \n\t"
		"mov %%rdi, %c[rdi](%[vcpu]) \n\t"
		"mov %%rbp, %c[rbp](%[vcpu]) \n\t"
		"mov %%r8,  %c[r8](%[vcpu]) \n\t"
		"mov %%r9,  %c[r9](%[vcpu]) \n\t"
		"mov %%r10, %c[r10](%[vcpu]) \n\t"
		"mov %%r11, %c[r11](%[vcpu]) \n\t"
		"mov %%r12, %c[r12](%[vcpu]) \n\t"
		"mov %%r13, %c[r13](%[vcpu]) \n\t"
		"mov %%r14, %c[r14](%[vcpu]) \n\t"
		"mov %%r15, %c[r15](%[vcpu]) \n\t"

		"pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
		"pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
		"pop  %%rbp; pop  %%rdi; pop  %%rsi;"
		"pop  %%rdx; pop  %%rcx; pop  %%rbx; \n\t"
#else
		"mov %%ebx, %c[rbx](%[vcpu]) \n\t"
		"mov %%ecx, %c[rcx](%[vcpu]) \n\t"
		"mov %%edx, %c[rdx](%[vcpu]) \n\t"
		"mov %%esi, %c[rsi](%[vcpu]) \n\t"
		"mov %%edi, %c[rdi](%[vcpu]) \n\t"
		"mov %%ebp, %c[rbp](%[vcpu]) \n\t"

		"pop  %%ebp; pop  %%edi; pop  %%esi;"
		"pop  %%edx; pop  %%ecx; pop  %%ebx; \n\t"
#endif
		:
		: [vcpu]"a"(vcpu),
		  [svm]"i"(offsetof(struct kvm_vcpu, svm)),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
		  [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
		  [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory" );

	if (vcpu->fpu_active) {
		fx_save(vcpu->guest_fx_image);
		fx_restore(vcpu->host_fx_image);
	}

	if ((vcpu->svm->vmcb->save.dr7 & 0xff))
		load_db_regs(vcpu->svm->host_db_regs);

	vcpu->cr2 = vcpu->svm->vmcb->save.cr2;
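	/* Restore host cr2, debug registers, segment selectors and MSRs. */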
	write_dr6(vcpu->svm->host_dr6);
	write_dr7(vcpu->svm->host_dr7);
	kvm_write_cr2(vcpu->svm->host_cr2);

	load_fs(fs_selector);
	load_gs(gs_selector);
	load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING))
		profile_hit(KVM_PROFILING,
			    (void *)(unsigned long)vcpu->svm->vmcb->save.rip);

	stgi();

	kvm_reput_irq(vcpu);

	vcpu->svm->next_rip = 0;

	if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= vcpu->svm->vmcb->control.exit_code;
		post_kvm_run_save(vcpu, kvm_run);
		return 0;
	}

	r = handle_exit(vcpu, kvm_run);
	if (r > 0) {
		if (signal_pending(current)) {
			++vcpu->stat.signal_exits;
			post_kvm_run_save(vcpu, kvm_run);
			kvm_run->exit_reason = KVM_EXIT_INTR;
			return -EINTR;
		}

		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			++vcpu->stat.request_irq_exits;
			post_kvm_run_save(vcpu, kvm_run);
			kvm_run->exit_reason = KVM_EXIT_INTR;
			return -EINTR;
		}
		kvm_resched(vcpu);
		goto again;
	}
	post_kvm_run_save(vcpu, kvm_run);
	return r;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	vcpu->svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		vcpu->svm->vmcb->save.cr0 |= CR0_TS_MASK;
		vcpu->fpu_active = 0;
	}
}

static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  uint32_t err_code)
{
	uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info;

	++vcpu->stat.pf_guest;

	if (is_page_fault(exit_int_info)) {

		vcpu->svm->vmcb->control.event_inj_err = 0;
		vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						     SVM_EVTINJ_VALID_ERR |
						     SVM_EVTINJ_TYPE_EXEPT |
						     DF_VECTOR;
		return;
	}
	vcpu->cr2 = addr;
	vcpu->svm->vmcb->save.cr2 = addr;
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_VALID_ERR |
					     SVM_EVTINJ_TYPE_EXEPT |
					     PF_VECTOR;
	vcpu->svm->vmcb->control.event_inj_err = err_code;
}


static int is_disabled(void)
{
	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
	hypercall[3] = 0xc3;
}

static struct kvm_arch_ops svm_arch_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,

	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_decache = svm_vcpu_decache,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_regs = svm_cache_regs,
	.decache_regs = svm_decache_regs,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.invlpg = svm_invlpg,
	.tlb_flush = svm_flush_tlb,
	.inject_page_fault = svm_inject_page_fault,

	.inject_gp = svm_inject_gp,

	.run = svm_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.vcpu_setup = svm_vcpu_setup,
	.patch_hypercall = svm_patch_hypercall,
};

static int __init svm_init(void)
{
	return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit_arch();
}

module_init(svm_init)
module_exit(svm_exit)