[PATCH] KVM: Simplify is_long_mode()

Instead of doing tricky stuff with the arch dependent virtualization
registers, take a peek at the guest's efer.

This simplifies some code, and fixes some confusion in the mmu branch.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by Avi Kivity and committed by Linus Torvalds a9058ecd 1e885461

+14 -18
+9 -1
drivers/kvm/kvm.h
··· 278 struct kvm_segment *var, int seg); 279 void (*set_segment)(struct kvm_vcpu *vcpu, 280 struct kvm_segment *var, int seg); 281 - int (*is_long_mode)(struct kvm_vcpu *vcpu); 282 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); 283 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); 284 void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu, ··· 400 { 401 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 402 return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : NULL; 403 } 404 405 static inline int is_pae(struct kvm_vcpu *vcpu)
··· 278 struct kvm_segment *var, int seg); 279 void (*set_segment)(struct kvm_vcpu *vcpu, 280 struct kvm_segment *var, int seg); 281 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); 282 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); 283 void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu, ··· 401 { 402 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 403 return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : NULL; 404 + } 405 + 406 + static inline int is_long_mode(struct kvm_vcpu *vcpu) 407 + { 408 + #ifdef CONFIG_X86_64 409 + return vcpu->shadow_efer & EFER_LME; 410 + #else 411 + return 0; 412 + #endif 413 } 414 415 static inline int is_pae(struct kvm_vcpu *vcpu)
+2 -2
drivers/kvm/kvm_main.c
··· 398 return; 399 } 400 401 - if (kvm_arch_ops->is_long_mode(vcpu)) { 402 if (!(cr4 & CR4_PAE_MASK)) { 403 printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while " 404 "in long mode\n"); ··· 425 426 void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 427 { 428 - if (kvm_arch_ops->is_long_mode(vcpu)) { 429 if ( cr3 & CR3_L_MODE_RESEVED_BITS) { 430 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); 431 inject_gp(vcpu);
··· 398 return; 399 } 400 401 + if (is_long_mode(vcpu)) { 402 if (!(cr4 & CR4_PAE_MASK)) { 403 printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while " 404 "in long mode\n"); ··· 425 426 void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 427 { 428 + if (is_long_mode(vcpu)) { 429 if ( cr3 & CR3_L_MODE_RESEVED_BITS) { 430 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); 431 inject_gp(vcpu);
+1 -1
drivers/kvm/mmu.c
··· 578 579 if (!is_paging(vcpu)) 580 return nonpaging_init_context(vcpu); 581 - else if (kvm_arch_ops->is_long_mode(vcpu)) 582 return paging64_init_context(vcpu); 583 else if (is_pae(vcpu)) 584 return paging32E_init_context(vcpu);
··· 578 579 if (!is_paging(vcpu)) 580 return nonpaging_init_context(vcpu); 581 + else if (is_long_mode(vcpu)) 582 return paging64_init_context(vcpu); 583 else if (is_pae(vcpu)) 584 return paging32E_init_context(vcpu);
+2 -2
drivers/kvm/paging_tmpl.h
··· 68 hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK); 69 walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0); 70 71 - ASSERT((!kvm_arch_ops->is_long_mode(vcpu) && is_pae(vcpu)) || 72 (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0); 73 74 walker->table = (pt_element_t *)( (unsigned long)walker->table | ··· 131 (walker->table[index] & PT_PAGE_SIZE_MASK) && 132 (PTTYPE == 64 || is_pse(vcpu)))) 133 return &walker->table[index]; 134 - if (walker->level != 3 || kvm_arch_ops->is_long_mode(vcpu)) 135 walker->inherited_ar &= walker->table[index]; 136 paddr = safe_gpa_to_hpa(vcpu, walker->table[index] & PT_BASE_ADDR_MASK); 137 kunmap_atomic(walker->table, KM_USER0);
··· 68 hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK); 69 walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0); 70 71 + ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || 72 (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0); 73 74 walker->table = (pt_element_t *)( (unsigned long)walker->table | ··· 131 (walker->table[index] & PT_PAGE_SIZE_MASK) && 132 (PTTYPE == 64 || is_pse(vcpu)))) 133 return &walker->table[index]; 134 + if (walker->level != 3 || is_long_mode(vcpu)) 135 walker->inherited_ar &= walker->table[index]; 136 paddr = safe_gpa_to_hpa(vcpu, walker->table[index] & PT_BASE_ADDR_MASK); 137 kunmap_atomic(walker->table, KM_USER0);
-6
drivers/kvm/svm.c
··· 166 asm volatile ("mov %0, %%dr7" :: "r" (val)); 167 } 168 169 - static inline int svm_is_long_mode(struct kvm_vcpu *vcpu) 170 - { 171 - return vcpu->svm->vmcb->save.efer & KVM_EFER_LMA; 172 - } 173 - 174 static inline void force_new_asid(struct kvm_vcpu *vcpu) 175 { 176 vcpu->svm->asid_generation--; ··· 1604 .get_segment_base = svm_get_segment_base, 1605 .get_segment = svm_get_segment, 1606 .set_segment = svm_set_segment, 1607 - .is_long_mode = svm_is_long_mode, 1608 .get_cs_db_l_bits = svm_get_cs_db_l_bits, 1609 .set_cr0 = svm_set_cr0, 1610 .set_cr0_no_modeswitch = svm_set_cr0,
··· 166 asm volatile ("mov %0, %%dr7" :: "r" (val)); 167 } 168 169 static inline void force_new_asid(struct kvm_vcpu *vcpu) 170 { 171 vcpu->svm->asid_generation--; ··· 1609 .get_segment_base = svm_get_segment_base, 1610 .get_segment = svm_get_segment, 1611 .set_segment = svm_set_segment, 1612 .get_cs_db_l_bits = svm_get_cs_db_l_bits, 1613 .set_cr0 = svm_set_cr0, 1614 .set_cr0_no_modeswitch = svm_set_cr0,
-6
drivers/kvm/vmx.c
··· 900 vmcs_write32(sf->ar_bytes, ar); 901 } 902 903 - static int vmx_is_long_mode(struct kvm_vcpu *vcpu) 904 - { 905 - return vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_CONTROLS_IA32E_MASK; 906 - } 907 - 908 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 909 { 910 u32 ar = vmcs_read32(GUEST_CS_AR_BYTES); ··· 1970 .get_segment_base = vmx_get_segment_base, 1971 .get_segment = vmx_get_segment, 1972 .set_segment = vmx_set_segment, 1973 - .is_long_mode = vmx_is_long_mode, 1974 .get_cs_db_l_bits = vmx_get_cs_db_l_bits, 1975 .set_cr0 = vmx_set_cr0, 1976 .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
··· 900 vmcs_write32(sf->ar_bytes, ar); 901 } 902 903 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 904 { 905 u32 ar = vmcs_read32(GUEST_CS_AR_BYTES); ··· 1975 .get_segment_base = vmx_get_segment_base, 1976 .get_segment = vmx_get_segment, 1977 .set_segment = vmx_set_segment, 1978 .get_cs_db_l_bits = vmx_get_cs_db_l_bits, 1979 .set_cr0 = vmx_set_cr0, 1980 .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,