arch/s390/kvm/vsie.c (+1 -1)
arch/x86/include/asm/kvm_host.h (+2)
···
 		unsigned int cr4_smap:1;
 		unsigned int cr4_smep:1;
 		unsigned int cr4_la57:1;
+		unsigned int maxphyaddr:6;
 	};
 };
···
 	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			   u64 *spte, const void *pte);
 	hpa_t root_hpa;
+	gpa_t root_cr3;
 	union kvm_mmu_role mmu_role;
 	u8 root_level;
 	u8 shadow_root_level;
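The two kvm_host.h hunks add the state the rest of the series keys off: maxphyaddr folds the guest's physical-address width into kvm_mmu_extended_role, and root_cr3 records which guest CR3 the current root_hpa was built for. As a rough, hypothetical illustration of the role union (trimmed-down types, not the kernel's actual definition), packing everything into one word lets two roles be compared with a single integer compare, and 6 bits comfortably hold any x86 MAXPHYADDR, which is at most 52:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical, trimmed-down analogue of kvm_mmu_extended_role; the real
 * union lives in arch/x86/include/asm/kvm_host.h and has more fields. */
union mmu_extended_role {
	uint32_t word;
	struct {
		uint32_t valid:1;
		uint32_t cr4_smap:1;
		uint32_t cr4_smep:1;
		uint32_t cr4_la57:1;
		uint32_t maxphyaddr:6;	/* 6 bits hold 0..63; x86 caps at 52 */
	};
};

int main(void)
{
	union mmu_extended_role a = { .word = 0 }, b = { .word = 0 };

	a.valid = b.valid = 1;
	a.maxphyaddr = 46;	/* e.g. a host with 46-bit physical addresses */
	b.maxphyaddr = 52;	/* e.g. a guest claiming the architectural max */

	/* Different MAXPHYADDR -> different role word, so a cached root
	 * built under one width is never reused under the other. */
	printf("same role? %s\n", a.word == b.word ? "yes" : "no");
	return 0;
}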
arch/x86/kvm/cpuid.c (+4)
···
 	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
 	unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
 	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
+	unsigned f_la57 = 0;
 
 	/* cpuid 1.edx */
 	const u32 kvm_cpuid_1_edx_x86_features =
···
 		// TSC_ADJUST is emulated
 		entry->ebx |= F(TSC_ADJUST);
 		entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+		f_la57 = entry->ecx & F(LA57);
 		cpuid_mask(&entry->ecx, CPUID_7_ECX);
+		/* Set LA57 based on hardware capability. */
+		entry->ecx |= f_la57;
 		entry->ecx |= f_umip;
 		/* PKU is not yet implemented for shadow paging. */
 		if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
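Why the dance with f_la57: cpuid_mask() ANDs the guest-visible leaf with the host kernel's own cached capability word, and a kernel running 4-level paging (for instance one built without CONFIG_X86_5LEVEL) does not have LA57 set for itself, so the mask alone would strip the bit even on capable hardware. The hunk saves the raw hardware bit before masking and ORs it back afterwards. A minimal sketch of the save-mask-restore pattern, with stand-in values rather than the kernel's cpuid plumbing:

#include <stdio.h>
#include <stdint.h>

#define F_LA57	(1u << 16)	/* CPUID.(EAX=7,ECX=0):ECX bit 16 */

/* Stand-in for cpuid_mask(): clamp a guest leaf to what the host kernel
 * recorded for itself. Both inputs are made up for the demo. */
static uint32_t mask_leaf(uint32_t guest_ecx, uint32_t kernel_ecx)
{
	return guest_ecx & kernel_ecx;
}

int main(void)
{
	uint32_t hw_ecx = F_LA57;	/* hardware supports 5-level paging */
	uint32_t kernel_ecx = 0;	/* host kernel doesn't use it itself */
	uint32_t entry_ecx = hw_ecx;

	uint32_t f_la57 = entry_ecx & F_LA57;		/* save hardware bit */
	entry_ecx = mask_leaf(entry_ecx, kernel_ecx);	/* mask clears LA57 */
	entry_ecx |= f_la57;				/* restore it */

	printf("LA57 exposed to guest: %s\n",
	       (entry_ecx & F_LA57) ? "yes" : "no");
	return 0;
}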
arch/x86/kvm/mmu.c (+14 -4)
···
 						   &invalid_list);
 			mmu->root_hpa = INVALID_PAGE;
 		}
+		mmu->root_cr3 = 0;
 	}
 
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
···
 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
 	} else
 		BUG();
+	vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
 
 	return 0;
 }
···
 {
 	struct kvm_mmu_page *sp;
 	u64 pdptr, pm_mask;
-	gfn_t root_gfn;
+	gfn_t root_gfn, root_cr3;
 	int i;
 
-	root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
+	root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+	root_gfn = root_cr3 >> PAGE_SHIFT;
 
 	if (mmu_check_root(vcpu, root_gfn))
 		return 1;
···
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu->root_hpa = root;
-		return 0;
+		goto set_root_cr3;
 	}
 
 	/*
···
 
 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
 	}
+
+set_root_cr3:
+	vcpu->arch.mmu->root_cr3 = root_cr3;
 
 	return 0;
 }
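mmu_alloc_shadow_roots() now snapshots the guest CR3 once, derives root_gfn from that snapshot, and records the same value at a single exit point (the new set_root_cr3 label, which the early four-level path reaches via goto instead of returning directly). The reason to record it at all: while the vCPU is switching between MMUs, mmu->get_cr3() does not necessarily return the CR3 the current root was built for, so later comparisons must use the stashed value. A toy model of that divergence, with all names hypothetical:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Toy MMU mirroring the invariant established above: root_cr3 always names
 * the CR3 that root_hpa was built for. Nothing here is kernel code. */
struct toy_mmu {
	uint64_t root_hpa;
	uint64_t root_cr3;
	uint64_t (*get_cr3)(void);
};

static uint64_t current_guest_cr3;
static uint64_t toy_get_cr3(void) { return current_guest_cr3; }

static int toy_alloc_shadow_roots(struct toy_mmu *mmu)
{
	uint64_t root_cr3 = mmu->get_cr3();	/* read the guest CR3 once */
	uint64_t root_gfn = root_cr3 >> PAGE_SHIFT;

	mmu->root_hpa = (root_gfn << PAGE_SHIFT) | 1;	/* stand-in root */
	mmu->root_cr3 = root_cr3;	/* single exit point records it */
	return 0;
}

int main(void)
{
	struct toy_mmu mmu = { .get_cr3 = toy_get_cr3 };

	current_guest_cr3 = 0x1000;
	toy_alloc_shadow_roots(&mmu);

	current_guest_cr3 = 0x2000;	/* the hooks moved on; the root didn't */
	printf("get_cr3()=%#llx, but the root was built for %#llx\n",
	       (unsigned long long)mmu.get_cr3(),
	       (unsigned long long)mmu.root_cr3);
	return 0;
}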
···
 	struct kvm_mmu_root_info root;
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 
-	root.cr3 = mmu->get_cr3(vcpu);
+	root.cr3 = mmu->root_cr3;
 	root.hpa = mmu->root_hpa;
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
···
 	}
 
 	mmu->root_hpa = root.hpa;
+	mmu->root_cr3 = root.cr3;
 
 	return i < KVM_MMU_NUM_PREV_ROOTS;
 }
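These two hunks are the consumer: the fast CR3 switch path stashes the current root under mmu->root_cr3, the value recorded when the root was built, instead of calling mmu->get_cr3() again, and then walks the prev_roots LRU looking for the new CR3. A self-contained miniature of that lookup, with hypothetical types and sizes but the same swap-down-the-list behaviour:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NUM_PREV_ROOTS 3
#define INVALID_PAGE ((uint64_t)-1)

struct root_info { uint64_t cr3, hpa; };

struct mini_mmu {
	uint64_t root_hpa, root_cr3;
	struct root_info prev_roots[NUM_PREV_ROOTS];
};

static bool cached_root_available(struct mini_mmu *mmu, uint64_t new_cr3)
{
	/* Stash the current root under the CR3 it was built for. */
	struct root_info root = { .cr3 = mmu->root_cr3, .hpa = mmu->root_hpa };
	unsigned int i;

	for (i = 0; i < NUM_PREV_ROOTS; i++) {
		struct root_info tmp = root;	/* swap(root, prev_roots[i]) */

		root = mmu->prev_roots[i];
		mmu->prev_roots[i] = tmp;
		if (root.cr3 == new_cr3)
			break;
	}

	mmu->root_hpa = root.hpa;	/* either the hit or the LRU victim */
	mmu->root_cr3 = root.cr3;
	return i < NUM_PREV_ROOTS;
}

int main(void)
{
	struct mini_mmu mmu = {
		.root_hpa = 0x5000, .root_cr3 = 0x1000,
		.prev_roots = { { 0x2000, 0x6000 }, { 0, INVALID_PAGE },
				{ 0, INVALID_PAGE } },
	};

	/* Hit: 0x2000 is cached, so it becomes the active root. */
	printf("cached? %d, root_hpa=%#llx, root_cr3=%#llx\n",
	       cached_root_available(&mmu, 0x2000),
	       (unsigned long long)mmu.root_hpa,
	       (unsigned long long)mmu.root_cr3);
	return 0;
}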
···
 	ext.cr4_pse = !!is_pse(vcpu);
 	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
 	ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
+	ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	ext.valid = 1;
 
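maxphyaddr joins the extended role because the reserved-bit masks attached to each root depend on the guest's MAXPHYADDR: physical-address bits [51:maxphyaddr] of a paging-structure entry must be zero. Without it in the role, a cached root could survive a userspace CPUID update and keep enforcing stale reserved-bit checks. The rsvd_bits() formula below matches the kernel helper in arch/x86/kvm/mmu.h; the demo around it is illustrative:

#include <stdio.h>
#include <stdint.h>

/* Same formula as the kernel's rsvd_bits() helper: set bits s..e. */
static inline uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	/* Two plausible vCPU configurations, two different masks; this is
	 * why a root built for one MAXPHYADDR can't serve the other. */
	for (int maxphyaddr = 46; maxphyaddr <= 52; maxphyaddr += 6)
		printf("MAXPHYADDR=%d -> reserved PTE bits %#018llx\n",
		       maxphyaddr,
		       (unsigned long long)rsvd_bits(maxphyaddr, 51));
	return 0;
}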
···
 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 
 	vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
+	vcpu->arch.root_mmu.root_cr3 = 0;
 	vcpu->arch.root_mmu.translate_gpa = translate_gpa;
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
 		vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
 
 	vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
+	vcpu->arch.guest_mmu.root_cr3 = 0;
 	vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
 		vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;