arch/arm64/kvm/hyp/nvhe/tlb.c (+7)

@@ -31,7 +31,14 @@
 		isb();
 	}

+	/*
+	 * __load_guest_stage2() includes an ISB only when the AT
+	 * workaround is applied. Take care of the opposite condition,
+	 * ensuring that we always have an ISB, but not two ISBs back
+	 * to back.
+	 */
 	__load_guest_stage2(mmu);
+	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }

 static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
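Note on the hunk above: the two ALTERNATIVE() sites select opposite instructions on the same capability, so exactly one ISB executes whether or not ARM64_WORKAROUND_SPECULATIVE_AT is applied. Below is a minimal stand-alone C model of that invariant (plain user-space C, not kernel code; the *_model() names only mirror the kernel functions for readability).

/*
 * Toy model of the "always one ISB, never two back to back" invariant
 * described in the comment added above. The booleans stand in for the
 * ALTERNATIVE() patching decisions; nothing here is kernel code.
 */
#include <assert.h>
#include <stdbool.h>

static int isb_count;

/* Models __load_guest_stage2(): its ALTERNATIVE("nop", "isb", ...) emits
 * an ISB only when the AT workaround is applied. */
static void load_guest_stage2_model(bool workaround_at)
{
	if (workaround_at)
		isb_count++;
}

/* Models the call site in __tlb_switch_to_guest(): the newly added
 * ALTERNATIVE("isb", "nop", ...) emits an ISB only when the workaround
 * is NOT applied. */
static void tlb_switch_to_guest_model(bool workaround_at)
{
	load_guest_stage2_model(workaround_at);
	if (!workaround_at)
		isb_count++;
}

int main(void)
{
	for (int cap = 0; cap <= 1; cap++) {
		isb_count = 0;
		tlb_switch_to_guest_model(cap);
		assert(isb_count == 1);	/* exactly one ISB either way */
	}
	return 0;
}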
arch/x86/kvm/vmx/vmx.c (+12, -10)

@@ -794,6 +794,18 @@
 	 */
 	if (is_guest_mode(vcpu))
 		eb |= get_vmcs12(vcpu)->exception_bitmap;
+	else {
+		/*
+		 * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
+		 * between guest and host. In that case we only care about present
+		 * faults. For vmcs02, however, PFEC_MASK and PFEC_MATCH are set in
+		 * prepare_vmcs02_rare.
+		 */
+		bool selective_pf_trap = enable_ept && (eb & (1u << PF_VECTOR));
+		int mask = selective_pf_trap ? PFERR_PRESENT_MASK : 0;
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, mask);
+	}

 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
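Note on the hunk above: PAGE_FAULT_ERROR_CODE_MASK/MATCH work together with bit 14 (#PF) of the exception bitmap. A #PF causes a VM exit when the bitmap bit agrees with whether the error code satisfies the mask/match test, so mask == match == PFERR_PRESENT_MASK intercepts only faults on present pages, while mask == match == 0 matches every error code and lets the bitmap bit alone decide. Below is a small stand-alone sketch of that rule (pf_causes_vmexit() is a hypothetical helper, not a KVM function; the two constants are redefined locally with their usual values).

/*
 * Stand-alone model (not KVM code) of the VMX page-fault filtering
 * rule that the mask/match writes above configure.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PF_VECTOR		14
#define PFERR_PRESENT_MASK	(1u << 0)

/* Hypothetical helper: true if a #PF with error code 'pfec' causes a
 * VM exit, given the exception bitmap and the PFEC mask/match pair. */
static bool pf_causes_vmexit(uint32_t exception_bitmap, uint32_t pfec,
			     uint32_t pfec_mask, uint32_t pfec_match)
{
	bool pf_trapped = exception_bitmap & (1u << PF_VECTOR);
	bool pfec_matches = (pfec & pfec_mask) == pfec_match;

	/* VM exit when the bitmap bit and the match result agree. */
	return pf_trapped == pfec_matches;
}

int main(void)
{
	uint32_t eb_pf = 1u << PF_VECTOR;

	/* mask == match == PFERR_PRESENT_MASK: only present faults exit. */
	assert(pf_causes_vmexit(eb_pf, PFERR_PRESENT_MASK,
				PFERR_PRESENT_MASK, PFERR_PRESENT_MASK));
	assert(!pf_causes_vmexit(eb_pf, 0 /* P=0, not-present fault */,
				 PFERR_PRESENT_MASK, PFERR_PRESENT_MASK));

	/* mask == match == 0: every error code matches, PF bit decides. */
	assert(pf_causes_vmexit(eb_pf, 0, 0, 0));
	return 0;
}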
@@ -4366,15 +4354,5 @@
 		/* Bit[6~0] are forced to 1, writes are ignored. */
 		vmx->pt_desc.guest.output_mask = 0x7F;
 		vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
-	}
-
-	/*
-	 * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
-	 * between guest and host. In that case we only care about present
-	 * faults.
-	 */
-	if (enable_ept) {
-		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, PFERR_PRESENT_MASK);
-		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, PFERR_PRESENT_MASK);
 	}
 }