KVM: MMU: handle page removal with shadow mapping

Do not assume that a shadow mapping will always point to the same host
frame number: the host page backing a gfn can be replaced from under
KVM, in which case the stale mapping must be torn down before a new one
is installed. Fixes a crash with madvise(MADV_DONTNEED) (see the sketch
below).

[avi: move after first printk(), add another printk()]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
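
For context, a minimal userspace sketch (illustrative only, not part of
the patch) of why the host frame can change: MADV_DONTNEED drops the
pages backing an anonymous mapping, and the next access refaults, so the
kernel is free to back the same address with a different host frame. KVM
sees the same effect on guest memory, leaving a shadow pte pointing at a
frame that no longer backs the gfn.

    /*
     * Illustrative only: zap an anonymous page and refault it.
     * After madvise(MADV_DONTNEED) the contents are discarded and
     * the refaulted page is generally a different host frame.
     */
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;
            p[0] = 42;                       /* fault in a host page */
            madvise(p, 4096, MADV_DONTNEED); /* zap it; contents discarded */
            printf("after MADV_DONTNEED: %d\n", p[0]); /* refault: prints 0 */
            munmap(p, 4096);
            return 0;
    }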


arch/x86/kvm/mmu.c (+12 -1)
@@ -892,13 +892,24 @@
 		 int *ptwrite, gfn_t gfn, struct page *page)
 {
 	u64 spte;
-	int was_rmapped = is_rmap_pte(*shadow_pte);
+	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*shadow_pte);
+	hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
 		 " user_fault %d gfn %lx\n",
 		 __FUNCTION__, *shadow_pte, pt_access,
 		 write_fault, user_fault, gfn);
+
+	if (is_rmap_pte(*shadow_pte)) {
+		if (host_pfn != page_to_pfn(page)) {
+			pgprintk("hfn old %lx new %lx\n",
+				 host_pfn, page_to_pfn(page));
+			rmap_remove(vcpu->kvm, shadow_pte);
+		}
+		else
+			was_rmapped = 1;
+	}
 
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
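
Reading the change: was_rmapped is now set only when the existing spte
still points at the frame that currently backs the gfn. If the host pfn
has changed underneath, the stale rmap entry is dropped via
rmap_remove() before the new spte is installed, rather than leaving the
rmap pointing at a page that no longer backs the mapping.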