Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/x86/kvm/mmu.c at v2.6.38-rc2 (3843 lines, 95 kB)
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *	Yaniv Kamay <yaniv@qumranet.com>
 *	Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>

/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
bool tdp_enabled = false;

enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
};

char *audit_point_name[] = {
	"pre page fault",
	"post page fault",
	"pre pte write",
	"post pte write",
	"pre sync",
	"post sync"
};

#undef MMU_DEBUG

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#ifdef MMU_DEBUG
static int dbg = 0;
module_param(dbg, bool, 0644);
#endif

static int oos_shadow = 1;
module_param(oos_shadow, bool, 0644);

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x) \
	if (!(x)) { \
		printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
		       __FILE__, __LINE__, #x); \
	}
#endif

#define PTE_PREFETCH_NUM 8

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define RMAP_EXT 4

#define ACC_EXEC_MASK 1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
#define ACC_USER_MASK PT_USER_MASK
#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

struct kvm_rmap_desc {
	u64 *sptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	int level;
	u64 *sptep;
	unsigned index;
};

#define for_each_shadow_entry(_vcpu, _addr, _walker) \
	for (shadow_walk_init(&(_walker), _vcpu, _addr); \
	     shadow_walk_okay(&(_walker)); \
	     shadow_walk_next(&(_walker)))

typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
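/*
 * Illustrative sketch, not part of mmu.c: a user-space re-derivation of the
 * PT64_LEVEL_SHIFT/PT64_INDEX arithmetic above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12). Each 64-bit paging level decodes 9 address bits, so
 * a 4-level walk consumes bits 12..47 of the address.
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	#define PAGE_SHIFT 12
 *	#define LEVEL_BITS 9
 *	#define LEVEL_SHIFT(l) (PAGE_SHIFT + ((l) - 1) * LEVEL_BITS)
 *	#define INDEX(a, l) (((a) >> LEVEL_SHIFT(l)) & ((1 << LEVEL_BITS) - 1))
 *
 *	int main(void)
 *	{
 *		uint64_t addr = 0x7f1234567000ULL;
 *		for (int level = 4; level >= 1; level--)
 *			printf("level %d index %llu\n", level,
 *			       (unsigned long long)INDEX(addr, level));
 *		return 0;
 *	}
 */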
static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_gpte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_rmap_spte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
	if (is_large_pte(pte))
		return 1;
	return 0;
}

static pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	set_64bit(sptep, spte);
}

static u64 __xchg_spte(u64 *sptep, u64 new_spte)
{
#ifdef CONFIG_X86_64
	return xchg(sptep, new_spte);
#else
	u64 old_spte;

	do {
		old_spte = *sptep;
	} while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);

	return old_spte;
#endif
}

static bool spte_has_volatile_bits(u64 spte)
{
	if (!shadow_accessed_mask)
		return false;

	if (!is_shadow_present_pte(spte))
		return false;

	if ((spte & shadow_accessed_mask) &&
	    (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
		return false;

	return true;
}

static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
{
	return (old_spte & bit_mask) && !(new_spte & bit_mask);
}

static void update_spte(u64 *sptep, u64 new_spte)
{
	u64 mask, old_spte = *sptep;

	WARN_ON(!is_rmap_spte(new_spte));

	new_spte |= old_spte & shadow_dirty_mask;

	mask = shadow_accessed_mask;
	if (is_writable_pte(old_spte))
		mask |= shadow_dirty_mask;

	if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
		__set_spte(sptep, new_spte);
	else
		old_spte = __xchg_spte(sptep, new_spte);

	if (!shadow_accessed_mask)
		return;

	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
}
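/*
 * Illustrative sketch, not part of mmu.c: why update_spte() compares old
 * and new spte values. When hardware can set Accessed/Dirty bits
 * concurrently, the write must be an atomic exchange, and any bit that was
 * set in the old value but is clear in the new one must be propagated to
 * the backing struct page. Stand-alone user-space model (assumed layout:
 * bit 5 = accessed, bit 6 = dirty, as on x86):
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	#define ACCESSED (1ULL << 5)
 *	#define DIRTY    (1ULL << 6)
 *
 *	static int bit_cleared(uint64_t o, uint64_t n, uint64_t m)
 *	{
 *		return (o & m) && !(n & m);
 *	}
 *
 *	int main(void)
 *	{
 *		uint64_t old = 0x1000 | ACCESSED | DIRTY; // hardware set A/D
 *		uint64_t new = 0x1000;        // e.g. write-protect rewrite
 *
 *		if (bit_cleared(old, new, ACCESSED))
 *			printf("kvm_set_pfn_accessed analogue\n");
 *		if (bit_cleared(old, new, DIRTY))
 *			printf("kvm_set_pfn_dirty analogue\n");
 *		return 0;
 *	}
 */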
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
				  struct kmem_cache *cache)
{
	while (mc->nobjs)
		kmem_cache_free(cache, mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 4 + PTE_PREFETCH_NUM);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
			      mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kmem_cache_free(pte_chain_cache, pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kmem_cache_free(rmap_desc_cache, rd);
}

static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
	if (sp->role.direct)
		BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
	else
		sp->gfns[index] = gfn;
}
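/*
 * Illustrative sketch, not part of mmu.c: the topup/alloc pattern above.
 * Objects are pre-allocated while sleeping is still allowed; later,
 * mmu_memory_cache_alloc() can run under the mmu_lock with no risk of
 * sleeping in the allocator. User-space model with malloc standing in for
 * kmem_cache_zalloc (all names here are invented for the example):
 *
 *	#include <stdlib.h>
 *	#include <assert.h>
 *
 *	#define CACHE_CAP 8
 *
 *	struct obj_cache { void *objects[CACHE_CAP]; int nobjs; };
 *
 *	static int cache_topup(struct obj_cache *c, size_t size, int min)
 *	{
 *		if (c->nobjs >= min)
 *			return 0;
 *		while (c->nobjs < CACHE_CAP) {
 *			void *o = calloc(1, size);
 *			if (!o)
 *				return -1;
 *			c->objects[c->nobjs++] = o;
 *		}
 *		return 0;
 *	}
 *
 *	static void *cache_alloc(struct obj_cache *c)
 *	{
 *		assert(c->nobjs);	// must have been topped up before
 *		return c->objects[--c->nobjs];
 *	}
 */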
/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
					      struct kvm_memory_slot *slot,
					      int level)
{
	unsigned long idx;

	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	      (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
	return &slot->lpage_info[level - 2][idx];
}

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;
	int i;

	slot = gfn_to_memslot(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->write_count += 1;
	}
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;
	int i;

	slot = gfn_to_memslot(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->write_count -= 1;
		WARN_ON(linfo->write_count < 0);
	}
}

static int has_wrprotected_page(struct kvm *kvm,
				gfn_t gfn,
				int level)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;

	slot = gfn_to_memslot(kvm, gfn);
	if (slot) {
		linfo = lpage_info_slot(gfn, slot, level);
		return linfo->write_count;
	}

	return 1;
}

static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
	unsigned long page_size;
	int i, ret = 0;

	page_size = kvm_host_page_size(kvm, gfn);

	for (i = PT_PAGE_TABLE_LEVEL;
	     i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
		if (page_size >= KVM_HPAGE_SIZE(i))
			ret = i;
		else
			break;
	}

	return ret;
}

static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	struct kvm_memory_slot *slot;
	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
	if (slot && slot->dirty_bitmap)
		return true;
	return false;
}

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	int host_level, level, max_level;

	host_level = host_mapping_level(vcpu->kvm, large_gfn);

	if (host_level == PT_PAGE_TABLE_LEVEL)
		return host_level;

	max_level = kvm_x86_ops->get_lpage_level() < host_level ?
		kvm_x86_ops->get_lpage_level() : host_level;

	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
			break;

	return level - 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;

	slot = gfn_to_memslot(kvm, gfn);
	if (likely(level == PT_PAGE_TABLE_LEVEL))
		return &slot->rmap[gfn - slot->base_gfn];

	linfo = lpage_info_slot(gfn, slot, level);

	return &linfo->rmap_pde;
}
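/*
 * Illustrative sketch, not part of mmu.c: the index arithmetic in
 * lpage_info_slot(). Subtracting the shifted base_gfn (rather than
 * shifting the difference) is what makes unaligned slots work: gfns that
 * share a host large page land on the same index even when the slot does
 * not start on a large-page boundary. Assuming 2 MiB large pages, i.e.
 * 512 gfns per large page:
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	#define HPAGE_GFN_SHIFT 9	// 2 MiB / 4 KiB = 512
 *
 *	static unsigned long lpage_idx(uint64_t gfn, uint64_t base_gfn)
 *	{
 *		return (gfn >> HPAGE_GFN_SHIFT) - (base_gfn >> HPAGE_GFN_SHIFT);
 *	}
 *
 *	int main(void)
 *	{
 *		uint64_t base = 0x1ff;	// deliberately unaligned slot base
 *		printf("%lu %lu\n", lpage_idx(0x1ff, base),
 *		       lpage_idx(0x200, base));	// 0 1: different large pages
 *		return 0;
 *	}
 */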
/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 *
 * Returns the number of rmap entries before the spte was added or zero if
 * the spte was not added.
 *
 */
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i, count = 0;

	if (!is_rmap_spte(*spte))
		return count;
	sp = page_header(__pa(spte));
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->sptes[0] = (u64 *)*rmapp;
		desc->sptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
		++count;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->sptes[RMAP_EXT-1] && desc->more) {
			desc = desc->more;
			count += RMAP_EXT;
		}
		if (desc->sptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; ++i)
			++count;
		desc->sptes[i] = spte;
	}
	return count;
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
		;
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->sptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	unsigned long *rmapp;
	int i;

	sp = page_header(__pa(spte));
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p 1->0\n", spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p 1->BUG\n", spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p many->many\n", spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
				if (desc->sptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		pr_err("rmap_remove: %p many->many\n", spte);
		BUG();
	}
}

static int set_spte_track_bits(u64 *sptep, u64 new_spte)
{
	pfn_t pfn;
	u64 old_spte = *sptep;

	if (!spte_has_volatile_bits(old_spte))
		__set_spte(sptep, new_spte);
	else
		old_spte = __xchg_spte(sptep, new_spte);

	if (!is_rmap_spte(old_spte))
		return 0;

	pfn = spte_to_pfn(old_spte);
	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(pfn);
	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
		kvm_set_pfn_dirty(pfn);
	return 1;
}

static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
	if (set_spte_track_bits(sptep, new_spte))
		rmap_remove(kvm, sptep);
}
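/*
 * Illustrative sketch, not part of mmu.c: the bit-zero pointer tagging used
 * by the rmap above. A single reverse mapping is stored inline in the rmap
 * word; bit 0 set means the word holds a pointer to an out-of-line
 * descriptor chain instead. Pointers are at least word aligned, so bit 0
 * is free to use as the tag. Stand-alone model:
 *
 *	#include <stdint.h>
 *
 *	struct desc { uint64_t *sptes[4]; struct desc *more; };
 *
 *	static int rmap_count(unsigned long rmapp)
 *	{
 *		if (!rmapp)
 *			return 0;
 *		if (!(rmapp & 1))
 *			return 1;	// single spte stored inline
 *		int n = 0;
 *		for (struct desc *d = (struct desc *)(rmapp & ~1ul);
 *		     d; d = d->more)
 *			for (int i = 0; i < 4 && d->sptes[i]; i++)
 *				n++;
 *		return n;
 *	}
 */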
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->sptes[i];
			prev_spte = desc->sptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

static int rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int i, write_protected = 0;

	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writable_pte(*spte)) {
			update_spte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}

	/* check for huge page mappings */
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		rmapp = gfn_to_rmap(kvm, gfn, i);
		spte = rmap_next(kvm, rmapp, NULL);
		while (spte) {
			BUG_ON(!spte);
			BUG_ON(!(*spte & PT_PRESENT_MASK));
			BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
			pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
			if (is_writable_pte(*spte)) {
				drop_spte(kvm, spte,
					  shadow_trap_nonpresent_pte);
				--kvm->stat.lpages;
				spte = NULL;
				write_protected = 1;
			}
			spte = rmap_next(kvm, rmapp, spte);
		}
	}

	return write_protected;
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long data)
{
	u64 *spte;
	int need_tlb_flush = 0;

	while ((spte = rmap_next(kvm, rmapp, NULL))) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
		drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
		need_tlb_flush = 1;
	}
	return need_tlb_flush;
}

static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
			     unsigned long data)
{
	int need_flush = 0;
	u64 *spte, new_spte;
	pte_t *ptep = (pte_t *)data;
	pfn_t new_pfn;

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!is_shadow_present_pte(*spte));
		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
		need_flush = 1;
		if (pte_write(*ptep)) {
			drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
			spte = rmap_next(kvm, rmapp, NULL);
		} else {
			new_spte = *spte & ~PT64_BASE_ADDR_MASK;
			new_spte |= (u64)new_pfn << PAGE_SHIFT;

			new_spte &= ~PT_WRITABLE_MASK;
			new_spte &= ~SPTE_HOST_WRITEABLE;
			new_spte &= ~shadow_accessed_mask;
			set_spte_track_bits(spte, new_spte);
			spte = rmap_next(kvm, rmapp, spte);
		}
	}
	if (need_flush)
		kvm_flush_remote_tlbs(kvm);

	return 0;
}
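/*
 * Illustrative sketch, not part of mmu.c: what rmap_write_protect() does to
 * each mapping. 4 KiB sptes just lose their writable bit; a writable huge
 * spte is dropped entirely, because dirty logging needs 4 KiB granularity
 * and the range will be rebuilt with small pages on the next fault.
 * Stand-alone model (assumed layout: bit 1 = writable, bit 7 = page size,
 * as on x86):
 *
 *	#include <stdint.h>
 *
 *	#define W    (1ULL << 1)
 *	#define HUGE (1ULL << 7)
 *
 *	static int write_protect(uint64_t *spte)
 *	{
 *		if (!(*spte & W))
 *			return 0;
 *		if (*spte & HUGE)
 *			*spte = 0;	// drop; refault as 4 KiB pages
 *		else
 *			*spte &= ~W;	// keep mapping, read-only
 *		return 1;		// caller must flush TLBs
 *	}
 */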
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  unsigned long data,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long data))
{
	int i, j;
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;

	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
			gfn_t gfn = memslot->base_gfn + gfn_offset;

			ret = handler(kvm, &memslot->rmap[gfn_offset], data);

			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
				struct kvm_lpage_info *linfo;

				linfo = lpage_info_slot(gfn, memslot,
							PT_DIRECTORY_LEVEL + j);
				ret |= handler(kvm, &linfo->rmap_pde, data);
			}
			trace_kvm_age_page(hva, memslot, ret);
			retval |= ret;
		}
	}

	return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long data)
{
	u64 *spte;
	int young = 0;

	/*
	 * Emulate the accessed bit for EPT, by checking if this page has
	 * an EPT mapping, and clearing it if it does. On the next access,
	 * a new EPT mapping will be established.
	 * This has some overhead, but not as much as the cost of swapping
	 * out actively used pages or breaking up actively used hugepages.
	 */
	if (!shadow_accessed_mask)
		return kvm_unmap_rmapp(kvm, rmapp, data);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		int _young;
		u64 _spte = *spte;
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		_young = _spte & PT_ACCESSED_MASK;
		if (_young) {
			young = 1;
			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	return young;
}
static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long data)
{
	u64 *spte;
	int young = 0;

	/*
	 * If there's no accessed bit in the secondary pte set by the
	 * hardware, it's up to gup-fast/gup to set the accessed bit in
	 * the primary pte or in the page structure.
	 */
	if (!shadow_accessed_mask)
		goto out;

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		u64 _spte = *spte;
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		young = _spte & PT_ACCESSED_MASK;
		if (young) {
			young = 1;
			break;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
out:
	return young;
}

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(spte));

	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);

	kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
	kvm_flush_remote_tlbs(vcpu->kvm);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

/*
 * This value is the sum of all of the kvm instances'
 * kvm->arch.n_used_mmu_pages values. We need a global,
 * aggregate version in order to make the slab shrinker
 * faster.
 */
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
{
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	hlist_del(&sp->hash_link);
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	if (!sp->role.direct)
		__free_page(virt_to_page(sp->gfns));
	kmem_cache_free(mmu_page_header_cache, sp);
	kvm_mod_used_mmu_pages(kvm, -1);
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte, int direct)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	if (!direct)
		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
						  PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
}
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	struct kvm_mmu_page *parent_sp;
	int i;

	if (!sp->multimapped && sp->parent_pte) {
		parent_sp = page_header(__pa(sp->parent_pte));
		fn(parent_sp, sp->parent_pte);
		return;
	}

	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			u64 *spte = pte_chain->parent_ptes[i];

			if (!spte)
				break;
			parent_sp = page_header(__pa(spte));
			fn(parent_sp, spte);
		}
}

static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
	mmu_parent_walk(sp, mark_unsync);
}

static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
{
	unsigned int index;

	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
		return;
	if (sp->unsync_children++)
		return;
	kvm_mmu_mark_parents_unsync(sp);
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 1;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

#define for_each_unsync_children(bitmap, idx) \
	for (idx = find_first_bit(bitmap, 512); \
	     idx < 512; \
	     idx = find_next_bit(bitmap, 512, idx+1))
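/*
 * Illustrative sketch, not part of mmu.c: the for_each_unsync_children()
 * pattern above, with the kernel's find_first_bit/find_next_bit replaced
 * by a naive user-space scan over the 512-bit bitmap (one bit per spte in
 * a shadow page):
 *
 *	#include <stdio.h>
 *
 *	#define BITS 512
 *	#define LONG_BITS (8 * sizeof(unsigned long))
 *	static unsigned long bm[BITS / (8 * sizeof(unsigned long))];
 *
 *	static int next_bit(int from)
 *	{
 *		for (int i = from; i < BITS; i++)
 *			if (bm[i / LONG_BITS] & (1UL << (i % LONG_BITS)))
 *				return i;
 *		return BITS;
 *	}
 *
 *	int main(void)
 *	{
 *		bm[0] = 0x11;	// bits 0 and 4 set
 *		for (int idx = next_bit(0); idx < BITS; idx = next_bit(idx + 1))
 *			printf("unsync child at index %d\n", idx);
 *		return 0;
 *	}
 */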
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{
	int i;

	if (sp->unsync)
		for (i = 0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			     struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;

	for_each_unsync_children(sp->unsync_child_bitmap, i) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		if (!is_shadow_present_pte(ent) || is_large_pte(ent))
			goto clear_child_bitmap;

		child = page_header(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret)
				goto clear_child_bitmap;
			else if (ret > 0)
				nr_unsync_leaf += ret;
			else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			goto clear_child_bitmap;

		continue;

clear_child_bitmap:
		__clear_bit(i, sp->unsync_child_bitmap);
		sp->unsync_children--;
		WARN_ON((int)sp->unsync_children < 0);
	}


	return nr_unsync_leaf;
}

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, 0);
	return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);

#define for_each_gfn_sp(kvm, sp, gfn, pos) \
  hlist_for_each_entry(sp, pos, \
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
	if ((sp)->gfn != (gfn)) {} else

#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \
  hlist_for_each_entry(sp, pos, \
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
		if ((sp)->gfn != (gfn) || (sp)->role.direct || \
			(sp)->role.invalid) {} else

/* @sp->gfn should be write-protected at the call site */
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   struct list_head *invalid_list, bool clear_unsync)
{
	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return 1;
	}

	if (clear_unsync)
		kvm_unlink_unsync_page(vcpu->kvm, sp);

	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return 1;
	}

	kvm_mmu_flush_tlb(vcpu);
	return 0;
}

static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
				   struct kvm_mmu_page *sp)
{
	LIST_HEAD(invalid_list);
	int ret;

	ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
	if (ret)
		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

	return ret;
}

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
{
	return __kvm_sync_page(vcpu, sp, invalid_list, true);
}
/* @gfn should be write-protected at the call site */
static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mmu_page *s;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);
	bool flush = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
		if (!s->unsync)
			continue;

		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
		kvm_unlink_unsync_page(vcpu->kvm, s);
		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
			(vcpu->arch.mmu.sync_page(vcpu, s))) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
			continue;
		}
		flush = true;
	}

	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	if (flush)
		kvm_mmu_flush_tlb(vcpu);
}

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
	unsigned int idx[PT64_ROOT_LEVEL-1];
};

#define for_each_sp(pvec, sp, parents, i) \
		for (i = mmu_pages_next(&pvec, &parents, -1), \
			sp = pvec.page[i].sp; \
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
			i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;

		if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
			parents->idx[0] = pvec->page[n].idx;
			return n;
		}

		parents->parent[sp->role.level-2] = sp;
		parents->idx[sp->role.level-1] = pvec->page[n].idx;
	}

	return n;
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];

		sp = parents->parent[level];
		if (!sp)
			return;

		--sp->unsync_children;
		WARN_ON((int)sp->unsync_children < 0);
		__clear_bit(idx, sp->unsync_child_bitmap);
		level++;
	} while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
}

static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
			       struct mmu_page_path *parents,
			       struct kvm_mmu_pages *pvec)
{
	parents->parent[parent->role.level-1] = NULL;
	pvec->nr = 0;
}

static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		int protected = 0;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu->kvm, sp->gfn);

		if (protected)
			kvm_flush_remote_tlbs(vcpu->kvm);

		for_each_sp(pages, sp, parents, i) {
			kvm_sync_page(vcpu, sp, &invalid_list);
			mmu_pages_clear_parents(&parents);
		}
		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		cond_resched_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_pages_init(parent, &parents, &pages);
	}
}
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int direct,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned quadrant;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	bool need_sync = false;

	role = vcpu->arch.mmu.base_role;
	role.level = level;
	role.direct = direct;
	if (role.direct)
		role.cr4_pae = 0;
	role.access = access;
	if (!vcpu->arch.mmu.direct_map
	    && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
		if (!need_sync && sp->unsync)
			need_sync = true;

		if (sp->role.word != role.word)
			continue;

		if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
			break;

		mmu_page_add_parent_pte(vcpu, sp, parent_pte);
		if (sp->unsync_children) {
			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
			kvm_mmu_mark_parents_unsync(sp);
		} else if (sp->unsync)
			kvm_mmu_mark_parents_unsync(sp);

		trace_kvm_mmu_get_page(sp, false);
		return sp;
	}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
	if (!sp)
		return sp;
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link,
		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
	if (!direct) {
		if (rmap_write_protect(vcpu->kvm, gfn))
			kvm_flush_remote_tlbs(vcpu->kvm);
		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
			kvm_sync_pages(vcpu, gfn);

		account_shadowed(vcpu->kvm, gfn);
	}
	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
		vcpu->arch.mmu.prefetch_page(vcpu, sp);
	else
		nonpaging_prefetch_page(vcpu, sp);
	trace_kvm_mmu_get_page(sp, true);
	return sp;
}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
	iterator->level = vcpu->arch.mmu.shadow_root_level;

	if (iterator->level == PT64_ROOT_LEVEL &&
	    vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
	    !vcpu->arch.mmu.direct_map)
		--iterator->level;

	if (iterator->level == PT32E_ROOT_LEVEL) {
		iterator->shadow_addr
			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PT_PAGE_TABLE_LEVEL)
		return false;

	if (iterator->level == PT_PAGE_TABLE_LEVEL)
		if (is_large_pte(*iterator->sptep))
			return false;

	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
	--iterator->level;
}

static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
{
	u64 spte;

	spte = __pa(sp->spt)
		| PT_PRESENT_MASK | PT_ACCESSED_MASK
		| PT_WRITABLE_MASK | PT_USER_MASK;
	__set_spte(sptep, spte);
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
	if (is_large_pte(*sptep)) {
		drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
		kvm_flush_remote_tlbs(vcpu->kvm);
	}
}
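/*
 * Illustrative sketch, not part of mmu.c: the quadrant computation in
 * kvm_mmu_get_page(). A 32-bit guest page table decodes 10 address bits
 * per level but a 64-bit shadow page table only decodes 9, so one guest
 * table is shadowed by two (or, higher up, four) shadow pages; the
 * quadrant selects which part. Assuming PT64_PT_BITS == 9 and
 * PT32_PT_BITS == 10, with 4 KiB pages:
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	static unsigned quadrant(uint64_t gaddr, int level)
 *	{
 *		unsigned q = gaddr >> (12 + 9 * level);
 *		return q & ((1 << ((10 - 9) * level)) - 1);
 *	}
 *
 *	int main(void)
 *	{
 *		// addresses in the lower/upper half of one guest page
 *		// table land in different level-1 quadrants:
 *		printf("%u %u\n", quadrant(0x00000000, 1),
 *		       quadrant(0x00200000, 1));	// prints: 0 1
 *		return 0;
 *	}
 */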
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				 unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
		child = page_header(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

		mmu_page_remove_parent_pte(child, sptep);
		__set_spte(sptep, shadow_trap_nonpresent_pte);
		kvm_flush_remote_tlbs(vcpu->kvm);
	}
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_last_spte(ent, sp->role.level)) {
				ent &= PT64_BASE_ADDR_MASK;
				mmu_page_remove_parent_pte(page_header(ent),
							   &pt[i]);
			} else {
				if (is_large_pte(ent))
					--kvm->stat.lpages;
				drop_spte(kvm, &pt[i],
					  shadow_trap_nonpresent_pte);
			}
		}
		pt[i] = shadow_trap_nonpresent_pte;
	}
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.last_pte_updated = NULL;
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		__set_spte(parent_pte, shadow_trap_nonpresent_pte);
	}
}

static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
{
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
		return 0;

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
			mmu_pages_clear_parents(&parents);
			zapped++;
		}
		kvm_mmu_pages_init(parent, &parents, &pages);
	}

	return zapped;
}
static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list)
{
	int ret;

	trace_kvm_mmu_prepare_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
	kvm_mmu_page_unlink_children(kvm, sp);
	kvm_mmu_unlink_parents(kvm, sp);
	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp->gfn);
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		/* Count self */
		ret++;
		list_move(&sp->link, invalid_list);
	} else {
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);
	}

	sp->role.invalid = 1;
	kvm_mmu_reset_last_pte_updated(kvm);
	return ret;
}

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (list_empty(invalid_list))
		return;

	kvm_flush_remote_tlbs(kvm);

	do {
		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
		WARN_ON(!sp->role.invalid || sp->root_count);
		kvm_mmu_free_page(kvm, sp);
	} while (!list_empty(invalid_list));

}

/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
{
	LIST_HEAD(invalid_list);
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value.
	 */

	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
			!list_empty(&kvm->arch.active_mmu_pages)) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
			kvm_mmu_commit_zap_page(kvm, &invalid_list);
		}
		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
	}

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);
	int r;

	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
	r = 0;

	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);

	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
		pgprintk("%s: zap %llx %x\n",
			 __func__, gfn, sp->role.word);
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn);
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, sp->slot_bitmap);
}

static void mmu_convert_notrap(struct kvm_mmu_page *sp)
{
	int i;
	u64 *pt = sp->spt;

	if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (pt[i] == shadow_notrap_nonpresent_pte)
			__set_spte(&pt[i], shadow_trap_nonpresent_pte);
	}
}
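/*
 * Illustrative sketch, not part of mmu.c: the prepare/commit split above.
 * Pages are first unlinked and moved to a private invalid_list (prepare);
 * one remote TLB flush then makes the whole batch unreachable at once,
 * after which the pages can be freed (commit). Batching the flush is the
 * point. User-space model with a singly linked list:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	struct page { struct page *next; };
 *
 *	static void commit_zap(struct page **invalid_list)
 *	{
 *		if (!*invalid_list)
 *			return;
 *		printf("flush remote TLBs once\n");	// one flush per batch
 *		while (*invalid_list) {
 *			struct page *p = *invalid_list;
 *			*invalid_list = p->next;
 *			free(p);
 *		}
 *	}
 */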
/*
 * The function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
			 u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;
	int num_var_ranges = KVM_NR_VAR_MTRR;

	if (!mtrr_state->enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state->have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look for multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state->enabled & 2))
		return mtrr_state->def_type;

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match)
			return MTRR_TYPE_UNCACHABLE;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state->def_type;
}

u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u8 mtrr;

	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
	if (mtrr == 0xfe || mtrr == 0xff)
		mtrr = MTRR_TYPE_WRBACK;
	return mtrr;
}
EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);

static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
	mmu_convert_notrap(sp);
}

static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mmu_page *s;
	struct hlist_node *node;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
		if (s->unsync)
			continue;
		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
		__kvm_unsync_page(vcpu, s);
	}
}
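/*
 * Illustrative sketch, not part of mmu.c: the variable-MTRR match test
 * used by get_mtrr_type(). A range hits when the masked address equals
 * the masked base; a [start, end] pair whose endpoints disagree is
 * considered ill-defined (the 0xFE return above). Stand-alone check:
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	static int mtrr_hit(uint64_t addr, uint64_t base, uint64_t mask)
 *	{
 *		return (addr & mask) == (base & mask);
 *	}
 *
 *	int main(void)
 *	{
 *		// a 64 MiB range at 1 GiB: mask keeps bits above bit 25
 *		uint64_t base = 1ULL << 30, mask = ~((1ULL << 26) - 1);
 *		printf("%d %d\n", mtrr_hit(base + 0x123456, base, mask),
 *		       mtrr_hit(base + (1ULL << 27), base, mask)); // 1 0
 *		return 0;
 *	}
 */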
static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				  bool can_unsync)
{
	struct kvm_mmu_page *s;
	struct hlist_node *node;
	bool need_unsync = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
		if (!can_unsync)
			return 1;

		if (s->role.level != PT_PAGE_TABLE_LEVEL)
			return 1;

		if (!need_unsync && !s->unsync) {
			if (!oos_shadow)
				return 1;
			need_unsync = true;
		}
	}
	if (need_unsync)
		kvm_unsync_pages(vcpu, gfn);
	return 0;
}

static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned pte_access, int user_fault,
		    int write_fault, int dirty, int level,
		    gfn_t gfn, pfn_t pfn, bool speculative,
		    bool can_unsync, bool host_writable)
{
	u64 spte, entry = *sptep;
	int ret = 0;

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK;
	if (!speculative)
		spte |= shadow_accessed_mask;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;
	if (level > PT_PAGE_TABLE_LEVEL)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (host_writable)
		spte |= SPTE_HOST_WRITEABLE;
	else
		pte_access &= ~ACC_WRITE_MASK;

	spte |= (u64)pfn << PAGE_SHIFT;

	if ((pte_access & ACC_WRITE_MASK)
	    || (!vcpu->arch.mmu.direct_map && write_fault
		&& !is_write_protection(vcpu) && !user_fault)) {

		if (level > PT_PAGE_TABLE_LEVEL &&
		    has_wrprotected_page(vcpu->kvm, gfn, level)) {
			ret = 1;
			drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
			goto done;
		}

		spte |= PT_WRITABLE_MASK;

		if (!vcpu->arch.mmu.direct_map
		    && !(pte_access & ACC_WRITE_MASK))
			spte &= ~PT_USER_MASK;

		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
		if (!can_unsync && is_writable_pte(*sptep))
			goto set_pte;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			ret = 1;
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writable_pte(spte))
				spte &= ~PT_WRITABLE_MASK;
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

set_pte:
	update_spte(sptep, spte);
	/*
	 * If we overwrite a writable spte with a read-only one we
	 * should flush remote TLBs. Otherwise rmap_write_protect
	 * will find a read-only spte, even though the writable spte
	 * might be cached on a CPU's TLB.
	 */
	if (is_writable_pte(entry) && !is_writable_pte(*sptep))
		kvm_flush_remote_tlbs(vcpu->kvm);
done:
	return ret;
}
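/*
 * Illustrative sketch, not part of mmu.c: the first half of set_spte() is
 * pure bit assembly, translating abstract ACC_* rights into hardware PTE
 * bits plus the pfn. Reduced user-space model (assumed x86 layout: bit 0
 * present, bit 1 writable, bit 2 user, bit 63 NX, pfn at bit 12):
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	#define ACC_EXEC  1u
 *	#define ACC_WRITE 2u
 *	#define ACC_USER  4u
 *
 *	static uint64_t make_spte(unsigned acc, uint64_t pfn)
 *	{
 *		uint64_t spte = 1;		// present
 *		if (!(acc & ACC_EXEC))
 *			spte |= 1ULL << 63;	// NX
 *		if (acc & ACC_USER)
 *			spte |= 1ULL << 2;
 *		if (acc & ACC_WRITE)
 *			spte |= 1ULL << 1;
 *		return spte | (pfn << 12);
 *	}
 *
 *	int main(void)
 *	{
 *		printf("%#llx\n", (unsigned long long)
 *		       make_spte(ACC_WRITE | ACC_USER, 0x1234));
 *		return 0;
 *	}
 */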
2101 */ 2102 if (level > PT_PAGE_TABLE_LEVEL && 2103 !is_large_pte(*sptep)) { 2104 struct kvm_mmu_page *child; 2105 u64 pte = *sptep; 2106 2107 child = page_header(pte & PT64_BASE_ADDR_MASK); 2108 mmu_page_remove_parent_pte(child, sptep); 2109 __set_spte(sptep, shadow_trap_nonpresent_pte); 2110 kvm_flush_remote_tlbs(vcpu->kvm); 2111 } else if (pfn != spte_to_pfn(*sptep)) { 2112 pgprintk("hfn old %llx new %llx\n", 2113 spte_to_pfn(*sptep), pfn); 2114 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte); 2115 kvm_flush_remote_tlbs(vcpu->kvm); 2116 } else 2117 was_rmapped = 1; 2118 } 2119 2120 if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, 2121 dirty, level, gfn, pfn, speculative, true, 2122 host_writable)) { 2123 if (write_fault) 2124 *ptwrite = 1; 2125 kvm_mmu_flush_tlb(vcpu); 2126 } 2127 2128 pgprintk("%s: setting spte %llx\n", __func__, *sptep); 2129 pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n", 2130 is_large_pte(*sptep)? "2MB" : "4kB", 2131 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn, 2132 *sptep, sptep); 2133 if (!was_rmapped && is_large_pte(*sptep)) 2134 ++vcpu->kvm->stat.lpages; 2135 2136 page_header_update_slot(vcpu->kvm, sptep, gfn); 2137 if (!was_rmapped) { 2138 rmap_count = rmap_add(vcpu, sptep, gfn); 2139 if (rmap_count > RMAP_RECYCLE_THRESHOLD) 2140 rmap_recycle(vcpu, sptep, gfn); 2141 } 2142 kvm_release_pfn_clean(pfn); 2143 if (speculative) { 2144 vcpu->arch.last_pte_updated = sptep; 2145 vcpu->arch.last_pte_gfn = gfn; 2146 } 2147} 2148 2149static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) 2150{ 2151} 2152 2153static struct kvm_memory_slot * 2154pte_prefetch_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log) 2155{ 2156 struct kvm_memory_slot *slot; 2157 2158 slot = gfn_to_memslot(vcpu->kvm, gfn); 2159 if (!slot || slot->flags & KVM_MEMSLOT_INVALID || 2160 (no_dirty_log && slot->dirty_bitmap)) 2161 slot = NULL; 2162 2163 return slot; 2164} 2165 2166static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, 2167 bool no_dirty_log) 2168{ 2169 struct kvm_memory_slot *slot; 2170 unsigned long hva; 2171 2172 slot = pte_prefetch_gfn_to_memslot(vcpu, gfn, no_dirty_log); 2173 if (!slot) { 2174 get_page(bad_page); 2175 return page_to_pfn(bad_page); 2176 } 2177 2178 hva = gfn_to_hva_memslot(slot, gfn); 2179 2180 return hva_to_pfn_atomic(vcpu->kvm, hva); 2181} 2182 2183static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, 2184 struct kvm_mmu_page *sp, 2185 u64 *start, u64 *end) 2186{ 2187 struct page *pages[PTE_PREFETCH_NUM]; 2188 unsigned access = sp->role.access; 2189 int i, ret; 2190 gfn_t gfn; 2191 2192 gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); 2193 if (!pte_prefetch_gfn_to_memslot(vcpu, gfn, access & ACC_WRITE_MASK)) 2194 return -1; 2195 2196 ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start); 2197 if (ret <= 0) 2198 return -1; 2199 2200 for (i = 0; i < ret; i++, gfn++, start++) 2201 mmu_set_spte(vcpu, start, ACC_ALL, 2202 access, 0, 0, 1, NULL, 2203 sp->role.level, gfn, 2204 page_to_pfn(pages[i]), true, true); 2205 2206 return 0; 2207} 2208 2209static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, 2210 struct kvm_mmu_page *sp, u64 *sptep) 2211{ 2212 u64 *spte, *start = NULL; 2213 int i; 2214 2215 WARN_ON(!sp->role.direct); 2216 2217 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); 2218 spte = sp->spt + i; 2219 2220 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { 2221 if (*spte != shadow_trap_nonpresent_pte || spte == sptep) { 2222 if (!start) 2223 continue; 2224 if 
(direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) 2225 break; 2226 start = NULL; 2227 } else if (!start) 2228 start = spte; 2229 } 2230} 2231 2232static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) 2233{ 2234 struct kvm_mmu_page *sp; 2235 2236 /* 2237 * Since there is no accessed bit on EPT, there is no way to 2238 * distinguish between actually accessed translations 2239 * and prefetched ones, so disable pte prefetch if EPT is 2240 * enabled. 2241 */ 2242 if (!shadow_accessed_mask) 2243 return; 2244 2245 sp = page_header(__pa(sptep)); 2246 if (sp->role.level > PT_PAGE_TABLE_LEVEL) 2247 return; 2248 2249 __direct_pte_prefetch(vcpu, sp, sptep); 2250} 2251 2252static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, 2253 int map_writable, int level, gfn_t gfn, pfn_t pfn, 2254 bool prefault) 2255{ 2256 struct kvm_shadow_walk_iterator iterator; 2257 struct kvm_mmu_page *sp; 2258 int pt_write = 0; 2259 gfn_t pseudo_gfn; 2260 2261 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { 2262 if (iterator.level == level) { 2263 unsigned pte_access = ACC_ALL; 2264 2265 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access, 2266 0, write, 1, &pt_write, 2267 level, gfn, pfn, prefault, map_writable); 2268 direct_pte_prefetch(vcpu, iterator.sptep); 2269 ++vcpu->stat.pf_fixed; 2270 break; 2271 } 2272 2273 if (*iterator.sptep == shadow_trap_nonpresent_pte) { 2274 u64 base_addr = iterator.addr; 2275 2276 base_addr &= PT64_LVL_ADDR_MASK(iterator.level); 2277 pseudo_gfn = base_addr >> PAGE_SHIFT; 2278 sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, 2279 iterator.level - 1, 2280 1, ACC_ALL, iterator.sptep); 2281 if (!sp) { 2282 pgprintk("nonpaging_map: ENOMEM\n"); 2283 kvm_release_pfn_clean(pfn); 2284 return -ENOMEM; 2285 } 2286 2287 __set_spte(iterator.sptep, 2288 __pa(sp->spt) 2289 | PT_PRESENT_MASK | PT_WRITABLE_MASK 2290 | shadow_user_mask | shadow_x_mask 2291 | shadow_accessed_mask); 2292 } 2293 } 2294 return pt_write; 2295} 2296 2297static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) 2298{ 2299 siginfo_t info; 2300 2301 info.si_signo = SIGBUS; 2302 info.si_errno = 0; 2303 info.si_code = BUS_MCEERR_AR; 2304 info.si_addr = (void __user *)address; 2305 info.si_addr_lsb = PAGE_SHIFT; 2306 2307 send_sig_info(SIGBUS, &info, tsk); 2308} 2309 2310static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn) 2311{ 2312 kvm_release_pfn_clean(pfn); 2313 if (is_hwpoison_pfn(pfn)) { 2314 kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current); 2315 return 0; 2316 } else if (is_fault_pfn(pfn)) 2317 return -EFAULT; 2318 2319 return 1; 2320} 2321 2322static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, 2323 gfn_t *gfnp, pfn_t *pfnp, int *levelp) 2324{ 2325 pfn_t pfn = *pfnp; 2326 gfn_t gfn = *gfnp; 2327 int level = *levelp; 2328 2329 /* 2330 * Check if it's a transparent hugepage. If this would be a 2331 * hugetlbfs page, level wouldn't be set to 2332 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done 2333 * here.
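 * For a THP-backed pfn the mapping is instead widened below:
 * level is raised to PT_DIRECTORY_LEVEL and gfn/pfn are aligned
 * down to a 2MB boundary. Worked example with mask 0x1ff:
 * gfn 0x1234f / pfn 0x8054f become gfn 0x12200 / pfn 0x80400,
 * and the refcount is moved from the tail page to the head page.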
2334 */ 2335 if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) && 2336 level == PT_PAGE_TABLE_LEVEL && 2337 PageTransCompound(pfn_to_page(pfn)) && 2338 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { 2339 unsigned long mask; 2340 /* 2341 * mmu_notifier_retry was successful and we hold the 2342 * mmu_lock here, so the pmd can't become splitting 2343 * from under us, and in turn 2344 * __split_huge_page_refcount() can't run from under 2345 * us and we can safely transfer the refcount from 2346 * PG_tail to PG_head as we switch the pfn from tail to 2347 * head. 2348 */ 2349 *levelp = level = PT_DIRECTORY_LEVEL; 2350 mask = KVM_PAGES_PER_HPAGE(level) - 1; 2351 VM_BUG_ON((gfn & mask) != (pfn & mask)); 2352 if (pfn & mask) { 2353 gfn &= ~mask; 2354 *gfnp = gfn; 2355 kvm_release_pfn_clean(pfn); 2356 pfn &= ~mask; 2357 if (!get_page_unless_zero(pfn_to_page(pfn))) 2358 BUG(); 2359 *pfnp = pfn; 2360 } 2361 } 2362} 2363 2364static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, 2365 gva_t gva, pfn_t *pfn, bool write, bool *writable); 2366 2367static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn, 2368 bool prefault) 2369{ 2370 int r; 2371 int level; 2372 int force_pt_level; 2373 pfn_t pfn; 2374 unsigned long mmu_seq; 2375 bool map_writable; 2376 2377 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); 2378 if (likely(!force_pt_level)) { 2379 level = mapping_level(vcpu, gfn); 2380 /* 2381 * This path builds a PAE pagetable - so we can map 2382 * 2mb pages at maximum. Therefore check if the level 2383 * is larger than that. 2384 */ 2385 if (level > PT_DIRECTORY_LEVEL) 2386 level = PT_DIRECTORY_LEVEL; 2387 2388 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); 2389 } else 2390 level = PT_PAGE_TABLE_LEVEL; 2391 2392 mmu_seq = vcpu->kvm->mmu_notifier_seq; 2393 smp_rmb(); 2394 2395 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) 2396 return 0; 2397 2398 /* mmio */ 2399 if (is_error_pfn(pfn)) 2400 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn); 2401 2402 spin_lock(&vcpu->kvm->mmu_lock); 2403 if (mmu_notifier_retry(vcpu, mmu_seq)) 2404 goto out_unlock; 2405 kvm_mmu_free_some_pages(vcpu); 2406 if (likely(!force_pt_level)) 2407 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); 2408 r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, 2409 prefault); 2410 spin_unlock(&vcpu->kvm->mmu_lock); 2411 2412 2413 return r; 2414 2415out_unlock: 2416 spin_unlock(&vcpu->kvm->mmu_lock); 2417 kvm_release_pfn_clean(pfn); 2418 return 0; 2419} 2420 2421 2422static void mmu_free_roots(struct kvm_vcpu *vcpu) 2423{ 2424 int i; 2425 struct kvm_mmu_page *sp; 2426 LIST_HEAD(invalid_list); 2427 2428 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 2429 return; 2430 spin_lock(&vcpu->kvm->mmu_lock); 2431 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && 2432 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || 2433 vcpu->arch.mmu.direct_map)) { 2434 hpa_t root = vcpu->arch.mmu.root_hpa; 2435 2436 sp = page_header(root); 2437 --sp->root_count; 2438 if (!sp->root_count && sp->role.invalid) { 2439 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); 2440 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 2441 } 2442 vcpu->arch.mmu.root_hpa = INVALID_PAGE; 2443 spin_unlock(&vcpu->kvm->mmu_lock); 2444 return; 2445 } 2446 for (i = 0; i < 4; ++i) { 2447 hpa_t root = vcpu->arch.mmu.pae_root[i]; 2448 2449 if (root) { 2450 root &= PT64_BASE_ADDR_MASK; 2451 sp = page_header(root); 2452 --sp->root_count; 2453 if (!sp->root_count && sp->role.invalid) 2454
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, 2455 &invalid_list); 2456 } 2457 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; 2458 } 2459 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 2460 spin_unlock(&vcpu->kvm->mmu_lock); 2461 vcpu->arch.mmu.root_hpa = INVALID_PAGE; 2462} 2463 2464static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) 2465{ 2466 int ret = 0; 2467 2468 if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { 2469 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 2470 ret = 1; 2471 } 2472 2473 return ret; 2474} 2475 2476static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) 2477{ 2478 struct kvm_mmu_page *sp; 2479 unsigned i; 2480 2481 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { 2482 spin_lock(&vcpu->kvm->mmu_lock); 2483 kvm_mmu_free_some_pages(vcpu); 2484 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 2485 1, ACC_ALL, NULL); 2486 ++sp->root_count; 2487 spin_unlock(&vcpu->kvm->mmu_lock); 2488 vcpu->arch.mmu.root_hpa = __pa(sp->spt); 2489 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { 2490 for (i = 0; i < 4; ++i) { 2491 hpa_t root = vcpu->arch.mmu.pae_root[i]; 2492 2493 ASSERT(!VALID_PAGE(root)); 2494 spin_lock(&vcpu->kvm->mmu_lock); 2495 kvm_mmu_free_some_pages(vcpu); 2496 sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), 2497 i << 30, 2498 PT32_ROOT_LEVEL, 1, ACC_ALL, 2499 NULL); 2500 root = __pa(sp->spt); 2501 ++sp->root_count; 2502 spin_unlock(&vcpu->kvm->mmu_lock); 2503 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; 2504 } 2505 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); 2506 } else 2507 BUG(); 2508 2509 return 0; 2510} 2511 2512static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) 2513{ 2514 struct kvm_mmu_page *sp; 2515 u64 pdptr, pm_mask; 2516 gfn_t root_gfn; 2517 int i; 2518 2519 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; 2520 2521 if (mmu_check_root(vcpu, root_gfn)) 2522 return 1; 2523 2524 /* 2525 * Do we shadow a long mode page table? If so we need to 2526 * write-protect the guest's page table root. 2527 */ 2528 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { 2529 hpa_t root = vcpu->arch.mmu.root_hpa; 2530 2531 ASSERT(!VALID_PAGE(root)); 2532 2533 spin_lock(&vcpu->kvm->mmu_lock); 2534 kvm_mmu_free_some_pages(vcpu); 2535 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL, 2536 0, ACC_ALL, NULL); 2537 root = __pa(sp->spt); 2538 ++sp->root_count; 2539 spin_unlock(&vcpu->kvm->mmu_lock); 2540 vcpu->arch.mmu.root_hpa = root; 2541 return 0; 2542 } 2543 2544 /* 2545 * We shadow a 32 bit page table. This may be a legacy 2-level 2546 * or a PAE 3-level page table. In either case we need to be aware that 2547 * the shadow page table may be a PAE or a long mode page table.
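 * That is also why pm_mask below ORs ACCESSED/WRITABLE/USER into
 * the four PAE roots whenever the shadow table is long mode: the
 * roots then act as PDPTEs inside a PML4 hierarchy, where the
 * upper-level entries take part in permission checks, unlike
 * real PAE PDPTEs.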
2548 */ 2549 pm_mask = PT_PRESENT_MASK; 2550 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) 2551 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; 2552 2553 for (i = 0; i < 4; ++i) { 2554 hpa_t root = vcpu->arch.mmu.pae_root[i]; 2555 2556 ASSERT(!VALID_PAGE(root)); 2557 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { 2558 pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i); 2559 if (!is_present_gpte(pdptr)) { 2560 vcpu->arch.mmu.pae_root[i] = 0; 2561 continue; 2562 } 2563 root_gfn = pdptr >> PAGE_SHIFT; 2564 if (mmu_check_root(vcpu, root_gfn)) 2565 return 1; 2566 } 2567 spin_lock(&vcpu->kvm->mmu_lock); 2568 kvm_mmu_free_some_pages(vcpu); 2569 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, 2570 PT32_ROOT_LEVEL, 0, 2571 ACC_ALL, NULL); 2572 root = __pa(sp->spt); 2573 ++sp->root_count; 2574 spin_unlock(&vcpu->kvm->mmu_lock); 2575 2576 vcpu->arch.mmu.pae_root[i] = root | pm_mask; 2577 } 2578 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); 2579 2580 /* 2581 * If we shadow a 32 bit page table with a long mode page 2582 * table we enter this path. 2583 */ 2584 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { 2585 if (vcpu->arch.mmu.lm_root == NULL) { 2586 /* 2587 * The additional page necessary for this is only 2588 * allocated on demand. 2589 */ 2590 2591 u64 *lm_root; 2592 2593 lm_root = (void*)get_zeroed_page(GFP_KERNEL); 2594 if (lm_root == NULL) 2595 return 1; 2596 2597 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; 2598 2599 vcpu->arch.mmu.lm_root = lm_root; 2600 } 2601 2602 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); 2603 } 2604 2605 return 0; 2606} 2607 2608static int mmu_alloc_roots(struct kvm_vcpu *vcpu) 2609{ 2610 if (vcpu->arch.mmu.direct_map) 2611 return mmu_alloc_direct_roots(vcpu); 2612 else 2613 return mmu_alloc_shadow_roots(vcpu); 2614} 2615 2616static void mmu_sync_roots(struct kvm_vcpu *vcpu) 2617{ 2618 int i; 2619 struct kvm_mmu_page *sp; 2620 2621 if (vcpu->arch.mmu.direct_map) 2622 return; 2623 2624 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 2625 return; 2626 2627 trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); 2628 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { 2629 hpa_t root = vcpu->arch.mmu.root_hpa; 2630 sp = page_header(root); 2631 mmu_sync_children(vcpu, sp); 2632 trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); 2633 return; 2634 } 2635 for (i = 0; i < 4; ++i) { 2636 hpa_t root = vcpu->arch.mmu.pae_root[i]; 2637 2638 if (root && VALID_PAGE(root)) { 2639 root &= PT64_BASE_ADDR_MASK; 2640 sp = page_header(root); 2641 mmu_sync_children(vcpu, sp); 2642 } 2643 } 2644 trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); 2645} 2646 2647void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) 2648{ 2649 spin_lock(&vcpu->kvm->mmu_lock); 2650 mmu_sync_roots(vcpu); 2651 spin_unlock(&vcpu->kvm->mmu_lock); 2652} 2653 2654static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, 2655 u32 access, struct x86_exception *exception) 2656{ 2657 if (exception) 2658 exception->error_code = 0; 2659 return vaddr; 2660} 2661 2662static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, 2663 u32 access, 2664 struct x86_exception *exception) 2665{ 2666 if (exception) 2667 exception->error_code = 0; 2668 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access); 2669} 2670 2671static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, 2672 u32 error_code, bool prefault) 2673{ 2674 gfn_t gfn; 2675 int r; 2676 2677 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); 2678 r = 
mmu_topup_memory_caches(vcpu); 2679 if (r) 2680 return r; 2681 2682 ASSERT(vcpu); 2683 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); 2684 2685 gfn = gva >> PAGE_SHIFT; 2686 2687 return nonpaging_map(vcpu, gva & PAGE_MASK, 2688 error_code & PFERR_WRITE_MASK, gfn, prefault); 2689} 2690 2691static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) 2692{ 2693 struct kvm_arch_async_pf arch; 2694 2695 arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; 2696 arch.gfn = gfn; 2697 arch.direct_map = vcpu->arch.mmu.direct_map; 2698 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); 2699 2700 return kvm_setup_async_pf(vcpu, gva, gfn, &arch); 2701} 2702 2703static bool can_do_async_pf(struct kvm_vcpu *vcpu) 2704{ 2705 if (unlikely(!irqchip_in_kernel(vcpu->kvm) || 2706 kvm_event_needs_reinjection(vcpu))) 2707 return false; 2708 2709 return kvm_x86_ops->interrupt_allowed(vcpu); 2710} 2711 2712static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, 2713 gva_t gva, pfn_t *pfn, bool write, bool *writable) 2714{ 2715 bool async; 2716 2717 *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable); 2718 2719 if (!async) 2720 return false; /* *pfn has correct page already */ 2721 2722 put_page(pfn_to_page(*pfn)); 2723 2724 if (!prefault && can_do_async_pf(vcpu)) { 2725 trace_kvm_try_async_get_page(gva, gfn); 2726 if (kvm_find_async_pf_gfn(vcpu, gfn)) { 2727 trace_kvm_async_pf_doublefault(gva, gfn); 2728 kvm_make_request(KVM_REQ_APF_HALT, vcpu); 2729 return true; 2730 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) 2731 return true; 2732 } 2733 2734 *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable); 2735 2736 return false; 2737} 2738 2739static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, 2740 bool prefault) 2741{ 2742 pfn_t pfn; 2743 int r; 2744 int level; 2745 int force_pt_level; 2746 gfn_t gfn = gpa >> PAGE_SHIFT; 2747 unsigned long mmu_seq; 2748 int write = error_code & PFERR_WRITE_MASK; 2749 bool map_writable; 2750 2751 ASSERT(vcpu); 2752 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); 2753 2754 r = mmu_topup_memory_caches(vcpu); 2755 if (r) 2756 return r; 2757 2758 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); 2759 if (likely(!force_pt_level)) { 2760 level = mapping_level(vcpu, gfn); 2761 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); 2762 } else 2763 level = PT_PAGE_TABLE_LEVEL; 2764 2765 mmu_seq = vcpu->kvm->mmu_notifier_seq; 2766 smp_rmb(); 2767 2768 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) 2769 return 0; 2770 2771 /* mmio */ 2772 if (is_error_pfn(pfn)) 2773 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn); 2774 spin_lock(&vcpu->kvm->mmu_lock); 2775 if (mmu_notifier_retry(vcpu, mmu_seq)) 2776 goto out_unlock; 2777 kvm_mmu_free_some_pages(vcpu); 2778 if (likely(!force_pt_level)) 2779 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); 2780 r = __direct_map(vcpu, gpa, write, map_writable, 2781 level, gfn, pfn, prefault); 2782 spin_unlock(&vcpu->kvm->mmu_lock); 2783 2784 return r; 2785 2786out_unlock: 2787 spin_unlock(&vcpu->kvm->mmu_lock); 2788 kvm_release_pfn_clean(pfn); 2789 return 0; 2790} 2791 2792static void nonpaging_free(struct kvm_vcpu *vcpu) 2793{ 2794 mmu_free_roots(vcpu); 2795} 2796 2797static int nonpaging_init_context(struct kvm_vcpu *vcpu, 2798 struct kvm_mmu *context) 2799{ 2800 context->new_cr3 = nonpaging_new_cr3; 2801 context->page_fault = nonpaging_page_fault; 2802 context->gva_to_gpa = nonpaging_gva_to_gpa; 2803 context->free = nonpaging_free; 2804 context->prefetch_page = 
nonpaging_prefetch_page; 2805 context->sync_page = nonpaging_sync_page; 2806 context->invlpg = nonpaging_invlpg; 2807 context->root_level = 0; 2808 context->shadow_root_level = PT32E_ROOT_LEVEL; 2809 context->root_hpa = INVALID_PAGE; 2810 context->direct_map = true; 2811 context->nx = false; 2812 return 0; 2813} 2814 2815void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) 2816{ 2817 ++vcpu->stat.tlb_flush; 2818 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 2819} 2820 2821static void paging_new_cr3(struct kvm_vcpu *vcpu) 2822{ 2823 pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu)); 2824 mmu_free_roots(vcpu); 2825} 2826 2827static unsigned long get_cr3(struct kvm_vcpu *vcpu) 2828{ 2829 return kvm_read_cr3(vcpu); 2830} 2831 2832static void inject_page_fault(struct kvm_vcpu *vcpu, 2833 struct x86_exception *fault) 2834{ 2835 vcpu->arch.mmu.inject_page_fault(vcpu, fault); 2836} 2837 2838static void paging_free(struct kvm_vcpu *vcpu) 2839{ 2840 nonpaging_free(vcpu); 2841} 2842 2843static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) 2844{ 2845 int bit7; 2846 2847 bit7 = (gpte >> 7) & 1; 2848 return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0; 2849} 2850 2851#define PTTYPE 64 2852#include "paging_tmpl.h" 2853#undef PTTYPE 2854 2855#define PTTYPE 32 2856#include "paging_tmpl.h" 2857#undef PTTYPE 2858 2859static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, 2860 struct kvm_mmu *context, 2861 int level) 2862{ 2863 int maxphyaddr = cpuid_maxphyaddr(vcpu); 2864 u64 exb_bit_rsvd = 0; 2865 2866 if (!context->nx) 2867 exb_bit_rsvd = rsvd_bits(63, 63); 2868 switch (level) { 2869 case PT32_ROOT_LEVEL: 2870 /* no rsvd bits for 2 level 4K page table entries */ 2871 context->rsvd_bits_mask[0][1] = 0; 2872 context->rsvd_bits_mask[0][0] = 0; 2873 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; 2874 2875 if (!is_pse(vcpu)) { 2876 context->rsvd_bits_mask[1][1] = 0; 2877 break; 2878 } 2879 2880 if (is_cpuid_PSE36()) 2881 /* 36bits PSE 4MB page */ 2882 context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); 2883 else 2884 /* 32 bits PSE 4MB page */ 2885 context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); 2886 break; 2887 case PT32E_ROOT_LEVEL: 2888 context->rsvd_bits_mask[0][2] = 2889 rsvd_bits(maxphyaddr, 63) | 2890 rsvd_bits(7, 8) | rsvd_bits(1, 2); /* PDPTE */ 2891 context->rsvd_bits_mask[0][1] = exb_bit_rsvd | 2892 rsvd_bits(maxphyaddr, 62); /* PDE */ 2893 context->rsvd_bits_mask[0][0] = exb_bit_rsvd | 2894 rsvd_bits(maxphyaddr, 62); /* PTE */ 2895 context->rsvd_bits_mask[1][1] = exb_bit_rsvd | 2896 rsvd_bits(maxphyaddr, 62) | 2897 rsvd_bits(13, 20); /* large page */ 2898 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; 2899 break; 2900 case PT64_ROOT_LEVEL: 2901 context->rsvd_bits_mask[0][3] = exb_bit_rsvd | 2902 rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8); 2903 context->rsvd_bits_mask[0][2] = exb_bit_rsvd | 2904 rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8); 2905 context->rsvd_bits_mask[0][1] = exb_bit_rsvd | 2906 rsvd_bits(maxphyaddr, 51); 2907 context->rsvd_bits_mask[0][0] = exb_bit_rsvd | 2908 rsvd_bits(maxphyaddr, 51); 2909 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; 2910 context->rsvd_bits_mask[1][2] = exb_bit_rsvd | 2911 rsvd_bits(maxphyaddr, 51) | 2912 rsvd_bits(13, 29); 2913 context->rsvd_bits_mask[1][1] = exb_bit_rsvd | 2914 rsvd_bits(maxphyaddr, 51) | 2915 rsvd_bits(13, 20); /* large page */ 2916 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; 2917 break; 2918 } 2919} 2920 2921static int 
paging64_init_context_common(struct kvm_vcpu *vcpu, 2922 struct kvm_mmu *context, 2923 int level) 2924{ 2925 context->nx = is_nx(vcpu); 2926 2927 reset_rsvds_bits_mask(vcpu, context, level); 2928 2929 ASSERT(is_pae(vcpu)); 2930 context->new_cr3 = paging_new_cr3; 2931 context->page_fault = paging64_page_fault; 2932 context->gva_to_gpa = paging64_gva_to_gpa; 2933 context->prefetch_page = paging64_prefetch_page; 2934 context->sync_page = paging64_sync_page; 2935 context->invlpg = paging64_invlpg; 2936 context->free = paging_free; 2937 context->root_level = level; 2938 context->shadow_root_level = level; 2939 context->root_hpa = INVALID_PAGE; 2940 context->direct_map = false; 2941 return 0; 2942} 2943 2944static int paging64_init_context(struct kvm_vcpu *vcpu, 2945 struct kvm_mmu *context) 2946{ 2947 return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL); 2948} 2949 2950static int paging32_init_context(struct kvm_vcpu *vcpu, 2951 struct kvm_mmu *context) 2952{ 2953 context->nx = false; 2954 2955 reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL); 2956 2957 context->new_cr3 = paging_new_cr3; 2958 context->page_fault = paging32_page_fault; 2959 context->gva_to_gpa = paging32_gva_to_gpa; 2960 context->free = paging_free; 2961 context->prefetch_page = paging32_prefetch_page; 2962 context->sync_page = paging32_sync_page; 2963 context->invlpg = paging32_invlpg; 2964 context->root_level = PT32_ROOT_LEVEL; 2965 context->shadow_root_level = PT32E_ROOT_LEVEL; 2966 context->root_hpa = INVALID_PAGE; 2967 context->direct_map = false; 2968 return 0; 2969} 2970 2971static int paging32E_init_context(struct kvm_vcpu *vcpu, 2972 struct kvm_mmu *context) 2973{ 2974 return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); 2975} 2976 2977static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) 2978{ 2979 struct kvm_mmu *context = vcpu->arch.walk_mmu; 2980 2981 context->base_role.word = 0; 2982 context->new_cr3 = nonpaging_new_cr3; 2983 context->page_fault = tdp_page_fault; 2984 context->free = nonpaging_free; 2985 context->prefetch_page = nonpaging_prefetch_page; 2986 context->sync_page = nonpaging_sync_page; 2987 context->invlpg = nonpaging_invlpg; 2988 context->shadow_root_level = kvm_x86_ops->get_tdp_level(); 2989 context->root_hpa = INVALID_PAGE; 2990 context->direct_map = true; 2991 context->set_cr3 = kvm_x86_ops->set_tdp_cr3; 2992 context->get_cr3 = get_cr3; 2993 context->inject_page_fault = kvm_inject_page_fault; 2994 context->nx = is_nx(vcpu); 2995 2996 if (!is_paging(vcpu)) { 2997 context->nx = false; 2998 context->gva_to_gpa = nonpaging_gva_to_gpa; 2999 context->root_level = 0; 3000 } else if (is_long_mode(vcpu)) { 3001 context->nx = is_nx(vcpu); 3002 reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL); 3003 context->gva_to_gpa = paging64_gva_to_gpa; 3004 context->root_level = PT64_ROOT_LEVEL; 3005 } else if (is_pae(vcpu)) { 3006 context->nx = is_nx(vcpu); 3007 reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL); 3008 context->gva_to_gpa = paging64_gva_to_gpa; 3009 context->root_level = PT32E_ROOT_LEVEL; 3010 } else { 3011 context->nx = false; 3012 reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL); 3013 context->gva_to_gpa = paging32_gva_to_gpa; 3014 context->root_level = PT32_ROOT_LEVEL; 3015 } 3016 3017 return 0; 3018} 3019 3020int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context) 3021{ 3022 int r; 3023 ASSERT(vcpu); 3024 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); 3025 3026 if (!is_paging(vcpu)) 3027 r = nonpaging_init_context(vcpu, context); 3028 
else if (is_long_mode(vcpu)) 3029 r = paging64_init_context(vcpu, context); 3030 else if (is_pae(vcpu)) 3031 r = paging32E_init_context(vcpu, context); 3032 else 3033 r = paging32_init_context(vcpu, context); 3034 3035 vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu); 3036 vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu); 3037 3038 return r; 3039} 3040EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); 3041 3042static int init_kvm_softmmu(struct kvm_vcpu *vcpu) 3043{ 3044 int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu); 3045 3046 vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3; 3047 vcpu->arch.walk_mmu->get_cr3 = get_cr3; 3048 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; 3049 3050 return r; 3051} 3052 3053static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu) 3054{ 3055 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; 3056 3057 g_context->get_cr3 = get_cr3; 3058 g_context->inject_page_fault = kvm_inject_page_fault; 3059 3060 /* 3061 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The 3062 * translation of l2_gpa to l1_gpa addresses is done using the 3063 * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa 3064 * functions between mmu and nested_mmu are swapped. 3065 */ 3066 if (!is_paging(vcpu)) { 3067 g_context->nx = false; 3068 g_context->root_level = 0; 3069 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; 3070 } else if (is_long_mode(vcpu)) { 3071 g_context->nx = is_nx(vcpu); 3072 reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL); 3073 g_context->root_level = PT64_ROOT_LEVEL; 3074 g_context->gva_to_gpa = paging64_gva_to_gpa_nested; 3075 } else if (is_pae(vcpu)) { 3076 g_context->nx = is_nx(vcpu); 3077 reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL); 3078 g_context->root_level = PT32E_ROOT_LEVEL; 3079 g_context->gva_to_gpa = paging64_gva_to_gpa_nested; 3080 } else { 3081 g_context->nx = false; 3082 reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL); 3083 g_context->root_level = PT32_ROOT_LEVEL; 3084 g_context->gva_to_gpa = paging32_gva_to_gpa_nested; 3085 } 3086 3087 return 0; 3088} 3089 3090static int init_kvm_mmu(struct kvm_vcpu *vcpu) 3091{ 3092 vcpu->arch.update_pte.pfn = bad_pfn; 3093 3094 if (mmu_is_nested(vcpu)) 3095 return init_kvm_nested_mmu(vcpu); 3096 else if (tdp_enabled) 3097 return init_kvm_tdp_mmu(vcpu); 3098 else 3099 return init_kvm_softmmu(vcpu); 3100} 3101 3102static void destroy_kvm_mmu(struct kvm_vcpu *vcpu) 3103{ 3104 ASSERT(vcpu); 3105 if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) 3106 /* mmu.free() should set root_hpa = INVALID_PAGE */ 3107 vcpu->arch.mmu.free(vcpu); 3108} 3109 3110int kvm_mmu_reset_context(struct kvm_vcpu *vcpu) 3111{ 3112 destroy_kvm_mmu(vcpu); 3113 return init_kvm_mmu(vcpu); 3114} 3115EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); 3116 3117int kvm_mmu_load(struct kvm_vcpu *vcpu) 3118{ 3119 int r; 3120 3121 r = mmu_topup_memory_caches(vcpu); 3122 if (r) 3123 goto out; 3124 r = mmu_alloc_roots(vcpu); 3125 spin_lock(&vcpu->kvm->mmu_lock); 3126 mmu_sync_roots(vcpu); 3127 spin_unlock(&vcpu->kvm->mmu_lock); 3128 if (r) 3129 goto out; 3130 /* set_cr3() should ensure TLB has been flushed */ 3131 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); 3132out: 3133 return r; 3134} 3135EXPORT_SYMBOL_GPL(kvm_mmu_load); 3136 3137void kvm_mmu_unload(struct kvm_vcpu *vcpu) 3138{ 3139 mmu_free_roots(vcpu); 3140} 3141EXPORT_SYMBOL_GPL(kvm_mmu_unload); 3142 3143static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu, 3144 struct kvm_mmu_page *sp, 3145 u64 *spte) 3146{ 3147 u64 pte; 3148 
struct kvm_mmu_page *child; 3149 3150 pte = *spte; 3151 if (is_shadow_present_pte(pte)) { 3152 if (is_last_spte(pte, sp->role.level)) 3153 drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte); 3154 else { 3155 child = page_header(pte & PT64_BASE_ADDR_MASK); 3156 mmu_page_remove_parent_pte(child, spte); 3157 } 3158 } 3159 __set_spte(spte, shadow_trap_nonpresent_pte); 3160 if (is_large_pte(pte)) 3161 --vcpu->kvm->stat.lpages; 3162} 3163 3164static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, 3165 struct kvm_mmu_page *sp, 3166 u64 *spte, 3167 const void *new) 3168{ 3169 if (sp->role.level != PT_PAGE_TABLE_LEVEL) { 3170 ++vcpu->kvm->stat.mmu_pde_zapped; 3171 return; 3172 } 3173 3174 ++vcpu->kvm->stat.mmu_pte_updated; 3175 if (!sp->role.cr4_pae) 3176 paging32_update_pte(vcpu, sp, spte, new); 3177 else 3178 paging64_update_pte(vcpu, sp, spte, new); 3179} 3180 3181static bool need_remote_flush(u64 old, u64 new) 3182{ 3183 if (!is_shadow_present_pte(old)) 3184 return false; 3185 if (!is_shadow_present_pte(new)) 3186 return true; 3187 if ((old ^ new) & PT64_BASE_ADDR_MASK) 3188 return true; 3189 old ^= PT64_NX_MASK; 3190 new ^= PT64_NX_MASK; 3191 return (old & ~new & PT64_PERM_MASK) != 0; 3192} 3193 3194static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page, 3195 bool remote_flush, bool local_flush) 3196{ 3197 if (zap_page) 3198 return; 3199 3200 if (remote_flush) 3201 kvm_flush_remote_tlbs(vcpu->kvm); 3202 else if (local_flush) 3203 kvm_mmu_flush_tlb(vcpu); 3204} 3205 3206static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu) 3207{ 3208 u64 *spte = vcpu->arch.last_pte_updated; 3209 3210 return !!(spte && (*spte & shadow_accessed_mask)); 3211} 3212 3213static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, 3214 u64 gpte) 3215{ 3216 gfn_t gfn; 3217 pfn_t pfn; 3218 3219 if (!is_present_gpte(gpte)) 3220 return; 3221 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; 3222 3223 vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq; 3224 smp_rmb(); 3225 pfn = gfn_to_pfn(vcpu->kvm, gfn); 3226 3227 if (is_error_pfn(pfn)) { 3228 kvm_release_pfn_clean(pfn); 3229 return; 3230 } 3231 vcpu->arch.update_pte.gfn = gfn; 3232 vcpu->arch.update_pte.pfn = pfn; 3233} 3234 3235static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn) 3236{ 3237 u64 *spte = vcpu->arch.last_pte_updated; 3238 3239 if (spte 3240 && vcpu->arch.last_pte_gfn == gfn 3241 && shadow_accessed_mask 3242 && !(*spte & shadow_accessed_mask) 3243 && is_shadow_present_pte(*spte)) 3244 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte); 3245} 3246 3247void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, 3248 const u8 *new, int bytes, 3249 bool guest_initiated) 3250{ 3251 gfn_t gfn = gpa >> PAGE_SHIFT; 3252 union kvm_mmu_page_role mask = { .word = 0 }; 3253 struct kvm_mmu_page *sp; 3254 struct hlist_node *node; 3255 LIST_HEAD(invalid_list); 3256 u64 entry, gentry; 3257 u64 *spte; 3258 unsigned offset = offset_in_page(gpa); 3259 unsigned pte_size; 3260 unsigned page_offset; 3261 unsigned misaligned; 3262 unsigned quadrant; 3263 int level; 3264 int flooded = 0; 3265 int npte; 3266 int r; 3267 int invlpg_counter; 3268 bool remote_flush, local_flush, zap_page; 3269 3270 zap_page = remote_flush = local_flush = false; 3271 3272 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); 3273 3274 invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter); 3275 3276 /* 3277 * Assume that the pte write is on a page table of the same type 3278 * as the current vcpu's paging mode.
This is nearly always true 3279 * (might be false while changing modes). Note it is verified later 3280 * by update_pte(). 3281 */ 3282 if ((is_pae(vcpu) && bytes == 4) || !new) { 3283 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ 3284 if (is_pae(vcpu)) { 3285 gpa &= ~(gpa_t)7; 3286 bytes = 8; 3287 } 3288 r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8)); 3289 if (r) 3290 gentry = 0; 3291 new = (const u8 *)&gentry; 3292 } 3293 3294 switch (bytes) { 3295 case 4: 3296 gentry = *(const u32 *)new; 3297 break; 3298 case 8: 3299 gentry = *(const u64 *)new; 3300 break; 3301 default: 3302 gentry = 0; 3303 break; 3304 } 3305 3306 mmu_guess_page_from_pte_write(vcpu, gpa, gentry); 3307 spin_lock(&vcpu->kvm->mmu_lock); 3308 if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter) 3309 gentry = 0; 3310 kvm_mmu_access_page(vcpu, gfn); 3311 kvm_mmu_free_some_pages(vcpu); 3312 ++vcpu->kvm->stat.mmu_pte_write; 3313 trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); 3314 if (guest_initiated) { 3315 if (gfn == vcpu->arch.last_pt_write_gfn 3316 && !last_updated_pte_accessed(vcpu)) { 3317 ++vcpu->arch.last_pt_write_count; 3318 if (vcpu->arch.last_pt_write_count >= 3) 3319 flooded = 1; 3320 } else { 3321 vcpu->arch.last_pt_write_gfn = gfn; 3322 vcpu->arch.last_pt_write_count = 1; 3323 vcpu->arch.last_pte_updated = NULL; 3324 } 3325 } 3326 3327 mask.cr0_wp = mask.cr4_pae = mask.nxe = 1; 3328 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) { 3329 pte_size = sp->role.cr4_pae ? 8 : 4; 3330 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); 3331 misaligned |= bytes < 4; 3332 if (misaligned || flooded) { 3333 /* 3334 * Misaligned accesses are too much trouble to fix 3335 * up; also, they usually indicate a page is not used 3336 * as a page table. 3337 * 3338 * If we're seeing too many writes to a page, 3339 * it may no longer be a page table, or we may be 3340 * forking, in which case it is better to unmap the 3341 * page. 3342 */ 3343 pgprintk("misaligned: gpa %llx bytes %d role %x\n", 3344 gpa, bytes, sp->role.word); 3345 zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, 3346 &invalid_list); 3347 ++vcpu->kvm->stat.mmu_flooded; 3348 continue; 3349 } 3350 page_offset = offset; 3351 level = sp->role.level; 3352 npte = 1; 3353 if (!sp->role.cr4_pae) { 3354 page_offset <<= 1; /* 32->64 */ 3355 /* 3356 * A 32-bit pde maps 4MB while the shadow pdes map 3357 * only 2MB. So we need to double the offset again 3358 * and zap two pdes instead of one. 
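 * Worked example: a guest write at offset 0x400 of a non-PAE
 * page directory touches the pde for gva 0x40000000. Doubling
 * twice yields page_offset 0x1000, so quadrant is 1 and the two
 * 2MB shadow pdes at the start of the quadrant-1 shadow page
 * are zapped.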
3359 */ 3360 if (level == PT32_ROOT_LEVEL) { 3361 page_offset &= ~7; /* kill rounding error */ 3362 page_offset <<= 1; 3363 npte = 2; 3364 } 3365 quadrant = page_offset >> PAGE_SHIFT; 3366 page_offset &= ~PAGE_MASK; 3367 if (quadrant != sp->role.quadrant) 3368 continue; 3369 } 3370 local_flush = true; 3371 spte = &sp->spt[page_offset / sizeof(*spte)]; 3372 while (npte--) { 3373 entry = *spte; 3374 mmu_pte_write_zap_pte(vcpu, sp, spte); 3375 if (gentry && 3376 !((sp->role.word ^ vcpu->arch.mmu.base_role.word) 3377 & mask.word)) 3378 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); 3379 if (!remote_flush && need_remote_flush(entry, *spte)) 3380 remote_flush = true; 3381 ++spte; 3382 } 3383 } 3384 mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush); 3385 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 3386 trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); 3387 spin_unlock(&vcpu->kvm->mmu_lock); 3388 if (!is_error_pfn(vcpu->arch.update_pte.pfn)) { 3389 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn); 3390 vcpu->arch.update_pte.pfn = bad_pfn; 3391 } 3392} 3393 3394int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) 3395{ 3396 gpa_t gpa; 3397 int r; 3398 3399 if (vcpu->arch.mmu.direct_map) 3400 return 0; 3401 3402 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); 3403 3404 spin_lock(&vcpu->kvm->mmu_lock); 3405 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); 3406 spin_unlock(&vcpu->kvm->mmu_lock); 3407 return r; 3408} 3409EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); 3410 3411void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) 3412{ 3413 LIST_HEAD(invalid_list); 3414 3415 while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES && 3416 !list_empty(&vcpu->kvm->arch.active_mmu_pages)) { 3417 struct kvm_mmu_page *sp; 3418 3419 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev, 3420 struct kvm_mmu_page, link); 3421 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); 3422 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 3423 ++vcpu->kvm->stat.mmu_recycled; 3424 } 3425} 3426 3427int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, 3428 void *insn, int insn_len) 3429{ 3430 int r; 3431 enum emulation_result er; 3432 3433 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); 3434 if (r < 0) 3435 goto out; 3436 3437 if (!r) { 3438 r = 1; 3439 goto out; 3440 } 3441 3442 r = mmu_topup_memory_caches(vcpu); 3443 if (r) 3444 goto out; 3445 3446 er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len); 3447 3448 switch (er) { 3449 case EMULATE_DONE: 3450 return 1; 3451 case EMULATE_DO_MMIO: 3452 ++vcpu->stat.mmio_exits; 3453 /* fall through */ 3454 case EMULATE_FAIL: 3455 return 0; 3456 default: 3457 BUG(); 3458 } 3459out: 3460 return r; 3461} 3462EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); 3463 3464void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) 3465{ 3466 vcpu->arch.mmu.invlpg(vcpu, gva); 3467 kvm_mmu_flush_tlb(vcpu); 3468 ++vcpu->stat.invlpg; 3469} 3470EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); 3471 3472void kvm_enable_tdp(void) 3473{ 3474 tdp_enabled = true; 3475} 3476EXPORT_SYMBOL_GPL(kvm_enable_tdp); 3477 3478void kvm_disable_tdp(void) 3479{ 3480 tdp_enabled = false; 3481} 3482EXPORT_SYMBOL_GPL(kvm_disable_tdp); 3483 3484static void free_mmu_pages(struct kvm_vcpu *vcpu) 3485{ 3486 free_page((unsigned long)vcpu->arch.mmu.pae_root); 3487 if (vcpu->arch.mmu.lm_root != NULL) 3488 free_page((unsigned long)vcpu->arch.mmu.lm_root); 3489} 3490 3491static int alloc_mmu_pages(struct kvm_vcpu *vcpu) 3492{ 3493 struct page *page; 3494 int 
i; 3495 3496 ASSERT(vcpu); 3497 3498 /* 3499 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. 3500 * Therefore we need to allocate shadow page tables in the first 3501 * 4GB of memory, which happens to fit the DMA32 zone. 3502 */ 3503 page = alloc_page(GFP_KERNEL | __GFP_DMA32); 3504 if (!page) 3505 return -ENOMEM; 3506 3507 vcpu->arch.mmu.pae_root = page_address(page); 3508 for (i = 0; i < 4; ++i) 3509 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; 3510 3511 return 0; 3512} 3513 3514int kvm_mmu_create(struct kvm_vcpu *vcpu) 3515{ 3516 ASSERT(vcpu); 3517 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); 3518 3519 return alloc_mmu_pages(vcpu); 3520} 3521 3522int kvm_mmu_setup(struct kvm_vcpu *vcpu) 3523{ 3524 ASSERT(vcpu); 3525 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); 3526 3527 return init_kvm_mmu(vcpu); 3528} 3529 3530void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) 3531{ 3532 struct kvm_mmu_page *sp; 3533 3534 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) { 3535 int i; 3536 u64 *pt; 3537 3538 if (!test_bit(slot, sp->slot_bitmap)) 3539 continue; 3540 3541 if (sp->role.level != PT_PAGE_TABLE_LEVEL) 3542 continue; 3543 3544 pt = sp->spt; 3545 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) 3546 /* avoid RMW */ 3547 if (is_writable_pte(pt[i])) 3548 update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK); 3549 } 3550 kvm_flush_remote_tlbs(kvm); 3551} 3552 3553void kvm_mmu_zap_all(struct kvm *kvm) 3554{ 3555 struct kvm_mmu_page *sp, *node; 3556 LIST_HEAD(invalid_list); 3557 3558 spin_lock(&kvm->mmu_lock); 3559restart: 3560 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) 3561 if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list)) 3562 goto restart; 3563 3564 kvm_mmu_commit_zap_page(kvm, &invalid_list); 3565 spin_unlock(&kvm->mmu_lock); 3566} 3567 3568static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm, 3569 struct list_head *invalid_list) 3570{ 3571 struct kvm_mmu_page *page; 3572 3573 page = container_of(kvm->arch.active_mmu_pages.prev, 3574 struct kvm_mmu_page, link); 3575 return kvm_mmu_prepare_zap_page(kvm, page, invalid_list); 3576} 3577 3578static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 3579{ 3580 struct kvm *kvm; 3581 struct kvm *kvm_freed = NULL; 3582 3583 if (nr_to_scan == 0) 3584 goto out; 3585 3586 spin_lock(&kvm_lock); 3587 3588 list_for_each_entry(kvm, &vm_list, vm_list) { 3589 int idx, freed_pages; 3590 LIST_HEAD(invalid_list); 3591 3592 idx = srcu_read_lock(&kvm->srcu); 3593 spin_lock(&kvm->mmu_lock); 3594 if (!kvm_freed && nr_to_scan > 0 && 3595 kvm->arch.n_used_mmu_pages > 0) { 3596 freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm, 3597 &invalid_list); 3598 kvm_freed = kvm; 3599 } 3600 nr_to_scan--; 3601 3602 kvm_mmu_commit_zap_page(kvm, &invalid_list); 3603 spin_unlock(&kvm->mmu_lock); 3604 srcu_read_unlock(&kvm->srcu, idx); 3605 } 3606 if (kvm_freed) 3607 list_move_tail(&kvm_freed->vm_list, &vm_list); 3608 3609 spin_unlock(&kvm_lock); 3610 3611out: 3612 return percpu_counter_read_positive(&kvm_total_used_mmu_pages); 3613} 3614 3615static struct shrinker mmu_shrinker = { 3616 .shrink = mmu_shrink, 3617 .seeks = DEFAULT_SEEKS * 10, 3618}; 3619 3620static void mmu_destroy_caches(void) 3621{ 3622 if (pte_chain_cache) 3623 kmem_cache_destroy(pte_chain_cache); 3624 if (rmap_desc_cache) 3625 kmem_cache_destroy(rmap_desc_cache); 3626 if (mmu_page_header_cache) 3627 kmem_cache_destroy(mmu_page_header_cache); 3628} 3629 3630int kvm_mmu_module_init(void) 3631{ 3632 pte_chain_cache = 
kmem_cache_create("kvm_pte_chain", 3633 sizeof(struct kvm_pte_chain), 3634 0, 0, NULL); 3635 if (!pte_chain_cache) 3636 goto nomem; 3637 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc", 3638 sizeof(struct kvm_rmap_desc), 3639 0, 0, NULL); 3640 if (!rmap_desc_cache) 3641 goto nomem; 3642 3643 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", 3644 sizeof(struct kvm_mmu_page), 3645 0, 0, NULL); 3646 if (!mmu_page_header_cache) 3647 goto nomem; 3648 3649 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0)) 3650 goto nomem; 3651 3652 register_shrinker(&mmu_shrinker); 3653 3654 return 0; 3655 3656nomem: 3657 mmu_destroy_caches(); 3658 return -ENOMEM; 3659} 3660 3661/* 3662 * Caculate mmu pages needed for kvm. 3663 */ 3664unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) 3665{ 3666 int i; 3667 unsigned int nr_mmu_pages; 3668 unsigned int nr_pages = 0; 3669 struct kvm_memslots *slots; 3670 3671 slots = kvm_memslots(kvm); 3672 3673 for (i = 0; i < slots->nmemslots; i++) 3674 nr_pages += slots->memslots[i].npages; 3675 3676 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; 3677 nr_mmu_pages = max(nr_mmu_pages, 3678 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES); 3679 3680 return nr_mmu_pages; 3681} 3682 3683static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer, 3684 unsigned len) 3685{ 3686 if (len > buffer->len) 3687 return NULL; 3688 return buffer->ptr; 3689} 3690 3691static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer, 3692 unsigned len) 3693{ 3694 void *ret; 3695 3696 ret = pv_mmu_peek_buffer(buffer, len); 3697 if (!ret) 3698 return ret; 3699 buffer->ptr += len; 3700 buffer->len -= len; 3701 buffer->processed += len; 3702 return ret; 3703} 3704 3705static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu, 3706 gpa_t addr, gpa_t value) 3707{ 3708 int bytes = 8; 3709 int r; 3710 3711 if (!is_long_mode(vcpu) && !is_pae(vcpu)) 3712 bytes = 4; 3713 3714 r = mmu_topup_memory_caches(vcpu); 3715 if (r) 3716 return r; 3717 3718 if (!emulator_write_phys(vcpu, addr, &value, bytes)) 3719 return -EFAULT; 3720 3721 return 1; 3722} 3723 3724static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu) 3725{ 3726 (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu)); 3727 return 1; 3728} 3729 3730static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr) 3731{ 3732 spin_lock(&vcpu->kvm->mmu_lock); 3733 mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT); 3734 spin_unlock(&vcpu->kvm->mmu_lock); 3735 return 1; 3736} 3737 3738static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu, 3739 struct kvm_pv_mmu_op_buffer *buffer) 3740{ 3741 struct kvm_mmu_op_header *header; 3742 3743 header = pv_mmu_peek_buffer(buffer, sizeof *header); 3744 if (!header) 3745 return 0; 3746 switch (header->op) { 3747 case KVM_MMU_OP_WRITE_PTE: { 3748 struct kvm_mmu_op_write_pte *wpte; 3749 3750 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte); 3751 if (!wpte) 3752 return 0; 3753 return kvm_pv_mmu_write(vcpu, wpte->pte_phys, 3754 wpte->pte_val); 3755 } 3756 case KVM_MMU_OP_FLUSH_TLB: { 3757 struct kvm_mmu_op_flush_tlb *ftlb; 3758 3759 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb); 3760 if (!ftlb) 3761 return 0; 3762 return kvm_pv_mmu_flush_tlb(vcpu); 3763 } 3764 case KVM_MMU_OP_RELEASE_PT: { 3765 struct kvm_mmu_op_release_pt *rpt; 3766 3767 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt); 3768 if (!rpt) 3769 return 0; 3770 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys); 3771 } 3772 default: return 0; 3773 } 3774} 3775 3776int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes, 3777 gpa_t addr, unsigned 
long *ret) 3778{ 3779 int r; 3780 struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer; 3781 3782 buffer->ptr = buffer->buf; 3783 buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf); 3784 buffer->processed = 0; 3785 3786 r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len); 3787 if (r) 3788 goto out; 3789 3790 while (buffer->len) { 3791 r = kvm_pv_mmu_op_one(vcpu, buffer); 3792 if (r < 0) 3793 goto out; 3794 if (r == 0) 3795 break; 3796 } 3797 3798 r = 1; 3799out: 3800 *ret = buffer->processed; 3801 return r; 3802} 3803 3804int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) 3805{ 3806 struct kvm_shadow_walk_iterator iterator; 3807 int nr_sptes = 0; 3808 3809 spin_lock(&vcpu->kvm->mmu_lock); 3810 for_each_shadow_entry(vcpu, addr, iterator) { 3811 sptes[iterator.level-1] = *iterator.sptep; 3812 nr_sptes++; 3813 if (!is_shadow_present_pte(*iterator.sptep)) 3814 break; 3815 } 3816 spin_unlock(&vcpu->kvm->mmu_lock); 3817 3818 return nr_sptes; 3819} 3820EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy); 3821 3822void kvm_mmu_destroy(struct kvm_vcpu *vcpu) 3823{ 3824 ASSERT(vcpu); 3825 3826 destroy_kvm_mmu(vcpu); 3827 free_mmu_pages(vcpu); 3828 mmu_free_memory_caches(vcpu); 3829} 3830 3831#ifdef CONFIG_KVM_MMU_AUDIT 3832#include "mmu_audit.c" 3833#else 3834static void mmu_audit_disable(void) { } 3835#endif 3836 3837void kvm_mmu_module_exit(void) 3838{ 3839 mmu_destroy_caches(); 3840 percpu_counter_destroy(&kvm_total_used_mmu_pages); 3841 unregister_shrinker(&mmu_shrinker); 3842 mmu_audit_disable(); 3843}
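/*
 * Usage sketch for kvm_mmu_get_spte_hierarchy() above (illustrative
 * only; "gpa" stands for a faulting guest-physical address supplied
 * by the caller, in the spirit of the VMX EPT-misconfig handler):
 *
 *	u64 sptes[4];
 *	int nr, level;
 *
 *	nr = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
 *	for (level = 4; level > 4 - nr; level--)
 *		pr_info("level %d: spte 0x%llx\n", level, sptes[level - 1]);
 */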