Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86/mmu: Make kvm_mmu_page definition and accessor internal-only

Make 'struct kvm_mmu_page' MMU-only, nothing outside of the MMU should
be poking into the gory details of shadow pages.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200622202034.15093-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Sean Christopherson; committed by Paolo Bonzini.
985ab278 6ca9a6f3

+50 -44
+2 -44
arch/x86/include/asm/kvm_host.h
··· 322 322 unsigned long val; 323 323 }; 324 324 325 - struct kvm_mmu_page { 326 - struct list_head link; 327 - struct hlist_node hash_link; 328 - struct list_head lpage_disallowed_link; 329 - 330 - bool unsync; 331 - u8 mmu_valid_gen; 332 - bool mmio_cached; 333 - bool lpage_disallowed; /* Can't be replaced by an equiv large page */ 334 - 335 - /* 336 - * The following two entries are used to key the shadow page in the 337 - * hash table. 338 - */ 339 - union kvm_mmu_page_role role; 340 - gfn_t gfn; 341 - 342 - u64 *spt; 343 - /* hold the gfn of each spte inside spt */ 344 - gfn_t *gfns; 345 - int root_count; /* Currently serving as active root */ 346 - unsigned int unsync_children; 347 - struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ 348 - DECLARE_BITMAP(unsync_child_bitmap, 512); 349 - 350 - #ifdef CONFIG_X86_32 351 - /* 352 - * Used out of the mmu-lock to avoid reading spte values while an 353 - * update is in progress; see the comments in __get_spte_lockless(). 354 - */ 355 - int clear_spte_count; 356 - #endif 357 - 358 - /* Number of writes since the last time traversal visited this page. */ 359 - atomic_t write_flooding_count; 360 - }; 361 - 362 325 struct kvm_pio_request { 363 326 unsigned long linear_rip; 364 327 unsigned long count; ··· 346 383 ((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE }) 347 384 348 385 #define KVM_MMU_NUM_PREV_ROOTS 3 386 + 387 + struct kvm_mmu_page; 349 388 350 389 /* 351 390 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit, ··· 1523 1558 struct x86_exception *exception) 1524 1559 { 1525 1560 return gpa; 1526 - } 1527 - 1528 - static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) 1529 - { 1530 - struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); 1531 - 1532 - return (struct kvm_mmu_page *)page_private(page); 1533 1561 } 1534 1562 1535 1563 static inline u16 kvm_read_ldt(void)
+48
arch/x86/kvm/mmu/mmu_internal.h
··· 2 2 #ifndef __KVM_X86_MMU_INTERNAL_H 3 3 #define __KVM_X86_MMU_INTERNAL_H 4 4 5 + #include <linux/types.h> 6 + 7 + #include <asm/kvm_host.h> 8 + 9 + struct kvm_mmu_page { 10 + struct list_head link; 11 + struct hlist_node hash_link; 12 + struct list_head lpage_disallowed_link; 13 + 14 + bool unsync; 15 + u8 mmu_valid_gen; 16 + bool mmio_cached; 17 + bool lpage_disallowed; /* Can't be replaced by an equiv large page */ 18 + 19 + /* 20 + * The following two entries are used to key the shadow page in the 21 + * hash table. 22 + */ 23 + union kvm_mmu_page_role role; 24 + gfn_t gfn; 25 + 26 + u64 *spt; 27 + /* hold the gfn of each spte inside spt */ 28 + gfn_t *gfns; 29 + int root_count; /* Currently serving as active root */ 30 + unsigned int unsync_children; 31 + struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ 32 + DECLARE_BITMAP(unsync_child_bitmap, 512); 33 + 34 + #ifdef CONFIG_X86_32 35 + /* 36 + * Used out of the mmu-lock to avoid reading spte values while an 37 + * update is in progress; see the comments in __get_spte_lockless(). 38 + */ 39 + int clear_spte_count; 40 + #endif 41 + 42 + /* Number of writes since the last time traversal visited this page. */ 43 + atomic_t write_flooding_count; 44 + }; 45 + 46 + static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) 47 + { 48 + struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); 49 + 50 + return (struct kvm_mmu_page *)page_private(page); 51 + } 52 + 5 53 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); 6 54 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); 7 55 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,