/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __KVM_MM_H__
#define __KVM_MM_H__ 1

/*
 * Architectures can choose whether to use an rwlock or spinlock
 * for the mmu_lock.  These macros, for use in common code
 * only, avoid using #ifdefs in places that must deal with
 * multiple architectures.
 */

#ifdef KVM_HAVE_MMU_RWLOCK
#define KVM_MMU_LOCK_INIT(kvm)	rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)	write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)	write_unlock(&(kvm)->mmu_lock)
#else
#define KVM_MMU_LOCK_INIT(kvm)	spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)	spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)	spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */

struct kvm_follow_pfn {
	const struct kvm_memory_slot *slot;
	const gfn_t gfn;

	unsigned long hva;

	/* FOLL_* flags modifying lookup behavior, e.g. FOLL_WRITE. */
	unsigned int flags;

	/*
	 * Pin the page (effectively FOLL_PIN, which is an mm/ internal flag).
	 * The page *must* be pinned if KVM will write to the page via a kernel
	 * mapping, e.g. via kmap(), memremap(), etc.
	 */
	bool pin;

	/*
	 * If non-NULL, try to get a writable mapping even for a read fault.
	 * Set to true if a writable mapping was obtained.
	 */
	bool *map_writable;

	/*
	 * Optional output.  Set to a valid "struct page" if the returned pfn
	 * is for a refcounted or pinned struct page, NULL if the returned pfn
	 * has no struct page or if the struct page is not being refcounted
	 * (e.g. tail pages of non-compound higher order allocations from
	 * IO/PFNMAP mappings).
	 */
	struct page **refcounted_page;
};

kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp);

#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
				       unsigned long start,
				       unsigned long end);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
						     unsigned long start,
						     unsigned long end)
{
}
#endif /* CONFIG_HAVE_KVM_PFNCACHE */

#ifdef CONFIG_KVM_GUEST_MEMFD
int kvm_gmem_init(struct module *module);
void kvm_gmem_exit(void);
int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
		  unsigned int fd, loff_t offset);
void kvm_gmem_unbind(struct kvm_memory_slot *slot);
#else
static inline int kvm_gmem_init(struct module *module)
{
	return 0;
}

static inline void kvm_gmem_exit(void)
{
}

static inline int kvm_gmem_bind(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				unsigned int fd, loff_t offset)
{
	WARN_ON_ONCE(1);
	return -EIO;
}

static inline void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
	WARN_ON_ONCE(1);
}
#endif /* CONFIG_KVM_GUEST_MEMFD */

#endif /* __KVM_MM_H__ */
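
/*
 * Illustrative sketch, not part of the upstream header: common code takes
 * mmu_lock through the wrappers above so the same function body compiles
 * whether the architecture selected KVM_HAVE_MMU_RWLOCK (rwlock, taken for
 * write) or not (spinlock).  The function name and body are hypothetical.
 */
static inline void example_mmu_update(struct kvm *kvm)
{
	KVM_MMU_LOCK(kvm);	/* write_lock() or spin_lock(), per arch */
	/* ... walk and update stage-2 mappings under mmu_lock ... */
	KVM_MMU_UNLOCK(kvm);	/* matching write_unlock() or spin_unlock() */
}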
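
/*
 * Illustrative sketch: a typical caller builds a kvm_follow_pfn on the
 * stack and hands it to hva_to_pfn().  The wrapper below is hypothetical;
 * gfn_to_hva_memslot() and FOLL_WRITE are existing kernel symbols, but this
 * is a plausible shape rather than a verbatim caller.
 */
static inline kvm_pfn_t example_gfn_to_pfn(struct kvm_memory_slot *slot,
					   gfn_t gfn, bool *writable,
					   struct page **refcounted_page)
{
	struct kvm_follow_pfn kfp = {
		.slot = slot,
		.gfn = gfn,
		.hva = gfn_to_hva_memslot(slot, gfn),
		.flags = FOLL_WRITE,		/* ask for a writable mapping */
		.map_writable = writable,	/* out: was it writable? */
		.refcounted_page = refcounted_page, /* out: backing page, if any */
	};

	return hva_to_pfn(&kfp);
}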
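
/*
 * Illustrative sketch: gfn_to_pfn_cache_invalidate_start() is driven from
 * KVM's mmu_notifier invalidate_range_start path, and the no-op stub above
 * lets that caller skip an #ifdef when CONFIG_HAVE_KVM_PFNCACHE=n.  This
 * wrapper is hypothetical.
 */
static inline void example_hva_range_invalidate(struct kvm *kvm,
						unsigned long start,
						unsigned long end)
{
	/* Drop any gfn_to_pfn caches overlapping the host VA range. */
	gfn_to_pfn_cache_invalidate_start(kvm, start, end);
}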
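
/*
 * Illustrative sketch: the guest_memfd entry points pair up across a
 * memslot's lifetime; kvm_gmem_bind() attaches a guest_memfd file (fd plus
 * offset, as passed by userspace in struct kvm_userspace_memory_region2) to
 * a new slot, and kvm_gmem_unbind() detaches it on slot deletion.  The
 * CONFIG_KVM_GUEST_MEMFD=n stubs WARN because reaching them means a gmem
 * flag was accepted that should have been rejected earlier.  This helper is
 * hypothetical; only the kvm_gmem_* calls and uapi fields are real.
 */
static inline int example_gmem_attach(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      const struct kvm_userspace_memory_region2 *mem)
{
	if (!(mem->flags & KVM_MEM_GUEST_MEMFD))
		return 0;

	return kvm_gmem_bind(kvm, slot, mem->guest_memfd,
			     mem->guest_memfd_offset);
}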