
Merge tag 'kvm-x86-gmem-6.19' of https://github.com/kvm-x86/linux into HEAD

KVM guest_memfd changes for 6.19:

- Add NUMA mempolicy support for guest_memfd, and clean up a variety of
rough edges in guest_memfd along the way.

- Define a CLASS to automatically handle get+put when grabbing a guest_memfd
from a memslot to make it harder to leak references (a sketch of the pattern
follows this list).

- Enhance KVM selftests to make it easier to develop and debug selftests like
those added for guest_memfd NUMA support, e.g. where test and/or KVM bugs
often result in hard-to-debug SIGBUS errors.

- Misc cleanups.
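
For reference, the second bullet builds on the scope-based cleanup helpers in
<linux/cleanup.h>. The DEFINE_CLASS() line below matches the definition added
in virt/kvm/guest_memfd.c later in this merge; the surrounding caller is a
hypothetical sketch, not code from the series:

DEFINE_CLASS(gmem_get_file, struct file *, if (_T) fput(_T),
	     kvm_gmem_get_file(slot), struct kvm_memory_slot *slot);

/*
 * Hypothetical caller: the CLASS() variable's cleanup action (fput() here)
 * runs automatically when "file" goes out of scope, on every return path.
 */
static int example_use(struct kvm_memory_slot *slot)
{
	CLASS(gmem_get_file, file)(slot);

	if (!file)
		return -EFAULT;

	/* ... use file; no explicit fput() needed ... */
	return 0;
}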

+645 -253
+2 -2
fs/btrfs/compression.c
···
 			continue;
 		}
 
-		folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
-						~__GFP_FS), 0);
+		folio = filemap_alloc_folio(mapping_gfp_constraint(mapping, ~__GFP_FS),
+					    0, NULL);
 		if (!folio)
 			break;
 
+1 -1
fs/btrfs/verity.c
···
 	}
 
 	folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS),
-				    0);
+				    0, NULL);
 	if (!folio)
 		return ERR_PTR(-ENOMEM);
 
+1 -1
fs/erofs/zdata.c
···
 		 * Allocate a managed folio for cached I/O, or it may be
 		 * then filled with a file-backed folio for in-place I/O
 		 */
-		newfolio = filemap_alloc_folio(gfp, 0);
+		newfolio = filemap_alloc_folio(gfp, 0, NULL);
 		if (!newfolio)
 			continue;
 		newfolio->private = Z_EROFS_PREALLOCATED_FOLIO;
+1 -1
fs/f2fs/compress.c
···
 		return;
 	}
 
-	cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0);
+	cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0, NULL);
 	if (!cfolio)
 		return;
 
+13 -5
include/linux/pagemap.h
···
 }
 
 #ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+		struct mempolicy *policy);
 #else
-static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
+static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+		struct mempolicy *policy)
 {
 	return folio_alloc_noprof(gfp, order);
 }
···
 
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
-	return &filemap_alloc_folio(gfp, 0)->page;
+	return &filemap_alloc_folio(gfp, 0, NULL)->page;
 }
 
 static inline gfp_t readahead_gfp_mask(struct address_space *x)
···
 }
 
 void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
-struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
-		fgf_t fgp_flags, gfp_t gfp);
+struct folio *__filemap_get_folio_mpol(struct address_space *mapping,
+		pgoff_t index, fgf_t fgf_flags, gfp_t gfp, struct mempolicy *policy);
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
 		fgf_t fgp_flags, gfp_t gfp);
+
+static inline struct folio *__filemap_get_folio(struct address_space *mapping,
+		pgoff_t index, fgf_t fgf_flags, gfp_t gfp)
+{
+	return __filemap_get_folio_mpol(mapping, index, fgf_flags, gfp, NULL);
+}
 
 /**
  * write_begin_get_folio - Get folio for write_begin with flags.
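
In short, filemap_alloc_folio() grows a mempolicy parameter and the old
__filemap_get_folio() becomes a NULL-policy wrapper around the new
__filemap_get_folio_mpol(). A hypothetical caller; NULL preserves the old
task/cpuset-based placement, per the CONFIG_NUMA=n fallback above:

struct folio *folio;

folio = filemap_alloc_folio(GFP_KERNEL, 0, NULL);	/* old behavior */
folio = filemap_alloc_folio(GFP_KERNEL, 0, policy);	/* explicit NUMA policy */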
+1
include/uapi/linux/magic.h
···
 #define DEVMEM_MAGIC		0x454d444d	/* "DMEM" */
 #define SECRETMEM_MAGIC		0x5345434d	/* "SECM" */
 #define PID_FS_MAGIC		0x50494446	/* "PIDF" */
+#define GUEST_MEMFD_MAGIC	0x474d454d	/* "GMEM" */
 
 #endif /* __LINUX_MAGIC_H__ */
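
Userspace can use the new magic to recognize files on the guest_memfd
pseudo-filesystem (added in virt/kvm/guest_memfd.c below); a hedged sketch
using fstatfs(2):

#include <sys/vfs.h>
#include <linux/magic.h>

/* Sketch: returns nonzero if fd refers to a guest_memfd instance. */
static int is_guest_memfd(int fd)
{
	struct statfs st;

	if (fstatfs(fd, &st))
		return 0;
	return st.f_type == GUEST_MEMFD_MAGIC;
}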
+14 -9
mm/filemap.c
···
 EXPORT_SYMBOL_GPL(filemap_add_folio);
 
 #ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+		struct mempolicy *policy)
 {
 	int n;
 	struct folio *folio;
+
+	if (policy)
+		return folio_alloc_mpol_noprof(gfp, order, policy,
+				NO_INTERLEAVE_INDEX, numa_node_id());
 
 	if (cpuset_do_page_mem_spread()) {
 		unsigned int cpuset_mems_cookie;
···
 }
 
 /**
- * __filemap_get_folio - Find and get a reference to a folio.
+ * __filemap_get_folio_mpol - Find and get a reference to a folio.
  * @mapping: The address_space to search.
  * @index: The page index.
  * @fgp_flags: %FGP flags modify how the folio is returned.
  * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
+ * @policy: NUMA memory allocation policy to follow.
 *
 * Looks up the page cache entry at @mapping & @index.
 *
···
 *
 * Return: The found folio or an ERR_PTR() otherwise.
 */
-struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
-		fgf_t fgp_flags, gfp_t gfp)
+struct folio *__filemap_get_folio_mpol(struct address_space *mapping,
+		pgoff_t index, fgf_t fgp_flags, gfp_t gfp, struct mempolicy *policy)
 {
 	struct folio *folio;
 
···
 		err = -ENOMEM;
 		if (order > min_order)
 			alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
-		folio = filemap_alloc_folio(alloc_gfp, order);
+		folio = filemap_alloc_folio(alloc_gfp, order, policy);
 		if (!folio)
 			continue;
 
···
 	folio_clear_dropbehind(folio);
 	return folio;
 }
-EXPORT_SYMBOL(__filemap_get_folio);
+EXPORT_SYMBOL(__filemap_get_folio_mpol);
 
 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
 					   xa_mark_t mark)
···
 	if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
 		return -EAGAIN;
 
-	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
+	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order, NULL);
 	if (!folio)
 		return -ENOMEM;
 	if (iocb->ki_flags & IOCB_DONTCACHE)
···
 repeat:
 	folio = filemap_get_folio(mapping, index);
 	if (IS_ERR(folio)) {
-		folio = filemap_alloc_folio(gfp,
-				mapping_min_folio_order(mapping));
+		folio = filemap_alloc_folio(gfp, mapping_min_folio_order(mapping), NULL);
 		if (!folio)
 			return ERR_PTR(-ENOMEM);
 		index = mapping_align_index(mapping, index);
+6
mm/mempolicy.c
···
 
 	return &default_policy;
 }
+EXPORT_SYMBOL_FOR_MODULES(get_task_policy, "kvm");
 
 static const struct mempolicy_operations {
 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
···
 		return;
 	kmem_cache_free(policy_cache, pol);
 }
+EXPORT_SYMBOL_FOR_MODULES(__mpol_put, "kvm");
 
 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
 {
···
 	read_unlock(&sp->lock);
 	return pol;
 }
+EXPORT_SYMBOL_FOR_MODULES(mpol_shared_policy_lookup, "kvm");
 
 static void sp_free(struct sp_node *n)
 {
···
 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
 	}
 }
+EXPORT_SYMBOL_FOR_MODULES(mpol_shared_policy_init, "kvm");
 
 int mpol_set_shared_policy(struct shared_policy *sp,
 			   struct vm_area_struct *vma, struct mempolicy *pol)
···
 		sp_free(new);
 	return err;
 }
+EXPORT_SYMBOL_FOR_MODULES(mpol_set_shared_policy, "kvm");
 
 /* Free a backing policy store on inode delete. */
 void mpol_free_shared_policy(struct shared_policy *sp)
···
 	}
 	write_unlock(&sp->lock);
 }
+EXPORT_SYMBOL_FOR_MODULES(mpol_free_shared_policy, "kvm");
 
 #ifdef CONFIG_NUMA_BALANCING
 static int __initdata numabalancing_override;
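
These module-restricted exports (usable only by "kvm") hand the KVM module the
full shared_policy lifecycle. Roughly how the pieces compose, as a sketch that
mirrors their use in virt/kvm/guest_memfd.c below, not a complete driver:

#include <linux/mempolicy.h>

static void shared_policy_lifecycle(struct shared_policy *sp,
				    struct vm_area_struct *vma,
				    struct mempolicy *new, pgoff_t index)
{
	struct mempolicy *pol;

	mpol_shared_policy_init(sp, NULL);	/* at inode creation */
	mpol_set_shared_policy(sp, vma, new);	/* .set_policy, i.e. mbind() */

	pol = mpol_shared_policy_lookup(sp, index); /* NULL if no policy set */
	mpol_cond_put(pol);			/* drop the lookup reference */

	mpol_free_shared_policy(sp);		/* at inode destruction */
}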
+1 -1
mm/readahead.c
···
 {
 	struct folio *folio;
 
-	folio = filemap_alloc_folio(gfp_mask, order);
+	folio = filemap_alloc_folio(gfp_mask, order, NULL);
 	if (folio && ractl->dropbehind)
 		__folio_set_dropbehind(folio);
 
+1 -1
tools/testing/selftests/kvm/arm64/vgic_irq.c
···
 	}
 
 	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
-		close(fd[f]);
+		kvm_close(fd[f]);
 }
 
 /* handles the valid case: intid=0xffffffff num=1 */
+98
tools/testing/selftests/kvm/guest_memfd_test.c
···
 #include <sys/stat.h>
 
 #include "kvm_util.h"
+#include "numaif.h"
 #include "test_util.h"
 #include "ucall_common.h"
 
···
 	memset(mem, val, page_size);
 	for (i = 0; i < total_size; i++)
 		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);
+
+	kvm_munmap(mem, total_size);
+}
+
+static void test_mbind(int fd, size_t total_size)
+{
+	const unsigned long nodemask_0 = 1;	/* nid: 0 */
+	unsigned long nodemask = 0;
+	unsigned long maxnode = 8;
+	int policy;
+	char *mem;
+	int ret;
+
+	if (!is_multi_numa_node_system())
+		return;
+
+	mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
+
+	/* Test MPOL_INTERLEAVE policy */
+	kvm_mbind(mem, page_size * 2, MPOL_INTERLEAVE, &nodemask_0, maxnode, 0);
+	kvm_get_mempolicy(&policy, &nodemask, maxnode, mem, MPOL_F_ADDR);
+	TEST_ASSERT(policy == MPOL_INTERLEAVE && nodemask == nodemask_0,
+		    "Wanted MPOL_INTERLEAVE (%u) and nodemask 0x%lx, got %u and 0x%lx",
+		    MPOL_INTERLEAVE, nodemask_0, policy, nodemask);
+
+	/* Test basic MPOL_BIND policy */
+	kvm_mbind(mem + page_size * 2, page_size * 2, MPOL_BIND, &nodemask_0, maxnode, 0);
+	kvm_get_mempolicy(&policy, &nodemask, maxnode, mem + page_size * 2, MPOL_F_ADDR);
+	TEST_ASSERT(policy == MPOL_BIND && nodemask == nodemask_0,
+		    "Wanted MPOL_BIND (%u) and nodemask 0x%lx, got %u and 0x%lx",
+		    MPOL_BIND, nodemask_0, policy, nodemask);
+
+	/* Test MPOL_DEFAULT policy */
+	kvm_mbind(mem, total_size, MPOL_DEFAULT, NULL, 0, 0);
+	kvm_get_mempolicy(&policy, &nodemask, maxnode, mem, MPOL_F_ADDR);
+	TEST_ASSERT(policy == MPOL_DEFAULT && !nodemask,
+		    "Wanted MPOL_DEFAULT (%u) and nodemask 0x0, got %u and 0x%lx",
+		    MPOL_DEFAULT, policy, nodemask);
+
+	/* Test with invalid policy */
+	ret = mbind(mem, page_size, 999, &nodemask_0, maxnode, 0);
+	TEST_ASSERT(ret == -1 && errno == EINVAL,
+		    "mbind with invalid policy should fail with EINVAL");
+
+	kvm_munmap(mem, total_size);
+}
+
+static void test_numa_allocation(int fd, size_t total_size)
+{
+	unsigned long node0_mask = 1;	/* Node 0 */
+	unsigned long node1_mask = 2;	/* Node 1 */
+	unsigned long maxnode = 8;
+	void *pages[4];
+	int status[4];
+	char *mem;
+	int i;
+
+	if (!is_multi_numa_node_system())
+		return;
+
+	mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
+
+	for (i = 0; i < 4; i++)
+		pages[i] = (char *)mem + page_size * i;
+
+	/* Set NUMA policy after allocation */
+	memset(mem, 0xaa, page_size);
+	kvm_mbind(pages[0], page_size, MPOL_BIND, &node0_mask, maxnode, 0);
+	kvm_fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, page_size);
+
+	/* Set NUMA policy before allocation */
+	kvm_mbind(pages[0], page_size * 2, MPOL_BIND, &node1_mask, maxnode, 0);
+	kvm_mbind(pages[2], page_size * 2, MPOL_BIND, &node0_mask, maxnode, 0);
+	memset(mem, 0xaa, total_size);
+
+	/* Validate if pages are allocated on specified NUMA nodes */
+	kvm_move_pages(0, 4, pages, NULL, status, 0);
+	TEST_ASSERT(status[0] == 1, "Expected page 0 on node 1, got it on node %d", status[0]);
+	TEST_ASSERT(status[1] == 1, "Expected page 1 on node 1, got it on node %d", status[1]);
+	TEST_ASSERT(status[2] == 0, "Expected page 2 on node 0, got it on node %d", status[2]);
+	TEST_ASSERT(status[3] == 0, "Expected page 3 on node 0, got it on node %d", status[3]);
+
+	/* Punch hole for all pages */
+	kvm_fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, total_size);
+
+	/* Change NUMA policy nodes and reallocate */
+	kvm_mbind(pages[0], page_size * 2, MPOL_BIND, &node0_mask, maxnode, 0);
+	kvm_mbind(pages[2], page_size * 2, MPOL_BIND, &node1_mask, maxnode, 0);
+	memset(mem, 0xaa, total_size);
+
+	kvm_move_pages(0, 4, pages, NULL, status, 0);
+	TEST_ASSERT(status[0] == 0, "Expected page 0 on node 0, got it on node %d", status[0]);
+	TEST_ASSERT(status[1] == 0, "Expected page 1 on node 0, got it on node %d", status[1]);
+	TEST_ASSERT(status[2] == 1, "Expected page 2 on node 1, got it on node %d", status[2]);
+	TEST_ASSERT(status[3] == 1, "Expected page 3 on node 1, got it on node %d", status[3]);
 
 	kvm_munmap(mem, total_size);
 }
···
 	if (flags & GUEST_MEMFD_FLAG_INIT_SHARED) {
 		gmem_test(mmap_supported, vm, flags);
 		gmem_test(fault_overflow, vm, flags);
+		gmem_test(numa_allocation, vm, flags);
 	} else {
 		gmem_test(fault_private, vm, flags);
 	}
 
 	gmem_test(mmap_cow, vm, flags);
+	gmem_test(mbind, vm, flags);
 } else {
 	gmem_test(mmap_not_supported, vm, flags);
 }
+81
tools/testing/selftests/kvm/include/kvm_syscalls.h
···
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_SYSCALLS_H
+#define SELFTEST_KVM_SYSCALLS_H
+
+#include <sys/syscall.h>
+
+#define MAP_ARGS0(m,...)
+#define MAP_ARGS1(m,t,a,...)	m(t,a)
+#define MAP_ARGS2(m,t,a,...)	m(t,a), MAP_ARGS1(m,__VA_ARGS__)
+#define MAP_ARGS3(m,t,a,...)	m(t,a), MAP_ARGS2(m,__VA_ARGS__)
+#define MAP_ARGS4(m,t,a,...)	m(t,a), MAP_ARGS3(m,__VA_ARGS__)
+#define MAP_ARGS5(m,t,a,...)	m(t,a), MAP_ARGS4(m,__VA_ARGS__)
+#define MAP_ARGS6(m,t,a,...)	m(t,a), MAP_ARGS5(m,__VA_ARGS__)
+#define MAP_ARGS(n,...)		MAP_ARGS##n(__VA_ARGS__)
+
+#define __DECLARE_ARGS(t, a)	t a
+#define __UNPACK_ARGS(t, a)	a
+
+#define DECLARE_ARGS(nr_args, args...)	MAP_ARGS(nr_args, __DECLARE_ARGS, args)
+#define UNPACK_ARGS(nr_args, args...)	MAP_ARGS(nr_args, __UNPACK_ARGS, args)
+
+#define __KVM_SYSCALL_ERROR(_name, _ret) \
+	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
+
+/* Define a kvm_<syscall>() API to assert success. */
+#define __KVM_SYSCALL_DEFINE(name, nr_args, args...)			\
+static inline void kvm_##name(DECLARE_ARGS(nr_args, args))		\
+{									\
+	int r;								\
+									\
+	r = name(UNPACK_ARGS(nr_args, args));				\
+	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR(#name, r));			\
+}
+
+/*
+ * Macro to define syscall APIs, either because KVM selftests doesn't link to
+ * the relevant library, e.g. libnuma, or because no library yet provides the
+ * syscall.  Each definition also generates a kvm_<name>() wrapper that
+ * asserts success.
+ */
+#define KVM_SYSCALL_DEFINE(name, nr_args, args...)			\
+static inline long name(DECLARE_ARGS(nr_args, args))			\
+{									\
+	return syscall(__NR_##name, UNPACK_ARGS(nr_args, args));	\
+}									\
+__KVM_SYSCALL_DEFINE(name, nr_args, args)
+
+/*
+ * Special case mmap(), as KVM selftests rarely/never specify an address,
+ * rarely specify an offset, and because the unique return code requires
+ * special handling anyway.
+ */
+static inline void *__kvm_mmap(size_t size, int prot, int flags, int fd,
+			       off_t offset)
+{
+	void *mem;
+
+	mem = mmap(NULL, size, prot, flags, fd, offset);
+	TEST_ASSERT(mem != MAP_FAILED, __KVM_SYSCALL_ERROR("mmap()",
+		    (int)(unsigned long)MAP_FAILED));
+	return mem;
+}
+
+static inline void *kvm_mmap(size_t size, int prot, int flags, int fd)
+{
+	return __kvm_mmap(size, prot, flags, fd, 0);
+}
+
+static inline int kvm_dup(int fd)
+{
+	int new_fd = dup(fd);
+
+	TEST_ASSERT(new_fd >= 0, __KVM_SYSCALL_ERROR("dup()", new_fd));
+	return new_fd;
+}
+
+__KVM_SYSCALL_DEFINE(munmap, 2, void *, mem, size_t, size);
+__KVM_SYSCALL_DEFINE(close, 1, int, fd);
+__KVM_SYSCALL_DEFINE(fallocate, 4, int, fd, int, mode, loff_t, offset, loff_t, len);
+__KVM_SYSCALL_DEFINE(ftruncate, 2, unsigned int, fd, off_t, length);
+
+#endif /* SELFTEST_KVM_SYSCALLS_H */
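
For readability, what one invocation expands to, derived by hand from the
macros above (kvm_mbind() itself is defined via KVM_SYSCALL_DEFINE() in
numaif.h later in this series):

/* KVM_SYSCALL_DEFINE(mbind, 6, void *, addr, unsigned long, size, int, mode,
 *                    const unsigned long *, nodemask, unsigned long, maxnode,
 *                    unsigned int, flags) expands to roughly: */
static inline long mbind(void *addr, unsigned long size, int mode,
			 const unsigned long *nodemask, unsigned long maxnode,
			 unsigned int flags)
{
	return syscall(__NR_mbind, addr, size, mode, nodemask, maxnode, flags);
}

static inline void kvm_mbind(void *addr, unsigned long size, int mode,
			     const unsigned long *nodemask, unsigned long maxnode,
			     unsigned int flags)
{
	int r;

	r = mbind(addr, size, mode, nodemask, maxnode, flags);
	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("mbind", r));
}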
+6 -33
tools/testing/selftests/kvm/include/kvm_util.h
···
 
 #include <pthread.h>
 
+#include "kvm_syscalls.h"
 #include "kvm_util_arch.h"
 #include "kvm_util_types.h"
 #include "sparsebit.h"
···
 static inline bool kvm_has_cap(long cap)
 {
 	return kvm_check_cap(cap);
-}
-
-#define __KVM_SYSCALL_ERROR(_name, _ret) \
-	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
-
-static inline void *__kvm_mmap(size_t size, int prot, int flags, int fd,
-			       off_t offset)
-{
-	void *mem;
-
-	mem = mmap(NULL, size, prot, flags, fd, offset);
-	TEST_ASSERT(mem != MAP_FAILED, __KVM_SYSCALL_ERROR("mmap()",
-		    (int)(unsigned long)MAP_FAILED));
-
-	return mem;
-}
-
-static inline void *kvm_mmap(size_t size, int prot, int flags, int fd)
-{
-	return __kvm_mmap(size, prot, flags, fd, 0);
-}
-
-static inline void kvm_munmap(void *mem, size_t size)
-{
-	int ret;
-
-	ret = munmap(mem, size);
-	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
 }
 
 /*
···
 		uint32_t guest_memfd, uint64_t guest_memfd_offset);
 
 void vm_userspace_mem_region_add(struct kvm_vm *vm,
-	enum vm_mem_backing_src_type src_type,
-	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
-	uint32_t flags);
+				 enum vm_mem_backing_src_type src_type,
+				 uint64_t gpa, uint32_t slot, uint64_t npages,
+				 uint32_t flags);
 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
-		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
-		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);
+		uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,
+		int guest_memfd_fd, uint64_t guest_memfd_offset);
 
 #ifndef vm_arch_has_protected_memory
 static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
+69 -41
tools/testing/selftests/kvm/include/numaif.h
···
 /* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * tools/testing/selftests/kvm/include/numaif.h
- *
- * Copyright (C) 2020, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
- * Header file that provides access to NUMA API functions not explicitly
- * exported to user space.
- */
+/* Copyright (C) 2020, Google LLC. */
 
 #ifndef SELFTEST_KVM_NUMAIF_H
 #define SELFTEST_KVM_NUMAIF_H
 
-#define __NR_get_mempolicy 239
-#define __NR_migrate_pages 256
+#include <dirent.h>
 
-/* System calls */
-long get_mempolicy(int *policy, const unsigned long *nmask,
-		   unsigned long maxnode, void *addr, int flags)
+#include <linux/mempolicy.h>
+
+#include "kvm_syscalls.h"
+
+KVM_SYSCALL_DEFINE(get_mempolicy, 5, int *, policy, const unsigned long *, nmask,
+		   unsigned long, maxnode, void *, addr, int, flags);
+
+KVM_SYSCALL_DEFINE(set_mempolicy, 3, int, mode, const unsigned long *, nmask,
+		   unsigned long, maxnode);
+
+KVM_SYSCALL_DEFINE(set_mempolicy_home_node, 4, unsigned long, start,
+		   unsigned long, len, unsigned long, home_node,
+		   unsigned long, flags);
+
+KVM_SYSCALL_DEFINE(migrate_pages, 4, int, pid, unsigned long, maxnode,
+		   const unsigned long *, frommask, const unsigned long *, tomask);
+
+KVM_SYSCALL_DEFINE(move_pages, 6, int, pid, unsigned long, count, void *, pages,
+		   const int *, nodes, int *, status, int, flags);
+
+KVM_SYSCALL_DEFINE(mbind, 6, void *, addr, unsigned long, size, int, mode,
+		   const unsigned long *, nodemask, unsigned long, maxnode,
+		   unsigned int, flags);
+
+static inline int get_max_numa_node(void)
 {
-	return syscall(__NR_get_mempolicy, policy, nmask,
-		       maxnode, addr, flags);
+	struct dirent *de;
+	int max_node = 0;
+	DIR *d;
+
+	/*
+	 * Assume there's a single node if the kernel doesn't support NUMA,
+	 * or if no nodes are found.
+	 */
+	d = opendir("/sys/devices/system/node");
+	if (!d)
+		return 0;
+
+	while ((de = readdir(d)) != NULL) {
+		int node_id;
+		char *endptr;
+
+		if (strncmp(de->d_name, "node", 4) != 0)
+			continue;
+
+		node_id = strtol(de->d_name + 4, &endptr, 10);
+		if (*endptr != '\0')
+			continue;
+
+		if (node_id > max_node)
+			max_node = node_id;
+	}
+	closedir(d);
+
+	return max_node;
 }
 
-long migrate_pages(int pid, unsigned long maxnode,
-		   const unsigned long *frommask,
-		   const unsigned long *tomask)
+static bool is_numa_available(void)
 {
-	return syscall(__NR_migrate_pages, pid, maxnode, frommask, tomask);
+	/*
+	 * Probe for NUMA by doing a dummy get_mempolicy().  If the syscall
+	 * fails with ENOSYS, then the kernel was built without NUMA support.
+	 * If the syscall fails with EPERM, then the process/user lacks the
+	 * necessary capabilities (CAP_SYS_NICE).
+	 */
+	return !get_mempolicy(NULL, NULL, 0, NULL, 0) ||
+	       (errno != ENOSYS && errno != EPERM);
 }
 
-/* Policies */
-#define MPOL_DEFAULT	 0
-#define MPOL_PREFERRED	 1
-#define MPOL_BIND	 2
-#define MPOL_INTERLEAVE	 3
-
-#define MPOL_MAX MPOL_INTERLEAVE
-
-/* Flags for get_mem_policy */
-#define MPOL_F_NODE	(1<<0)	/* return next il node or node of address */
-				/* Warning: MPOL_F_NODE is unsupported and
-				 * subject to change. Don't use.
-				 */
-#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
-#define MPOL_F_MEMS_ALLOWED (1<<2) /* query nodes allowed in cpuset */
-
-/* Flags for mbind */
-#define MPOL_MF_STRICT	 (1<<0)	/* Verify existing pages in the mapping */
-#define MPOL_MF_MOVE	 (1<<1)	/* Move pages owned by this process to conform to mapping */
-#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
+static inline bool is_multi_numa_node_system(void)
+{
+	return is_numa_available() && get_max_numa_node() >= 1;
+}
 
 #endif /* SELFTEST_KVM_NUMAIF_H */
+2 -2
tools/testing/selftests/kvm/kvm_binary_stats_test.c
···
 	 * single stats file works and doesn't cause explosions.
 	 */
 	vm_stats_fds = vm_get_stats_fd(vms[i]);
-	stats_test(dup(vm_stats_fds));
+	stats_test(kvm_dup(vm_stats_fds));
 
 	/* Verify userspace can instantiate multiple stats files. */
 	stats_test(vm_get_stats_fd(vms[i]));
 
 	for (j = 0; j < max_vcpu; ++j) {
 		vcpu_stats_fds[j] = vcpu_get_stats_fd(vcpus[i * max_vcpu + j]);
-		stats_test(dup(vcpu_stats_fds[j]));
+		stats_test(kvm_dup(vcpu_stats_fds[j]));
 		stats_test(vcpu_get_stats_fd(vcpus[i * max_vcpu + j]));
 	}
 
+53 -48
tools/testing/selftests/kvm/lib/kvm_util.c
···
 
 static void kvm_stats_release(struct kvm_binary_stats *stats)
 {
-	int ret;
-
 	if (stats->fd < 0)
 		return;
 
···
 		stats->desc = NULL;
 	}
 
-	ret = close(stats->fd);
-	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
+	kvm_close(stats->fd);
 	stats->fd = -1;
 }
 
···
 */
static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
-	int ret;
-
 	if (vcpu->dirty_gfns) {
 		kvm_munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
 		vcpu->dirty_gfns = NULL;
···
 
 	kvm_munmap(vcpu->run, vcpu_mmap_sz());
 
-	ret = close(vcpu->fd);
-	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
-
+	kvm_close(vcpu->fd);
 	kvm_stats_release(&vcpu->stats);
 
 	list_del(&vcpu->list);
···
 void kvm_vm_release(struct kvm_vm *vmp)
 {
 	struct kvm_vcpu *vcpu, *tmp;
-	int ret;
 
 	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
 		vm_vcpu_rm(vmp, vcpu);
 
-	ret = close(vmp->fd);
-	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
-
-	ret = close(vmp->kvm_fd);
-	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
+	kvm_close(vmp->fd);
+	kvm_close(vmp->kvm_fd);
 
 	/* Free cached stats metadata and close FD */
 	kvm_stats_release(&vmp->stats);
···
 int kvm_memfd_alloc(size_t size, bool hugepages)
 {
 	int memfd_flags = MFD_CLOEXEC;
-	int fd, r;
+	int fd;
 
 	if (hugepages)
 		memfd_flags |= MFD_HUGETLB;
···
 	fd = memfd_create("kvm_selftest", memfd_flags);
 	TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));
 
-	r = ftruncate(fd, size);
-	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r));
-
-	r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
-	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
+	kvm_ftruncate(fd, size);
+	kvm_fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
 
 	return fd;
 }
···
 
 /* FIXME: This thing needs to be ripped apart and rewritten. */
 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
-		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
-		uint32_t flags, int guest_memfd, uint64_t guest_memfd_offset)
+		uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,
+		int guest_memfd, uint64_t guest_memfd_offset)
 {
 	int ret;
 	struct userspace_mem_region *region;
···
 		    "Number of guest pages is not compatible with the host. "
 		    "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
 
-	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
+	TEST_ASSERT((gpa % vm->page_size) == 0, "Guest physical "
 		"address not on a page boundary.\n"
-		"  guest_paddr: 0x%lx vm->page_size: 0x%x",
-		guest_paddr, vm->page_size);
-	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
+		"  gpa: 0x%lx vm->page_size: 0x%x",
+		gpa, vm->page_size);
+	TEST_ASSERT((((gpa >> vm->page_shift) + npages) - 1)
 		<= vm->max_gfn, "Physical range beyond maximum "
 		"supported physical address,\n"
-		"  guest_paddr: 0x%lx npages: 0x%lx\n"
+		"  gpa: 0x%lx npages: 0x%lx\n"
 		"  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-		guest_paddr, npages, vm->max_gfn, vm->page_size);
+		gpa, npages, vm->max_gfn, vm->page_size);
 
 	/*
 	 * Confirm a mem region with an overlapping address doesn't
 	 * already exist.
 	 */
 	region = (struct userspace_mem_region *) userspace_mem_region_find(
-		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
+		vm, gpa, (gpa + npages * vm->page_size) - 1);
 	if (region != NULL)
 		TEST_FAIL("overlapping userspace_mem_region already "
 			"exists\n"
-			"  requested guest_paddr: 0x%lx npages: 0x%lx "
-			"page_size: 0x%x\n"
-			"  existing guest_paddr: 0x%lx size: 0x%lx",
-			guest_paddr, npages, vm->page_size,
+			"  requested gpa: 0x%lx npages: 0x%lx page_size: 0x%x\n"
+			"  existing gpa: 0x%lx size: 0x%lx",
+			gpa, npages, vm->page_size,
 			(uint64_t) region->region.guest_phys_addr,
 			(uint64_t) region->region.memory_size);
 
···
 			"already exists.\n"
 			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
 			"  existing slot: %u paddr: 0x%lx size: 0x%lx",
-			slot, guest_paddr, npages,
-			region->region.slot,
+			slot, gpa, npages, region->region.slot,
 			(uint64_t) region->region.guest_phys_addr,
 			(uint64_t) region->region.memory_size);
 	}
···
 	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
 		alignment = max(backing_src_pagesz, alignment);
 
-	TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
+	TEST_ASSERT_EQ(gpa, align_up(gpa, backing_src_pagesz));
 
 	/* Add enough memory to align up if necessary */
 	if (alignment > 1)
···
 		 * needing to track if the fd is owned by the framework
 		 * or by the caller.
 		 */
-		guest_memfd = dup(guest_memfd);
-		TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd));
+		guest_memfd = kvm_dup(guest_memfd);
 	}
 
 	region->region.guest_memfd = guest_memfd;
···
 	region->unused_phy_pages = sparsebit_alloc();
 	if (vm_arch_has_protected_memory(vm))
 		region->protected_phy_pages = sparsebit_alloc();
-	sparsebit_set_num(region->unused_phy_pages,
-		guest_paddr >> vm->page_shift, npages);
+	sparsebit_set_num(region->unused_phy_pages, gpa >> vm->page_shift, npages);
 	region->region.slot = slot;
 	region->region.flags = flags;
-	region->region.guest_phys_addr = guest_paddr;
+	region->region.guest_phys_addr = gpa;
 	region->region.memory_size = npages * vm->page_size;
 	region->region.userspace_addr = (uintptr_t) region->host_mem;
 	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
 	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
 		"  rc: %i errno: %i\n"
 		"  slot: %u flags: 0x%x\n"
-		"  guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d",
-		ret, errno, slot, flags,
-		guest_paddr, (uint64_t) region->region.memory_size,
+		"  guest_phys_addr: 0x%lx size: 0x%llx guest_memfd: %d",
+		ret, errno, slot, flags, gpa, region->region.memory_size,
 		region->region.guest_memfd);
 
 	/* Add to quick lookup data structures */
···
 
 void vm_userspace_mem_region_add(struct kvm_vm *vm,
 				 enum vm_mem_backing_src_type src_type,
-				 uint64_t guest_paddr, uint32_t slot,
-				 uint64_t npages, uint32_t flags)
+				 uint64_t gpa, uint32_t slot, uint64_t npages,
+				 uint32_t flags)
 {
-	vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0);
+	vm_mem_add(vm, src_type, gpa, slot, npages, flags, -1, 0);
 }
 
 /*
···
 {
 }
 
+static void report_unexpected_signal(int signum)
+{
+#define KVM_CASE_SIGNUM(sig)	\
+	case sig: TEST_FAIL("Unexpected " #sig " (%d)\n", signum)
+
+	switch (signum) {
+	KVM_CASE_SIGNUM(SIGBUS);
+	KVM_CASE_SIGNUM(SIGSEGV);
+	KVM_CASE_SIGNUM(SIGILL);
+	KVM_CASE_SIGNUM(SIGFPE);
+	default:
+		TEST_FAIL("Unexpected signal %d\n", signum);
+	}
+}
+
 void __attribute((constructor)) kvm_selftest_init(void)
 {
+	struct sigaction sig_sa = {
+		.sa_handler = report_unexpected_signal,
+	};
+
 	/* Tell stdout not to buffer its content. */
 	setbuf(stdout, NULL);
+
+	sigaction(SIGBUS, &sig_sa, NULL);
+	sigaction(SIGSEGV, &sig_sa, NULL);
+	sigaction(SIGILL, &sig_sa, NULL);
+	sigaction(SIGFPE, &sig_sa, NULL);
 
 	guest_random_seed = last_guest_seed = random();
 	pr_info("Random seed: 0x%x\n", guest_random_seed);
+3 -6
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
···
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	pthread_t threads[KVM_MAX_VCPUS];
 	struct kvm_vm *vm;
-	int memfd, i, r;
+	int memfd, i;
 
 	const struct vm_shape shape = {
 		.mode = VM_MODE_DEFAULT,
···
 	 * should prevent the VM from being fully destroyed until the last
 	 * reference to the guest_memfd is also put.
 	 */
-	r = fallocate(memfd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, memfd_size);
-	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
-
-	r = fallocate(memfd, FALLOC_FL_KEEP_SIZE, 0, memfd_size);
-	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
+	kvm_fallocate(memfd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, memfd_size);
+	kvm_fallocate(memfd, FALLOC_FL_KEEP_SIZE, 0, memfd_size);
 
 	close(memfd);
 }
+2 -3
tools/testing/selftests/kvm/x86/xapic_ipi_test.c
···
 	int nodes = 0;
 	time_t start_time, last_update, now;
 	time_t interval_secs = 1;
-	int i, r;
+	int i;
 	int from, to;
 	unsigned long bit;
 	uint64_t hlt_count;
···
 		delay_usecs);
 
 	/* Get set of first 64 numa nodes available */
-	r = get_mempolicy(NULL, &nodemask, sizeof(nodemask) * 8,
+	kvm_get_mempolicy(NULL, &nodemask, sizeof(nodemask) * 8,
 			  0, MPOL_F_MEMS_ALLOWED);
-	TEST_ASSERT(r == 0, "get_mempolicy failed errno=%d", errno);
 
 	fprintf(stderr, "Numa nodes found amongst first %lu possible nodes "
 		"(each 1-bit indicates node is present): %#lx\n",
+279 -94
virt/kvm/guest_memfd.c
···
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/anon_inodes.h>
 #include <linux/backing-dev.h>
 #include <linux/falloc.h>
+#include <linux/fs.h>
 #include <linux/kvm_host.h>
+#include <linux/mempolicy.h>
+#include <linux/pseudo_fs.h>
 #include <linux/pagemap.h>
-#include <linux/anon_inodes.h>
 
 #include "kvm_mm.h"
 
-struct kvm_gmem {
+static struct vfsmount *kvm_gmem_mnt;
+
+/*
+ * A guest_memfd instance can be associated with multiple VMs, each with its
+ * own "view" of the underlying physical memory.
+ *
+ * The gmem's inode is effectively the raw underlying physical storage, and is
+ * used to track properties of the physical memory, while each gmem file is
+ * effectively a single VM's view of that storage, and is used to track assets
+ * specific to its associated VM, e.g. memslots=>gmem bindings.
+ */
+struct gmem_file {
 	struct kvm *kvm;
 	struct xarray bindings;
 	struct list_head entry;
 };
+
+struct gmem_inode {
+	struct shared_policy policy;
+	struct inode vfs_inode;
+
+	u64 flags;
+};
+
+static __always_inline struct gmem_inode *GMEM_I(struct inode *inode)
+{
+	return container_of(inode, struct gmem_inode, vfs_inode);
+}
+
+#define kvm_gmem_for_each_file(f, mapping)	\
+	list_for_each_entry(f, &(mapping)->i_private_list, entry)
 
 /**
  * folio_file_pfn - like folio_file_page, but return a pfn.
···
 static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
 {
 	return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
+}
+
+static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	return gfn - slot->base_gfn + slot->gmem.pgoff;
 }
 
 static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
···
 	 * The order will be passed when creating the guest_memfd, and
 	 * checked when creating memslots.
 	 */
-	WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio)));
-	index = gfn - slot->base_gfn + slot->gmem.pgoff;
-	index = ALIGN_DOWN(index, 1 << folio_order(folio));
+	WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, folio_nr_pages(folio)));
+	index = kvm_gmem_get_index(slot, gfn);
+	index = ALIGN_DOWN(index, folio_nr_pages(folio));
 	r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
 	if (!r)
 		kvm_gmem_mark_prepared(folio);
···
 static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
 {
 	/* TODO: Support huge pages. */
-	return filemap_grab_folio(inode->i_mapping, index);
+	struct mempolicy *policy;
+	struct folio *folio;
+
+	/*
+	 * Fast-path: See if folio is already present in mapping to avoid
+	 * policy_lookup.
+	 */
+	folio = __filemap_get_folio(inode->i_mapping, index,
+				    FGP_LOCK | FGP_ACCESSED, 0);
+	if (!IS_ERR(folio))
+		return folio;
+
+	policy = mpol_shared_policy_lookup(&GMEM_I(inode)->policy, index);
+	folio = __filemap_get_folio_mpol(inode->i_mapping, index,
+					 FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+					 mapping_gfp_mask(inode->i_mapping), policy);
+	mpol_cond_put(policy);
+
+	return folio;
 }
 
 static enum kvm_gfn_range_filter kvm_gmem_get_invalidate_filter(struct inode *inode)
 {
-	if ((u64)inode->i_private & GUEST_MEMFD_FLAG_INIT_SHARED)
+	if (GMEM_I(inode)->flags & GUEST_MEMFD_FLAG_INIT_SHARED)
 		return KVM_FILTER_SHARED;
 
 	return KVM_FILTER_PRIVATE;
 }
 
-static void __kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
+static void __kvm_gmem_invalidate_begin(struct gmem_file *f, pgoff_t start,
 					pgoff_t end,
 					enum kvm_gfn_range_filter attr_filter)
 {
 	bool flush = false, found_memslot = false;
 	struct kvm_memory_slot *slot;
-	struct kvm *kvm = gmem->kvm;
+	struct kvm *kvm = f->kvm;
 	unsigned long index;
 
-	xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
+	xa_for_each_range(&f->bindings, index, slot, start, end - 1) {
 		pgoff_t pgoff = slot->gmem.pgoff;
 
 		struct kvm_gfn_range gfn_range = {
···
 static void kvm_gmem_invalidate_begin(struct inode *inode, pgoff_t start,
 				      pgoff_t end)
 {
-	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
 	enum kvm_gfn_range_filter attr_filter;
-	struct kvm_gmem *gmem;
+	struct gmem_file *f;
 
 	attr_filter = kvm_gmem_get_invalidate_filter(inode);
 
-	list_for_each_entry(gmem, gmem_list, entry)
-		__kvm_gmem_invalidate_begin(gmem, start, end, attr_filter);
+	kvm_gmem_for_each_file(f, inode->i_mapping)
+		__kvm_gmem_invalidate_begin(f, start, end, attr_filter);
 }
 
-static void __kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
+static void __kvm_gmem_invalidate_end(struct gmem_file *f, pgoff_t start,
 				      pgoff_t end)
 {
-	struct kvm *kvm = gmem->kvm;
+	struct kvm *kvm = f->kvm;
 
-	if (xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
+	if (xa_find(&f->bindings, &start, end - 1, XA_PRESENT)) {
 		KVM_MMU_LOCK(kvm);
 		kvm_mmu_invalidate_end(kvm);
 		KVM_MMU_UNLOCK(kvm);
···
 static void kvm_gmem_invalidate_end(struct inode *inode, pgoff_t start,
 				    pgoff_t end)
 {
-	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
-	struct kvm_gmem *gmem;
+	struct gmem_file *f;
 
-	list_for_each_entry(gmem, gmem_list, entry)
-		__kvm_gmem_invalidate_end(gmem, start, end);
+	kvm_gmem_for_each_file(f, inode->i_mapping)
+		__kvm_gmem_invalidate_end(f, start, end);
 }
 
 static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
···
 
 static int kvm_gmem_release(struct inode *inode, struct file *file)
 {
-	struct kvm_gmem *gmem = file->private_data;
+	struct gmem_file *f = file->private_data;
 	struct kvm_memory_slot *slot;
-	struct kvm *kvm = gmem->kvm;
+	struct kvm *kvm = f->kvm;
 	unsigned long index;
 
 	/*
···
 
 	filemap_invalidate_lock(inode->i_mapping);
 
-	xa_for_each(&gmem->bindings, index, slot)
+	xa_for_each(&f->bindings, index, slot)
 		WRITE_ONCE(slot->gmem.file, NULL);
 
 	/*
···
 	 * Zap all SPTEs pointed at by this file. Do not free the backing
 	 * memory, as its lifetime is associated with the inode, not the file.
 	 */
-	__kvm_gmem_invalidate_begin(gmem, 0, -1ul,
+	__kvm_gmem_invalidate_begin(f, 0, -1ul,
 				    kvm_gmem_get_invalidate_filter(inode));
-	__kvm_gmem_invalidate_end(gmem, 0, -1ul);
+	__kvm_gmem_invalidate_end(f, 0, -1ul);
 
-	list_del(&gmem->entry);
+	list_del(&f->entry);
 
 	filemap_invalidate_unlock(inode->i_mapping);
 
 	mutex_unlock(&kvm->slots_lock);
 
-	xa_destroy(&gmem->bindings);
-	kfree(gmem);
+	xa_destroy(&f->bindings);
+	kfree(f);
 
 	kvm_put_kvm(kvm);
 
···
 	return get_file_active(&slot->gmem.file);
 }
 
-static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
-{
-	return gfn - slot->base_gfn + slot->gmem.pgoff;
-}
+DEFINE_CLASS(gmem_get_file, struct file *, if (_T) fput(_T),
+	     kvm_gmem_get_file(slot), struct kvm_memory_slot *slot);
 
 static bool kvm_gmem_supports_mmap(struct inode *inode)
 {
-	const u64 flags = (u64)inode->i_private;
-
-	return flags & GUEST_MEMFD_FLAG_MMAP;
+	return GMEM_I(inode)->flags & GUEST_MEMFD_FLAG_MMAP;
 }
 
 static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
···
 	if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
 		return VM_FAULT_SIGBUS;
 
-	if (!((u64)inode->i_private & GUEST_MEMFD_FLAG_INIT_SHARED))
+	if (!(GMEM_I(inode)->flags & GUEST_MEMFD_FLAG_INIT_SHARED))
 		return VM_FAULT_SIGBUS;
 
 	folio = kvm_gmem_get_folio(inode, vmf->pgoff);
 	if (IS_ERR(folio)) {
-		int err = PTR_ERR(folio);
-
-		if (err == -EAGAIN)
+		if (PTR_ERR(folio) == -EAGAIN)
 			return VM_FAULT_RETRY;
 
-		return vmf_error(err);
+		return vmf_error(PTR_ERR(folio));
 	}
 
 	if (WARN_ON_ONCE(folio_test_large(folio))) {
···
 	return ret;
 }
 
+#ifdef CONFIG_NUMA
+static int kvm_gmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
+{
+	struct inode *inode = file_inode(vma->vm_file);
+
+	return mpol_set_shared_policy(&GMEM_I(inode)->policy, vma, mpol);
+}
+
+static struct mempolicy *kvm_gmem_get_policy(struct vm_area_struct *vma,
+					     unsigned long addr, pgoff_t *pgoff)
+{
+	struct inode *inode = file_inode(vma->vm_file);
+
+	*pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
+
+	/*
+	 * Return the memory policy for this index, or NULL if none is set.
+	 *
+	 * Returning NULL, e.g. instead of the current task's memory policy, is
+	 * important for the .get_policy kernel ABI: it indicates that no
+	 * explicit policy has been set via mbind() for this memory.  The caller
+	 * can then replace NULL with the default memory policy instead of the
+	 * current task's memory policy.
+	 */
+	return mpol_shared_policy_lookup(&GMEM_I(inode)->policy, *pgoff);
+}
+#endif /* CONFIG_NUMA */
+
 static const struct vm_operations_struct kvm_gmem_vm_ops = {
-	.fault = kvm_gmem_fault_user_mapping,
+	.fault		= kvm_gmem_fault_user_mapping,
+#ifdef CONFIG_NUMA
+	.get_policy	= kvm_gmem_get_policy,
+	.set_policy	= kvm_gmem_set_policy,
+#endif
 };
 
 static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
···
 	.release = kvm_gmem_release,
 	.fallocate = kvm_gmem_fallocate,
 };
-
-void kvm_gmem_init(struct module *module)
-{
-	kvm_gmem_fops.owner = module;
-}
 
 static int kvm_gmem_migrate_folio(struct address_space *mapping,
 				  struct folio *dst, struct folio *src,
···
 
 static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
 {
-	const char *anon_name = "[kvm-gmem]";
-	struct kvm_gmem *gmem;
+	static const char *name = "[kvm-gmem]";
+	struct gmem_file *f;
 	struct inode *inode;
 	struct file *file;
 	int fd, err;
···
 	if (fd < 0)
 		return fd;
 
-	gmem = kzalloc(sizeof(*gmem), GFP_KERNEL);
-	if (!gmem) {
+	f = kzalloc(sizeof(*f), GFP_KERNEL);
+	if (!f) {
 		err = -ENOMEM;
 		goto err_fd;
 	}
 
-	file = anon_inode_create_getfile(anon_name, &kvm_gmem_fops, gmem,
-					 O_RDWR, NULL);
-	if (IS_ERR(file)) {
-		err = PTR_ERR(file);
+	/* __fput() will take care of fops_put(). */
+	if (!fops_get(&kvm_gmem_fops)) {
+		err = -ENOENT;
 		goto err_gmem;
 	}
 
-	file->f_flags |= O_LARGEFILE;
+	inode = anon_inode_make_secure_inode(kvm_gmem_mnt->mnt_sb, name, NULL);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		goto err_fops;
+	}
 
-	inode = file->f_inode;
-	WARN_ON(file->f_mapping != inode->i_mapping);
-
-	inode->i_private = (void *)(unsigned long)flags;
 	inode->i_op = &kvm_gmem_iops;
 	inode->i_mapping->a_ops = &kvm_gmem_aops;
 	inode->i_mode |= S_IFREG;
···
 	/* Unmovable mappings are supposed to be marked unevictable as well. */
 	WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));
 
+	GMEM_I(inode)->flags = flags;
+
+	file = alloc_file_pseudo(inode, kvm_gmem_mnt, name, O_RDWR, &kvm_gmem_fops);
+	if (IS_ERR(file)) {
+		err = PTR_ERR(file);
+		goto err_inode;
+	}
+
+	file->f_flags |= O_LARGEFILE;
+	file->private_data = f;
+
 	kvm_get_kvm(kvm);
-	gmem->kvm = kvm;
-	xa_init(&gmem->bindings);
-	list_add(&gmem->entry, &inode->i_mapping->i_private_list);
+	f->kvm = kvm;
+	xa_init(&f->bindings);
+	list_add(&f->entry, &inode->i_mapping->i_private_list);
 
 	fd_install(fd, file);
 	return fd;
 
+err_inode:
+	iput(inode);
+err_fops:
+	fops_put(&kvm_gmem_fops);
 err_gmem:
-	kfree(gmem);
+	kfree(f);
 err_fd:
 	put_unused_fd(fd);
 	return err;
···
 {
 	loff_t size = slot->npages << PAGE_SHIFT;
 	unsigned long start, end;
-	struct kvm_gmem *gmem;
+	struct gmem_file *f;
 	struct inode *inode;
 	struct file *file;
 	int r = -EINVAL;
···
 	if (file->f_op != &kvm_gmem_fops)
 		goto err;
 
-	gmem = file->private_data;
-	if (gmem->kvm != kvm)
+	f = file->private_data;
+	if (f->kvm != kvm)
 		goto err;
 
 	inode = file_inode(file);
···
 	start = offset >> PAGE_SHIFT;
 	end = start + slot->npages;
 
-	if (!xa_empty(&gmem->bindings) &&
-	    xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
+	if (!xa_empty(&f->bindings) &&
+	    xa_find(&f->bindings, &start, end - 1, XA_PRESENT)) {
 		filemap_invalidate_unlock(inode->i_mapping);
 		goto err;
 	}
···
 	if (kvm_gmem_supports_mmap(inode))
 		slot->flags |= KVM_MEMSLOT_GMEM_ONLY;
 
-	xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL);
+	xa_store_range(&f->bindings, start, end - 1, slot, GFP_KERNEL);
 	filemap_invalidate_unlock(inode->i_mapping);
 
 	/*
···
 	return r;
 }
 
-static void __kvm_gmem_unbind(struct kvm_memory_slot *slot, struct kvm_gmem *gmem)
+static void __kvm_gmem_unbind(struct kvm_memory_slot *slot, struct gmem_file *f)
 {
 	unsigned long start = slot->gmem.pgoff;
 	unsigned long end = start + slot->npages;
 
-	xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
+	xa_store_range(&f->bindings, start, end - 1, NULL, GFP_KERNEL);
 
 	/*
 	 * synchronize_srcu(&kvm->srcu) ensured that kvm_gmem_get_pfn()
···
 
 void kvm_gmem_unbind(struct kvm_memory_slot *slot)
 {
-	struct file *file;
-
 	/*
 	 * Nothing to do if the underlying file was _already_ closed, as
 	 * kvm_gmem_release() invalidates and nullifies all bindings.
···
 	if (!slot->gmem.file)
 		return;
 
-	file = kvm_gmem_get_file(slot);
+	CLASS(gmem_get_file, file)(slot);
 
 	/*
 	 * However, if the file is _being_ closed, then the bindings need to be
···
 	filemap_invalidate_lock(file->f_mapping);
 	__kvm_gmem_unbind(slot, file->private_data);
 	filemap_invalidate_unlock(file->f_mapping);
-
-	fput(file);
 }
 
 /* Returns a locked folio on success.  */
···
 			pgoff_t index, kvm_pfn_t *pfn,
 			bool *is_prepared, int *max_order)
 {
-	struct file *gmem_file = READ_ONCE(slot->gmem.file);
-	struct kvm_gmem *gmem = file->private_data;
+	struct file *slot_file = READ_ONCE(slot->gmem.file);
+	struct gmem_file *f = file->private_data;
 	struct folio *folio;
 
-	if (file != gmem_file) {
-		WARN_ON_ONCE(gmem_file);
+	if (file != slot_file) {
+		WARN_ON_ONCE(slot_file);
 		return ERR_PTR(-EFAULT);
 	}
 
-	gmem = file->private_data;
-	if (xa_load(&gmem->bindings, index) != slot) {
-		WARN_ON_ONCE(xa_load(&gmem->bindings, index));
+	if (xa_load(&f->bindings, index) != slot) {
+		WARN_ON_ONCE(xa_load(&f->bindings, index));
 		return ERR_PTR(-EIO);
 	}
 
···
 		int *max_order)
 {
 	pgoff_t index = kvm_gmem_get_index(slot, gfn);
-	struct file *file = kvm_gmem_get_file(slot);
 	struct folio *folio;
 	bool is_prepared = false;
 	int r = 0;
 
+	CLASS(gmem_get_file, file)(slot);
 	if (!file)
 		return -EFAULT;
 
 	folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
-	if (IS_ERR(folio)) {
-		r = PTR_ERR(folio);
-		goto out;
-	}
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);
 
 	if (!is_prepared)
 		r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
···
 	else
 		folio_put(folio);
 
-out:
-	fput(file);
 	return r;
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_get_pfn);
···
 long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
 		       kvm_gmem_populate_cb post_populate, void *opaque)
 {
-	struct file *file;
 	struct kvm_memory_slot *slot;
 	void __user *p;
 
···
 	if (!kvm_slot_has_gmem(slot))
 		return -EINVAL;
 
-	file = kvm_gmem_get_file(slot);
+	CLASS(gmem_get_file, file)(slot);
 	if (!file)
 		return -EFAULT;
 
···
 
 	filemap_invalidate_unlock(file->f_mapping);
 
-	fput(file);
 	return ret && !i ? ret : i;
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_populate);
 #endif
+
+static struct kmem_cache *kvm_gmem_inode_cachep;
+
+static void kvm_gmem_init_inode_once(void *__gi)
+{
+	struct gmem_inode *gi = __gi;
+
+	/*
+	 * Note!  Don't initialize the inode with anything specific to the
+	 * guest_memfd instance, or that might be specific to how the inode is
+	 * used (from the VFS-layer's perspective).  This hook is called only
+	 * during the initial slab allocation, i.e. only fields/state that are
+	 * idempotent across _all_ use of the inode _object_ can be initialized
+	 * at this time!
+	 */
+	inode_init_once(&gi->vfs_inode);
+}
+
+static struct inode *kvm_gmem_alloc_inode(struct super_block *sb)
+{
+	struct gmem_inode *gi;
+
+	gi = alloc_inode_sb(sb, kvm_gmem_inode_cachep, GFP_KERNEL);
+	if (!gi)
+		return NULL;
+
+	mpol_shared_policy_init(&gi->policy, NULL);
+
+	gi->flags = 0;
+	return &gi->vfs_inode;
+}
+
+static void kvm_gmem_destroy_inode(struct inode *inode)
+{
+	mpol_free_shared_policy(&GMEM_I(inode)->policy);
+}
+
+static void kvm_gmem_free_inode(struct inode *inode)
+{
+	kmem_cache_free(kvm_gmem_inode_cachep, GMEM_I(inode));
+}
+
+static const struct super_operations kvm_gmem_super_operations = {
+	.statfs		= simple_statfs,
+	.alloc_inode	= kvm_gmem_alloc_inode,
+	.destroy_inode	= kvm_gmem_destroy_inode,
+	.free_inode	= kvm_gmem_free_inode,
+};
+
+static int kvm_gmem_init_fs_context(struct fs_context *fc)
+{
+	struct pseudo_fs_context *ctx;
+
+	if (!init_pseudo(fc, GUEST_MEMFD_MAGIC))
+		return -ENOMEM;
+
+	fc->s_iflags |= SB_I_NOEXEC;
+	fc->s_iflags |= SB_I_NODEV;
+	ctx = fc->fs_private;
+	ctx->ops = &kvm_gmem_super_operations;
+
+	return 0;
+}
+
+static struct file_system_type kvm_gmem_fs = {
+	.name		= "guest_memfd",
+	.init_fs_context = kvm_gmem_init_fs_context,
+	.kill_sb	= kill_anon_super,
+};
+
+static int kvm_gmem_init_mount(void)
+{
+	kvm_gmem_mnt = kern_mount(&kvm_gmem_fs);
+
+	if (IS_ERR(kvm_gmem_mnt))
+		return PTR_ERR(kvm_gmem_mnt);
+
+	kvm_gmem_mnt->mnt_flags |= MNT_NOEXEC;
+	return 0;
+}
+
+int kvm_gmem_init(struct module *module)
+{
+	struct kmem_cache_args args = {
+		.align = 0,
+		.ctor = kvm_gmem_init_inode_once,
+	};
+	int ret;
+
+	kvm_gmem_fops.owner = module;
+	kvm_gmem_inode_cachep = kmem_cache_create("kvm_gmem_inode_cache",
+						  sizeof(struct gmem_inode),
+						  &args, SLAB_ACCOUNT);
+	if (!kvm_gmem_inode_cachep)
+		return -ENOMEM;
+
+	ret = kvm_gmem_init_mount();
+	if (ret) {
+		kmem_cache_destroy(kvm_gmem_inode_cachep);
+		return ret;
+	}
+	return 0;
+}
+
+void kvm_gmem_exit(void)
+{
+	kern_unmount(kvm_gmem_mnt);
+	kvm_gmem_mnt = NULL;
+	rcu_barrier();
+	kmem_cache_destroy(kvm_gmem_inode_cachep);
+}
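
Putting the pieces together from userspace: a hedged sketch of binding half of
a mappable, shared guest_memfd to NUMA node 0 via the new .set_policy hook
(assumes an existing VM fd, the GUEST_MEMFD_FLAG_* flags from <linux/kvm.h>,
and libnuma's <numaif.h> for mbind(); error handling elided):

#include <linux/kvm.h>
#include <numaif.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void bind_gmem_to_node0(int vm_fd, size_t size)
{
	struct kvm_create_guest_memfd args = {
		.size  = size,
		.flags = GUEST_MEMFD_FLAG_MMAP | GUEST_MEMFD_FLAG_INIT_SHARED,
	};
	unsigned long node0 = 1;	/* bit 0 => node 0 */
	void *mem;
	int fd;

	fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &args);
	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* The policy lands in the gmem inode's shared_policy, so subsequent
	 * faults on this range allocate from node 0. */
	mbind(mem, size / 2, MPOL_BIND, &node0, 8, 0);
}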
+6 -1
virt/kvm/kvm_main.c
···
 	if (WARN_ON_ONCE(r))
 		goto err_vfio;
 
-	kvm_gmem_init(module);
+	r = kvm_gmem_init(module);
+	if (r)
+		goto err_gmem;
 
 	r = kvm_init_virtualization();
 	if (r)
···
 err_register:
 	kvm_uninit_virtualization();
 err_virt:
+	kvm_gmem_exit();
+err_gmem:
 	kvm_vfio_ops_exit();
 err_vfio:
 	kvm_async_pf_deinit();
···
 	for_each_possible_cpu(cpu)
 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
 	kmem_cache_destroy(kvm_vcpu_cache);
+	kvm_gmem_exit();
 	kvm_vfio_ops_exit();
 	kvm_async_pf_deinit();
 	kvm_irqfd_exit();
+5 -4
virt/kvm/kvm_mm.h
···
 #endif /* HAVE_KVM_PFNCACHE */
 
 #ifdef CONFIG_KVM_GUEST_MEMFD
-void kvm_gmem_init(struct module *module);
+int kvm_gmem_init(struct module *module);
+void kvm_gmem_exit(void);
 int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
 int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
 		  unsigned int fd, loff_t offset);
 void kvm_gmem_unbind(struct kvm_memory_slot *slot);
 #else
-static inline void kvm_gmem_init(struct module *module)
+static inline int kvm_gmem_init(struct module *module)
 {
-
+	return 0;
 }
-
+static inline void kvm_gmem_exit(void) {};
 static inline int kvm_gmem_bind(struct kvm *kvm,
 				struct kvm_memory_slot *slot,
 				unsigned int fd, loff_t offset)