Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kvm/vfio: Prepare for accepting vfio device fd

This renames kvm_vfio_group related helpers to prepare for accepting
vfio device fd. No functional change is intended.

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Terrence Xu <terrence.xu@intel.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com>
Tested-by: Yanting Jiang <yanting.jiang@intel.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Tested-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Link: https://lore.kernel.org/r/20230718135551.6592-5-yi.l.liu@intel.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>

Authored by Yi Liu and committed by Alex Williamson
2f99073a 34aeeecd

+58 -57
virt/kvm/vfio.c
··· 21 21 #include <asm/kvm_ppc.h> 22 22 #endif 23 23 24 - struct kvm_vfio_group { 24 + struct kvm_vfio_file { 25 25 struct list_head node; 26 26 struct file *file; 27 27 #ifdef CONFIG_SPAPR_TCE_IOMMU ··· 30 30 }; 31 31 32 32 struct kvm_vfio { 33 - struct list_head group_list; 33 + struct list_head file_list; 34 34 struct mutex lock; 35 35 bool noncoherent; 36 36 }; ··· 98 98 } 99 99 100 100 static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm, 101 - struct kvm_vfio_group *kvg) 101 + struct kvm_vfio_file *kvf) 102 102 { 103 - if (WARN_ON_ONCE(!kvg->iommu_group)) 103 + if (WARN_ON_ONCE(!kvf->iommu_group)) 104 104 return; 105 105 106 - kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group); 107 - iommu_group_put(kvg->iommu_group); 108 - kvg->iommu_group = NULL; 106 + kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group); 107 + iommu_group_put(kvf->iommu_group); 108 + kvf->iommu_group = NULL; 109 109 } 110 110 #endif 111 111 112 112 /* 113 - * Groups can use the same or different IOMMU domains. If the same then 114 - * adding a new group may change the coherency of groups we've previously 115 - * been told about. We don't want to care about any of that so we retest 116 - * each group and bail as soon as we find one that's noncoherent. This 117 - * means we only ever [un]register_noncoherent_dma once for the whole device. 113 + * Groups/devices can use the same or different IOMMU domains. If the same 114 + * then adding a new group/device may change the coherency of groups/devices 115 + * we've previously been told about. We don't want to care about any of 116 + * that so we retest each group/device and bail as soon as we find one that's 117 + * noncoherent. This means we only ever [un]register_noncoherent_dma once 118 + * for the whole device. 
118 119 */ 119 120 static void kvm_vfio_update_coherency(struct kvm_device *dev) 120 121 { 121 122 struct kvm_vfio *kv = dev->private; 122 123 bool noncoherent = false; 123 - struct kvm_vfio_group *kvg; 124 + struct kvm_vfio_file *kvf; 124 125 125 126 mutex_lock(&kv->lock); 126 127 127 - list_for_each_entry(kvg, &kv->group_list, node) { 128 - if (!kvm_vfio_file_enforced_coherent(kvg->file)) { 128 + list_for_each_entry(kvf, &kv->file_list, node) { 129 + if (!kvm_vfio_file_enforced_coherent(kvf->file)) { 129 130 noncoherent = true; 130 131 break; 131 132 } ··· 144 143 mutex_unlock(&kv->lock); 145 144 } 146 145 147 - static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd) 146 + static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd) 148 147 { 149 148 struct kvm_vfio *kv = dev->private; 150 - struct kvm_vfio_group *kvg; 149 + struct kvm_vfio_file *kvf; 151 150 struct file *filp; 152 151 int ret; 153 152 ··· 163 162 164 163 mutex_lock(&kv->lock); 165 164 166 - list_for_each_entry(kvg, &kv->group_list, node) { 167 - if (kvg->file == filp) { 165 + list_for_each_entry(kvf, &kv->file_list, node) { 166 + if (kvf->file == filp) { 168 167 ret = -EEXIST; 169 168 goto err_unlock; 170 169 } 171 170 } 172 171 173 - kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT); 174 - if (!kvg) { 172 + kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT); 173 + if (!kvf) { 175 174 ret = -ENOMEM; 176 175 goto err_unlock; 177 176 } 178 177 179 - kvg->file = filp; 180 - list_add_tail(&kvg->node, &kv->group_list); 178 + kvf->file = filp; 179 + list_add_tail(&kvf->node, &kv->file_list); 181 180 182 181 kvm_arch_start_assignment(dev->kvm); 183 182 184 183 mutex_unlock(&kv->lock); 185 184 186 - kvm_vfio_file_set_kvm(kvg->file, dev->kvm); 185 + kvm_vfio_file_set_kvm(kvf->file, dev->kvm); 187 186 kvm_vfio_update_coherency(dev); 188 187 189 188 return 0; ··· 194 193 return ret; 195 194 } 196 195 197 - static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd) 196 + 
static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd) 198 197 { 199 198 struct kvm_vfio *kv = dev->private; 200 - struct kvm_vfio_group *kvg; 199 + struct kvm_vfio_file *kvf; 201 200 struct fd f; 202 201 int ret; 203 202 ··· 209 208 210 209 mutex_lock(&kv->lock); 211 210 212 - list_for_each_entry(kvg, &kv->group_list, node) { 213 - if (kvg->file != f.file) 211 + list_for_each_entry(kvf, &kv->file_list, node) { 212 + if (kvf->file != f.file) 214 213 continue; 215 214 216 - list_del(&kvg->node); 215 + list_del(&kvf->node); 217 216 kvm_arch_end_assignment(dev->kvm); 218 217 #ifdef CONFIG_SPAPR_TCE_IOMMU 219 - kvm_spapr_tce_release_vfio_group(dev->kvm, kvg); 218 + kvm_spapr_tce_release_vfio_group(dev->kvm, kvf); 220 219 #endif 221 - kvm_vfio_file_set_kvm(kvg->file, NULL); 222 - fput(kvg->file); 223 - kfree(kvg); 220 + kvm_vfio_file_set_kvm(kvf->file, NULL); 221 + fput(kvf->file); 222 + kfree(kvf); 224 223 ret = 0; 225 224 break; 226 225 } ··· 235 234 } 236 235 237 236 #ifdef CONFIG_SPAPR_TCE_IOMMU 238 - static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev, 239 - void __user *arg) 237 + static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev, 238 + void __user *arg) 240 239 { 241 240 struct kvm_vfio_spapr_tce param; 242 241 struct kvm_vfio *kv = dev->private; 243 - struct kvm_vfio_group *kvg; 242 + struct kvm_vfio_file *kvf; 244 243 struct fd f; 245 244 int ret; 246 245 ··· 255 254 256 255 mutex_lock(&kv->lock); 257 256 258 - list_for_each_entry(kvg, &kv->group_list, node) { 259 - if (kvg->file != f.file) 257 + list_for_each_entry(kvf, &kv->file_list, node) { 258 + if (kvf->file != f.file) 260 259 continue; 261 260 262 - if (!kvg->iommu_group) { 263 - kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file); 264 - if (WARN_ON_ONCE(!kvg->iommu_group)) { 261 + if (!kvf->iommu_group) { 262 + kvf->iommu_group = kvm_vfio_file_iommu_group(kvf->file); 263 + if (WARN_ON_ONCE(!kvf->iommu_group)) { 265 264 ret = -EIO; 266 265 goto err_fdput; 267 266 
} 268 267 } 269 268 270 269 ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd, 271 - kvg->iommu_group); 270 + kvf->iommu_group); 272 271 break; 273 272 } 274 273 ··· 279 278 } 280 279 #endif 281 280 282 - static int kvm_vfio_set_group(struct kvm_device *dev, long attr, 283 - void __user *arg) 281 + static int kvm_vfio_set_file(struct kvm_device *dev, long attr, 282 + void __user *arg) 284 283 { 285 284 int32_t __user *argp = arg; 286 285 int32_t fd; ··· 289 288 case KVM_DEV_VFIO_GROUP_ADD: 290 289 if (get_user(fd, argp)) 291 290 return -EFAULT; 292 - return kvm_vfio_group_add(dev, fd); 291 + return kvm_vfio_file_add(dev, fd); 293 292 294 293 case KVM_DEV_VFIO_GROUP_DEL: 295 294 if (get_user(fd, argp)) 296 295 return -EFAULT; 297 - return kvm_vfio_group_del(dev, fd); 296 + return kvm_vfio_file_del(dev, fd); 298 297 299 298 #ifdef CONFIG_SPAPR_TCE_IOMMU 300 299 case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: 301 - return kvm_vfio_group_set_spapr_tce(dev, arg); 300 + return kvm_vfio_file_set_spapr_tce(dev, arg); 302 301 #endif 303 302 } 304 303 ··· 310 309 { 311 310 switch (attr->group) { 312 311 case KVM_DEV_VFIO_GROUP: 313 - return kvm_vfio_set_group(dev, attr->attr, 314 - u64_to_user_ptr(attr->addr)); 312 + return kvm_vfio_set_file(dev, attr->attr, 313 + u64_to_user_ptr(attr->addr)); 315 314 } 316 315 317 316 return -ENXIO; ··· 340 339 static void kvm_vfio_release(struct kvm_device *dev) 341 340 { 342 341 struct kvm_vfio *kv = dev->private; 343 - struct kvm_vfio_group *kvg, *tmp; 342 + struct kvm_vfio_file *kvf, *tmp; 344 343 345 - list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) { 344 + list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) { 346 345 #ifdef CONFIG_SPAPR_TCE_IOMMU 347 - kvm_spapr_tce_release_vfio_group(dev->kvm, kvg); 346 + kvm_spapr_tce_release_vfio_group(dev->kvm, kvf); 348 347 #endif 349 - kvm_vfio_file_set_kvm(kvg->file, NULL); 350 - fput(kvg->file); 351 - list_del(&kvg->node); 352 - kfree(kvg); 348 + 
kvm_vfio_file_set_kvm(kvf->file, NULL); 349 + fput(kvf->file); 350 + list_del(&kvf->node); 351 + kfree(kvf); 353 352 kvm_arch_end_assignment(dev->kvm); 354 353 } 355 354 ··· 383 382 if (!kv) 384 383 return -ENOMEM; 385 384 386 - INIT_LIST_HEAD(&kv->group_list); 385 + INIT_LIST_HEAD(&kv->file_list); 387 386 mutex_init(&kv->lock); 388 387 389 388 dev->private = kv;