Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio fixes from Michael Tsirkin:
"Some fixes and cleanups all over the place"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
vhost-vdpa: set v->config_ctx to NULL if eventfd_ctx_fdget() fails
vhost-vdpa: fix use-after-free of v->config_ctx
vhost: Fix vhost_vq_reset()
vhost_vdpa: fix the missing irq_bypass_unregister_producer() invocation
vdpa_sim: Skip typecasting from void*
virtio: remove export for virtio_config_{enable, disable}
virtio-mmio: Use to_virtio_mmio_device() to simplify code
vdpa: set the virtqueue num during register

+37 -40
+2 -3
drivers/vdpa/ifcvf/ifcvf_main.c
··· 431 431 } 432 432 433 433 adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, 434 - dev, &ifc_vdpa_ops, 435 - IFCVF_MAX_QUEUE_PAIRS * 2, NULL); 434 + dev, &ifc_vdpa_ops, NULL); 436 435 if (adapter == NULL) { 437 436 IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); 438 437 return -ENOMEM; ··· 455 456 for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) 456 457 vf->vring[i].irq = -EINVAL; 457 458 458 - ret = vdpa_register_device(&adapter->vdpa); 459 + ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2); 459 460 if (ret) { 460 461 IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus"); 461 462 goto err;
+2 -2
drivers/vdpa/mlx5/net/mlx5_vnet.c
··· 1982 1982 max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS); 1983 1983 1984 1984 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, 1985 - 2 * mlx5_vdpa_max_qps(max_vqs), NULL); 1985 + NULL); 1986 1986 if (IS_ERR(ndev)) 1987 1987 return PTR_ERR(ndev); 1988 1988 ··· 2009 2009 if (err) 2010 2010 goto err_res; 2011 2011 2012 - err = vdpa_register_device(&mvdev->vdev); 2012 + err = vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs)); 2013 2013 if (err) 2014 2014 goto err_reg; 2015 2015
+10 -8
drivers/vdpa/vdpa.c
··· 69 69 * initialized but before registered. 70 70 * @parent: the parent device 71 71 * @config: the bus operations that is supported by this device 72 - * @nvqs: number of virtqueues supported by this device 73 72 * @size: size of the parent structure that contains private data 74 73 * @name: name of the vdpa device; optional. 75 74 * ··· 80 81 */ 81 82 struct vdpa_device *__vdpa_alloc_device(struct device *parent, 82 83 const struct vdpa_config_ops *config, 83 - int nvqs, size_t size, const char *name) 84 + size_t size, const char *name) 84 85 { 85 86 struct vdpa_device *vdev; 86 87 int err = -EINVAL; ··· 106 107 vdev->index = err; 107 108 vdev->config = config; 108 109 vdev->features_valid = false; 109 - vdev->nvqs = nvqs; 110 110 111 111 if (name) 112 112 err = dev_set_name(&vdev->dev, "%s", name); ··· 134 136 return (strcmp(dev_name(&vdev->dev), data) == 0); 135 137 } 136 138 137 - static int __vdpa_register_device(struct vdpa_device *vdev) 139 + static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs) 138 140 { 139 141 struct device *dev; 142 + 143 + vdev->nvqs = nvqs; 140 144 141 145 lockdep_assert_held(&vdpa_dev_mutex); 142 146 dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match); ··· 155 155 * Caller must invoke this routine in the management device dev_add() 156 156 * callback after setting up valid mgmtdev for this vdpa device. 
157 157 * @vdev: the vdpa device to be registered to vDPA bus 158 + * @nvqs: number of virtqueues supported by this device 158 159 * 159 160 * Returns an error when fail to add device to vDPA bus 160 161 */ 161 - int _vdpa_register_device(struct vdpa_device *vdev) 162 + int _vdpa_register_device(struct vdpa_device *vdev, int nvqs) 162 163 { 163 164 if (!vdev->mdev) 164 165 return -EINVAL; 165 166 166 - return __vdpa_register_device(vdev); 167 + return __vdpa_register_device(vdev, nvqs); 167 168 } 168 169 EXPORT_SYMBOL_GPL(_vdpa_register_device); 169 170 ··· 172 171 * vdpa_register_device - register a vDPA device 173 172 * Callers must have a succeed call of vdpa_alloc_device() before. 174 173 * @vdev: the vdpa device to be registered to vDPA bus 174 + * @nvqs: number of virtqueues supported by this device 175 175 * 176 176 * Returns an error when fail to add to vDPA bus 177 177 */ 178 - int vdpa_register_device(struct vdpa_device *vdev) 178 + int vdpa_register_device(struct vdpa_device *vdev, int nvqs) 179 179 { 180 180 int err; 181 181 182 182 mutex_lock(&vdpa_dev_mutex); 183 - err = __vdpa_register_device(vdev); 183 + err = __vdpa_register_device(vdev, nvqs); 184 184 mutex_unlock(&vdpa_dev_mutex); 185 185 return err; 186 186 }
+1 -1
drivers/vdpa/vdpa_sim/vdpa_sim.c
··· 235 235 ops = &vdpasim_config_ops; 236 236 237 237 vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, 238 - dev_attr->nvqs, dev_attr->name); 238 + dev_attr->name); 239 239 if (!vdpasim) 240 240 goto err_alloc; 241 241
+2 -3
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
··· 110 110 111 111 static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config) 112 112 { 113 - struct virtio_net_config *net_config = 114 - (struct virtio_net_config *)config; 113 + struct virtio_net_config *net_config = config; 115 114 116 115 net_config->mtu = cpu_to_vdpasim16(vdpasim, 1500); 117 116 net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP); ··· 146 147 if (IS_ERR(simdev)) 147 148 return PTR_ERR(simdev); 148 149 149 - ret = _vdpa_register_device(&simdev->vdpa); 150 + ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM); 150 151 if (ret) 151 152 goto reg_err; 152 153
+11 -9
drivers/vhost/vdpa.c
··· 308 308 309 309 static void vhost_vdpa_config_put(struct vhost_vdpa *v) 310 310 { 311 - if (v->config_ctx) 311 + if (v->config_ctx) { 312 312 eventfd_ctx_put(v->config_ctx); 313 + v->config_ctx = NULL; 314 + } 313 315 } 314 316 315 317 static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp) ··· 331 329 if (!IS_ERR_OR_NULL(ctx)) 332 330 eventfd_ctx_put(ctx); 333 331 334 - if (IS_ERR(v->config_ctx)) 335 - return PTR_ERR(v->config_ctx); 332 + if (IS_ERR(v->config_ctx)) { 333 + long ret = PTR_ERR(v->config_ctx); 334 + 335 + v->config_ctx = NULL; 336 + return ret; 337 + } 336 338 337 339 v->vdpa->config->set_config_cb(v->vdpa, &cb); 338 340 ··· 906 900 907 901 static void vhost_vdpa_clean_irq(struct vhost_vdpa *v) 908 902 { 909 - struct vhost_virtqueue *vq; 910 903 int i; 911 904 912 - for (i = 0; i < v->nvqs; i++) { 913 - vq = &v->vqs[i]; 914 - if (vq->call_ctx.producer.irq) 915 - irq_bypass_unregister_producer(&vq->call_ctx.producer); 916 - } 905 + for (i = 0; i < v->nvqs; i++) 906 + vhost_vdpa_unsetup_vq_irq(v, i); 917 907 } 918 908 919 909 static int vhost_vdpa_release(struct inode *inode, struct file *filep)
+1 -1
drivers/vhost/vhost.c
··· 332 332 vq->error_ctx = NULL; 333 333 vq->kick = NULL; 334 334 vq->log_ctx = NULL; 335 - vhost_reset_is_le(vq); 336 335 vhost_disable_cross_endian(vq); 336 + vhost_reset_is_le(vq); 337 337 vq->busyloop_timeout = 0; 338 338 vq->umem = NULL; 339 339 vq->iotlb = NULL;
+2 -4
drivers/virtio/virtio.c
··· 141 141 } 142 142 EXPORT_SYMBOL_GPL(virtio_config_changed); 143 143 144 - void virtio_config_disable(struct virtio_device *dev) 144 + static void virtio_config_disable(struct virtio_device *dev) 145 145 { 146 146 spin_lock_irq(&dev->config_lock); 147 147 dev->config_enabled = false; 148 148 spin_unlock_irq(&dev->config_lock); 149 149 } 150 - EXPORT_SYMBOL_GPL(virtio_config_disable); 151 150 152 - void virtio_config_enable(struct virtio_device *dev) 151 + static void virtio_config_enable(struct virtio_device *dev) 153 152 { 154 153 spin_lock_irq(&dev->config_lock); 155 154 dev->config_enabled = true; ··· 157 158 dev->config_change_pending = false; 158 159 spin_unlock_irq(&dev->config_lock); 159 160 } 160 - EXPORT_SYMBOL_GPL(virtio_config_enable); 161 161 162 162 void virtio_add_status(struct virtio_device *dev, unsigned int status) 163 163 {
+1 -2
drivers/virtio/virtio_mmio.c
··· 548 548 { 549 549 struct virtio_device *vdev = 550 550 container_of(_d, struct virtio_device, dev); 551 - struct virtio_mmio_device *vm_dev = 552 - container_of(vdev, struct virtio_mmio_device, vdev); 551 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 553 552 struct platform_device *pdev = vm_dev->pdev; 554 553 555 554 devm_kfree(&pdev->dev, vm_dev);
+5 -5
include/linux/vdpa.h
··· 250 250 251 251 struct vdpa_device *__vdpa_alloc_device(struct device *parent, 252 252 const struct vdpa_config_ops *config, 253 - int nvqs, size_t size, const char *name); 253 + size_t size, const char *name); 254 254 255 - #define vdpa_alloc_device(dev_struct, member, parent, config, nvqs, name) \ 255 + #define vdpa_alloc_device(dev_struct, member, parent, config, name) \ 256 256 container_of(__vdpa_alloc_device( \ 257 - parent, config, nvqs, \ 257 + parent, config, \ 258 258 sizeof(dev_struct) + \ 259 259 BUILD_BUG_ON_ZERO(offsetof( \ 260 260 dev_struct, member)), name), \ 261 261 dev_struct, member) 262 262 263 - int vdpa_register_device(struct vdpa_device *vdev); 263 + int vdpa_register_device(struct vdpa_device *vdev, int nvqs); 264 264 void vdpa_unregister_device(struct vdpa_device *vdev); 265 265 266 - int _vdpa_register_device(struct vdpa_device *vdev); 266 + int _vdpa_register_device(struct vdpa_device *vdev, int nvqs); 267 267 void _vdpa_unregister_device(struct vdpa_device *vdev); 268 268 269 269 /**
-2
include/linux/virtio.h
··· 132 132 void virtio_break_device(struct virtio_device *dev); 133 133 134 134 void virtio_config_changed(struct virtio_device *dev); 135 - void virtio_config_disable(struct virtio_device *dev); 136 - void virtio_config_enable(struct virtio_device *dev); 137 135 int virtio_finalize_features(struct virtio_device *dev); 138 136 #ifdef CONFIG_PM_SLEEP 139 137 int virtio_device_freeze(struct virtio_device *dev);