Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

virtio_pci: support VIRTIO_F_RING_RESET

This patch implements virtio pci support for QUEUE RESET.

Performing reset on a queue is divided into these steps:

1. notify the device to reset the queue
2. recycle the buffers already submitted
3. reset the vring (may re-alloc)
4. mmap vring to device, and enable the queue

This patch implements virtio_reset_vq() and virtio_enable_resetq() for the
PCI transport.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20220801063902.129329-33-xuanzhuo@linux.alibaba.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

authored by

Xuan Zhuo and committed by
Michael S. Tsirkin
04ca0b0b 56bdc061

+97 -3
+9 -3
drivers/virtio/virtio_pci_common.c
··· 214 214 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; 215 215 unsigned long flags; 216 216 217 - spin_lock_irqsave(&vp_dev->lock, flags); 218 - list_del(&info->node); 219 - spin_unlock_irqrestore(&vp_dev->lock, flags); 217 + /* 218 + * If it fails during re-enable reset vq. This way we won't rejoin 219 + * info->node to the queue. Prevent unexpected irqs. 220 + */ 221 + if (!vq->reset) { 222 + spin_lock_irqsave(&vp_dev->lock, flags); 223 + list_del(&info->node); 224 + spin_unlock_irqrestore(&vp_dev->lock, flags); 225 + } 220 226 221 227 vp_dev->del_vq(info); 222 228 kfree(info);
+88
drivers/virtio/virtio_pci_modern.c
··· 34 34 if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) && 35 35 pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV)) 36 36 __virtio_set_bit(vdev, VIRTIO_F_SR_IOV); 37 + 38 + if (features & BIT_ULL(VIRTIO_F_RING_RESET)) 39 + __virtio_set_bit(vdev, VIRTIO_F_RING_RESET); 37 40 } 38 41 39 42 /* virtio config->finalize_features() implementation */ ··· 198 195 if (msix_vec == VIRTIO_MSI_NO_VECTOR) 199 196 return -EBUSY; 200 197 } 198 + 199 + return 0; 200 + } 201 + 202 + static int vp_modern_disable_vq_and_reset(struct virtqueue *vq) 203 + { 204 + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 205 + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; 206 + struct virtio_pci_vq_info *info; 207 + unsigned long flags; 208 + 209 + if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET)) 210 + return -ENOENT; 211 + 212 + vp_modern_set_queue_reset(mdev, vq->index); 213 + 214 + info = vp_dev->vqs[vq->index]; 215 + 216 + /* delete vq from irq handler */ 217 + spin_lock_irqsave(&vp_dev->lock, flags); 218 + list_del(&info->node); 219 + spin_unlock_irqrestore(&vp_dev->lock, flags); 220 + 221 + INIT_LIST_HEAD(&info->node); 222 + 223 + #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION 224 + __virtqueue_break(vq); 225 + #endif 226 + 227 + /* For the case where vq has an exclusive irq, call synchronize_irq() to 228 + * wait for completion. 229 + * 230 + * note: We can't use disable_irq() since it conflicts with the affinity 231 + * managed IRQ that is used by some drivers. 
232 + */ 233 + if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR) 234 + synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector)); 235 + 236 + vq->reset = true; 237 + 238 + return 0; 239 + } 240 + 241 + static int vp_modern_enable_vq_after_reset(struct virtqueue *vq) 242 + { 243 + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 244 + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; 245 + struct virtio_pci_vq_info *info; 246 + unsigned long flags, index; 247 + int err; 248 + 249 + if (!vq->reset) 250 + return -EBUSY; 251 + 252 + index = vq->index; 253 + info = vp_dev->vqs[index]; 254 + 255 + if (vp_modern_get_queue_reset(mdev, index)) 256 + return -EBUSY; 257 + 258 + if (vp_modern_get_queue_enable(mdev, index)) 259 + return -EBUSY; 260 + 261 + err = vp_active_vq(vq, info->msix_vector); 262 + if (err) 263 + return err; 264 + 265 + if (vq->callback) { 266 + spin_lock_irqsave(&vp_dev->lock, flags); 267 + list_add(&info->node, &vp_dev->virtqueues); 268 + spin_unlock_irqrestore(&vp_dev->lock, flags); 269 + } else { 270 + INIT_LIST_HEAD(&info->node); 271 + } 272 + 273 + #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION 274 + __virtqueue_unbreak(vq); 275 + #endif 276 + 277 + vp_modern_set_queue_enable(&vp_dev->mdev, index, true); 278 + vq->reset = false; 201 279 202 280 return 0; 203 281 } ··· 497 413 .set_vq_affinity = vp_set_vq_affinity, 498 414 .get_vq_affinity = vp_get_vq_affinity, 499 415 .get_shm_region = vp_get_shm_region, 416 + .disable_vq_and_reset = vp_modern_disable_vq_and_reset, 417 + .enable_vq_after_reset = vp_modern_enable_vq_after_reset, 500 418 }; 501 419 502 420 static const struct virtio_config_ops virtio_pci_config_ops = { ··· 517 431 .set_vq_affinity = vp_set_vq_affinity, 518 432 .get_vq_affinity = vp_get_vq_affinity, 519 433 .get_shm_region = vp_get_shm_region, 434 + .disable_vq_and_reset = vp_modern_disable_vq_and_reset, 435 + .enable_vq_after_reset = vp_modern_enable_vq_after_reset, 520 436 }; 521 437 522 438 
/* the PCI probing function */