Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result

Currently, the code waits in a busy loop on every admin virtqueue issued
command to get a reply. That prevents callers from issuing multiple
commands in parallel.

To overcome this limitation, introduce a virtqueue event callback for
admin virtqueue. For every issued command, use completion mechanism
to wait on a reply. In the event callback, the completion is
triggered for every incoming reply.

Along with that, introduce a spin lock to protect the admin
virtqueue operations.

Signed-off-by: Jiri Pirko <jiri@nvidia.com>
Message-Id: <20240716113552.80599-13-jiri@resnulli.us>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

authored by

Jiri Pirko and committed by
Michael S. Tsirkin
4c3b54af 7090f2b5

+78 -17
+8 -5
drivers/virtio/virtio_pci_common.c
··· 395 395 if (vqi->name && vqi->callback) 396 396 ++nvectors; 397 397 } 398 + if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH) 399 + ++nvectors; 398 400 } else { 399 401 /* Second best: one for change, shared for all vqs. */ 400 402 nvectors = 2; ··· 427 425 if (!avq_num) 428 426 return 0; 429 427 sprintf(avq->name, "avq.%u", avq->vq_index); 430 - vq = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name, false, 431 - true, &allocated_vectors, vector_policy, 432 - &vp_dev->admin_vq.info); 428 + vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done, 429 + avq->name, false, true, &allocated_vectors, 430 + vector_policy, &vp_dev->admin_vq.info); 433 431 if (IS_ERR(vq)) { 434 432 err = PTR_ERR(vq); 435 433 goto error_find; ··· 488 486 if (!avq_num) 489 487 return 0; 490 488 sprintf(avq->name, "avq.%u", avq->vq_index); 491 - vq = vp_setup_vq(vdev, queue_idx++, NULL, avq->name, false, 492 - VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info); 489 + vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name, 490 + false, VIRTIO_MSI_NO_VECTOR, 491 + &vp_dev->admin_vq.info); 493 492 if (IS_ERR(vq)) { 494 493 err = PTR_ERR(vq); 495 494 goto out_del_vqs;
+3
drivers/virtio/virtio_pci_common.h
··· 47 47 struct virtio_pci_vq_info *info; 48 48 /* serializing admin commands execution. */ 49 49 struct mutex cmd_lock; 50 + /* Protects virtqueue access. */ 51 + spinlock_t lock; 50 52 u64 supported_cmds; 51 53 /* Name of the admin queue: avq.$vq_index. */ 52 54 char name[10]; ··· 180 178 #define VIRTIO_ADMIN_CMD_BITMAP 0 181 179 #endif 182 180 181 + void vp_modern_avq_done(struct virtqueue *vq); 183 182 int vp_modern_admin_cmd_exec(struct virtio_device *vdev, 184 183 struct virtio_admin_cmd *cmd); 185 184
+64 -12
drivers/virtio/virtio_pci_modern.c
··· 53 53 return index == vp_dev->admin_vq.vq_index; 54 54 } 55 55 56 + void vp_modern_avq_done(struct virtqueue *vq) 57 + { 58 + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 59 + struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq; 60 + struct virtio_admin_cmd *cmd; 61 + unsigned long flags; 62 + unsigned int len; 63 + 64 + spin_lock_irqsave(&admin_vq->lock, flags); 65 + do { 66 + virtqueue_disable_cb(vq); 67 + while ((cmd = virtqueue_get_buf(vq, &len))) 68 + complete(&cmd->completion); 69 + } while (!virtqueue_enable_cb(vq)); 70 + spin_unlock_irqrestore(&admin_vq->lock, flags); 71 + } 72 + 56 73 static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq, 57 74 u16 opcode, 58 75 struct scatterlist **sgs, ··· 78 61 struct virtio_admin_cmd *cmd) 79 62 { 80 63 struct virtqueue *vq; 81 - int ret, len; 64 + unsigned long flags; 65 + int ret; 82 66 83 67 vq = admin_vq->info->vq; 84 68 if (!vq) ··· 90 72 !((1ULL << opcode) & admin_vq->supported_cmds)) 91 73 return -EOPNOTSUPP; 92 74 93 - ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL); 94 - if (ret < 0) 95 - return -EIO; 75 + init_completion(&cmd->completion); 96 76 97 - if (unlikely(!virtqueue_kick(vq))) 98 - return -EIO; 99 - 100 - while (!virtqueue_get_buf(vq, &len) && 101 - !virtqueue_is_broken(vq)) 102 - cpu_relax(); 103 - 77 + again: 104 78 if (virtqueue_is_broken(vq)) 105 79 return -EIO; 106 80 107 - return 0; 81 + spin_lock_irqsave(&admin_vq->lock, flags); 82 + ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL); 83 + if (ret < 0) { 84 + if (ret == -ENOSPC) { 85 + spin_unlock_irqrestore(&admin_vq->lock, flags); 86 + cpu_relax(); 87 + goto again; 88 + } 89 + goto unlock_err; 90 + } 91 + if (!virtqueue_kick(vq)) 92 + goto unlock_err; 93 + spin_unlock_irqrestore(&admin_vq->lock, flags); 94 + 95 + wait_for_completion(&cmd->completion); 96 + 97 + return cmd->ret; 98 + 99 + unlock_err: 100 + spin_unlock_irqrestore(&admin_vq->lock, flags); 101 + return 
-EIO; 108 102 } 109 103 110 104 int vp_modern_admin_cmd_exec(struct virtio_device *vdev, ··· 237 207 return; 238 208 239 209 virtio_pci_admin_cmd_list_init(vdev); 210 + } 211 + 212 + static void vp_modern_avq_cleanup(struct virtio_device *vdev) 213 + { 214 + struct virtio_pci_device *vp_dev = to_vp_device(vdev); 215 + struct virtio_admin_cmd *cmd; 216 + struct virtqueue *vq; 217 + 218 + if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) 219 + return; 220 + 221 + vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq; 222 + if (!vq) 223 + return; 224 + 225 + while ((cmd = virtqueue_detach_unused_buf(vq))) { 226 + cmd->ret = -EIO; 227 + complete(&cmd->completion); 228 + } 240 229 } 241 230 242 231 static void vp_transport_features(struct virtio_device *vdev, u64 features) ··· 451 402 */ 452 403 while (vp_modern_get_status(mdev)) 453 404 msleep(1); 405 + 406 + vp_modern_avq_cleanup(vdev); 454 407 455 408 /* Flush pending VQ/configuration callbacks. */ 456 409 vp_synchronize_vectors(vdev); ··· 836 785 vp_dev->isr = mdev->isr; 837 786 vp_dev->vdev.id = mdev->id; 838 787 788 + spin_lock_init(&vp_dev->admin_vq.lock); 839 789 mutex_init(&vp_dev->admin_vq.cmd_lock); 840 790 return 0; 841 791 }
+3
include/linux/virtio.h
··· 10 10 #include <linux/mod_devicetable.h> 11 11 #include <linux/gfp.h> 12 12 #include <linux/dma-mapping.h> 13 + #include <linux/completion.h> 13 14 14 15 /** 15 16 * struct virtqueue - a queue to register buffers for sending or receiving. ··· 110 109 __le64 group_member_id; 111 110 struct scatterlist *data_sg; 112 111 struct scatterlist *result_sg; 112 + struct completion completion; 113 + int ret; 113 114 }; 114 115 115 116 /**