Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vdpa: mlx5: prevent cvq work from hogging CPU

A userspace-triggerable infinite loop could happen in
mlx5_cvq_kick_handler() if userspace keeps sending a large number of
cvq requests.

Fix this by introducing a quota and re-queueing the work if we're out
of budget (currently the implicit budget is one). While at it, use a
per-device work struct to avoid on-demand memory allocation for the
cvq.

Fixes: 5262912ef3cfc ("vdpa/mlx5: Add support for control VQ and MAC setting")
Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20220329042109.4029-1-jasowang@redhat.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Eli Cohen <elic@nvidia.com>

authored by

Jason Wang and committed by
Michael S. Tsirkin
55ebf0d6 c18c8680

+9 -12
+9 -12
drivers/vdpa/mlx5/net/mlx5_vnet.c
··· 163 163 u32 cur_num_vqs; 164 164 struct notifier_block nb; 165 165 struct vdpa_callback config_cb; 166 + struct mlx5_vdpa_wq_ent cvq_ent; 166 167 }; 167 168 168 169 static void free_resources(struct mlx5_vdpa_net *ndev); ··· 1660 1659 ndev = to_mlx5_vdpa_ndev(mvdev); 1661 1660 cvq = &mvdev->cvq; 1662 1661 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) 1663 - goto out; 1662 + return; 1664 1663 1665 1664 if (!cvq->ready) 1666 - goto out; 1665 + return; 1667 1666 1668 1667 while (true) { 1669 1668 err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head, ··· 1697 1696 1698 1697 if (vringh_need_notify_iotlb(&cvq->vring)) 1699 1698 vringh_notify(&cvq->vring); 1699 + 1700 + queue_work(mvdev->wq, &wqent->work); 1701 + break; 1700 1702 } 1701 - out: 1702 - kfree(wqent); 1703 1703 } 1704 1704 1705 1705 static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx) ··· 1708 1706 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); 1709 1707 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); 1710 1708 struct mlx5_vdpa_virtqueue *mvq; 1711 - struct mlx5_vdpa_wq_ent *wqent; 1712 1709 1713 1710 if (!is_index_valid(mvdev, idx)) 1714 1711 return; ··· 1716 1715 if (!mvdev->wq || !mvdev->cvq.ready) 1717 1716 return; 1718 1717 1719 - wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC); 1720 - if (!wqent) 1721 - return; 1722 - 1723 - wqent->mvdev = mvdev; 1724 - INIT_WORK(&wqent->work, mlx5_cvq_kick_handler); 1725 - queue_work(mvdev->wq, &wqent->work); 1718 + queue_work(mvdev->wq, &ndev->cvq_ent.work); 1726 1719 return; 1727 1720 } 1728 1721 ··· 2735 2740 if (err) 2736 2741 goto err_mr; 2737 2742 2743 + ndev->cvq_ent.mvdev = mvdev; 2744 + INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler); 2738 2745 mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq"); 2739 2746 if (!mvdev->wq) { 2740 2747 err = -ENOMEM;