Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vdpa: support virtio_map

Virtio core switches from the DMA device to virtio_map; let's do the same
for vDPA.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20250821064641.5025-8-jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Eugenio Pérez <eperezma@redhat.com>

authored by

Jason Wang and committed by
Michael S. Tsirkin
58aca3db bee8c7c2

+37 -32
+1 -1
drivers/vdpa/alibaba/eni_vdpa.c
··· 496 496 pci_set_master(pdev); 497 497 pci_set_drvdata(pdev, eni_vdpa); 498 498 499 - eni_vdpa->vdpa.dma_dev = &pdev->dev; 499 + eni_vdpa->vdpa.vmap.dma_dev = &pdev->dev; 500 500 eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa); 501 501 502 502 eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
+1 -1
drivers/vdpa/ifcvf/ifcvf_main.c
··· 713 713 714 714 ifcvf_mgmt_dev->adapter = adapter; 715 715 adapter->pdev = pdev; 716 - adapter->vdpa.dma_dev = &pdev->dev; 716 + adapter->vdpa.vmap.dma_dev = &pdev->dev; 717 717 adapter->vdpa.mdev = mdev; 718 718 adapter->vf = vf; 719 719 vdpa_dev = &adapter->vdpa;
+2 -2
drivers/vdpa/mlx5/core/mr.c
··· 378 378 u64 pa, offset; 379 379 u64 paend; 380 380 struct scatterlist *sg; 381 - struct device *dma = mvdev->vdev.dma_dev; 381 + struct device *dma = mvdev->vdev.vmap.dma_dev; 382 382 383 383 for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1); 384 384 map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) { ··· 432 432 433 433 static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) 434 434 { 435 - struct device *dma = mvdev->vdev.dma_dev; 435 + struct device *dma = mvdev->vdev.vmap.dma_dev; 436 436 437 437 destroy_direct_mr(mvdev, mr); 438 438 dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+8 -5
drivers/vdpa/mlx5/net/mlx5_vnet.c
··· 3395 3395 return err; 3396 3396 } 3397 3397 3398 - static struct device *mlx5_get_vq_dma_dev(struct vdpa_device *vdev, u16 idx) 3398 + static union virtio_map mlx5_get_vq_map(struct vdpa_device *vdev, u16 idx) 3399 3399 { 3400 3400 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); 3401 + union virtio_map map; 3401 3402 3402 3403 if (is_ctrl_vq_idx(mvdev, idx)) 3403 - return &vdev->dev; 3404 + map.dma_dev = &vdev->dev; 3405 + else 3406 + map.dma_dev = mvdev->vdev.vmap.dma_dev; 3404 3407 3405 - return mvdev->vdev.dma_dev; 3408 + return map; 3406 3409 } 3407 3410 3408 3411 static void free_irqs(struct mlx5_vdpa_net *ndev) ··· 3689 3686 .set_map = mlx5_vdpa_set_map, 3690 3687 .reset_map = mlx5_vdpa_reset_map, 3691 3688 .set_group_asid = mlx5_set_group_asid, 3692 - .get_vq_dma_dev = mlx5_get_vq_dma_dev, 3689 + .get_vq_map = mlx5_get_vq_map, 3693 3690 .free = mlx5_vdpa_free, 3694 3691 .suspend = mlx5_vdpa_suspend, 3695 3692 .resume = mlx5_vdpa_resume, /* Op disabled if not supported. */ ··· 3968 3965 } 3969 3966 3970 3967 ndev->mvdev.mlx_features = device_features; 3971 - mvdev->vdev.dma_dev = &mdev->pdev->dev; 3968 + mvdev->vdev.vmap.dma_dev = &mdev->pdev->dev; 3972 3969 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); 3973 3970 if (err) 3974 3971 goto err_alloc;
+1 -1
drivers/vdpa/octeon_ep/octep_vdpa_main.c
··· 516 516 } 517 517 518 518 oct_vdpa->pdev = pdev; 519 - oct_vdpa->vdpa.dma_dev = &pdev->dev; 519 + oct_vdpa->vdpa.vmap.dma_dev = &pdev->dev; 520 520 oct_vdpa->vdpa.mdev = mdev; 521 521 oct_vdpa->oct_hw = oct_hw; 522 522 vdpa_dev = &oct_vdpa->vdpa;
+1 -1
drivers/vdpa/pds/vdpa_dev.c
··· 643 643 644 644 pdev = vdpa_aux->padev->vf_pdev; 645 645 dma_dev = &pdev->dev; 646 - pdsv->vdpa_dev.dma_dev = dma_dev; 646 + pdsv->vdpa_dev.vmap.dma_dev = dma_dev; 647 647 648 648 status = pds_vdpa_get_status(&pdsv->vdpa_dev); 649 649 if (status == 0xff) {
+2 -2
drivers/vdpa/solidrun/snet_main.c
··· 1052 1052 */ 1053 1053 snet_reserve_irq_idx(pf_irqs ? pdev_pf : pdev, snet); 1054 1054 1055 - /*set DMA device*/ 1056 - snet->vdpa.dma_dev = &pdev->dev; 1055 + /* set map metadata */ 1056 + snet->vdpa.vmap.dma_dev = &pdev->dev; 1057 1057 1058 1058 /* Register VDPA device */ 1059 1059 ret = vdpa_register_device(&snet->vdpa, snet->cfg->vq_num);
+1 -1
drivers/vdpa/vdpa.c
··· 151 151 * Driver should use vdpa_alloc_device() wrapper macro instead of 152 152 * using this directly. 153 153 * 154 - * Return: Returns an error when parent/config/dma_dev is not set or fail to get 154 + * Return: Returns an error when parent/config/map is not set or fail to get 155 155 * ida. 156 156 */ 157 157 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
+1 -1
drivers/vdpa/vdpa_sim/vdpa_sim.c
··· 272 272 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], 273 273 &vdpasim->iommu_lock); 274 274 275 - vdpasim->vdpa.dma_dev = dev; 275 + vdpasim->vdpa.vmap.dma_dev = dev; 276 276 277 277 return vdpasim; 278 278
+1 -1
drivers/vdpa/vdpa_user/vduse_dev.c
··· 2022 2022 return ret; 2023 2023 } 2024 2024 set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops); 2025 - vdev->vdpa.dma_dev = &vdev->vdpa.dev; 2025 + vdev->vdpa.vmap.dma_dev = &vdev->vdpa.dev; 2026 2026 vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev; 2027 2027 2028 2028 return 0;
+1 -1
drivers/vdpa/virtio_pci/vp_vdpa.c
··· 520 520 521 521 vp_vdpa_mgtdev->vp_vdpa = vp_vdpa; 522 522 523 - vp_vdpa->vdpa.dma_dev = &pdev->dev; 523 + vp_vdpa->vdpa.vmap.dma_dev = &pdev->dev; 524 524 vp_vdpa->queues = vp_modern_get_num_queues(mdev); 525 525 vp_vdpa->mdev = mdev; 526 526
+4 -2
drivers/vhost/vdpa.c
··· 1318 1318 { 1319 1319 struct vdpa_device *vdpa = v->vdpa; 1320 1320 const struct vdpa_config_ops *ops = vdpa->config; 1321 - struct device *dma_dev = vdpa_get_dma_dev(vdpa); 1321 + union virtio_map map = vdpa_get_map(vdpa); 1322 + struct device *dma_dev = map.dma_dev; 1322 1323 int ret; 1323 1324 1324 1325 /* Device want to do DMA by itself */ ··· 1354 1353 static void vhost_vdpa_free_domain(struct vhost_vdpa *v) 1355 1354 { 1356 1355 struct vdpa_device *vdpa = v->vdpa; 1357 - struct device *dma_dev = vdpa_get_dma_dev(vdpa); 1356 + union virtio_map map = vdpa_get_map(vdpa); 1357 + struct device *dma_dev = map.dma_dev; 1358 1358 1359 1359 if (v->domain) { 1360 1360 iommu_detach_device(v->domain, dma_dev);
+5 -6
drivers/virtio/virtio_vdpa.c
··· 133 133 const char *name, bool ctx) 134 134 { 135 135 struct vdpa_device *vdpa = vd_get_vdpa(vdev); 136 - struct device *dma_dev; 137 136 const struct vdpa_config_ops *ops = vdpa->config; 138 137 bool (*notify)(struct virtqueue *vq) = virtio_vdpa_notify; 139 138 struct vdpa_callback cb; ··· 181 182 /* Create the vring */ 182 183 align = ops->get_vq_align(vdpa); 183 184 184 - if (ops->get_vq_dma_dev) 185 - dma_dev = ops->get_vq_dma_dev(vdpa, index); 185 + if (ops->get_vq_map) 186 + map = ops->get_vq_map(vdpa, index); 186 187 else 187 - dma_dev = vdpa_get_dma_dev(vdpa); 188 - map.dma_dev = dma_dev; 188 + map = vdpa_get_map(vdpa); 189 + 189 190 vq = vring_create_virtqueue_map(index, max_num, align, vdev, 190 191 true, may_reduce_num, ctx, 191 192 notify, callback, name, map); ··· 466 467 if (!vd_dev) 467 468 return -ENOMEM; 468 469 469 - vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa); 470 + vd_dev->vdev.dev.parent = vdpa_get_map(vdpa).dma_dev; 470 471 vd_dev->vdev.dev.release = virtio_vdpa_release_dev; 471 472 vd_dev->vdev.config = &virtio_vdpa_config_ops; 472 473 vd_dev->vdpa = vdpa;
+8 -7
include/linux/vdpa.h
··· 5 5 #include <linux/kernel.h> 6 6 #include <linux/device.h> 7 7 #include <linux/interrupt.h> 8 + #include <linux/virtio.h> 8 9 #include <linux/vhost_iotlb.h> 9 10 #include <linux/virtio_net.h> 10 11 #include <linux/virtio_blk.h> ··· 71 70 /** 72 71 * struct vdpa_device - representation of a vDPA device 73 72 * @dev: underlying device 74 - * @dma_dev: the actual device that is performing DMA 73 + * @vmap: the metadata passed to upper layer to be used for mapping 75 74 * @driver_override: driver name to force a match; do not set directly, 76 75 * because core frees it; use driver_set_override() to 77 76 * set or clear it. ··· 88 87 */ 89 88 struct vdpa_device { 90 89 struct device dev; 91 - struct device *dma_dev; 90 + union virtio_map vmap; 92 91 const char *driver_override; 93 92 const struct vdpa_config_ops *config; 94 93 struct rw_semaphore cf_lock; /* Protects get/set config */ ··· 353 352 * @vdev: vdpa device 354 353 * @asid: address space identifier 355 354 * Returns integer: success (0) or error (< 0) 356 - * @get_vq_dma_dev: Get the dma device for a specific 355 + * @get_vq_map: Get the map metadata for a specific 357 356 * virtqueue (optional) 358 357 * @vdev: vdpa device 359 358 * @idx: virtqueue index 360 - * Returns pointer to structure device or error (NULL) 359 + * Returns map token union error (NULL) 361 360 * @bind_mm: Bind the device to a specific address space 362 361 * so the vDPA framework can use VA when this 363 362 * callback is implemented. 
(optional) ··· 437 436 int (*reset_map)(struct vdpa_device *vdev, unsigned int asid); 438 437 int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group, 439 438 unsigned int asid); 440 - struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx); 439 + union virtio_map (*get_vq_map)(struct vdpa_device *vdev, u16 idx); 441 440 int (*bind_mm)(struct vdpa_device *vdev, struct mm_struct *mm); 442 441 void (*unbind_mm)(struct vdpa_device *vdev); 443 442 ··· 521 520 dev_set_drvdata(&vdev->dev, data); 522 521 } 523 522 524 - static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev) 523 + static inline union virtio_map vdpa_get_map(struct vdpa_device *vdev) 525 524 { 526 - return vdev->dma_dev; 525 + return vdev->vmap; 527 526 } 528 527 529 528 static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags)