Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/virtio: support mapping exported vram

Implement virtgpu specific map_dma_buf callback to support mapping
exported vram object dma-bufs. The dma-buf callback is used directly, as
vram objects don't have backing pages and thus can't implement the
drm_gem_object_funcs.get_sg_table callback.

Signed-off-by: David Stevens <stevensd@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20210813005441.608293-1-stevensd@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>

Authored by David Stevens; committed by Gerd Hoffmann
ea5ea3d8 f492283b

+99 -2
+8
drivers/gpu/drm/virtio/virtgpu_drv.h
··· 26 26 #ifndef VIRTIO_DRV_H 27 27 #define VIRTIO_DRV_H 28 28 29 + #include <linux/dma-direction.h> 29 30 #include <linux/virtio.h> 30 31 #include <linux/virtio_ids.h> 31 32 #include <linux/virtio_config.h> ··· 460 459 int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev, 461 460 struct virtio_gpu_object_params *params, 462 461 struct virtio_gpu_object **bo_ptr); 462 + struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo, 463 + struct device *dev, 464 + enum dma_data_direction dir); 465 + void virtio_gpu_vram_unmap_dma_buf(struct device *dev, 466 + struct sg_table *sgt, 467 + enum dma_data_direction dir); 468 + 463 469 #endif
+30 -2
drivers/gpu/drm/virtio/virtgpu_prime.c
··· 43 43 return 0; 44 44 } 45 45 46 + static struct sg_table * 47 + virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach, 48 + enum dma_data_direction dir) 49 + { 50 + struct drm_gem_object *obj = attach->dmabuf->priv; 51 + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); 52 + 53 + if (virtio_gpu_is_vram(bo)) 54 + return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir); 55 + 56 + return drm_gem_map_dma_buf(attach, dir); 57 + } 58 + 59 + static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach, 60 + struct sg_table *sgt, 61 + enum dma_data_direction dir) 62 + { 63 + struct drm_gem_object *obj = attach->dmabuf->priv; 64 + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); 65 + 66 + if (virtio_gpu_is_vram(bo)) { 67 + virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir); 68 + return; 69 + } 70 + 71 + drm_gem_unmap_dma_buf(attach, sgt, dir); 72 + } 73 + 46 74 static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = { 47 75 .ops = { 48 76 .cache_sgt_mapping = true, 49 77 .attach = virtio_dma_buf_attach, 50 78 .detach = drm_gem_map_detach, 51 - .map_dma_buf = drm_gem_map_dma_buf, 52 - .unmap_dma_buf = drm_gem_unmap_dma_buf, 79 + .map_dma_buf = virtgpu_gem_map_dma_buf, 80 + .unmap_dma_buf = virtgpu_gem_unmap_dma_buf, 53 81 .release = drm_gem_dmabuf_release, 54 82 .mmap = drm_gem_dmabuf_mmap, 55 83 .vmap = drm_gem_dmabuf_vmap,
+61
drivers/gpu/drm/virtio/virtgpu_vram.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include "virtgpu_drv.h" 3 3 4 + #include <linux/dma-mapping.h> 5 + 4 6 static void virtio_gpu_vram_free(struct drm_gem_object *obj) 5 7 { 6 8 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); ··· 64 62 vram->vram_node.start >> PAGE_SHIFT, 65 63 vm_size, vma->vm_page_prot); 66 64 return ret; 65 + } 66 + 67 + struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo, 68 + struct device *dev, 69 + enum dma_data_direction dir) 70 + { 71 + struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; 72 + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); 73 + struct sg_table *sgt; 74 + dma_addr_t addr; 75 + int ret; 76 + 77 + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 78 + if (!sgt) 79 + return ERR_PTR(-ENOMEM); 80 + 81 + if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) { 82 + // Virtio devices can access the dma-buf via its UUID. Return a stub 83 + // sg_table so the dma-buf API still works. 84 + if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) { 85 + ret = -EIO; 86 + goto out; 87 + } 88 + return sgt; 89 + } 90 + 91 + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); 92 + if (ret) 93 + goto out; 94 + 95 + addr = dma_map_resource(dev, vram->vram_node.start, 96 + vram->vram_node.size, dir, 97 + DMA_ATTR_SKIP_CPU_SYNC); 98 + ret = dma_mapping_error(dev, addr); 99 + if (ret) 100 + goto out; 101 + 102 + sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0); 103 + sg_dma_address(sgt->sgl) = addr; 104 + sg_dma_len(sgt->sgl) = vram->vram_node.size; 105 + 106 + return sgt; 107 + out: 108 + sg_free_table(sgt); 109 + kfree(sgt); 110 + return ERR_PTR(ret); 111 + } 112 + 113 + void virtio_gpu_vram_unmap_dma_buf(struct device *dev, 114 + struct sg_table *sgt, 115 + enum dma_data_direction dir) 116 + { 117 + if (sgt->nents) { 118 + dma_unmap_resource(dev, sg_dma_address(sgt->sgl), 119 + sg_dma_len(sgt->sgl), dir, 120 + DMA_ATTR_SKIP_CPU_SYNC); 121 + } 122 + sg_free_table(sgt); 123 + kfree(sgt); 67 124 } 68 125 69 126 static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {