Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/udl: add support to export a handle to an FD on UDL.

Only importing an FD to a handle is currently supported on UDL,
but the exporting functionality is equally useful.

Signed-off-by: Haixia Shi <hshi@chromium.org>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>

Authored by Haixia Shi and committed by Dave Airlie
(commit ebfdd6d5, parent 09a58da0)

+278 -72
+1 -1
drivers/gpu/drm/udl/Makefile
··· 1 1 2 2 ccflags-y := -Iinclude/drm 3 3 4 - udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o 4 + udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o 5 5 6 6 obj-$(CONFIG_DRM_UDL) := udl.o
+273
drivers/gpu/drm/udl/udl_dmabuf.c
··· 1 + /* 2 + * udl_dmabuf.c 3 + * 4 + * Copyright (c) 2014 The Chromium OS Authors 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the 8 + * Free Software Foundation; either version 2 of the License, or (at your 9 + * option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 + */ 19 + 20 + #include <drm/drmP.h> 21 + #include "udl_drv.h" 22 + #include <linux/shmem_fs.h> 23 + #include <linux/dma-buf.h> 24 + 25 + struct udl_drm_dmabuf_attachment { 26 + struct sg_table sgt; 27 + enum dma_data_direction dir; 28 + bool is_mapped; 29 + }; 30 + 31 + static int udl_attach_dma_buf(struct dma_buf *dmabuf, 32 + struct device *dev, 33 + struct dma_buf_attachment *attach) 34 + { 35 + struct udl_drm_dmabuf_attachment *udl_attach; 36 + 37 + DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev), 38 + attach->dmabuf->size); 39 + 40 + udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL); 41 + if (!udl_attach) 42 + return -ENOMEM; 43 + 44 + udl_attach->dir = DMA_NONE; 45 + attach->priv = udl_attach; 46 + 47 + return 0; 48 + } 49 + 50 + static void udl_detach_dma_buf(struct dma_buf *dmabuf, 51 + struct dma_buf_attachment *attach) 52 + { 53 + struct udl_drm_dmabuf_attachment *udl_attach = attach->priv; 54 + struct sg_table *sgt; 55 + 56 + if (!udl_attach) 57 + return; 58 + 59 + DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev), 60 + attach->dmabuf->size); 61 + 62 + sgt = &udl_attach->sgt; 63 + 64 + if (udl_attach->dir != DMA_NONE) 65 + dma_unmap_sg(attach->dev, sgt->sgl, 
sgt->nents, 66 + udl_attach->dir); 67 + 68 + sg_free_table(sgt); 69 + kfree(udl_attach); 70 + attach->priv = NULL; 71 + } 72 + 73 + static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach, 74 + enum dma_data_direction dir) 75 + { 76 + struct udl_drm_dmabuf_attachment *udl_attach = attach->priv; 77 + struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv); 78 + struct drm_device *dev = obj->base.dev; 79 + struct scatterlist *rd, *wr; 80 + struct sg_table *sgt = NULL; 81 + unsigned int i; 82 + int page_count; 83 + int nents, ret; 84 + 85 + DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev), 86 + attach->dmabuf->size, dir); 87 + 88 + /* just return current sgt if already requested. */ 89 + if (udl_attach->dir == dir && udl_attach->is_mapped) 90 + return &udl_attach->sgt; 91 + 92 + if (!obj->pages) { 93 + DRM_ERROR("pages is null.\n"); 94 + return ERR_PTR(-ENOMEM); 95 + } 96 + 97 + page_count = obj->base.size / PAGE_SIZE; 98 + obj->sg = drm_prime_pages_to_sg(obj->pages, page_count); 99 + if (!obj->sg) { 100 + DRM_ERROR("sg is null.\n"); 101 + return ERR_PTR(-ENOMEM); 102 + } 103 + 104 + sgt = &udl_attach->sgt; 105 + 106 + ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL); 107 + if (ret) { 108 + DRM_ERROR("failed to alloc sgt.\n"); 109 + return ERR_PTR(-ENOMEM); 110 + } 111 + 112 + mutex_lock(&dev->struct_mutex); 113 + 114 + rd = obj->sg->sgl; 115 + wr = sgt->sgl; 116 + for (i = 0; i < sgt->orig_nents; ++i) { 117 + sg_set_page(wr, sg_page(rd), rd->length, rd->offset); 118 + rd = sg_next(rd); 119 + wr = sg_next(wr); 120 + } 121 + 122 + if (dir != DMA_NONE) { 123 + nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); 124 + if (!nents) { 125 + DRM_ERROR("failed to map sgl with iommu.\n"); 126 + sg_free_table(sgt); 127 + sgt = ERR_PTR(-EIO); 128 + goto err_unlock; 129 + } 130 + } 131 + 132 + udl_attach->is_mapped = true; 133 + udl_attach->dir = dir; 134 + attach->priv = udl_attach; 135 + 136 + err_unlock: 137 + 
mutex_unlock(&dev->struct_mutex); 138 + return sgt; 139 + } 140 + 141 + static void udl_unmap_dma_buf(struct dma_buf_attachment *attach, 142 + struct sg_table *sgt, 143 + enum dma_data_direction dir) 144 + { 145 + /* Nothing to do. */ 146 + DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev), 147 + attach->dmabuf->size, dir); 148 + } 149 + 150 + static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num) 151 + { 152 + /* TODO */ 153 + 154 + return NULL; 155 + } 156 + 157 + static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf, 158 + unsigned long page_num) 159 + { 160 + /* TODO */ 161 + 162 + return NULL; 163 + } 164 + 165 + static void udl_dmabuf_kunmap(struct dma_buf *dma_buf, 166 + unsigned long page_num, void *addr) 167 + { 168 + /* TODO */ 169 + } 170 + 171 + static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, 172 + unsigned long page_num, 173 + void *addr) 174 + { 175 + /* TODO */ 176 + } 177 + 178 + static int udl_dmabuf_mmap(struct dma_buf *dma_buf, 179 + struct vm_area_struct *vma) 180 + { 181 + /* TODO */ 182 + 183 + return -EINVAL; 184 + } 185 + 186 + static struct dma_buf_ops udl_dmabuf_ops = { 187 + .attach = udl_attach_dma_buf, 188 + .detach = udl_detach_dma_buf, 189 + .map_dma_buf = udl_map_dma_buf, 190 + .unmap_dma_buf = udl_unmap_dma_buf, 191 + .kmap = udl_dmabuf_kmap, 192 + .kmap_atomic = udl_dmabuf_kmap_atomic, 193 + .kunmap = udl_dmabuf_kunmap, 194 + .kunmap_atomic = udl_dmabuf_kunmap_atomic, 195 + .mmap = udl_dmabuf_mmap, 196 + .release = drm_gem_dmabuf_release, 197 + }; 198 + 199 + struct dma_buf *udl_gem_prime_export(struct drm_device *dev, 200 + struct drm_gem_object *obj, int flags) 201 + { 202 + return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL); 203 + } 204 + 205 + static int udl_prime_create(struct drm_device *dev, 206 + size_t size, 207 + struct sg_table *sg, 208 + struct udl_gem_object **obj_p) 209 + { 210 + struct udl_gem_object *obj; 211 + int npages; 212 + 213 + 
npages = size / PAGE_SIZE; 214 + 215 + *obj_p = NULL; 216 + obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE); 217 + if (!obj) 218 + return -ENOMEM; 219 + 220 + obj->sg = sg; 221 + obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); 222 + if (obj->pages == NULL) { 223 + DRM_ERROR("obj pages is NULL %d\n", npages); 224 + return -ENOMEM; 225 + } 226 + 227 + drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); 228 + 229 + *obj_p = obj; 230 + return 0; 231 + } 232 + 233 + struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, 234 + struct dma_buf *dma_buf) 235 + { 236 + struct dma_buf_attachment *attach; 237 + struct sg_table *sg; 238 + struct udl_gem_object *uobj; 239 + int ret; 240 + 241 + /* need to attach */ 242 + get_device(dev->dev); 243 + attach = dma_buf_attach(dma_buf, dev->dev); 244 + if (IS_ERR(attach)) { 245 + put_device(dev->dev); 246 + return ERR_CAST(attach); 247 + } 248 + 249 + get_dma_buf(dma_buf); 250 + 251 + sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 252 + if (IS_ERR(sg)) { 253 + ret = PTR_ERR(sg); 254 + goto fail_detach; 255 + } 256 + 257 + ret = udl_prime_create(dev, dma_buf->size, sg, &uobj); 258 + if (ret) 259 + goto fail_unmap; 260 + 261 + uobj->base.import_attach = attach; 262 + uobj->flags = UDL_BO_WC; 263 + 264 + return &uobj->base; 265 + 266 + fail_unmap: 267 + dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); 268 + fail_detach: 269 + dma_buf_detach(dma_buf, attach); 270 + dma_buf_put(dma_buf); 271 + put_device(dev->dev); 272 + return ERR_PTR(ret); 273 + }
+2
drivers/gpu/drm/udl/udl_drv.c
··· 51 51 .dumb_destroy = drm_gem_dumb_destroy, 52 52 .fops = &udl_driver_fops, 53 53 54 + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 54 55 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 56 + .gem_prime_export = udl_gem_prime_export, 55 57 .gem_prime_import = udl_gem_prime_import, 56 58 57 59 .name = DRIVER_NAME,
+2
drivers/gpu/drm/udl/udl_drv.h
··· 124 124 void udl_gem_free_object(struct drm_gem_object *gem_obj); 125 125 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, 126 126 size_t size); 127 + struct dma_buf *udl_gem_prime_export(struct drm_device *dev, 128 + struct drm_gem_object *obj, int flags); 127 129 struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, 128 130 struct dma_buf *dma_buf); 129 131
-71
drivers/gpu/drm/udl/udl_gem.c
··· 240 240 mutex_unlock(&dev->struct_mutex); 241 241 return ret; 242 242 } 243 - 244 - static int udl_prime_create(struct drm_device *dev, 245 - size_t size, 246 - struct sg_table *sg, 247 - struct udl_gem_object **obj_p) 248 - { 249 - struct udl_gem_object *obj; 250 - int npages; 251 - 252 - npages = size / PAGE_SIZE; 253 - 254 - *obj_p = NULL; 255 - obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE); 256 - if (!obj) 257 - return -ENOMEM; 258 - 259 - obj->sg = sg; 260 - obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); 261 - if (obj->pages == NULL) { 262 - DRM_ERROR("obj pages is NULL %d\n", npages); 263 - return -ENOMEM; 264 - } 265 - 266 - drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); 267 - 268 - *obj_p = obj; 269 - return 0; 270 - } 271 - 272 - struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, 273 - struct dma_buf *dma_buf) 274 - { 275 - struct dma_buf_attachment *attach; 276 - struct sg_table *sg; 277 - struct udl_gem_object *uobj; 278 - int ret; 279 - 280 - /* need to attach */ 281 - get_device(dev->dev); 282 - attach = dma_buf_attach(dma_buf, dev->dev); 283 - if (IS_ERR(attach)) { 284 - put_device(dev->dev); 285 - return ERR_CAST(attach); 286 - } 287 - 288 - get_dma_buf(dma_buf); 289 - 290 - sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 291 - if (IS_ERR(sg)) { 292 - ret = PTR_ERR(sg); 293 - goto fail_detach; 294 - } 295 - 296 - ret = udl_prime_create(dev, dma_buf->size, sg, &uobj); 297 - if (ret) { 298 - goto fail_unmap; 299 - } 300 - 301 - uobj->base.import_attach = attach; 302 - uobj->flags = UDL_BO_WC; 303 - 304 - return &uobj->base; 305 - 306 - fail_unmap: 307 - dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); 308 - fail_detach: 309 - dma_buf_detach(dma_buf, attach); 310 - dma_buf_put(dma_buf); 311 - put_device(dev->dev); 312 - return ERR_PTR(ret); 313 - }