Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drivers/gpu/drm/exynos/exynos_drm_dmabuf.c at tag v4.0-rc5 — 281 lines, 6.7 kB (raw view)
/* exynos_drm_dmabuf.c
 *
 * DMA-buf export/import support for the Exynos DRM GEM allocator.
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

#include <linux/dma-buf.h>

/*
 * Per-attachment state, stored in dma_buf_attachment::priv.
 *
 * @sgt:       private copy of the GEM buffer's scatter-gather table,
 *             mapped for this attachment's device.
 * @dir:       direction the table is currently DMA-mapped for;
 *             DMA_NONE until the first successful map.
 * @is_mapped: true once map_dma_buf has completed, so repeated map
 *             calls with the same direction can return the cached table.
 */
struct exynos_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};

/* The exporter stores the GEM object in dma_buf::priv (see prime_export). */
static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_exynos_gem_obj(buf->priv);
}

/*
 * dma_buf_ops.attach: allocate the per-attachment bookkeeping.
 * The actual sg table is built lazily in exynos_gem_map_dma_buf().
 */
static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
					struct device *dev,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach;

	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
	if (!exynos_attach)
		return -ENOMEM;

	exynos_attach->dir = DMA_NONE;
	attach->priv = exynos_attach;

	return 0;
}

/*
 * dma_buf_ops.detach: undo attach — unmap the sg table if it was
 * DMA-mapped, then release it and the bookkeeping struct.
 *
 * NOTE(review): if map was never called, sgt was only zero-initialized
 * by kzalloc; sg_free_table() on it is presumably a no-op — confirm.
 */
static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct sg_table *sgt;

	if (!exynos_attach)
		return;

	sgt = &exynos_attach->sgt;

	/* dir != DMA_NONE means a successful dma_map_sg() happened. */
	if (exynos_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				exynos_attach->dir);

	sg_free_table(sgt);
	kfree(exynos_attach);
	attach->priv = NULL;
}

/*
 * dma_buf_ops.map_dma_buf: hand the importer a scatter-gather table for
 * the GEM buffer, DMA-mapped for the importer's device.
 *
 * A private copy of the exporter's sg table is built (page pointers are
 * shared, the table itself is not) so each attachment can be mapped
 * independently. The result is cached: a second call with the same
 * direction returns the existing table.
 *
 * Returns the mapped sg table on success or an ERR_PTR on failure.
 */
static struct sg_table *
		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int nents, ret;

	/* just return current sgt if already requested. */
	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
		return &exynos_attach->sgt;

	buf = gem_obj->buffer;
	if (!buf) {
		DRM_ERROR("buffer is null.\n");
		return ERR_PTR(-ENOMEM);
	}

	sgt = &exynos_attach->sgt;

	/* Allocate entries for a one-to-one copy of the backing sg table. */
	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&dev->struct_mutex);

	/* Clone the exporter's sg list entry by entry (pages are shared). */
	rd = buf->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			/* sgt now carries the error back through err_unlock. */
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	/* Remember the cached mapping for subsequent calls. */
	exynos_attach->is_mapped = true;
	exynos_attach->dir = dir;
	attach->priv = exynos_attach;

	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}

/*
 * dma_buf_ops.unmap_dma_buf: intentionally empty — the mapping is cached
 * per attachment and only torn down in exynos_gem_detach_dma_buf().
 */
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
	/* Nothing to do. */
}

/* CPU access via kmap is not implemented; all stubs below return NULL. */
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}

static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}

/* mmap of the dma-buf is not supported by this exporter. */
static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
					struct vm_area_struct *vma)
{
	return -ENOTTY;
}

static struct dma_buf_ops exynos_dmabuf_ops = {
	.attach			= exynos_gem_attach_dma_buf,
	.detach			= exynos_gem_detach_dma_buf,
	.map_dma_buf		= exynos_gem_map_dma_buf,
	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
	.kmap			= exynos_gem_dmabuf_kmap,
	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
	.kunmap			= exynos_gem_dmabuf_kunmap,
	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
	.mmap			= exynos_gem_dmabuf_mmap,
	.release		= drm_gem_dmabuf_release,
};

/*
 * PRIME export hook: wrap a GEM object in a dma_buf using the ops above.
 * The GEM object itself is stored as dma_buf::priv (read back by
 * dma_buf_to_obj()).
 */
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
				struct drm_gem_object *obj, int flags)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);

	return dma_buf_export(obj, &exynos_dmabuf_ops,
				exynos_gem_obj->base.size, flags, NULL);
}

/*
 * PRIME import hook: turn a dma_buf into a GEM object.
 *
 * Self-imports (buffers this driver exported for this device) short-circuit
 * to the original GEM object with an extra reference. Foreign buffers are
 * attached and mapped, and a new GEM object is built around the resulting
 * sg table. On success the function keeps its references to the dma_buf
 * and the attachment (released later via base.import_attach); on failure
 * everything acquired here is rolled back.
 */
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		obj = dma_buf->priv;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	/* Hold the dma_buf for the lifetime of the imported GEM object. */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* always physically continuous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO. we have to find a way that exporter can notify
		 * the type of its own buffer to importer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	/* import_attach lets the GEM teardown path detach/unmap later. */
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

/* Error unwinding: release in reverse order of acquisition. */
err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}