Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

udl: add prime fd->handle support.

udl can only be used as an output offload so doesn't need to support
handle->fd direction.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>

+92 -1
+5 -1
drivers/gpu/drm/udl/udl_drv.c
··· 57 57 }; 58 58 59 59 static struct drm_driver driver = { 60 - .driver_features = DRIVER_MODESET | DRIVER_GEM, 60 + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, 61 61 .load = udl_driver_load, 62 62 .unload = udl_driver_unload, 63 63 ··· 70 70 .dumb_map_offset = udl_gem_mmap, 71 71 .dumb_destroy = udl_dumb_destroy, 72 72 .fops = &udl_driver_fops, 73 + 74 + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 75 + .gem_prime_import = udl_gem_prime_import, 76 + 73 77 .name = DRIVER_NAME, 74 78 .desc = DRIVER_DESC, 75 79 .date = DRIVER_DATE,
+3
drivers/gpu/drm/udl/udl_drv.h
··· 66 66 struct drm_gem_object base; 67 67 struct page **pages; 68 68 void *vmapping; 69 + struct sg_table *sg; 69 70 }; 70 71 71 72 #define to_udl_bo(x) container_of(x, struct udl_gem_object, base) ··· 119 118 void udl_gem_free_object(struct drm_gem_object *gem_obj); 120 119 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, 121 120 size_t size); 121 + struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, 122 + struct dma_buf *dma_buf); 122 123 123 124 int udl_gem_vmap(struct udl_gem_object *obj); 124 125 void udl_gem_vunmap(struct udl_gem_object *obj);
+9
drivers/gpu/drm/udl/udl_fb.c
··· 593 593 struct drm_gem_object *obj; 594 594 struct udl_framebuffer *ufb; 595 595 int ret; 596 + uint32_t size; 596 597 597 598 obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]); 598 599 if (obj == NULL) 599 600 return ERR_PTR(-ENOENT); 601 + 602 + size = mode_cmd->pitches[0] * mode_cmd->height; 603 + size = ALIGN(size, PAGE_SIZE); 604 + 605 + if (size > obj->size) { 606 + DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height); 607 + return ERR_PTR(-ENOMEM); 608 + } 600 609 601 610 ufb = kzalloc(sizeof(*ufb), GFP_KERNEL); 602 611 if (ufb == NULL)
+75
drivers/gpu/drm/udl/udl_gem.c
··· 9 9 #include "drmP.h" 10 10 #include "udl_drv.h" 11 11 #include <linux/shmem_fs.h> 12 + #include <linux/dma-buf.h> 12 13 13 14 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, 14 15 size_t size) ··· 162 161 int page_count = obj->base.size / PAGE_SIZE; 163 162 int i; 164 163 164 + if (obj->base.import_attach) { 165 + drm_free_large(obj->pages); 166 + obj->pages = NULL; 167 + return; 168 + } 169 + 165 170 for (i = 0; i < page_count; i++) 166 171 page_cache_release(obj->pages[i]); 167 172 ··· 201 194 void udl_gem_free_object(struct drm_gem_object *gem_obj) 202 195 { 203 196 struct udl_gem_object *obj = to_udl_bo(gem_obj); 197 + 198 + if (gem_obj->import_attach) 199 + drm_prime_gem_destroy(gem_obj, obj->sg); 204 200 205 201 if (obj->vmapping) 206 202 udl_gem_vunmap(obj); ··· 248 238 unlock: 249 239 mutex_unlock(&dev->struct_mutex); 250 240 return ret; 241 + } 242 + 243 + static int udl_prime_create(struct drm_device *dev, 244 + size_t size, 245 + struct sg_table *sg, 246 + struct udl_gem_object **obj_p) 247 + { 248 + struct udl_gem_object *obj; 249 + int npages; 250 + int i; 251 + struct scatterlist *iter; 252 + 253 + npages = size / PAGE_SIZE; 254 + 255 + *obj_p = NULL; 256 + obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE); 257 + if (!obj) 258 + return -ENOMEM; 259 + 260 + obj->sg = sg; 261 + obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); 262 + if (obj->pages == NULL) { 263 + DRM_ERROR("obj pages is NULL %d\n", npages); 264 + return -ENOMEM; 265 + } 266 + 267 + drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); 268 + 269 + *obj_p = obj; 270 + return 0; 271 + } 272 + 273 + struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, 274 + struct dma_buf *dma_buf) 275 + { 276 + struct dma_buf_attachment *attach; 277 + struct sg_table *sg; 278 + struct udl_gem_object *uobj; 279 + int ret; 280 + 281 + /* need to attach */ 282 + attach = dma_buf_attach(dma_buf, dev->dev); 283 + if (IS_ERR(attach)) 284 + return ERR_PTR(PTR_ERR(attach)); 285 + 286 + sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 287 + if (IS_ERR(sg)) { 288 + ret = PTR_ERR(sg); 289 + goto fail_detach; 290 + } 291 + 292 + ret = udl_prime_create(dev, dma_buf->size, sg, &uobj); 293 + if (ret) { 294 + goto fail_unmap; 295 + } 296 + 297 + uobj->base.import_attach = attach; 298 + 299 + return &uobj->base; 300 + 301 + fail_unmap: 302 + dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); 303 + fail_detach: 304 + dma_buf_detach(dma_buf, attach); 305 + return ERR_PTR(ret); 251 306 }