/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put_unlocked(&obj->gem);
}

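/*
 * Pinning hands the host1x client the buffer's SG table together with the
 * address (an IOVA when an IOMMU is used, a physical address otherwise)
 * at which the buffer was mapped.
 */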
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	*sgt = obj->sgt;

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

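/*
 * Map the whole buffer into the kernel's address space: contiguous
 * buffers already have a kernel virtual address, imported buffers defer
 * to the exporter via dma_buf_vmap() and scattered pages are mapped
 * write-combined with vmap().
 */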
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

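/*
 * Map a single page of the buffer into the kernel's address space; the
 * same three backing-store cases as in tegra_bo_mmap() apply.
 */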
static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr + page * PAGE_SIZE;
	else if (obj->gem.import_attach)
		return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
	else
		return vmap(obj->pages + page, 1, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

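/*
 * Reserve a node in the IOVA space managed by the drm_mm allocator and
 * map the buffer's SG table at that address through the IOMMU domain.
 */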
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto unlock;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

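/*
 * Allocate and initialize the base GEM object (including its fake mmap
 * offset) that both natively allocated and imported buffers build on.
 */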
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
	}
}

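/*
 * Back the buffer with shmem pages that need not be physically
 * contiguous; this is only used when an IOMMU can remap the pages into a
 * contiguous view for the device.
 */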
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(bo->sgt);
}

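/*
 * With an IOMMU, buffers are backed by scattered pages that get remapped
 * into a contiguous IOVA range; without one, physically contiguous
 * write-combined memory is allocated through the DMA API instead.
 */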
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put_unlocked(&bo->gem);

	return bo;
}

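/*
 * Wrap a foreign dma-buf in a GEM object: attach to the buffer, map it
 * for the device and, when no IOMMU is available, insist on a single
 * physically contiguous chunk.
 */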
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

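/*
 * GEM free callback: tear down the IOMMU mapping (if any), then release
 * either the dma-buf attachment or the natively allocated backing store.
 */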
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

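/*
 * Create a dumb buffer, rounding the pitch up to the hardware's
 * alignment requirement.
 */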
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

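/*
 * Fault handler for page-backed buffers: insert the shmem page backing
 * the faulting address. Contiguous buffers are mapped up front in
 * tegra_gem_mmap() and must never fault.
 */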
static int tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, vmf->address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

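/*
 * Set up a userspace mapping: contiguous buffers are mapped in one go
 * via dma_mmap_wc(), while page-backed buffers are faulted in lazily
 * through tegra_bo_fault().
 */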
static int tegra_gem_mmap(struct drm_gem_object *gem,
			  struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return tegra_gem_mmap(gem, vma);
}

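/*
 * Build an SG table for an importing device: one entry per page for
 * page-backed buffers, or a single entry spanning the whole buffer for
 * contiguous ones.
 */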
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

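/* CPU access through the kmap()-style dma-buf interfaces is not supported. */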
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return tegra_gem_mmap(gem, vma);
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.map_atomic = tegra_gem_prime_kmap_atomic,
	.unmap_atomic = tegra_gem_prime_kunmap_atomic,
	.map = tegra_gem_prime_kmap,
	.unmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(drm, &exp_info);
}

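/*
 * When importing a buffer that this device exported in the first place,
 * take another reference on the underlying GEM object instead of
 * attaching to the dma-buf.
 */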
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}