// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put_unlocked(&obj->gem);
}

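/*
 * Duplicate a caller-provided scatterlist into a freshly allocated SG
 * table, entry by entry, so the copy can be mapped for DMA without
 * disturbing the original entries (which may belong to another user).
 */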
/* XXX move this into lib/scatterlist.c? */
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}

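/*
 * host1x pin callback. Three cases are handled below: a buffer already
 * mapped through the explicit IOMMU API (return its IOVA directly), a
 * buffer backed by a page array or an imported SG table (hand host1x a
 * copy of the SG table to map via the DMA API), and a plain DMA API
 * allocation (construct an SG table from the coherent buffer).
 */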
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 */
	if (phys && obj->mm) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
					     GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}

static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

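/*
 * Kernel-space mapping helpers. Depending on how the buffer was created,
 * the virtual address comes from the original coherent DMA allocation,
 * from vmap()ing the page array with write-combining attributes, or from
 * the exporter in the dma-buf import case.
 */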
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr + page * PAGE_SIZE;
	else if (obj->gem.import_attach)
		return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
	else
		return vmap(obj->pages + page, 1, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

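/*
 * Reserve a range of I/O virtual addresses from the drm_mm allocator and
 * map the buffer's SG table into it. tegra->mm_lock serializes IOVA
 * allocation against concurrent map and unmap operations.
 */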
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

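/*
 * Allocate the tegra_bo wrapper and initialize the embedded GEM object,
 * including its mmap offset. The backing storage is attached separately,
 * either by tegra_bo_alloc() or by the dma-buf import path.
 */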
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

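/*
 * Back the object with shmem pages and map them for DMA. Note that
 * dma_map_sg() returns the number of mapped entries, so a return value
 * of zero is the error case here.
 */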
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

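/*
 * Create a fully backed buffer object. As an illustrative (hypothetical)
 * call site, a caller would typically do something like:
 *
 *	bo = tegra_bo_create(drm, size, DRM_TEGRA_GEM_CREATE_BOTTOM_UP);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */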
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put_unlocked(&bo->gem);

	return bo;
}

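/*
 * Wrap a foreign dma-buf in a tegra_bo. The buffer is attached and mapped
 * through the dma-buf interfaces, and additionally mapped through the
 * IOMMU when a shared domain is in use.
 */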
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

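/*
 * GEM free callback: undo whichever combination of IOMMU mapping, dma-buf
 * attachment and backing-storage allocation the object was created with.
 */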
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

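/*
 * Fault handler for mmap()ed buffers backed by a page array. Buffers
 * allocated with the DMA API never fault here because __tegra_gem_mmap()
 * maps them upfront with dma_mmap_wc().
 */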
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

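/*
 * Map a buffer into userspace. Contiguous (DMA API) buffers are mapped in
 * one go with dma_mmap_wc(); page-array buffers are instead faulted in
 * page by page through tegra_bo_fault().
 */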
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

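/*
 * dma-buf exporter callbacks. Note that, unlike tegra_bo_pin(), the
 * map_dma_buf callback below returns NULL rather than an ERR_PTR()
 * encoded error on failure.
 */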
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.map = tegra_gem_prime_kmap,
	.unmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

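/*
 * Export a GEM object as a dma-buf backed by the driver-specific
 * tegra_gem_prime_dmabuf_ops above, so that importers go through this
 * file's map, mmap and CPU-access paths.
 */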
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

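/*
 * Import a dma-buf. Self-imports (a buffer this device itself exported)
 * are short-circuited by taking a reference on the existing GEM object;
 * anything else goes through tegra_bo_import().
 */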
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}