// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

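/*
 * Count the number of contiguous DMA chunks in an SG list. An entry starts a
 * new chunk unless its DMA address immediately follows the end of the
 * previously counted chunk. Zero-length entries are skipped, since
 * sg_dma_address() is only valid for entries with a non-zero sg_dma_len().
 */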
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

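/*
 * Pin a buffer object to @dev and return a mapping for it. Three cases are
 * handled below: buffers imported via DMA-BUF are attached and mapped through
 * the dma_buf API, buffers backed by a page array (explicit IOMMU path) get
 * an SG table built from those pages, and buffers allocated through the DMA
 * API derive their SG table from dma_get_sgtable().
 */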
static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (gem->import_attach) {
		struct dma_buf *buf = gem->import_attach->dmabuf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			/* reset so the cleanup path doesn't kfree() an error pointer */
			map->sgt = NULL;
			goto free;
		}

		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

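/*
 * Undo tegra_bo_pin(): detach imported DMA-BUFs, or unmap and free the SG
 * table built for native buffers, then drop the reference on the buffer
 * object.
 */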
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

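/*
 * Return a kernel virtual address for the buffer: the existing vaddr for DMA
 * API allocations, a dma_buf_vmap() mapping for imported buffers, or a fresh
 * write-combined vmap() of the page array otherwise.
 */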
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct dma_buf_map map;
	int ret;

	if (obj->vaddr) {
		return obj->vaddr;
	} else if (obj->gem.import_attach) {
		ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
		return ret ? NULL : map.vaddr;
	} else {
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	}
}

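/*
 * Release a mapping obtained from tegra_bo_mmap(). DMA API allocations keep
 * their permanent kernel mapping, so there is nothing to do for them.
 */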
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

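/*
 * Map the buffer into the Tegra DRM IOMMU domain: reserve a range of I/O
 * virtual address space from the drm_mm allocator, then map the buffer's SG
 * table at that IOVA.
 */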
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

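/*
 * Tear down the IOMMU mapping created by tegra_bo_iommu_map() and return the
 * IOVA range to the allocator.
 */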
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

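/*
 * Back the buffer with shmem pages and map them for DMA. The resulting SG
 * table is what tegra_bo_iommu_map() later maps into the IOMMU domain.
 */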
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

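/*
 * Allocate backing storage: discontiguous pages remapped through the IOMMU
 * when a domain is available, or a physically contiguous write-combined DMA
 * allocation otherwise.
 */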
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

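/*
 * Create a buffer object of at least @size bytes; the size is rounded up to
 * page granularity by tegra_bo_alloc_object(). An illustrative call, with a
 * made-up size and flag choice, might look like:
 *
 *	bo = tegra_bo_create(drm, SZ_64K, DRM_TEGRA_GEM_CREATE_BOTTOM_UP);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */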
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

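/*
 * Wrap a foreign DMA-BUF in a tegra_bo: attach to the buffer, map it for the
 * device and, when an IOMMU domain is in use, install an IOMMU mapping for
 * it. A reference to the dma_buf is held for the lifetime of the object.
 */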
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

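/*
 * Final cleanup for a GEM object: unpin any cached host1x mappings, drop the
 * IOMMU mapping, then release either the import attachment or the locally
 * allocated backing storage.
 */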
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

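/*
 * Create a dumb buffer suitable for scanout. As a worked example (assuming a
 * pitch_align of 64 bytes): a 1920x1080 buffer at 32 bpp has a minimum pitch
 * of 1920 * 32 / 8 = 7680 bytes, which is already 64-byte aligned, so the
 * resulting size is 7680 * 1080 = 8294400 bytes.
 */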
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

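/*
 * Fault handler for page-array backed buffers: insert the page corresponding
 * to the faulting address. Buffers without a page array are mapped up front
 * in __tegra_gem_mmap() and therefore signal SIGBUS on a fault.
 */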
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

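/*
 * Common mmap implementation shared by the GEM mmap path and the PRIME .mmap
 * callback. Contiguous buffers are mapped in one go via dma_mmap_wc(), while
 * page-array backed buffers are populated lazily through tegra_bo_fault().
 */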
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

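/*
 * PRIME export path: build an SG table describing the buffer (from the page
 * array, or via dma_get_sgtable() for contiguous allocations) and map it for
 * the importing device.
 */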
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	dma_buf_map_set_vaddr(map, bo->vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

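/*
 * Import a DMA-BUF. If the buffer was exported by this very device, reuse
 * the underlying GEM object directly instead of creating a new import;
 * otherwise wrap it via tegra_bo_import().
 */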
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}