/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

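/*
 * Per-open-file state: the contexts IDR maps the context IDs handed out to
 * userspace back to their tegra_drm_context, with lock serializing all
 * accesses to it.
 */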
struct tegra_drm_file {
        struct idr contexts;
        struct mutex lock;
};

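/*
 * Queue a nonblocking commit: stash the atomic state and let the commit
 * worker (tegra_atomic_work() below) apply it asynchronously.
 */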
static void tegra_atomic_schedule(struct tegra_drm *tegra,
                                  struct drm_atomic_state *state)
{
        tegra->commit.state = state;
        schedule_work(&tegra->commit.work);
}

static void tegra_atomic_complete(struct tegra_drm *tegra,
                                  struct drm_atomic_state *state)
{
        struct drm_device *drm = tegra->drm;

        /*
         * Everything below can be run asynchronously without the need to grab
         * any modeset locks at all under one condition: It must be guaranteed
         * that the asynchronous work has either been cancelled (if the driver
         * supports it, which at least requires that the framebuffers get
         * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
         * before the new state gets committed on the software side with
         * drm_atomic_helper_swap_state().
         *
         * This scheme allows new atomic state updates to be prepared and
         * checked in parallel to the asynchronous completion of the previous
         * update, which is important since compositors need to figure out the
         * composition of the next frame right after having submitted the
         * current layout.
         */

        drm_atomic_helper_commit_modeset_disables(drm, state);
        drm_atomic_helper_commit_modeset_enables(drm, state);
        drm_atomic_helper_commit_planes(drm, state,
                                        DRM_PLANE_COMMIT_ACTIVE_ONLY);

        drm_atomic_helper_wait_for_vblanks(drm, state);

        drm_atomic_helper_cleanup_planes(drm, state);
        drm_atomic_state_put(state);
}

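/* Worker that applies the atomic state queued by tegra_atomic_schedule(). */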
static void tegra_atomic_work(struct work_struct *work)
{
        struct tegra_drm *tegra = container_of(work, struct tegra_drm,
                                               commit.work);

        tegra_atomic_complete(tegra, tegra->commit.state);
}

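/*
 * Driver-specific .atomic_commit() implementation: prepare the planes, swap
 * in the new state, then apply it either directly (blocking) or from the
 * commit worker (nonblocking).
 */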
static int tegra_atomic_commit(struct drm_device *drm,
                               struct drm_atomic_state *state, bool nonblock)
{
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        err = drm_atomic_helper_prepare_planes(drm, state);
        if (err)
                return err;

        /* serialize outstanding nonblocking commits */
        mutex_lock(&tegra->commit.lock);
        flush_work(&tegra->commit.work);

        /*
         * This is the point of no return - everything below never fails
         * except when the hardware misbehaves, which means we can commit
         * the new state on the software side now.
         */

        err = drm_atomic_helper_swap_state(state, true);
        if (err) {
                mutex_unlock(&tegra->commit.lock);
                drm_atomic_helper_cleanup_planes(drm, state);
                return err;
        }

        drm_atomic_state_get(state);
        if (nonblock)
                tegra_atomic_schedule(tegra, state);
        else
                tegra_atomic_complete(tegra, state);

        mutex_unlock(&tegra->commit.lock);
        return 0;
}

static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
        .fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
        .output_poll_changed = tegra_fb_output_poll_changed,
#endif
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = tegra_atomic_commit,
};

static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
        struct host1x_device *device = to_host1x_device(drm->dev);
        struct tegra_drm *tegra;
        int err;

        tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
        if (!tegra)
                return -ENOMEM;

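        /*
         * If an IOMMU is available, reserve the top CARVEOUT_SZ bytes of
         * its aperture for the carveout IOVA allocator and hand the rest
         * to the drm_mm used for GEM allocations.
         */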
        if (iommu_present(&platform_bus_type)) {
                u64 carveout_start, carveout_end, gem_start, gem_end;
                struct iommu_domain_geometry *geometry;
                unsigned long order;

                tegra->domain = iommu_domain_alloc(&platform_bus_type);
                if (!tegra->domain) {
                        err = -ENOMEM;
                        goto free;
                }

                geometry = &tegra->domain->geometry;
                gem_start = geometry->aperture_start;
                gem_end = geometry->aperture_end - CARVEOUT_SZ;
                carveout_start = gem_end + 1;
                carveout_end = geometry->aperture_end;

                order = __ffs(tegra->domain->pgsize_bitmap);
                init_iova_domain(&tegra->carveout.domain, 1UL << order,
                                 carveout_start >> order);

                tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
                tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

                drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
                mutex_init(&tegra->mm_lock);

                DRM_DEBUG("IOMMU apertures:\n");
                DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
                DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
                          carveout_end);
        }

        mutex_init(&tegra->clients_lock);
        INIT_LIST_HEAD(&tegra->clients);

        mutex_init(&tegra->commit.lock);
        INIT_WORK(&tegra->commit.work, tegra_atomic_work);

        drm->dev_private = tegra;
        tegra->drm = drm;

        drm_mode_config_init(drm);

        drm->mode_config.min_width = 0;
        drm->mode_config.min_height = 0;

        drm->mode_config.max_width = 4096;
        drm->mode_config.max_height = 4096;

        drm->mode_config.allow_fb_modifiers = true;

        drm->mode_config.funcs = &tegra_drm_mode_funcs;

        err = tegra_drm_fb_prepare(drm);
        if (err < 0)
                goto config;

        drm_kms_helper_poll_init(drm);

        err = host1x_device_init(device);
        if (err < 0)
                goto fbdev;

        /*
         * We don't use the drm_irq_install() helpers provided by the DRM
         * core, so we need to set this manually in order to allow
         * DRM_IOCTL_WAIT_VBLANK to operate correctly.
         */
        drm->irq_enabled = true;

        /* syncpoints are used for full 32-bit hardware VBLANK counters */
        drm->max_vblank_count = 0xffffffff;

        err = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (err < 0)
                goto device;

        drm_mode_config_reset(drm);

        err = tegra_drm_fb_init(drm);
        if (err < 0)
                goto device;

        return 0;

device:
        host1x_device_exit(device);
fbdev:
        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_free(drm);
config:
        drm_mode_config_cleanup(drm);

        if (tegra->domain) {
                iommu_domain_free(tegra->domain);
                drm_mm_takedown(&tegra->mm);
                mutex_destroy(&tegra->mm_lock);
                put_iova_domain(&tegra->carveout.domain);
        }
free:
        kfree(tegra);
        return err;
}

static void tegra_drm_unload(struct drm_device *drm)
{
        struct host1x_device *device = to_host1x_device(drm->dev);
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_exit(drm);
        drm_mode_config_cleanup(drm);

        err = host1x_device_exit(device);
        if (err < 0)
                return;

        if (tegra->domain) {
                iommu_domain_free(tegra->domain);
                drm_mm_takedown(&tegra->mm);
                mutex_destroy(&tegra->mm_lock);
                put_iova_domain(&tegra->carveout.domain);
        }

        kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
        struct tegra_drm_file *fpriv;

        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
        if (!fpriv)
                return -ENOMEM;

        idr_init(&fpriv->contexts);
        mutex_init(&fpriv->lock);
        filp->driver_priv = fpriv;

        return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
        context->client->ops->close_channel(context);
        kfree(context);
}

static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
        struct tegra_drm *tegra = drm->dev_private;

        tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}

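/* Resolve a GEM handle from userspace to the underlying host1x_bo. */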
static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, handle);
        if (!gem)
                return NULL;

        bo = to_tegra_bo(gem);
        return &bo->base;
}

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
                                       struct drm_tegra_reloc __user *src,
                                       struct drm_device *drm,
                                       struct drm_file *file)
{
        u32 cmdbuf, target;
        int err;

        err = get_user(cmdbuf, &src->cmdbuf.handle);
        if (err < 0)
                return err;

        err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
        if (err < 0)
                return err;

        err = get_user(target, &src->target.handle);
        if (err < 0)
                return err;

        err = get_user(dest->target.offset, &src->target.offset);
        if (err < 0)
                return err;

        err = get_user(dest->shift, &src->shift);
        if (err < 0)
                return err;

        dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
        if (!dest->cmdbuf.bo)
                return -ENOENT;

        dest->target.bo = host1x_bo_lookup(file, target);
        if (!dest->target.bo)
                return -ENOENT;

        return 0;
}

static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest,
                                         struct drm_tegra_waitchk __user *src,
                                         struct drm_file *file)
{
        u32 cmdbuf;
        int err;

        err = get_user(cmdbuf, &src->handle);
        if (err < 0)
                return err;

        err = get_user(dest->offset, &src->offset);
        if (err < 0)
                return err;

        err = get_user(dest->syncpt_id, &src->syncpt);
        if (err < 0)
                return err;

        err = get_user(dest->thresh, &src->thresh);
        if (err < 0)
                return err;

        dest->bo = host1x_bo_lookup(file, cmdbuf);
        if (!dest->bo)
                return -ENOENT;

        return 0;
}

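/*
 * Copy a job submission from userspace, validate the command buffers,
 * relocations and waitchks it references, and hand the resulting job to
 * host1x for execution. All referenced BOs are tracked in refs[] so that
 * they can be unreferenced once the submission has been queued.
 */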
int tegra_drm_submit(struct tegra_drm_context *context,
                     struct drm_tegra_submit *args, struct drm_device *drm,
                     struct drm_file *file)
{
        unsigned int num_cmdbufs = args->num_cmdbufs;
        unsigned int num_relocs = args->num_relocs;
        unsigned int num_waitchks = args->num_waitchks;
        struct drm_tegra_cmdbuf __user *user_cmdbufs;
        struct drm_tegra_reloc __user *user_relocs;
        struct drm_tegra_waitchk __user *user_waitchks;
        struct drm_tegra_syncpt __user *user_syncpt;
        struct drm_tegra_syncpt syncpt;
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_gem_object **refs;
        struct host1x_syncpt *sp;
        struct host1x_job *job;
        unsigned int num_refs;
        int err;

        user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
        user_relocs = u64_to_user_ptr(args->relocs);
        user_waitchks = u64_to_user_ptr(args->waitchks);
        user_syncpt = u64_to_user_ptr(args->syncpts);

        /* We don't yet support more than one syncpt_incr struct per submit */
        if (args->num_syncpts != 1)
                return -EINVAL;

        /* We don't yet support waitchks */
        if (args->num_waitchks != 0)
                return -EINVAL;

        job = host1x_job_alloc(context->channel, args->num_cmdbufs,
                               args->num_relocs, args->num_waitchks);
        if (!job)
                return -ENOMEM;

        job->num_relocs = args->num_relocs;
        job->num_waitchk = args->num_waitchks;
        job->client = (u32)args->context;
        job->class = context->client->base.class;
        job->serialize = true;

        /*
         * Track referenced BOs so that they can be unreferenced after the
         * submission is complete.
         */
        num_refs = num_cmdbufs + num_relocs * 2 + num_waitchks;

        refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
        if (!refs) {
                err = -ENOMEM;
                goto put;
        }

        /* reuse as an iterator later */
        num_refs = 0;

        while (num_cmdbufs) {
                struct drm_tegra_cmdbuf cmdbuf;
                struct host1x_bo *bo;
                struct tegra_bo *obj;
                u64 offset;

                if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
                        err = -EFAULT;
                        goto fail;
                }

                /*
                 * The maximum number of CDMA gather fetches is 16383; a
                 * higher value means the word count is malformed.
                 */
                if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
                        err = -EINVAL;
                        goto fail;
                }

                bo = host1x_bo_lookup(file, cmdbuf.handle);
                if (!bo) {
                        err = -ENOENT;
                        goto fail;
                }

                offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
                obj = host1x_to_tegra_bo(bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * The gather buffer base address must be 4-byte aligned;
                 * an unaligned offset is malformed and causes command
                 * stream corruption during buffer address relocation.
                 */
                if (offset & 3 || offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
                num_cmdbufs--;
                user_cmdbufs++;
        }

        /* copy and resolve relocations from submit */
        while (num_relocs--) {
                struct host1x_reloc *reloc;
                struct tegra_bo *obj;

                err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
                                                  &user_relocs[num_relocs], drm,
                                                  file);
                if (err < 0)
                        goto fail;

                reloc = &job->relocarray[num_relocs];
                obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * An unaligned cmdbuf offset causes an unaligned write
                 * during relocation patching, corrupting the command
                 * stream.
                 */
                if (reloc->cmdbuf.offset & 3 ||
                    reloc->cmdbuf.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                obj = host1x_to_tegra_bo(reloc->target.bo);
                refs[num_refs++] = &obj->gem;

                if (reloc->target.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
        }

        /* copy and resolve waitchks from submit */
        while (num_waitchks--) {
                struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
                struct tegra_bo *obj;

                err = host1x_waitchk_copy_from_user(
                        wait, &user_waitchks[num_waitchks], file);
                if (err < 0)
                        goto fail;

                obj = host1x_to_tegra_bo(wait->bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * An unaligned offset causes an unaligned write during
                 * waitchk patching, corrupting the command stream.
                 */
                if (wait->offset & 3 ||
                    wait->offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
        }

        if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
                err = -EFAULT;
                goto fail;
        }

        /* check whether syncpoint ID is valid */
        sp = host1x_syncpt_get(host1x, syncpt.id);
        if (!sp) {
                err = -ENOENT;
                goto fail;
        }

        job->is_addr_reg = context->client->ops->is_addr_reg;
        job->is_valid_class = context->client->ops->is_valid_class;
        job->syncpt_incrs = syncpt.incrs;
        job->syncpt_id = syncpt.id;
        job->timeout = 10000;

        if (args->timeout && args->timeout < 10000)
                job->timeout = args->timeout;

        err = host1x_job_pin(job, context->client->base.dev);
        if (err)
                goto fail;

        err = host1x_job_submit(job);
        if (err) {
                host1x_job_unpin(job);
                goto fail;
        }

        args->fence = job->syncpt_end;

fail:
        while (num_refs--)
                drm_gem_object_put_unlocked(refs[num_refs]);

        kfree(refs);

put:
        host1x_job_put(job);
        return err;
}

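/* Staging IOCTL implementations, only built with CONFIG_DRM_TEGRA_STAGING. */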
#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct drm_tegra_gem_create *args = data;
        struct tegra_bo *bo;

        bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
                          struct drm_file *file)
{
        struct drm_tegra_gem_mmap *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -EINVAL;

        bo = to_tegra_bo(gem);

        args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

        drm_gem_object_put_unlocked(gem);

        return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_read *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host, args->id);
        if (!sp)
                return -EINVAL;

        args->value = host1x_syncpt_read_min(sp);
        return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_incr *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_wait *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_wait(sp, args->thresh, args->timeout,
                                  &args->value);
}

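/*
 * Open a channel on the client and publish it in the file's contexts IDR.
 * IDs are allocated starting at 1, presumably so that 0 can never name a
 * valid context.
 */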
static int tegra_client_open(struct tegra_drm_file *fpriv,
                             struct tegra_drm_client *client,
                             struct tegra_drm_context *context)
{
        int err;

        err = client->ops->open_channel(client, context);
        if (err < 0)
                return err;

        err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
        if (err < 0) {
                client->ops->close_channel(context);
                return err;
        }

        context->client = client;
        context->id = err;

        return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
                              struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_tegra_open_channel *args = data;
        struct tegra_drm_context *context;
        struct tegra_drm_client *client;
        int err = -ENODEV;

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        mutex_lock(&fpriv->lock);

        list_for_each_entry(client, &tegra->clients, list)
                if (client->base.class == args->client) {
                        err = tegra_client_open(fpriv, client, context);
                        if (err < 0)
                                break;

                        args->context = context->id;
                        break;
                }

        if (err < 0)
                kfree(context);

        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_close_channel *args = data;
        struct tegra_drm_context *context;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -EINVAL;
                goto unlock;
        }

        idr_remove(&fpriv->contexts, context->id);
        tegra_drm_context_free(context);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->index >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->index];
        args->id = host1x_syncpt_id(syncpt);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
                        struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_submit *args = data;
        struct tegra_drm_context *context;
        int err;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        err = context->client->ops->submit(context, args, drm, file);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
                                 struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt_base *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt_base *base;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->syncpt >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->syncpt];

        base = host1x_syncpt_get_base(syncpt);
        if (!base) {
                err = -ENXIO;
                goto unlock;
        }

        args->id = host1x_syncpt_base_id(base);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_set_tiling *args = data;
        enum tegra_bo_tiling_mode mode;
        struct drm_gem_object *gem;
        unsigned long value = 0;
        struct tegra_bo *bo;

        switch (args->mode) {
        case DRM_TEGRA_GEM_TILING_MODE_PITCH:
                mode = TEGRA_BO_TILING_MODE_PITCH;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_TILED:
                mode = TEGRA_BO_TILING_MODE_TILED;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
                mode = TEGRA_BO_TILING_MODE_BLOCK;

                if (args->value > 5)
                        return -EINVAL;

                value = args->value;
                break;

        default:
                return -EINVAL;
        }

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        bo->tiling.mode = mode;
        bo->tiling.value = value;

        drm_gem_object_put_unlocked(gem);

        return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_get_tiling *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
        int err = 0;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        switch (bo->tiling.mode) {
        case TEGRA_BO_TILING_MODE_PITCH:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_TILED:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_BLOCK:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
                args->value = bo->tiling.value;
                break;

        default:
                err = -EINVAL;
                break;
        }

        drm_gem_object_put_unlocked(gem);

        return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_set_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
                return -EINVAL;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        bo->flags = 0;

        if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        drm_gem_object_put_unlocked(gem);

        return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_get_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        args->flags = 0;

        if (bo->flags & TEGRA_BO_BOTTOM_UP)
                args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

        drm_gem_object_put_unlocked(gem);

        return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = tegra_drm_mmap,
        .poll = drm_poll,
        .read = drm_read,
        .compat_ioctl = drm_compat_ioctl,
        .llseek = noop_llseek,
};

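/* idr_for_each() callback used on file close to tear down any contexts left open. */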
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
        struct tegra_drm_context *context = p;

        tegra_drm_context_free(context);

        return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;

        mutex_lock(&fpriv->lock);
        idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
        mutex_unlock(&fpriv->lock);

        idr_destroy(&fpriv->contexts);
        mutex_destroy(&fpriv->lock);
        kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct drm_framebuffer *fb;

        mutex_lock(&drm->mode_config.fb_lock);

        list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
                seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
                           fb->base.id, fb->width, fb->height,
                           fb->format->depth,
                           fb->format->cpp[0] * 8,
                           drm_framebuffer_read_refcount(fb));
        }

        mutex_unlock(&drm->mode_config.fb_lock);

        return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_printer p = drm_seq_file_printer(s);

        if (tegra->domain) {
                mutex_lock(&tegra->mm_lock);
                drm_mm_print(&tegra->mm, &p);
                mutex_unlock(&tegra->mm_lock);
        }

        return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
        { "framebuffers", tegra_debugfs_framebuffers, 0 },
        { "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
        return drm_debugfs_create_files(tegra_debugfs_list,
                                        ARRAY_SIZE(tegra_debugfs_list),
                                        minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
                           DRIVER_ATOMIC | DRIVER_RENDER,
        .load = tegra_drm_load,
        .unload = tegra_drm_unload,
        .open = tegra_drm_open,
        .postclose = tegra_drm_postclose,
        .lastclose = tegra_drm_lastclose,

#if defined(CONFIG_DEBUG_FS)
        .debugfs_init = tegra_debugfs_init,
#endif

        .gem_free_object_unlocked = tegra_bo_free_object,
        .gem_vm_ops = &tegra_bo_vm_ops,

        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = tegra_gem_prime_export,
        .gem_prime_import = tegra_gem_prime_import,

        .dumb_create = tegra_bo_dumb_create,

        .ioctls = tegra_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
        .fops = &tegra_drm_fops,

        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
                              struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_add_tail(&client->list, &tegra->clients);
        mutex_unlock(&tegra->clients_lock);

        return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
                                struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_del_init(&client->list);
        mutex_unlock(&tegra->clients_lock);

        return 0;
}

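/*
 * Allocate size bytes of kernel memory for use by host1x clients. With an
 * IOMMU present, the pages are mapped into the carveout IOVA range and
 * *dma receives the IO virtual address; without one, devices address
 * physical memory directly and *dma is the physical address.
 */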
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
                      dma_addr_t *dma)
{
        struct iova *alloc;
        void *virt;
        gfp_t gfp;
        int err;

        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        gfp = GFP_KERNEL | __GFP_ZERO;
        if (!tegra->domain) {
                /*
                 * Many units only support 32-bit addresses, even on 64-bit
                 * SoCs. If there is no IOMMU to translate into a 32-bit IO
                 * virtual address space, force allocations to be in the
                 * lower 32-bit range.
                 */
                gfp |= GFP_DMA;
        }

        virt = (void *)__get_free_pages(gfp, get_order(size));
        if (!virt)
                return ERR_PTR(-ENOMEM);

        if (!tegra->domain) {
                /*
                 * If IOMMU is disabled, devices address physical memory
                 * directly.
                 */
                *dma = virt_to_phys(virt);
                return virt;
        }

        alloc = alloc_iova(&tegra->carveout.domain,
                           size >> tegra->carveout.shift,
                           tegra->carveout.limit, true);
        if (!alloc) {
                err = -EBUSY;
                goto free_pages;
        }

        *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
        err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
                        size, IOMMU_READ | IOMMU_WRITE);
        if (err < 0)
                goto free_iova;

        return virt;

free_iova:
        __free_iova(&tegra->carveout.domain, alloc);
free_pages:
        free_pages((unsigned long)virt, get_order(size));

        return ERR_PTR(err);
}

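/* Undo tegra_drm_alloc(): unmap from the carveout (if mapped) and free the pages. */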
void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
                    dma_addr_t dma)
{
        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        if (tegra->domain) {
                iommu_unmap(tegra->domain, dma, size);
                free_iova(&tegra->carveout.domain,
                          iova_pfn(&tegra->carveout.domain, dma));
        }

        free_pages((unsigned long)virt, get_order(size));
}

static int host1x_drm_probe(struct host1x_device *dev)
{
        struct drm_driver *driver = &tegra_drm_driver;
        struct drm_device *drm;
        int err;

        drm = drm_dev_alloc(driver, &dev->dev);
        if (IS_ERR(drm))
                return PTR_ERR(drm);

        dev_set_drvdata(&dev->dev, drm);

        err = drm_dev_register(drm, 0);
        if (err < 0)
                goto unref;

        return 0;

unref:
        drm_dev_unref(drm);
        return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
        struct drm_device *drm = dev_get_drvdata(&dev->dev);

        drm_dev_unregister(drm);
        drm_dev_unref(drm);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_kms_helper_poll_disable(drm);
        tegra_drm_fb_suspend(drm);

        tegra->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(tegra->state)) {
                tegra_drm_fb_resume(drm);
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(tegra->state);
        }

        return 0;
}

static int host1x_drm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_atomic_helper_resume(drm, tegra->state);
        tegra_drm_fb_resume(drm);
        drm_kms_helper_poll_enable(drm);

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
                         host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
        { .compatible = "nvidia,tegra20-dc", },
        { .compatible = "nvidia,tegra20-hdmi", },
        { .compatible = "nvidia,tegra20-gr2d", },
        { .compatible = "nvidia,tegra20-gr3d", },
        { .compatible = "nvidia,tegra30-dc", },
        { .compatible = "nvidia,tegra30-hdmi", },
        { .compatible = "nvidia,tegra30-gr2d", },
        { .compatible = "nvidia,tegra30-gr3d", },
        { .compatible = "nvidia,tegra114-dsi", },
        { .compatible = "nvidia,tegra114-hdmi", },
        { .compatible = "nvidia,tegra114-gr3d", },
        { .compatible = "nvidia,tegra124-dc", },
        { .compatible = "nvidia,tegra124-sor", },
        { .compatible = "nvidia,tegra124-hdmi", },
        { .compatible = "nvidia,tegra124-dsi", },
        { .compatible = "nvidia,tegra124-vic", },
        { .compatible = "nvidia,tegra132-dsi", },
        { .compatible = "nvidia,tegra210-dc", },
        { .compatible = "nvidia,tegra210-dsi", },
        { .compatible = "nvidia,tegra210-sor", },
        { .compatible = "nvidia,tegra210-sor1", },
        { .compatible = "nvidia,tegra210-vic", },
        { .compatible = "nvidia,tegra186-vic", },
        { /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
        .driver = {
                .name = "drm",
                .pm = &host1x_drm_pm_ops,
        },
        .probe = host1x_drm_probe,
        .remove = host1x_drm_remove,
        .subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
        &tegra_dc_driver,
        &tegra_hdmi_driver,
        &tegra_dsi_driver,
        &tegra_dpaux_driver,
        &tegra_sor_driver,
        &tegra_gr2d_driver,
        &tegra_gr3d_driver,
        &tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
        int err;

        err = host1x_driver_register(&host1x_drm_driver);
        if (err < 0)
                return err;

        err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (err < 0)
                goto unregister_host1x;

        return 0;

unregister_host1x:
        host1x_driver_unregister(&host1x_drm_driver);
        return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
        host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");