// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"

/*
 * Cmdstream submission:
 */

static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_gem_submit *submit;
	uint64_t sz;
	int ret;

	sz = struct_size(submit, bos, nr_bos) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

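	/*
	 * sz is u64 while kzalloc() takes a size_t, so anything that
	 * does not fit in size_t (including a struct_size() result
	 * saturated to SIZE_MAX on overflow) is rejected here rather
	 * than silently truncated.
	 */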
	if (sz > SIZE_MAX)
		return ERR_PTR(-ENOMEM);

	submit = kzalloc(sz, GFP_KERNEL);
	if (!submit)
		return ERR_PTR(-ENOMEM);

	submit->hw_fence = msm_fence_alloc();
	if (IS_ERR(submit->hw_fence)) {
		ret = PTR_ERR(submit->hw_fence);
		kfree(submit);
		return ERR_PTR(ret);
	}

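	/*
	 * Note that if job init fails, hw_fence is still just the
	 * pre-allocated memory from msm_fence_alloc() (it only becomes
	 * a real fence in msm_job_run()), so plain kfree() rather than
	 * dma_fence_put() is the correct unwind here.
	 */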
	ret = drm_sched_job_init(&submit->base, queue->entity, queue);
	if (ret) {
		kfree(submit->hw_fence);
		kfree(submit);
		return ERR_PTR(ret);
	}

	kref_init(&submit->ref);
	submit->dev = dev;
	submit->aspace = queue->ctx->aspace;
	submit->gpu = gpu;
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->pid = get_pid(task_pid(current));
	submit->ring = gpu->rb[queue->ring_nr];
	submit->fault_dumped = false;

	/* Get a unique identifier for the submission for logging purposes */
	submit->ident = atomic_inc_return(&ident) - 1;

	INIT_LIST_HEAD(&submit->node);

	return submit;
}

void __msm_gem_submit_destroy(struct kref *kref)
{
	struct msm_gem_submit *submit =
			container_of(kref, struct msm_gem_submit, ref);
	unsigned i;

	if (submit->fence_id) {
		spin_lock(&submit->queue->idr_lock);
		idr_remove(&submit->queue->fence_idr, submit->fence_id);
		spin_unlock(&submit->queue->idr_lock);
	}

	dma_fence_put(submit->user_fence);

	/*
	 * If the submit is freed before msm_job_run(), then hw_fence is
	 * just some pre-allocated memory, not a reference counted fence.
	 * Once the job runs and the hw_fence is initialized, it will
	 * have a refcount of at least one, since the submit holds a ref
	 * to the hw_fence.
	 */
	if (kref_read(&submit->hw_fence->refcount) == 0) {
		kfree(submit->hw_fence);
	} else {
		dma_fence_put(submit->hw_fence);
	}

	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	for (i = 0; i < submit->nr_cmds; i++)
		kfree(submit->cmd[i].relocs);

	kfree(submit);
}

static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
			i = 0;
			goto out;
		}

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
		    !(submit_bo.flags & MANDATORY_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
		/* submit_pin_objects() checks whether this presumed iova is still valid: */
		submit->bos[i].iova = submit_bo.presumed;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = to_msm_bo(obj);
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
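	/*
	 * nr_bos only counts entries we actually hold object references
	 * for; the early error paths above reset i to 0 because no
	 * references have been taken at that point.
	 */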
	submit->nr_bos = i;

	return ret;
}

static int submit_lookup_cmds(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	size_t sz;
	int ret = 0;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			return -EINVAL;
		}

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
				submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

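		/* sizes and offsets are tracked internally in dwords: */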
		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].offset = submit_cmd.submit_offset / 4;
		submit->cmd[i].idx = submit_cmd.submit_idx;
		submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;

		userptr = u64_to_user_ptr(submit_cmd.relocs);

		sz = array_size(submit_cmd.nr_relocs,
				sizeof(struct drm_msm_gem_submit_reloc));
		/* check for overflow: */
		if (sz == SIZE_MAX) {
			ret = -ENOMEM;
			goto out;
		}
		submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
		if (!submit->cmd[i].relocs) {
			ret = -ENOMEM;
			goto out;
		}
		ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	return ret;
}

/* Unwind bo state, according to cleanup_flags. In the success case, only
 * the lock is dropped at the end of the submit (and active/pin ref is dropped
 * later when the submit is retired).
 */
static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
		unsigned cleanup_flags)
{
	struct drm_gem_object *obj = &submit->bos[i].obj->base;
	unsigned flags = submit->bos[i].flags & cleanup_flags;

	/*
	 * Clear flags bit before dropping lock, so that the msm_job_run()
	 * path isn't racing with submit_cleanup() (ie. the read/modify/
	 * write is protected by the obj lock in all paths)
	 */
	submit->bos[i].flags &= ~cleanup_flags;

	if (flags & BO_VMA_PINNED)
		msm_gem_vma_unpin(submit->bos[i].vma);

	if (flags & BO_OBJ_PINNED)
		msm_gem_unpin_locked(obj);

	if (flags & BO_LOCKED)
		dma_resv_unlock(obj->resv);
}

static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
	unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | BO_LOCKED;
	submit_cleanup_bo(submit, i, cleanup_flags);

	if (!(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;
}

/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

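	/*
	 * ww_mutex backoff dance: if a lock attempt below returns
	 * -EDEADLK, drop every lock taken so far, slow-lock the
	 * contended object, and retry the loop with the same ticket.
	 */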
retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(msm_obj->base.resv,
							  &submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	if (ret == -EALREADY) {
		DRM_ERROR("handle %u at index %u already on submit list\n",
			  submit->bos[i].handle, i);
		ret = -EINVAL;
	}

	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
						       &submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}

		/* Not expecting -EALREADY here; if the bo was already
		 * locked, we should have gotten -EALREADY from the
		 * dma_resv_lock_interruptible() call above.
		 */
		WARN_ON_ONCE(ret == -EALREADY);
	}

	return ret;
}

static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		/* NOTE: _reserve_shared() must happen before
		 * _add_shared_fence(), which makes this a slightly
		 * strange place to call it. OTOH this is a
		 * convenient can-fail point to hook it in.
		 */
		ret = dma_resv_reserve_fences(obj->resv, 1);
		if (ret)
			return ret;

		/* If userspace has determined that explicit fencing is
		 * used, it can disable implicit sync on the entire
		 * submit:
		 */
		if (no_implicit)
			continue;

		/* Otherwise userspace can ask for implicit sync to be
		 * disabled on specific buffers. This is useful for internal
		 * usermode driver managed buffers, suballocation, etc.
		 */
		if (submit->bos[i].flags & MSM_SUBMIT_BO_NO_IMPLICIT)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->base,
							      obj,
							      write);
		if (ret)
			break;
	}

	return ret;
}

static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		struct msm_gem_vma *vma;

		/* if locking succeeded, pin bo: */
		vma = msm_gem_get_vma_locked(obj, submit->aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			break;
		}

		ret = msm_gem_pin_vma_locked(obj, vma);
		if (ret)
			break;

		submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
		submit->bos[i].vma = vma;

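		/*
		 * If the iova userspace presumed for this bo is still
		 * correct, the cmdstream can be used as-is; otherwise
		 * the relocs referencing it must be patched below.
		 */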
		if (vma->iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = vma->iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}

static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_WRITE);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_READ);
	}
}

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
			  idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (!nr_relocs)
		return 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing. Eventually we probably
	 * want to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr_locked(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
				  submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
		    (off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

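		/* a negative shift value encodes a right shift: */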
		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr_locked(&obj->base);

	return ret;
}

/* Cleanup submit at end of ioctl. In the error case, this also drops
 * references, unpins, and drops active refcnt. In the non-error case,
 * this is done when the submit is retired.
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
	unsigned cleanup_flags = BO_LOCKED;
	unsigned i;

	if (error)
		cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_cleanup_bo(submit, i, cleanup_flags);
		if (error)
			drm_gem_object_put(&msm_obj->base);
	}
}

void msm_submit_retire(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		drm_gem_object_put(obj);
	}
}

struct msm_submit_post_dep {
	struct drm_syncobj *syncobj;
	uint64_t point;
	struct dma_fence_chain *chain;
};

static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
                                           struct drm_file *file,
                                           uint64_t in_syncobjs_addr,
                                           uint32_t nr_in_syncobjs,
                                           size_t syncobj_stride)
{
	struct drm_syncobj **syncobjs = NULL;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
	                   GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!syncobjs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_in_syncobjs; ++i) {
		uint64_t address = in_syncobjs_addr + i * syncobj_stride;

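		/*
		 * syncobj_stride lets the uabi struct grow over time;
		 * copy only the portion this kernel knows about.
		 */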
		if (copy_from_user(&syncobj_desc,
		                   u64_to_user_ptr(address),
		                   min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		if (syncobj_desc.point &&
		    !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
			ret = -EINVAL;
			break;
		}

		ret = drm_sched_job_add_syncobj_dependency(&submit->base, file,
							   syncobj_desc.handle, syncobj_desc.point);
		if (ret)
			break;

		if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
			syncobjs[i] =
				drm_syncobj_find(file, syncobj_desc.handle);
			if (!syncobjs[i]) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			if (syncobjs[j])
				drm_syncobj_put(syncobjs[j]);
		}
		kfree(syncobjs);
		return ERR_PTR(ret);
	}
	return syncobjs;
}

static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
                               uint32_t nr_syncobjs)
{
	uint32_t i;

	for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
		if (syncobjs[i])
			drm_syncobj_replace_fence(syncobjs[i], NULL);
	}
}

static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
                                                       struct drm_file *file,
                                                       uint64_t syncobjs_addr,
                                                       uint32_t nr_syncobjs,
                                                       size_t syncobj_stride)
{
	struct msm_submit_post_dep *post_deps;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
			    GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!post_deps)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_syncobjs; ++i) {
		uint64_t address = syncobjs_addr + i * syncobj_stride;

		if (copy_from_user(&syncobj_desc,
		                   u64_to_user_ptr(address),
		                   min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		post_deps[i].point = syncobj_desc.point;

		if (syncobj_desc.flags) {
			ret = -EINVAL;
			break;
		}

		if (syncobj_desc.point) {
			if (!drm_core_check_feature(dev,
			                            DRIVER_SYNCOBJ_TIMELINE)) {
				ret = -EOPNOTSUPP;
				break;
			}

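			/*
			 * Timeline points are signaled by adding a node
			 * to the syncobj's fence chain, so allocate the
			 * node up front where failure is still safe.
			 */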
			post_deps[i].chain = dma_fence_chain_alloc();
			if (!post_deps[i].chain) {
				ret = -ENOMEM;
				break;
			}
		}

		post_deps[i].syncobj =
			drm_syncobj_find(file, syncobj_desc.handle);
		if (!post_deps[i].syncobj) {
			ret = -EINVAL;
			break;
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			dma_fence_chain_free(post_deps[j].chain);
			if (post_deps[j].syncobj)
				drm_syncobj_put(post_deps[j].syncobj);
		}

		kfree(post_deps);
		return ERR_PTR(ret);
	}

	return post_deps;
}

static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
                                  uint32_t count, struct dma_fence *fence)
{
	uint32_t i;

	for (i = 0; post_deps && i < count; ++i) {
		if (post_deps[i].chain) {
			drm_syncobj_add_point(post_deps[i].syncobj,
			                      post_deps[i].chain,
			                      fence, post_deps[i].point);
			post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(post_deps[i].syncobj,
			                          fence);
		}
	}
}

int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit = NULL;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	struct msm_submit_post_dep *post_deps = NULL;
	struct drm_syncobj **syncobjs_to_reset = NULL;
	int out_fence_fd = -1;
	bool has_ww_ticket = false;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;

	if (args->pad)
		return -EINVAL;

	if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
		DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
		return -EPERM;
	}

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	ring = gpu->rb[queue->ring_nr];

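	/*
	 * Reserve the fd up front; it is only installed once the
	 * sync_file exists, and returned via put_unused_fd() on any
	 * earlier error.
	 */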
	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_post_unlock;
		}
	}

	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
	if (IS_ERR(submit)) {
		ret = PTR_ERR(submit);
		goto out_post_unlock;
	}

	trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
			     args->nr_bos, args->nr_cmds);

	ret = mutex_lock_interruptible(&queue->lock);
	if (ret)
		goto out_post_unlock;

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence) {
			ret = -EINVAL;
			goto out_unlock;
		}

		ret = drm_sched_job_add_dependency(&submit->base, in_fence);
		if (ret)
			goto out_unlock;
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
		syncobjs_to_reset = msm_parse_deps(submit, file,
		                                   args->in_syncobjs,
		                                   args->nr_in_syncobjs,
		                                   args->syncobj_stride);
		if (IS_ERR(syncobjs_to_reset)) {
			ret = PTR_ERR(syncobjs_to_reset);
			goto out_unlock;
		}
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
		post_deps = msm_parse_post_deps(dev, file,
		                                args->out_syncobjs,
		                                args->nr_out_syncobjs,
		                                args->syncobj_stride);
		if (IS_ERR(post_deps)) {
			ret = PTR_ERR(post_deps);
			goto out_unlock;
		}
	}

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lookup_cmds(submit, args, file);
	if (ret)
		goto out;

	/* copy_*_user while holding a ww ticket upsets lockdep */
	ww_acquire_init(&submit->ticket, &reservation_ww_class);
	has_ww_ticket = true;
	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	for (i = 0; i < args->nr_cmds; i++) {
		struct msm_gem_object *msm_obj;
		uint64_t iova;

		ret = submit_bo(submit, submit->cmd[i].idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (!submit->cmd[i].size ||
		    ((submit->cmd[i].size + submit->cmd[i].offset) >
				msm_obj->base.size / 4)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4,
				   submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

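	/*
	 * Preload the idr so that the GFP_NOWAIT allocations done
	 * below, under the idr_lock spinlock, have preallocated
	 * memory to draw from.
	 */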
	idr_preload(GFP_KERNEL);

	spin_lock(&queue->idr_lock);

	/*
	 * If using a userspace-provided seqno fence, validate that the
	 * id is available before arming the sched job. Since access to
	 * fence_idr is serialized on the queue lock, the slot should
	 * still be available after the job is armed.
	 */
	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
	    (!args->fence || idr_find(&queue->fence_idr, args->fence))) {
		spin_unlock(&queue->idr_lock);
		idr_preload_end();
		ret = -EINVAL;
		goto out;
	}

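	/*
	 * Arm the job and take a reference to its "finished" fence;
	 * this is the fence handed back to userspace below.
	 */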
	drm_sched_job_arm(&submit->base);

	submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);

	if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
		/*
		 * Userspace has assigned the seqno fence that it wants
		 * us to use. It is an error to pick a fence sequence
		 * number that is not available.
		 */
		submit->fence_id = args->fence;
		ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
				    &submit->fence_id, submit->fence_id,
				    GFP_NOWAIT);
		/*
		 * We've already validated that the fence_id slot is valid,
		 * so if idr_alloc_u32() failed it is a kernel bug.
		 */
		WARN_ON(ret);
	} else {
		/*
		 * Allocate an id which can be used by the WAIT_FENCE ioctl
		 * to map back to the underlying fence.
		 */
		submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
						    submit->user_fence, 1,
						    INT_MAX, GFP_NOWAIT);
	}

	spin_unlock(&queue->idr_lock);
	idr_preload_end();

	if (submit->fence_id < 0) {
		ret = submit->fence_id;
		submit->fence_id = 0;
	}

	if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		struct sync_file *sync_file = sync_file_create(submit->user_fence);
		if (!sync_file) {
			ret = -ENOMEM;
		} else {
			fd_install(out_fence_fd, sync_file->file);
			args->fence_fd = out_fence_fd;
		}
	}

	submit_attach_object_fences(submit);

	/* The scheduler owns a ref now: */
	msm_gem_submit_get(submit);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	drm_sched_entity_push_job(&submit->base);

	args->fence = submit->fence_id;
	queue->last_fence = submit->fence_id;

	msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
	msm_process_post_deps(post_deps, args->nr_out_syncobjs,
	                      submit->user_fence);

out:
	submit_cleanup(submit, !!ret);
	if (has_ww_ticket)
		ww_acquire_fini(&submit->ticket);
out_unlock:
	mutex_unlock(&queue->lock);
out_post_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);

	if (!IS_ERR_OR_NULL(submit)) {
		msm_gem_submit_put(submit);
	} else {
		/*
		 * If the submit hasn't yet taken ownership of the queue
		 * then we need to drop the reference ourselves:
		 */
		msm_submitqueue_put(queue);
	}
	if (!IS_ERR_OR_NULL(post_deps)) {
		for (i = 0; i < args->nr_out_syncobjs; ++i) {
			kfree(post_deps[i].chain);
			drm_syncobj_put(post_deps[i].syncobj);
		}
		kfree(post_deps);
	}

	if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
		for (i = 0; i < args->nr_in_syncobjs; ++i) {
			if (syncobjs_to_reset[i])
				drm_syncobj_put(syncobjs_to_reset[i]);
		}
		kfree(syncobjs_to_reset);
	}

	return ret;
}