/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include <drm/ttm/ttm_tt.h>

#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"

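/* Initialize the parser for a single submission: validate the chunk count,
 * look up the submission context and create the sync object used to collect
 * dependencies.
 */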
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
				 struct amdgpu_device *adev,
				 struct drm_file *filp,
				 union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	if (cs->in.num_chunks == 0)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->adev = adev;
	p->filp = filp;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx)
		return -EINVAL;

	if (atomic_read(&p->ctx->guilty)) {
		amdgpu_ctx_put(p->ctx);
		return -ECANCELED;
	}

	amdgpu_sync_create(&p->sync);
	return 0;
}

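/* Map an IB chunk to a gang member, growing the gang when the entity is not
 * part of any job yet. Returns the job index or a negative error code.
 */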
static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
{
	struct drm_sched_entity *entity;
	unsigned int i;
	int r;

	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
				  chunk_ib->ip_instance,
				  chunk_ib->ring, &entity);
	if (r)
		return r;

	/*
	 * Abort if there is no run queue associated with this entity.
	 * Possibly because of disabled HW IP.
	 */
	if (entity->rq == NULL)
		return -EINVAL;

	/* Check if we can add this IB to some existing job */
	for (i = 0; i < p->gang_size; ++i)
		if (p->entities[i] == entity)
			return i;

	/* If not, increase the gang size if possible */
	if (i == AMDGPU_CS_GANG_SIZE)
		return -EINVAL;

	p->entities[i] = entity;
	p->gang_size = i + 1;
	return i;
}

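/* First pass over an IB chunk: account the IB to its gang member; the entity
 * of the last IB chunk processed becomes the gang leader.
 */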
static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
			   unsigned int *num_ibs)
{
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
		return -EINVAL;

	++(num_ibs[r]);
	p->gang_leader_idx = r;
	return 0;
}

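/* First pass over a user fence chunk: look up the GEM object backing the
 * fence and check that the requested offset fits into it.
 */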
static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_cs_chunk_fence *data,
				   uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	drm_gem_object_put(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}

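/* Create the parser's BO list from the handles in a BO_HANDLES chunk */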
static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_bo_list_in *data)
{
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);

	return r;
}

/* Copy the data from userspace and go over it the first time */
static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
			   union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	uint32_t uf_offset = 0;
	size_t size;
	int ret;
	int i;

	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
				     GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				   GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
						    GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		/* Assume the worst on the following checks */
		ret = -EINVAL;
		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
						      &uf_offset);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			if (size < sizeof(struct drm_amdgpu_bo_list_in))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
			break;

		default:
			goto free_partial_kdata;
		}
	}

	if (!p->gang_size) {
		ret = -EINVAL;
		goto free_all_kdata;
	}

	for (i = 0; i < p->gang_size; ++i) {
		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
				       num_ibs[i], &p->jobs[i]);
		if (ret)
			goto free_all_kdata;
	}
	p->gang_leader = p->jobs[p->gang_leader_idx];

	if (p->ctx->generation != p->gang_leader->generation) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->gang_leader->uf_addr = uf_offset;
	kvfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kvfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kvfree(chunk_array);

	return ret;
}

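/* Second pass over an IB chunk: allocate the IB for the right gang member
 * and fill in its GPU address, size and flags.
 */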
static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
			   struct amdgpu_cs_chunk *chunk,
			   unsigned int *ce_preempt,
			   unsigned int *de_preempt)
{
	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	job = p->jobs[r];
	ring = amdgpu_job_ring(job);
	ib = &job->ibs[job->num_ibs++];

	/* MM engine doesn't support user fences */
	if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
		return -EINVAL;

	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
			(*ce_preempt)++;
		else
			(*de_preempt)++;

		/* Each GFX command submit allows at most one preemptible
		 * IB each for CE and DE.
		 */
		if (*ce_preempt > 1 || *de_preempt > 1)
			return -EINVAL;
	}

	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;

	r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
			  chunk_ib->ib_bytes : 0,
			  AMDGPU_IB_POOL_DELAYED, ib);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}

	ib->gpu_addr = chunk_ib->va_start;
	ib->length_dw = chunk_ib->ib_bytes / 4;
	ib->flags = chunk_ib->flags;
	return 0;
}

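/* Add the fences of a (SCHEDULED_)DEPENDENCIES chunk to the parser's sync
 * object. For scheduled dependencies only the scheduled fence is waited on.
 */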
static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
				     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

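/* Look up the fence behind a syncobj handle (optionally at a timeline point)
 * and add it as a dependency.
 */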
static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
					 uint32_t handle, u64 point,
					 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->sync, fence);
	dma_fence_put(fence);
	return r;
}

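/* Add all syncobjs of a SYNCOBJ_IN chunk as dependencies */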
static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
				   struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
		if (r)
			return r;
	}

	return 0;
}

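/* Add all timeline syncobj points of a SYNCOBJ_TIMELINE_WAIT chunk as
 * dependencies.
 */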
static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
					      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
						  syncobj_deps[i].point,
						  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

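/* Collect the syncobjs of a SYNCOBJ_OUT chunk to signal after submission */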
static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
				    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned int num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

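/* Collect the timeline syncobj points of a SYNCOBJ_TIMELINE_SIGNAL chunk to
 * signal after submission.
 */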
static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
						struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned int num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = dma_fence_chain_alloc();
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			dma_fence_chain_free(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

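/* Apply the shadow, CSA and GDS VAs of a CP_GFX_SHADOW chunk to all jobs in
 * the gang.
 */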
static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
			       struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
	int i;

	if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
		return -EINVAL;

	for (i = 0; i < p->gang_size; ++i) {
		p->jobs[i]->shadow_va = shadow->shadow_va;
		p->jobs[i]->csa_va = shadow->csa_va;
		p->jobs[i]->gds_va = shadow->gds_va;
		p->jobs[i]->init_shadow =
			shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
	}

	return 0;
}

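/* Second pass over the chunks: fill in the jobs and gather dependencies */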
static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
{
	unsigned int ce_preempt = 0, de_preempt = 0;
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_p2_dependencies(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_p2_syncobj_in(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_p2_syncobj_out(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
			r = amdgpu_cs_p2_shadow(p, chunk);
			if (r)
				return r;
			break;
		}
	}

	return 0;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
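/* As an illustrative example: with log2_max_MBps == 6 (64 MB/s; 1 MB/s is
 * roughly one byte per microsecond) the 200 ms upper bound used below
 * corresponds to about 200000 << 6 = ~12.8 MB of buffer moves per window.
 */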
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;
	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;

			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

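/* Validation callback for the BO lists and amdgpu_vm_validate_pt_bos():
 * validate the placement of a single BO while staying within the move
 * budget computed above.
 */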
static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_cs_parser *p = param;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv
	};
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold &&
	    (!bo->tbo.base.dma_buf ||
	     list_empty(&bo->tbo.base.dma_buf->attachments))) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

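/* Validate all BOs on a list, refreshing userptr pages where they were
 * invalidated since the last submission.
 */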
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
		}

		r = amdgpu_cs_bo_validate(p, bo);
		if (r)
			return r;

		kvfree(lobj->user_pages);
		lobj->user_pages = NULL;
	}
	return 0;
}

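/* Acquire, reserve and validate all buffers needed by the submission */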
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	unsigned int i;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	mutex_lock(&p->bo_list->bo_list_mutex);

	/* One for TTM and one for each CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 1 + p->gang_size;
	p->uf_entry.tv.num_shared = 1 + p->gang_size;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	/* Two for VM updates, one for TTM and one for each CS job */
	p->vm_pd.tv.num_shared = 3 + p->gang_size;

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get userptr backing pages. If pages are updated after registered
	 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
	 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("kvmalloc_array failure\n");
			r = -ENOMEM;
			goto out_free_user_pages;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			goto out_free_user_pages;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
				   &duplicates);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
		goto out_free_user_pages;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_bo_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	if (p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		if (r)
			goto error_validate;

		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	for (i = 0; i < p->gang_size; ++i)
		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
					 p->bo_list->gws_obj,
					 p->bo_list->oa_obj);
	return 0;

error_validate:
	ttm_eu_backoff_reservation(&p->ticket, &p->validated);

out_free_user_pages:
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		if (!e->user_pages)
			continue;
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
		kvfree(e->user_pages);
		e->user_pages = NULL;
		e->range = NULL;
	}
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return r;
}

static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
	int i, j;

	if (!trace_amdgpu_cs_enabled())
		return;

	for (i = 0; i < p->gang_size; ++i) {
		struct amdgpu_job *job = p->jobs[i];

		for (j = 0; j < job->num_ibs; ++j)
			trace_amdgpu_cs(p, job, &job->ibs[j]);
	}
}

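/* Copy or patch the IBs of a job for rings which need VM emulation */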
static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
			       struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	unsigned int i;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
		return 0;

	for (i = 0; i < job->num_ibs; ++i) {
		struct amdgpu_ib *ib = &job->ibs[i];
		struct amdgpu_bo_va_mapping *m;
		struct amdgpu_bo *aobj;
		uint64_t va_start;
		uint8_t *kptr;

		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
		if (r) {
			DRM_ERROR("IB va_start is invalid\n");
			return r;
		}

		if ((va_start + ib->length_dw * 4) >
		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
			return -EINVAL;
		}

		/* the IB should be reserved at this point */
		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
		if (r)
			return r;

		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);

		if (ring->funcs->parse_cs) {
			memcpy(ib->ptr, kptr, ib->length_dw * 4);
			amdgpu_bo_kunmap(aobj);

			r = amdgpu_ring_parse_cs(ring, p, job, ib);
			if (r)
				return r;
		} else {
			ib->ptr = (uint32_t *)kptr;
			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
			amdgpu_bo_kunmap(aobj);
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
{
	unsigned int i;
	int r;

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
		if (r)
			return r;
	}
	return 0;
}

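/* Update the page tables for all BOs used by the submission and add the
 * resulting fences as dependencies.
 */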
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *job = p->gang_leader;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	unsigned int i;
	int r;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (fpriv->csa_va) {
		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, vm->last_update);
	if (r)
		return r;

	for (i = 0; i < p->gang_size; ++i) {
		job = p->jobs[i];

		if (!job->vm)
			continue;

		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
	}

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return 0;
}

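/* Wait for the previous submission of the context and sync to the
 * reservation objects of all validated BOs.
 */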
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_gpu_scheduler *sched;
	struct amdgpu_bo_list_entry *e;
	struct dma_fence *fence;
	unsigned int i;
	int r;

	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
		return r;
	}

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct dma_resv *resv = bo->tbo.base.resv;
		enum amdgpu_sync_mode sync_mode;

		sync_mode = amdgpu_bo_explicit_sync(bo) ?
			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
				     &fpriv->vm);
		if (r)
			return r;
	}

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
		if (r)
			return r;
	}

	sched = p->gang_leader->base.entity->rq->sched;
	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);

		/*
		 * When we have a dependency it might be necessary to insert a
		 * pipeline sync to make sure that all caches etc are flushed and the
		 * next job actually sees the results from the previous one
		 * before we start executing on the same scheduler ring.
		 */
		if (!s_fence || s_fence->sched != sched) {
			dma_fence_put(fence);
			continue;
		}

		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

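/* Signal the post-submission syncobjs with the submission fence */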
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}

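/* Arm the jobs, flush out invalidated userptrs and push everything to the
 * scheduler; the gang leader's finished fence represents the submission.
 */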
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *leader = p->gang_leader;
	struct amdgpu_bo_list_entry *e;
	unsigned int i;
	uint64_t seq;
	int r;

	for (i = 0; i < p->gang_size; ++i)
		drm_sched_job_arm(&p->jobs[i]->base);

	for (i = 0; i < p->gang_size; ++i) {
		struct dma_fence *fence;

		if (p->jobs[i] == leader)
			continue;

		fence = &p->jobs[i]->base.s_fence->scheduled;
		dma_fence_get(fence);
		r = drm_sched_job_add_dependency(&leader->base, fence);
		if (r) {
			dma_fence_put(fence);
			return r;
		}
	}

	if (p->gang_size > 1) {
		for (i = 0; i < p->gang_size; ++i)
			amdgpu_job_set_gang_leader(p->jobs[i], leader);
	}

	/* No memory allocation is allowed while holding the notifier lock.
	 * The lock is held until amdgpu_cs_submit is finished and fence is
	 * added to BOs.
	 */
	mutex_lock(&p->adev->notifier_lock);

	/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
	 */
	r = 0;
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
		e->range = NULL;
	}
	if (r) {
		r = -EAGAIN;
		mutex_unlock(&p->adev->notifier_lock);
		return r;
	}

	p->fence = dma_fence_get(&leader->base.s_fence->finished);
	list_for_each_entry(e, &p->validated, tv.head) {
		/* Everybody except for the gang leader uses READ */
		for (i = 0; i < p->gang_size; ++i) {
			if (p->jobs[i] == leader)
				continue;

			dma_resv_add_fence(e->tv.bo->base.resv,
					   &p->jobs[i]->base.s_fence->finished,
					   DMA_RESV_USAGE_READ);
		}

		/* The gang leader is remembered as writer */
		e->tv.num_shared = 0;
	}

	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
				   p->fence);
	amdgpu_cs_post_dependencies(p);

	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	leader->uf_sequence = seq;

	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	for (i = 0; i < p->gang_size; ++i) {
		amdgpu_job_free_resources(p->jobs[i]);
		trace_amdgpu_cs_ioctl(p->jobs[i]);
		drm_sched_entity_push_job(&p->jobs[i]->base);
		p->jobs[i] = NULL;
	}

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);

	mutex_unlock(&p->adev->notifier_lock);
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return 0;
}

/* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
	unsigned int i;

	amdgpu_sync_free(&parser->sync);
	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kvfree(parser->chunks);
	for (i = 0; i < parser->gang_size; ++i) {
		if (parser->jobs[i])
			amdgpu_job_free(parser->jobs[i]);
	}
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}

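/**
 * amdgpu_cs_ioctl - process a command submission from userspace
 *
 * @dev: drm device
 * @data: union drm_amdgpu_cs from userspace
 * @filp: file private
 */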
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_cs_parser parser;
	int r;

	if (amdgpu_ras_intr_triggered())
		return -EHWPOISON;

	if (!adev->accel_working)
		return -EBUSY;

	r = amdgpu_cs_parser_init(&parser, adev, filp, data);
	if (r) {
		if (printk_ratelimit())
			DRM_ERROR("Failed to initialize parser %d!\n", r);
		return r;
	}

	r = amdgpu_cs_pass1(&parser, data);
	if (r)
		goto error_fini;

	r = amdgpu_cs_pass2(&parser);
	if (r)
		goto error_fini;

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto error_fini;
	}

	r = amdgpu_cs_patch_jobs(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_sync_rings(&parser);
	if (r)
		goto error_backoff;

	trace_amdgpu_cs_ibs(&parser);

	r = amdgpu_cs_submit(&parser, data);
	if (r)
		goto error_backoff;

	amdgpu_cs_parser_fini(&parser);
	return 0;

error_backoff:
	ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
	mutex_unlock(&parser.bo_list->bo_list_mutex);

error_fini:
	amdgpu_cs_parser_fini(&parser);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

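/**
 * amdgpu_cs_fence_to_handle_ioctl - convert a fence into a syncobj or
 * sync_file handle
 *
 * @dev: drm device
 * @data: union drm_amdgpu_fence_to_handle from userspace
 * @filp: file private
 */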
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		dma_fence_put(fence);
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;

		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: Placeholder to return found BO mapping
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when a mapping
 * is found, -EINVAL otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}