// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_exec.h"

#include <drm/drm_device.h>
#include <drm/drm_exec.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>
#include <linux/delay.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_macros.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_vm.h"

/**
 * DOC: Execbuf (User GPU command submission)
 *
 * Execs have historically been rather complicated in DRM drivers (at least in
 * the i915) because of a few things:
 *
 * - Passing in a list of BOs which are read / written to, creating implicit
 *   syncs
 * - Binding at exec time
 * - Flow controlling the ring at exec time
 *
 * In XE we avoid all of this complication by not allowing a BO list to be
 * passed into an exec, using the dma-buf implicit sync uAPI, having binds as
 * separate operations, and using the DRM scheduler to flow control the ring.
 * Let's deep dive on each of these.
 *
 * We can get away from a BO list by forcing the user to use in / out fences on
 * every exec rather than the kernel tracking the dependencies of each BO (e.g.
 * if the user knows an exec writes to a BO and reads from that BO in the next
 * exec, it is the user's responsibility to pass an in / out fence between the
 * two execs).
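 *
 * As a rough userspace sketch of that contract (struct and flag names follow
 * the drm_xe uAPI headers and are illustrative only; fd, queue_id,
 * syncobj_handle and the batch addresses are assumed to be set up elsewhere),
 * two dependent execs can be chained through a single syncobj, with the first
 * exec signalling it as an out-fence and the second waiting on it as an
 * in-fence:
 *
 * .. code-block:: c
 *
 *	struct drm_xe_sync sync = {
 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_xe_exec exec = {
 *		.exec_queue_id = queue_id,
 *		.num_batch_buffer = 1,
 *		.address = batch_that_writes_bo,
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&sync,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
 *
 *	sync.flags = 0;
 *	exec.address = batch_that_reads_bo;
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);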
 *
 * Implicit dependencies for external BOs are handled by using the dma-buf
 * implicit dependency uAPI (TODO: add link). To make this work, each exec must
 * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external
 * BO mapped in the VM.
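 *
 * Conceptually (a minimal sketch of the effect, not the exact helper the exec
 * path below calls, which uses drm_gpuvm_resv_add_fence() to cover the whole
 * VM), the installation amounts to, for each external BO mapped in the VM:
 *
 * .. code-block:: c
 *
 *	dma_resv_add_fence(bo->ttm.base.resv, &job->drm.s_fence->finished,
 *			   DMA_RESV_USAGE_WRITE);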
 *
 * We do not allow a user to trigger a bind at exec time; rather, we have a VM
 * bind IOCTL which uses the same in / out fence interface as exec. In that
 * sense, a VM bind is basically the same operation as an exec from the user's
 * perspective: e.g. if an exec depends on a VM bind, use the in / out fence
 * interface (struct drm_xe_sync) to synchronize, just like syncing between two
 * dependent execs.
 *
 * Although a user cannot trigger a bind, we still have to rebind userptrs in
 * the VM that have been invalidated since the last exec; likewise we also have
 * to rebind BOs that have been evicted by the kernel. We schedule these rebinds
 * behind any pending kernel operations on any external BOs in the VM or any BOs
 * private to the VM. This is accomplished by the rebinds waiting on the BOs'
 * DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all BO
 * slots (in-flight execs are in the DMA_RESV_USAGE_BOOKKEEP slot for private
 * BOs and in DMA_RESV_USAGE_WRITE for external BOs).
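 *
 * In code terms (a sketch mirroring what the exec path below actually does),
 * a new job is made to wait behind any pending kernel ops on the VM with:
 *
 * .. code-block:: c
 *
 *	drm_sched_job_add_resv_dependencies(&job->drm, xe_vm_resv(vm),
 *					    DMA_RESV_USAGE_KERNEL);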
 *
 * This rebind / dma-resv usage applies to non-compute mode VMs only; for
 * compute mode VMs we use preempt fences and a rebind worker (TODO: add link).
 *
 * There is no need to flow control the ring in the exec as we write the ring at
 * submission time and set the DRM scheduler max job limit to SIZE_OF_RING /
 * MAX_JOB_SIZE. The DRM scheduler will then hold all jobs until space in the
 * ring is available.
 *
 * All of this results in a rather simple exec implementation.
 *
 * Flow
 * ~~~~
 *
 * .. code-block::
 *
 *	Parse input arguments
 *	Wait for any async VM bind passed as in-fences to start
 *	<----------------------------------------------------------------------|
 *	Lock global VM lock in read mode                                       |
 *	Pin userptrs (also finds userptr invalidated since last exec)          |
 *	Lock exec (VM dma-resv lock, external BOs dma-resv locks)              |
 *	Validate BOs that have been evicted                                    |
 *	Create job                                                             |
 *	Rebind invalidated userptrs + evicted BOs (non-compute-mode)           |
 *	Add rebind fence dependency to job                                     |
 *	Add job VM dma-resv bookkeeping slot (non-compute mode)                |
 *	Add job to external BOs dma-resv write slots (non-compute mode)        |
 *	Check if any userptrs invalidated since pin ------ Drop locks ---------|
 *	Install in / out fences for job
 *	Submit job
 *	Unlock all
 */

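/*
 * Validation callback run under the locks taken by drm_gpuvm_exec_lock():
 * revalidates any BOs mapped in the VM that have been evicted, before the
 * job is created.
 */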
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}

int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec *args = data;
	struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
	u64 __user *addresses_user = u64_to_user_ptr(args->address);
	struct xe_exec_queue *q;
	struct xe_sync_entry *syncs = NULL;
	u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
	struct drm_exec *exec = &vm_exec.exec;
	u32 i, num_syncs = 0;
	struct xe_sched_job *job;
	struct dma_fence *rebind_fence;
	struct xe_vm *vm;
	bool write_locked, skip_retry = false;
	ktime_t end = 0;
	int err = 0;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) {
		err = -EINVAL;
		goto err_exec_queue;
	}

	if (XE_IOCTL_DBG(xe, args->num_batch_buffer &&
			 q->width != args->num_batch_buffer)) {
		err = -EINVAL;
		goto err_exec_queue;
	}

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) {
		err = -ECANCELED;
		goto err_exec_queue;
	}

	if (args->num_syncs) {
		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
		if (!syncs) {
			err = -ENOMEM;
			goto err_exec_queue;
		}
	}

	vm = q->vm;

	for (i = 0; i < args->num_syncs; i++) {
		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
					  &syncs_user[i], SYNC_PARSE_FLAG_EXEC |
					  (xe_vm_in_lr_mode(vm) ?
					   SYNC_PARSE_FLAG_LR_MODE : 0));
		if (err)
			goto err_syncs;
	}

	if (xe_exec_queue_is_parallel(q)) {
		err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
				       q->width);
		if (err) {
			err = -EFAULT;
			goto err_syncs;
		}
	}

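	/*
	 * Everything between this label and the unlock below may be retried:
	 * if a userptr is invalidated or a BO validation has to back off, we
	 * drop all locks and come back here with -EAGAIN.
	 */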
retry:
	if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
		err = down_write_killable(&vm->lock);
		write_locked = true;
	} else {
		/* We don't allow execs while the VM is in error state */
		err = down_read_interruptible(&vm->lock);
		write_locked = false;
	}
	if (err)
		goto err_syncs;

	if (write_locked) {
		err = xe_vm_userptr_pin(vm);
		downgrade_write(&vm->lock);
		write_locked = false;
		if (err)
			goto err_unlock_list;
	}

	vm_exec.vm = &vm->gpuvm;
	vm_exec.num_fences = 1 + vm->xe->info.tile_count;
	vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
	if (xe_vm_in_lr_mode(vm)) {
		drm_exec_init(exec, vm_exec.flags, 0);
	} else {
		err = drm_gpuvm_exec_lock(&vm_exec);
		if (err) {
			if (xe_vm_validate_should_retry(exec, err, &end))
				err = -EAGAIN;
			goto err_unlock_list;
		}
	}

	if (xe_vm_is_closed_or_banned(q->vm)) {
		drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
		err = -ECANCELED;
		goto err_exec;
	}

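	/*
	 * An exec with no batch buffers is a sync-only submission: turn the
	 * in-fences into a single fence, record it as the queue's last fence
	 * and signal the out-fences with it, without creating a job. Skipped
	 * for long-running (LR) mode VMs.
	 */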
	if (!args->num_batch_buffer) {
		if (!xe_vm_in_lr_mode(vm)) {
			struct dma_fence *fence;

			fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
			if (IS_ERR(fence)) {
				err = PTR_ERR(fence);
				goto err_exec;
			}
			for (i = 0; i < num_syncs; i++)
				xe_sync_entry_signal(&syncs[i], NULL, fence);
			xe_exec_queue_last_fence_set(q, vm, fence);
			dma_fence_put(fence);
		}

		goto err_exec;
	}

	if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
		err = -EWOULDBLOCK;	/* Aliased to -EAGAIN */
		skip_retry = true;
		goto err_exec;
	}

	job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
				  addresses : &args->address);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);
		goto err_exec;
	}

	/*
	 * Rebind any invalidated userptr or evicted BOs in the VM, non-compute
	 * VM mode only.
	 */
	rebind_fence = xe_vm_rebind(vm, false);
	if (IS_ERR(rebind_fence)) {
		err = PTR_ERR(rebind_fence);
		goto err_put_job;
	}

	/*
	 * We store the rebind_fence in the VM so subsequent execs don't get
	 * scheduled before the rebinds of userptrs / evicted BOs are complete.
	 */
	if (rebind_fence) {
		dma_fence_put(vm->rebind_fence);
		vm->rebind_fence = rebind_fence;
	}
	if (vm->rebind_fence) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			     &vm->rebind_fence->flags)) {
			dma_fence_put(vm->rebind_fence);
			vm->rebind_fence = NULL;
		} else {
			dma_fence_get(vm->rebind_fence);
			err = drm_sched_job_add_dependency(&job->drm,
							   vm->rebind_fence);
			if (err)
				goto err_put_job;
		}
	}

	/* Wait behind munmap style rebinds */
	if (!xe_vm_in_lr_mode(vm)) {
		err = drm_sched_job_add_resv_dependencies(&job->drm,
							  xe_vm_resv(vm),
							  DMA_RESV_USAGE_KERNEL);
		if (err)
			goto err_put_job;
	}

	for (i = 0; i < num_syncs && !err; i++)
		err = xe_sync_entry_add_deps(&syncs[i], job);
	if (err)
		goto err_put_job;

	if (!xe_vm_in_lr_mode(vm)) {
		err = xe_sched_job_last_fence_add_dep(job, vm);
		if (err)
			goto err_put_job;

		err = down_read_interruptible(&vm->userptr.notifier_lock);
		if (err)
			goto err_put_job;

		err = __xe_vm_userptr_needs_repin(vm);
		if (err)
			goto err_repin;
	}

	/*
	 * Point of no return: if we error after this point, just set an error
	 * on the job and let the DRM scheduler / backend clean up the job.
	 */
	xe_sched_job_arm(job);
	if (!xe_vm_in_lr_mode(vm))
		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);

	for (i = 0; i < num_syncs; i++)
		xe_sync_entry_signal(&syncs[i], job,
				     &job->drm.s_fence->finished);

	if (xe_exec_queue_is_lr(q))
		q->ring_ops->emit_job(job);
	if (!xe_vm_in_lr_mode(vm))
		xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
	xe_sched_job_push(job);
	xe_vm_reactivate_rebind(vm);

	if (!err && !xe_vm_in_lr_mode(vm)) {
		spin_lock(&xe->ttm.lru_lock);
		ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
		spin_unlock(&xe->ttm.lru_lock);
	}

err_repin:
	if (!xe_vm_in_lr_mode(vm))
		up_read(&vm->userptr.notifier_lock);
err_put_job:
	if (err)
		xe_sched_job_put(job);
err_exec:
	drm_exec_fini(exec);
err_unlock_list:
	if (write_locked)
		up_write(&vm->lock);
	else
		up_read(&vm->lock);
	if (err == -EAGAIN && !skip_retry)
		goto retry;
err_syncs:
	for (i = 0; i < num_syncs; i++)
		xe_sync_entry_cleanup(&syncs[i]);
	kfree(syncs);
err_exec_queue:
	xe_exec_queue_put(q);

	return err;
}