/*
 * jcs's openbsd hax — OpenBSD port
 */
1/*
2 * Copyright 2017 Red Hat
3 * Parts ported from amdgpu (fence wait code).
4 * Copyright 2016 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 *
25 * Authors:
26 *
27 */
28
29/**
30 * DOC: Overview
31 *
32 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
33 * container for a synchronization primitive which can be used by userspace
34 * to explicitly synchronize GPU commands, can be shared between userspace
35 * processes, and can be shared between different DRM drivers.
36 * Their primary use-case is to implement Vulkan fences and semaphores.
37 * The syncobj userspace API provides ioctls for several operations:
38 *
39 * - Creation and destruction of syncobjs
40 * - Import and export of syncobjs to/from a syncobj file descriptor
41 * - Import and export a syncobj's underlying fence to/from a sync file
42 * - Reset a syncobj (set its fence to NULL)
43 * - Signal a syncobj (set a trivially signaled fence)
44 * - Wait for a syncobj's fence to appear and be signaled
45 *
46 * The syncobj userspace API also provides operations to manipulate a syncobj
47 * in terms of a timeline of struct &dma_fence_chain rather than a single
48 * struct &dma_fence, through the following operations:
49 *
50 * - Signal a given point on the timeline
51 * - Wait for a given point to appear and/or be signaled
52 * - Import and export from/to a given point of a timeline
53 *
 * At its core, a syncobj is simply a wrapper around a pointer to a struct
55 * &dma_fence which may be NULL.
56 * When a syncobj is first created, its pointer is either NULL or a pointer
57 * to an already signaled fence depending on whether the
58 * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
59 * &DRM_IOCTL_SYNCOBJ_CREATE.
60 *
61 * If the syncobj is considered as a binary (its state is either signaled or
62 * unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
63 * the syncobj, the syncobj's fence is replaced with a fence which will be
64 * signaled by the completion of that work.
 * If the syncobj is considered as a timeline primitive, when GPU work is
 * enqueued in a DRM driver to signal a given point of the syncobj, a new
 * struct &dma_fence_chain is created, pointing to the DRM driver's fence and
 * also pointing to the previous fence that was in the syncobj. The new struct
 * &dma_fence_chain fence replaces the syncobj's fence and will be signaled by
 * completion of the DRM driver's work and also any work associated with the
 * fence previously in the syncobj.
72 *
73 * When GPU work which waits on a syncobj is enqueued in a DRM driver, at the
74 * time the work is enqueued, it waits on the syncobj's fence before
75 * submitting the work to hardware. That fence is either :
76 *
77 * - The syncobj's current fence if the syncobj is considered as a binary
78 * primitive.
79 * - The struct &dma_fence associated with a given point if the syncobj is
80 * considered as a timeline primitive.
81 *
82 * If the syncobj's fence is NULL or not present in the syncobj's timeline,
83 * the enqueue operation is expected to fail.
84 *
85 * With binary syncobj, all manipulation of the syncobjs's fence happens in
86 * terms of the current fence at the time the ioctl is called by userspace
87 * regardless of whether that operation is an immediate host-side operation
 * (signal or reset) or an operation which is enqueued in some driver
89 * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
90 * to manipulate a syncobj from the host by resetting its pointer to NULL or
91 * setting its pointer to a fence which is already signaled.
92 *
 * With a timeline syncobj, all manipulation of the syncobj's fence happens in
94 * terms of a u64 value referring to point in the timeline. See
95 * dma_fence_chain_find_seqno() to see how a given point is found in the
96 * timeline.
97 *
98 * Note that applications should be careful to always use timeline set of
99 * ioctl() when dealing with syncobj considered as timeline. Using a binary
 * set of ioctl() with a syncobj considered as timeline could result in incorrect
101 * synchronization. The use of binary syncobj is supported through the
102 * timeline set of ioctl() by using a point value of 0, this will reproduce
103 * the behavior of the binary set of ioctl() (for example replace the
104 * syncobj's fence when signaling).
105 *
106 *
107 * Host-side wait on syncobjs
108 * --------------------------
109 *
110 * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
111 * host-side wait on all of the syncobj fences simultaneously.
112 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
113 * all of the syncobj fences to be signaled before it returns.
114 * Otherwise, it returns once at least one syncobj fence has been signaled
115 * and the index of a signaled fence is written back to the client.
116 *
117 * Unlike the enqueued GPU work dependencies which fail if they see a NULL
118 * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
119 * the host-side wait will first wait for the syncobj to receive a non-NULL
120 * fence and then wait on that fence.
121 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
122 * syncobjs in the array has a NULL fence, -EINVAL will be returned.
123 * Assuming the syncobj starts off with a NULL fence, this allows a client
124 * to do a host wait in one thread (or process) which waits on GPU work
125 * submitted in another thread (or process) without having to manually
126 * synchronize between the two.
127 * This requirement is inherited from the Vulkan fence API.
128 *
129 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE is set, the ioctl will also set
130 * a fence deadline hint on the backing fences before waiting, to provide the
131 * fence signaler with an appropriate sense of urgency. The deadline is
132 * specified as an absolute &CLOCK_MONOTONIC value in units of ns.
133 *
134 * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
135 * handles as well as an array of u64 points and does a host-side wait on all
136 * of syncobj fences at the given points simultaneously.
137 *
138 * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
139 * fence to materialize on the timeline without waiting for the fence to be
140 * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
141 * requirement is inherited from the wait-before-signal behavior required by
142 * the Vulkan timeline semaphore API.
143 *
144 * Alternatively, &DRM_IOCTL_SYNCOBJ_EVENTFD can be used to wait without
145 * blocking: an eventfd will be signaled when the syncobj is. This is useful to
146 * integrate the wait in an event loop.
147 *
148 *
149 * Import/export of syncobjs
150 * -------------------------
151 *
152 * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
153 * provide two mechanisms for import/export of syncobjs.
154 *
155 * The first lets the client import or export an entire syncobj to a file
156 * descriptor.
157 * These fd's are opaque and have no other use case, except passing the
158 * syncobj between processes.
159 * All exported file descriptors and any syncobj handles created as a
160 * result of importing those file descriptors own a reference to the
161 * same underlying struct &drm_syncobj and the syncobj can be used
162 * persistently across all the processes with which it is shared.
163 * The syncobj is freed only once the last reference is dropped.
164 * Unlike dma-buf, importing a syncobj creates a new handle (with its own
165 * reference) for every import instead of de-duplicating.
166 * The primary use-case of this persistent import/export is for shared
167 * Vulkan fences and semaphores.
168 *
169 * The second import/export mechanism, which is indicated by
170 * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
171 * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client
172 * import/export the syncobj's current fence from/to a &sync_file.
173 * When a syncobj is exported to a sync file, that sync file wraps the
 * syncobj's fence at the time of export and any later signal or reset
175 * operations on the syncobj will not affect the exported sync file.
176 * When a sync file is imported into a syncobj, the syncobj's fence is set
177 * to the fence wrapped by that sync file.
178 * Because sync files are immutable, resetting or signaling the syncobj
179 * will not affect any sync files whose fences have been imported into the
180 * syncobj.
181 *
182 *
183 * Import/export of timeline points in timeline syncobjs
184 * -----------------------------------------------------
185 *
186 * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer a struct
187 * &dma_fence_chain of a syncobj at a given u64 point to another u64 point
188 * into another syncobj.
189 *
190 * Note that if you want to transfer a struct &dma_fence_chain from a given
191 * point on a timeline syncobj from/into a binary syncobj, you can use the
192 * point 0 to mean take/replace the fence in the syncobj.
193 */
194
195#include <linux/anon_inodes.h>
196#include <linux/dma-fence-unwrap.h>
197#include <linux/eventfd.h>
198#include <linux/file.h>
199#include <linux/fs.h>
200#include <linux/sched/signal.h>
201#include <linux/sync_file.h>
202#include <linux/uaccess.h>
203
204#include <drm/drm.h>
205#include <drm/drm_drv.h>
206#include <drm/drm_file.h>
207#include <drm/drm_gem.h>
208#include <drm/drm_print.h>
209#include <drm/drm_syncobj.h>
210#include <drm/drm_utils.h>
211
212#include "drm_internal.h"
213
/*
 * One host-side waiter parked on a syncobj.  Queued on
 * drm_syncobj::cb_list until a fence suitable for @point shows up.
 */
struct syncobj_wait_entry {
	struct list_head node;		/* link in drm_syncobj::cb_list */
#ifdef __linux__
	struct task_struct *task;	/* task to wake once a fence is found */
#else
	struct proc *task;		/* OpenBSD: waiting process to wake */
#endif
	struct dma_fence *fence;	/* resolved fence; NULL while still waiting */
	struct dma_fence_cb fence_cb;	/* signal callback installed on @fence */
	u64 point;			/* timeline point waited on (0 for binary) */
};
225
226static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
227 struct syncobj_wait_entry *wait);
228
/*
 * Association between a syncobj and an eventfd, so the eventfd can be
 * signaled when the syncobj (at @point) is.  Lives on
 * drm_syncobj::ev_fd_list and is freed by syncobj_eventfd_entry_free().
 */
struct syncobj_eventfd_entry {
	struct list_head node;		/* link in drm_syncobj::ev_fd_list */
	struct dma_fence *fence;	/* fence being watched, may be NULL */
	struct dma_fence_cb fence_cb;	/* signal callback installed on @fence */
	struct drm_syncobj *syncobj;	/* associated syncobj */
	struct eventfd_ctx *ev_fd_ctx;	/* eventfd to signal */
	u64 point;			/* timeline point of interest */
	u32 flags;			/* wait flags supplied by userspace */
};
238
239static void
240syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
241 struct syncobj_eventfd_entry *entry);
242
243/**
244 * drm_syncobj_find - lookup and reference a sync object.
245 * @file_private: drm file private pointer
246 * @handle: sync object handle to lookup.
247 *
248 * Returns a reference to the syncobj pointed to by handle or NULL. The
249 * reference must be released by calling drm_syncobj_put().
250 */
251struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
252 u32 handle)
253{
254 struct drm_syncobj *syncobj;
255
256 spin_lock(&file_private->syncobj_table_lock);
257
258 /* Check if we currently have a reference on the object */
259 syncobj = idr_find(&file_private->syncobj_idr, handle);
260 if (syncobj)
261 drm_syncobj_get(syncobj);
262
263 spin_unlock(&file_private->syncobj_table_lock);
264
265 return syncobj;
266}
267EXPORT_SYMBOL(drm_syncobj_find);
268
/*
 * Resolve @wait's fence under the syncobj lock, or park it on cb_list.
 * Called after a lockless attempt already failed to find a fence.
 */
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
				       struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	if (wait->fence)
		return;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		/* No fence, or the requested point has not materialized on
		 * the timeline yet: queue the waiter for later re-evaluation.
		 */
		dma_fence_put(fence);
		list_add_tail(&wait->node, &syncobj->cb_list);
	} else if (!fence) {
		/* dma_fence_chain_find_seqno() returns success with a NULL
		 * fence when the point is already signaled; hand the waiter
		 * a stub so it still sees a signaled fence.
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}
	spin_unlock(&syncobj->lock);
}
293
294static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
295 struct syncobj_wait_entry *wait)
296{
297 if (!wait->node.next)
298 return;
299
300 spin_lock(&syncobj->lock);
301 list_del_init(&wait->node);
302 spin_unlock(&syncobj->lock);
303}
304
/* Release all resources owned by an eventfd entry and free it. */
static void
syncobj_eventfd_entry_free(struct syncobj_eventfd_entry *entry)
{
	eventfd_ctx_put(entry->ev_fd_ctx);
	dma_fence_put(entry->fence);
	/* This happens either inside the syncobj lock, or after the node has
	 * already been removed from the list.
	 */
	list_del(&entry->node);
	kfree(entry);
}
316
#ifdef notyet
/*
 * Register an eventfd entry with the syncobj and evaluate it once
 * immediately, in case a suitable fence is already installed.
 * (Compiled out on OpenBSD for now.)
 */
static void
drm_syncobj_add_eventfd(struct drm_syncobj *syncobj,
			struct syncobj_eventfd_entry *entry)
{
	spin_lock(&syncobj->lock);
	list_add_tail(&entry->node, &syncobj->ev_fd_list);
	syncobj_eventfd_entry_func(syncobj, entry);
	spin_unlock(&syncobj->lock);
}
#endif
328
/**
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add the timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as new timeline point to the syncobj.
 */
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
			   struct dma_fence_chain *chain,
			   struct dma_fence *fence,
			   uint64_t point)
{
	struct syncobj_wait_entry *wait_cur, *wait_tmp;
	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
	struct dma_fence *prev;

	/* The chain node keeps its own reference to @fence. */
	dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	prev = drm_syncobj_fence_get(syncobj);
	/* Adding an out-of-order point could cause the payload returned from
	 * query_ioctl to be 0.
	 */
	if (prev && prev->seqno >= point)
		DRM_DEBUG("You are adding an unorder point to timeline!\n");
	dma_fence_chain_init(chain, prev, fence, point);
	rcu_assign_pointer(syncobj->fence, &chain->base);

	/* Re-evaluate every parked waiter and eventfd entry against the
	 * newly published fence.
	 */
	list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
		syncobj_wait_syncobj_func(syncobj, wait_cur);
	list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
		syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
	spin_unlock(&syncobj->lock);

	/* Walk the chain once to trigger garbage collection */
	dma_fence_chain_for_each(fence, prev);
	dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);
369
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install, or NULL to reset the syncobj
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct syncobj_wait_entry *wait_cur, *wait_tmp;
	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;

	/* Take the syncobj's reference before publishing the fence. */
	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		/* Re-evaluate parked waiters and eventfd entries against the
		 * newly installed fence.
		 */
		list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
			syncobj_wait_syncobj_func(syncobj, wait_cur);
		list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
			syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
	}

	spin_unlock(&syncobj->lock);

	/* Drop the reference the syncobj held on the previous fence. */
	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
405
406/**
407 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
408 * @syncobj: sync object to assign the fence on
409 *
410 * Assign a already signaled stub fence to the sync object.
411 */
412static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
413{
414 struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
415
416 if (!fence)
417 return -ENOMEM;
418
419 drm_syncobj_replace_fence(syncobj, fence);
420 dma_fence_put(fence);
421 return 0;
422}
423
424/* 5s default for wait submission */
425#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
426/**
427 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
428 * @file_private: drm file private pointer
429 * @handle: sync object handle to lookup.
430 * @point: timeline point
431 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
432 * @fence: out parameter for the fence
433 *
434 * This is just a convenience function that combines drm_syncobj_find() and
435 * drm_syncobj_fence_get().
436 *
437 * Returns 0 on success or a negative error value on failure. On success @fence
438 * contains a reference to the fence, which must be released by calling
439 * dma_fence_put().
440 */
441int drm_syncobj_find_fence(struct drm_file *file_private,
442 u32 handle, u64 point, u64 flags,
443 struct dma_fence **fence)
444{
445 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
446 struct syncobj_wait_entry wait;
447 u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
448 int ret;
449
450 if (flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
451 return -EINVAL;
452
453 if (!syncobj)
454 return -ENOENT;
455
456 /* Waiting for userspace with locks help is illegal cause that can
457 * trivial deadlock with page faults for example. Make lockdep complain
458 * about it early on.
459 */
460 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
461 might_sleep();
462 lockdep_assert_none_held_once();
463 }
464
465 *fence = drm_syncobj_fence_get(syncobj);
466
467 if (*fence) {
468 ret = dma_fence_chain_find_seqno(fence, point);
469 if (!ret) {
470 /* If the requested seqno is already signaled
471 * drm_syncobj_find_fence may return a NULL
472 * fence. To make sure the recipient gets
473 * signalled, use a new fence instead.
474 */
475 if (!*fence)
476 *fence = dma_fence_get_stub();
477
478 goto out;
479 }
480 dma_fence_put(*fence);
481 } else {
482 ret = -EINVAL;
483 }
484
485 if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
486 goto out;
487
488 memset(&wait, 0, sizeof(wait));
489#ifdef __linux__
490 wait.task = current;
491#else
492 wait.task = curproc;
493#endif
494 wait.point = point;
495 drm_syncobj_fence_add_wait(syncobj, &wait);
496
497 do {
498 set_current_state(TASK_INTERRUPTIBLE);
499 if (wait.fence) {
500 ret = 0;
501 break;
502 }
503 if (timeout == 0) {
504 ret = -ETIME;
505 break;
506 }
507
508 if (signal_pending(current)) {
509 ret = -ERESTARTSYS;
510 break;
511 }
512
513 timeout = schedule_timeout(timeout);
514 } while (1);
515
516 __set_current_state(TASK_RUNNING);
517 *fence = wait.fence;
518
519 if (wait.node.next)
520 drm_syncobj_remove_wait(syncobj, &wait);
521
522out:
523 drm_syncobj_put(syncobj);
524
525 return ret;
526}
527EXPORT_SYMBOL(drm_syncobj_find_fence);
528
/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;

	/* Drop the syncobj's reference on its current fence, if any. */
	drm_syncobj_replace_fence(syncobj, NULL);

	/* Tear down any eventfd entries still attached. */
	list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
		syncobj_eventfd_entry_free(ev_fd_cur);

	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
550
551/**
552 * drm_syncobj_create - create a new syncobj
553 * @out_syncobj: returned syncobj
554 * @flags: DRM_SYNCOBJ_* flags
555 * @fence: if non-NULL, the syncobj will represent this fence
556 *
557 * This is the first function to create a sync object. After creating, drivers
558 * probably want to make it available to userspace, either through
559 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
560 *
561 * Returns 0 on success or a negative error value on failure.
562 */
563int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
564 struct dma_fence *fence)
565{
566 int ret;
567 struct drm_syncobj *syncobj;
568
569 syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
570 if (!syncobj)
571 return -ENOMEM;
572
573 kref_init(&syncobj->refcount);
574 INIT_LIST_HEAD(&syncobj->cb_list);
575 INIT_LIST_HEAD(&syncobj->ev_fd_list);
576 mtx_init(&syncobj->lock, IPL_NONE);
577
578 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
579 ret = drm_syncobj_assign_null_handle(syncobj);
580 if (ret < 0) {
581 drm_syncobj_put(syncobj);
582 return ret;
583 }
584 }
585
586 if (fence)
587 drm_syncobj_replace_fence(syncobj, fence);
588
589 *out_syncobj = syncobj;
590 return 0;
591}
592EXPORT_SYMBOL(drm_syncobj_create);
593
/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	/* Preallocate outside the spinlock so idr_alloc() can use GFP_NOWAIT
	 * while the table lock is held.  Handles start at 1: 0 stays
	 * reserved as "no syncobj".
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		/* Allocation failed: drop the reference meant for the idr. */
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);
629
630static int drm_syncobj_create_as_handle(struct drm_file *file_private,
631 u32 *handle, uint32_t flags)
632{
633 int ret;
634 struct drm_syncobj *syncobj;
635
636 ret = drm_syncobj_create(&syncobj, flags, NULL);
637 if (ret)
638 return ret;
639
640 ret = drm_syncobj_get_handle(file_private, syncobj, handle);
641 drm_syncobj_put(syncobj);
642 return ret;
643}
644
645static int drm_syncobj_destroy(struct drm_file *file_private,
646 u32 handle)
647{
648 struct drm_syncobj *syncobj;
649
650 spin_lock(&file_private->syncobj_table_lock);
651 syncobj = idr_remove(&file_private->syncobj_idr, handle);
652 spin_unlock(&file_private->syncobj_table_lock);
653
654 if (!syncobj)
655 return -EINVAL;
656
657 drm_syncobj_put(syncobj);
658 return 0;
659}
660
#ifdef notyet
/* ->release() for the syncobj anon file: drop the fd's reference. */
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

/* File operations backing a syncobj file descriptor. */
static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
#endif
674
/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 *
 * Not implemented on OpenBSD yet: always returns -ENOSYS.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	/* The installed file holds its own syncobj reference. */
	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
#endif
}
EXPORT_SYMBOL(drm_syncobj_get_fd);
712
713static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
714 u32 handle, int *p_fd)
715{
716 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
717 int ret;
718
719 if (!syncobj)
720 return -EINVAL;
721
722 ret = drm_syncobj_get_fd(syncobj, p_fd);
723 drm_syncobj_put(syncobj);
724 return ret;
725}
726
/* Import a syncobj file descriptor (as produced by drm_syncobj_get_fd())
 * as a new handle on @file_private.
 *
 * Not implemented on OpenBSD yet: always returns -ENOSYS.
 */
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct drm_syncobj *syncobj;
	struct fd f = fdget(fd);
	int ret;

	if (!fd_file(f))
		return -EINVAL;

	/* Only fds backed by drm_syncobj_file_fops wrap a syncobj. */
	if (fd_file(f)->f_op != &drm_syncobj_file_fops) {
		fdput(f);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = fd_file(f)->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		/* Allocation failed: give the idr's reference back. */
		drm_syncobj_put(syncobj);

	fdput(f);
	return ret;
#endif
}
765
766static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
767 int fd, int handle)
768{
769 struct dma_fence *fence = sync_file_get_fence(fd);
770 struct drm_syncobj *syncobj;
771
772 if (!fence)
773 return -EINVAL;
774
775 syncobj = drm_syncobj_find(file_private, handle);
776 if (!syncobj) {
777 dma_fence_put(fence);
778 return -ENOENT;
779 }
780
781 drm_syncobj_replace_fence(syncobj, fence);
782 dma_fence_put(fence);
783 drm_syncobj_put(syncobj);
784 return 0;
785}
786
787static int drm_syncobj_export_sync_file(struct drm_file *file_private,
788 int handle, int *p_fd)
789{
790 int ret;
791 struct dma_fence *fence;
792 struct sync_file *sync_file;
793 int fd = get_unused_fd_flags(O_CLOEXEC);
794
795 if (fd < 0)
796 return fd;
797
798 ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
799 if (ret)
800 goto err_put_fd;
801
802 sync_file = sync_file_create(fence);
803
804 dma_fence_put(fence);
805
806 if (!sync_file) {
807 ret = -EINVAL;
808 goto err_put_fd;
809 }
810
811 fd_install(fd, sync_file->file);
812
813 *p_fd = fd;
814 return 0;
815err_put_fd:
816 put_unused_fd(fd);
817 return ret;
818}
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	/* Handle 0 is reserved; allocations start at 1. */
	idr_init_base(&file_private->syncobj_idr, 1);
	mtx_init(&file_private->syncobj_table_lock, IPL_NONE);
}
832
/* idr_for_each() callback: drop the file's reference on one syncobj. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}
841
/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	/* Drop this file's reference on every handle still in the table. */
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}
857
858int
859drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
860 struct drm_file *file_private)
861{
862 struct drm_syncobj_create *args = data;
863
864 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
865 return -EOPNOTSUPP;
866
867 /* no valid flags yet */
868 if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
869 return -EINVAL;
870
871 return drm_syncobj_create_as_handle(file_private,
872 &args->handle, args->flags);
873}
874
875int
876drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
877 struct drm_file *file_private)
878{
879 struct drm_syncobj_destroy *args = data;
880
881 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
882 return -EOPNOTSUPP;
883
884 /* make sure padding is empty */
885 if (args->pad)
886 return -EINVAL;
887 return drm_syncobj_destroy(file_private, args->handle);
888}
889
890int
891drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
892 struct drm_file *file_private)
893{
894 struct drm_syncobj_handle *args = data;
895
896 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
897 return -EOPNOTSUPP;
898
899 if (args->pad)
900 return -EINVAL;
901
902 if (args->flags != 0 &&
903 args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
904 return -EINVAL;
905
906 if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
907 return drm_syncobj_export_sync_file(file_private, args->handle,
908 &args->fd);
909
910 return drm_syncobj_handle_to_fd(file_private, args->handle,
911 &args->fd);
912}
913
914int
915drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
916 struct drm_file *file_private)
917{
918 struct drm_syncobj_handle *args = data;
919
920 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
921 return -EOPNOTSUPP;
922
923 if (args->pad)
924 return -EINVAL;
925
926 if (args->flags != 0 &&
927 args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
928 return -EINVAL;
929
930 if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
931 return drm_syncobj_import_sync_file_fence(file_private,
932 args->fd,
933 args->handle);
934
935 return drm_syncobj_fd_to_handle(file_private, args->fd,
936 &args->handle);
937}
938
939
/*
 * Try to flatten a dma_fence_chain into a dma_fence_array so that it can be
 * added as timeline fence to a chain again.
 *
 * On success *@f is replaced (old reference dropped, new one held by the
 * caller).  Returns 0 on success or -ENOMEM.
 */
static int drm_syncobj_flatten_chain(struct dma_fence **f)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(*f);
	struct dma_fence *tmp, **fences;
	struct dma_fence_array *array;
	unsigned int count;

	/* Not a chain: nothing to flatten. */
	if (!chain)
		return 0;

	/* First pass: count the links so the array can be sized. */
	count = 0;
	dma_fence_chain_for_each(tmp, &chain->base)
		++count;

	fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Second pass: collect a reference to every link. */
	count = 0;
	dma_fence_chain_for_each(tmp, &chain->base)
		fences[count++] = dma_fence_get(tmp);

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1),
				       1, false);
	if (!array)
		goto free_fences;

	/* Swap the caller's chain reference for the new array. */
	dma_fence_put(*f);
	*f = &array->base;
	return 0;

free_fences:
	/* Creation failed: drop the collected references ourselves. */
	while (count--)
		dma_fence_put(fences[count]);

	kfree(fences);
	return -ENOMEM;
}
983
/* Transfer the fence at @args->src_point of the source syncobj to point
 * @args->dst_point on the destination timeline syncobj.
 */
static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
					    struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *timeline_syncobj = NULL;
	struct dma_fence_chain *chain;
	struct dma_fence *fence;
	int ret;

	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!timeline_syncobj) {
		return -ENOENT;
	}
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags,
				     &fence);
	if (ret)
		goto err_put_timeline;

	/* Flatten a source chain so it can be re-added to a chain. */
	ret = drm_syncobj_flatten_chain(&fence);
	if (ret)
		goto err_free_fence;

	chain = dma_fence_chain_alloc();
	if (!chain) {
		ret = -ENOMEM;
		goto err_free_fence;
	}

	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err_free_fence:
	dma_fence_put(fence);
err_put_timeline:
	drm_syncobj_put(timeline_syncobj);

	return ret;
}
1020
1021static int
1022drm_syncobj_transfer_to_binary(struct drm_file *file_private,
1023 struct drm_syncobj_transfer *args)
1024{
1025 struct drm_syncobj *binary_syncobj = NULL;
1026 struct dma_fence *fence;
1027 int ret;
1028
1029 binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
1030 if (!binary_syncobj)
1031 return -ENOENT;
1032 ret = drm_syncobj_find_fence(file_private, args->src_handle,
1033 args->src_point, args->flags, &fence);
1034 if (ret)
1035 goto err;
1036 drm_syncobj_replace_fence(binary_syncobj, fence);
1037 dma_fence_put(fence);
1038err:
1039 drm_syncobj_put(binary_syncobj);
1040
1041 return ret;
1042}
1043int
1044drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
1045 struct drm_file *file_private)
1046{
1047 struct drm_syncobj_transfer *args = data;
1048 int ret;
1049
1050 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1051 return -EOPNOTSUPP;
1052
1053 if (args->pad)
1054 return -EINVAL;
1055
1056 if (args->dst_point)
1057 ret = drm_syncobj_transfer_to_timeline(file_private, args);
1058 else
1059 ret = drm_syncobj_transfer_to_binary(file_private, args);
1060
1061 return ret;
1062}
1063
1064static void syncobj_wait_fence_func(struct dma_fence *fence,
1065 struct dma_fence_cb *cb)
1066{
1067 struct syncobj_wait_entry *wait =
1068 container_of(cb, struct syncobj_wait_entry, fence_cb);
1069
1070 wake_up_process(wait->task);
1071}
1072
/* Syncobj callback for waiters registered with WAIT_FOR_SUBMIT /
 * WAIT_AVAILABLE: called when a fence is attached to the syncobj.
 * Runs with syncobj->lock held.
 */
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	/* This happens inside the syncobj lock */
	fence = rcu_dereference_protected(syncobj->fence,
					  lockdep_is_held(&syncobj->lock));
	dma_fence_get(fence);
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		/* No fence, or wait->point not submitted yet: keep waiting
		 * for a later fence attachment. */
		dma_fence_put(fence);
		return;
	} else if (!fence) {
		/* chain_find_seqno() succeeded but set fence to NULL: the
		 * point is already signaled, so hand out a stub fence. */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}

	wake_up_process(wait->task);
	list_del_init(&wait->node);
}
1094
/* Core wait loop shared by the wait ioctls.
 *
 * @syncobjs: array of syncobjs to wait on
 * @user_points: optional user pointer to one u64 timeline point per entry
 *	(NULL means point 0 for every entry)
 * @count: number of entries in @syncobjs
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_* controlling all/any, wait-for-submit
 *	and wait-available semantics
 * @timeout: timeout in jiffies, 0 to poll once
 * @idx: if non-NULL, receives the index of the first satisfied entry
 * @deadline: optional deadline hint forwarded to every fence
 *
 * Returns the remaining timeout in jiffies on success, or a negative errno
 * (-ENOMEM, -EFAULT, -EINVAL, -ETIME, -ERESTARTSYS).
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  void __user *user_points,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx,
						  ktime_t *deadline)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint64_t *points;
	uint32_t signaled_count, i;

	if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
		     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
		might_sleep();
		lockdep_assert_none_held_once();
	}

	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
	if (points == NULL)
		return -ENOMEM;

	/* No user array means every entry waits on point 0. */
	if (!user_points) {
		memset(points, 0, count * sizeof(uint64_t));

	} else if (copy_from_user(points, user_points,
				  sizeof(uint64_t) * count)) {
		timeout = -EFAULT;
		goto err_free_points;
	}

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		timeout = -ENOMEM;
		goto err_free_points;
	}
	/* Walk the list of sync objects and initialize entries. We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		struct dma_fence *fence;

		/* Record the waiting thread so the fence callback can wake
		 * it (Linux "current" vs OpenBSD "curproc"). */
#ifdef __linux__
		entries[i].task = current;
#else
		entries[i].task = curproc;
#endif
		entries[i].point = points[i];
		fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
			dma_fence_put(fence);
			if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
				     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (fence)
			entries[i].fence = fence;
		else
			entries[i].fence = dma_fence_get_stub();

		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
		    dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Already satisfied: all signaled, or any signaled in any-mode. */
	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called. So here if we fail to match signaled_count, we need to
	 * fallthough and try a 0 timeout wait!
	 */

	if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
		     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
		for (i = 0; i < count; ++i)
			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
	}

	if (deadline) {
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;
			dma_fence_set_deadline(fence, *deadline);
		}
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* dma_fence_add_callback() failing (fence already
			 * signaled) also counts as satisfied. */
			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
			    dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

err_free_points:
	kfree(points);

	return timeout;
}
1259
1260/**
1261 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
1262 *
1263 * @timeout_nsec: timeout nsec component in ns, 0 for poll
1264 *
1265 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
1266 */
1267signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
1268{
1269 ktime_t abs_timeout, now;
1270 u64 timeout_ns, timeout_jiffies64;
1271
1272 /* make 0 timeout means poll - absolute 0 doesn't seem valid */
1273 if (timeout_nsec == 0)
1274 return 0;
1275
1276 abs_timeout = ns_to_ktime(timeout_nsec);
1277 now = ktime_get();
1278
1279 if (!ktime_after(abs_timeout, now))
1280 return 0;
1281
1282 timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
1283
1284 timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
1285 /* clamp timeout to avoid infinite timeout */
1286 if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
1287 return MAX_SCHEDULE_TIMEOUT - 1;
1288
1289 return timeout_jiffies64 + 1;
1290}
1291EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
1292
1293static int drm_syncobj_array_wait(struct drm_device *dev,
1294 struct drm_file *file_private,
1295 struct drm_syncobj_wait *wait,
1296 struct drm_syncobj_timeline_wait *timeline_wait,
1297 struct drm_syncobj **syncobjs, bool timeline,
1298 ktime_t *deadline)
1299{
1300 signed long timeout = 0;
1301 uint32_t first = ~0;
1302
1303 if (!timeline) {
1304 timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
1305 timeout = drm_syncobj_array_wait_timeout(syncobjs,
1306 NULL,
1307 wait->count_handles,
1308 wait->flags,
1309 timeout, &first,
1310 deadline);
1311 if (timeout < 0)
1312 return timeout;
1313 wait->first_signaled = first;
1314 } else {
1315 timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
1316 timeout = drm_syncobj_array_wait_timeout(syncobjs,
1317 u64_to_user_ptr(timeline_wait->points),
1318 timeline_wait->count_handles,
1319 timeline_wait->flags,
1320 timeout, &first,
1321 deadline);
1322 if (timeout < 0)
1323 return timeout;
1324 timeline_wait->first_signaled = first;
1325 }
1326 return 0;
1327}
1328
1329static int drm_syncobj_array_find(struct drm_file *file_private,
1330 void __user *user_handles,
1331 uint32_t count_handles,
1332 struct drm_syncobj ***syncobjs_out)
1333{
1334 uint32_t i, *handles;
1335 struct drm_syncobj **syncobjs;
1336 int ret;
1337
1338 handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
1339 if (handles == NULL)
1340 return -ENOMEM;
1341
1342 if (copy_from_user(handles, user_handles,
1343 sizeof(uint32_t) * count_handles)) {
1344 ret = -EFAULT;
1345 goto err_free_handles;
1346 }
1347
1348 syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
1349 if (syncobjs == NULL) {
1350 ret = -ENOMEM;
1351 goto err_free_handles;
1352 }
1353
1354 for (i = 0; i < count_handles; i++) {
1355 syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
1356 if (!syncobjs[i]) {
1357 ret = -ENOENT;
1358 goto err_put_syncobjs;
1359 }
1360 }
1361
1362 kfree(handles);
1363 *syncobjs_out = syncobjs;
1364 return 0;
1365
1366err_put_syncobjs:
1367 while (i-- > 0)
1368 drm_syncobj_put(syncobjs[i]);
1369 kfree(syncobjs);
1370err_free_handles:
1371 kfree(handles);
1372
1373 return ret;
1374}
1375
1376static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
1377 uint32_t count)
1378{
1379 uint32_t i;
1380
1381 for (i = 0; i < count; i++)
1382 drm_syncobj_put(syncobjs[i]);
1383 kfree(syncobjs);
1384}
1385
1386int
1387drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
1388 struct drm_file *file_private)
1389{
1390 struct drm_syncobj_wait *args = data;
1391 struct drm_syncobj **syncobjs;
1392 unsigned int possible_flags;
1393 ktime_t t, *tp = NULL;
1394 int ret = 0;
1395
1396 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1397 return -EOPNOTSUPP;
1398
1399 possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1400 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1401 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
1402
1403 if (args->flags & ~possible_flags)
1404 return -EINVAL;
1405
1406 if (args->count_handles == 0)
1407 return 0;
1408
1409 ret = drm_syncobj_array_find(file_private,
1410 u64_to_user_ptr(args->handles),
1411 args->count_handles,
1412 &syncobjs);
1413 if (ret < 0)
1414 return ret;
1415
1416 if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) {
1417 t = ns_to_ktime(args->deadline_nsec);
1418 tp = &t;
1419 }
1420
1421 ret = drm_syncobj_array_wait(dev, file_private,
1422 args, NULL, syncobjs, false, tp);
1423
1424 drm_syncobj_array_free(syncobjs, args->count_handles);
1425
1426 return ret;
1427}
1428
1429int
1430drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
1431 struct drm_file *file_private)
1432{
1433 struct drm_syncobj_timeline_wait *args = data;
1434 struct drm_syncobj **syncobjs;
1435 unsigned int possible_flags;
1436 ktime_t t, *tp = NULL;
1437 int ret = 0;
1438
1439 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1440 return -EOPNOTSUPP;
1441
1442 possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1443 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1444 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE |
1445 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
1446
1447 if (args->flags & ~possible_flags)
1448 return -EINVAL;
1449
1450 if (args->count_handles == 0)
1451 return 0;
1452
1453 ret = drm_syncobj_array_find(file_private,
1454 u64_to_user_ptr(args->handles),
1455 args->count_handles,
1456 &syncobjs);
1457 if (ret < 0)
1458 return ret;
1459
1460 if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) {
1461 t = ns_to_ktime(args->deadline_nsec);
1462 tp = &t;
1463 }
1464
1465 ret = drm_syncobj_array_wait(dev, file_private,
1466 NULL, args, syncobjs, true, tp);
1467
1468 drm_syncobj_array_free(syncobjs, args->count_handles);
1469
1470 return ret;
1471}
1472
1473static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
1474 struct dma_fence_cb *cb)
1475{
1476 struct syncobj_eventfd_entry *entry =
1477 container_of(cb, struct syncobj_eventfd_entry, fence_cb);
1478
1479 eventfd_signal(entry->ev_fd_ctx);
1480 syncobj_eventfd_entry_free(entry);
1481}
1482
/* Syncobj callback for eventfd entries: called when a fence is attached to
 * the syncobj (with the syncobj lock held — see the rcu_dereference below).
 * Either signals the eventfd immediately or arms a fence callback that
 * does so, depending on entry->flags.
 */
static void
syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
			   struct syncobj_eventfd_entry *entry)
{
	int ret;
	struct dma_fence *fence;

	/* This happens inside the syncobj lock */
	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
	if (!fence)
		return;

	ret = dma_fence_chain_find_seqno(&fence, entry->point);
	if (ret != 0) {
		/* The given seqno has not been submitted yet. */
		dma_fence_put(fence);
		return;
	} else if (!fence) {
		/* If dma_fence_chain_find_seqno returns 0 but sets the fence
		 * to NULL, it implies that the given seqno is signaled and a
		 * later seqno has already been submitted. Assign a stub fence
		 * so that the eventfd still gets signaled below.
		 */
		fence = dma_fence_get_stub();
	}

	/* Detach from the syncobj's callback list; the entry now belongs to
	 * the fence path below. */
	list_del_init(&entry->node);
	entry->fence = fence;

	if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
		/* Only waiting for submission: the point exists, signal now. */
		eventfd_signal(entry->ev_fd_ctx);
		syncobj_eventfd_entry_free(entry);
	} else {
		ret = dma_fence_add_callback(fence, &entry->fence_cb,
					     syncobj_eventfd_entry_fence_func);
		if (ret == -ENOENT) {
			/* Fence already signaled, the callback will never
			 * fire — signal and free here instead. */
			eventfd_signal(entry->ev_fd_ctx);
			syncobj_eventfd_entry_free(entry);
		}
	}
}
1524
/* DRM_IOCTL_SYNCOBJ_EVENTFD: not wired up in this port — we bail out with
 * -EOPNOTSUPP before the (currently dead) Linux implementation kept under
 * "#ifdef notyet" below.
 */
int
drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	return -EOPNOTSUPP;
#ifdef notyet
	struct drm_syncobj_eventfd *args = data;
	struct drm_syncobj *syncobj;
	struct eventfd_ctx *ev_fd_ctx;
	struct syncobj_eventfd_entry *entry;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)
		return -EINVAL;

	if (args->pad)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, args->handle);
	if (!syncobj)
		return -ENOENT;

	ev_fd_ctx = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(ev_fd_ctx)) {
		ret = PTR_ERR(ev_fd_ctx);
		goto err_fdget;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}
	entry->syncobj = syncobj;
	entry->ev_fd_ctx = ev_fd_ctx;
	entry->point = args->point;
	entry->flags = args->flags;

	drm_syncobj_add_eventfd(syncobj, entry);
	drm_syncobj_put(syncobj);

	return 0;

err_kzalloc:
	eventfd_ctx_put(ev_fd_ctx);
err_fdget:
	drm_syncobj_put(syncobj);
	return ret;
#endif
}
1578
1579int
1580drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
1581 struct drm_file *file_private)
1582{
1583 struct drm_syncobj_array *args = data;
1584 struct drm_syncobj **syncobjs;
1585 uint32_t i;
1586 int ret;
1587
1588 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1589 return -EOPNOTSUPP;
1590
1591 if (args->pad != 0)
1592 return -EINVAL;
1593
1594 if (args->count_handles == 0)
1595 return -EINVAL;
1596
1597 ret = drm_syncobj_array_find(file_private,
1598 u64_to_user_ptr(args->handles),
1599 args->count_handles,
1600 &syncobjs);
1601 if (ret < 0)
1602 return ret;
1603
1604 for (i = 0; i < args->count_handles; i++)
1605 drm_syncobj_replace_fence(syncobjs[i], NULL);
1606
1607 drm_syncobj_array_free(syncobjs, args->count_handles);
1608
1609 return 0;
1610}
1611
1612int
1613drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
1614 struct drm_file *file_private)
1615{
1616 struct drm_syncobj_array *args = data;
1617 struct drm_syncobj **syncobjs;
1618 uint32_t i;
1619 int ret;
1620
1621 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1622 return -EOPNOTSUPP;
1623
1624 if (args->pad != 0)
1625 return -EINVAL;
1626
1627 if (args->count_handles == 0)
1628 return -EINVAL;
1629
1630 ret = drm_syncobj_array_find(file_private,
1631 u64_to_user_ptr(args->handles),
1632 args->count_handles,
1633 &syncobjs);
1634 if (ret < 0)
1635 return ret;
1636
1637 for (i = 0; i < args->count_handles; i++) {
1638 ret = drm_syncobj_assign_null_handle(syncobjs[i]);
1639 if (ret < 0)
1640 break;
1641 }
1642
1643 drm_syncobj_array_free(syncobjs, args->count_handles);
1644
1645 return ret;
1646}
1647
/* DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL: for each handle i, attach a stub
 * (signaled) fence at timeline point points[i] of the corresponding syncobj.
 */
int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	struct dma_fence_chain **chains;
	uint64_t *points;
	uint32_t i, j;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	points = kmalloc_array(args->count_handles, sizeof(*points),
			       GFP_KERNEL);
	if (!points) {
		ret = -ENOMEM;
		goto out;
	}
	/* A NULL user points array means "signal point 0 everywhere". */
	if (!u64_to_user_ptr(args->points)) {
		memset(points, 0, args->count_handles * sizeof(uint64_t));
	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
				  sizeof(uint64_t) * args->count_handles)) {
		ret = -EFAULT;
		goto err_points;
	}

	/* Allocate every chain node up front so the signaling loop below
	 * cannot fail half-way through. */
	chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
	if (!chains) {
		ret = -ENOMEM;
		goto err_points;
	}
	for (i = 0; i < args->count_handles; i++) {
		chains[i] = dma_fence_chain_alloc();
		if (!chains[i]) {
			/* Free only the chains allocated so far. */
			for (j = 0; j < i; j++)
				dma_fence_chain_free(chains[j]);
			ret = -ENOMEM;
			goto err_chains;
		}
	}

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence *fence = dma_fence_get_stub();

		drm_syncobj_add_point(syncobjs[i], chains[i],
				      fence, points[i]);
		dma_fence_put(fence);
	}
err_chains:
	kfree(chains);
err_points:
	kfree(points);
out:
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
1720
/* DRM_IOCTL_SYNCOBJ_QUERY: report, for each syncobj, the last signaled
 * timeline point (default) or the last submitted point
 * (DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED), copied back to the userspace
 * points array.
 */
int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	uint64_t __user *points = u64_to_user_ptr(args->points);
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence_chain *chain;
		struct dma_fence *fence;
		uint64_t point;

		fence = drm_syncobj_fence_get(syncobjs[i]);
		chain = to_dma_fence_chain(fence);
		if (chain) {
			struct dma_fence *iter, *last_signaled =
				dma_fence_get(fence);

			if (args->flags &
			    DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
				/* The chain head's seqno is the last
				 * submitted point. */
				point = fence->seqno;
			} else {
				/* Walk the chain, stopping when the contexts
				 * diverge. */
				dma_fence_chain_for_each(iter, fence) {
					if (iter->context != fence->context) {
						dma_fence_put(iter);
						/* It is most likely that timeline has
						 * unordered points. */
						break;
					}
					dma_fence_put(last_signaled);
					last_signaled = dma_fence_get(iter);
				}
				point = dma_fence_is_signaled(last_signaled) ?
					last_signaled->seqno :
					to_dma_fence_chain(last_signaled)->prev_seqno;
			}
			dma_fence_put(last_signaled);
		} else {
			/* Not a chain (binary syncobj or no fence): 0. */
			point = 0;
		}
		dma_fence_put(fence);
		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
		ret = ret ? -EFAULT : 0;
		if (ret)
			break;
	}
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}