/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
 * persistent objects that contain an optional fence. The fence can be
 * replaced with a new fence, or set to NULL.
 *
 * A syncobj can be waited upon, in which case the wait is on the underlying
 * fence.
 *
 * A syncobj can be exported to a file descriptor and imported back; these
 * fds are opaque and have no use other than passing the syncobj between
 * processes.
 *
 * Their primary use-case is to implement Vulkan fences and semaphores.
 *
 * A syncobj has a kref reference count, but also has an optional file.
 * The file is only created once the syncobj is exported, and it takes a
 * reference on the kref.
 */
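
/*
 * Example (illustrative sketch; file_priv, handle and job_fence are
 * placeholder driver-side names): the typical in-kernel producer flow is to
 * look a syncobj up by handle, install the fence of the work just queued,
 * and drop the borrowed reference again.
 *
 *	static int example_attach_job_fence(struct drm_file *file_priv,
 *					    u32 handle,
 *					    struct dma_fence *job_fence)
 *	{
 *		struct drm_syncobj *syncobj;
 *
 *		syncobj = drm_syncobj_find(file_priv, handle);
 *		if (!syncobj)
 *			return -ENOENT;
 *
 *		drm_syncobj_replace_fence(syncobj, 0, job_fence);
 *		drm_syncobj_put(syncobj);
 *		return 0;
 *	}
 */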

#include <drm/drmP.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
#include <linux/sched/signal.h>

#include "drm_internal.h"
#include <drm/drm_syncobj.h>

struct drm_syncobj_stub_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
{
	return "syncobjstub";
}

static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
	.get_driver_name = drm_syncobj_stub_fence_get_name,
	.get_timeline_name = drm_syncobj_stub_fence_get_name,
};


/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);
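
/*
 * Example (illustrative sketch; file_priv and handle are assumed to come
 * from a driver ioctl handler): the find/put pair borrows a syncobj for the
 * duration of an operation, here to peek at whether its current fence has
 * already signaled.
 *
 *	static bool example_is_signaled(struct drm_file *file_priv, u32 handle)
 *	{
 *		struct drm_syncobj *syncobj;
 *		struct dma_fence *fence;
 *		bool signaled = false;
 *
 *		syncobj = drm_syncobj_find(file_priv, handle);
 *		if (!syncobj)
 *			return false;
 *
 *		fence = drm_syncobj_fence_get(syncobj);
 *		if (fence) {
 *			signaled = dma_fence_is_signaled(fence);
 *			dma_fence_put(fence);
 *		}
 *
 *		drm_syncobj_put(syncobj);
 *		return signaled;
 *	}
 */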

static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
					    struct drm_syncobj_cb *cb,
					    drm_syncobj_func_t func)
{
	cb->func = func;
	list_add_tail(&cb->node, &syncobj->cb_list);
}

static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
						 struct dma_fence **fence,
						 struct drm_syncobj_cb *cb,
						 drm_syncobj_func_t func)
{
	int ret;

	WARN_ON(*fence);

	*fence = drm_syncobj_fence_get(syncobj);
	if (*fence)
		return 1;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed. Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	if (syncobj->fence) {
		*fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
								 lockdep_is_held(&syncobj->lock)));
		ret = 1;
	} else {
		*fence = NULL;
		drm_syncobj_add_callback_locked(syncobj, cb, func);
		ret = 0;
	}
	spin_unlock(&syncobj->lock);

	return ret;
}

void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
			      struct drm_syncobj_cb *cb,
			      drm_syncobj_func_t func)
{
	spin_lock(&syncobj->lock);
	drm_syncobj_add_callback_locked(syncobj, cb, func);
	spin_unlock(&syncobj->lock);
}

void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
				 struct drm_syncobj_cb *cb)
{
	spin_lock(&syncobj->lock);
	list_del_init(&cb->node);
	spin_unlock(&syncobj->lock);
}

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @point: timeline point
 * @fence: fence to install in the sync object.
 *
 * This replaces the fence on a sync object, or a timeline point fence.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       u64 point,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct drm_syncobj_cb *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
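
/*
 * Example (illustrative sketch; out_syncobj and job->done_fence are
 * placeholders for driver state): a submit path installs the fence of the
 * job it just queued so waiters are released when the job completes, and
 * passing NULL instead returns the syncobj to the unsignaled state, which is
 * what the reset ioctl further below does.
 *
 *	drm_syncobj_replace_fence(out_syncobj, 0, job->done_fence);
 *
 * and later, to reset it:
 *
 *	drm_syncobj_replace_fence(out_syncobj, 0, NULL);
 */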

static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct drm_syncobj_stub_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
		       &fence->lock, 0, 0);
	dma_fence_signal(&fence->base);

	drm_syncobj_replace_fence(syncobj, 0, &fence->base);

	dma_fence_put(&fence->base);

	return 0;
}

/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle, u64 point,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret = 0;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);
	if (!*fence) {
		ret = -EINVAL;
	}
	drm_syncobj_put(syncobj);
	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
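
/*
 * Example (illustrative sketch; file_priv and handle are assumed driver-side
 * values, and the interruptible wait with no timeout is an arbitrary choice
 * here): a consumer resolves a handle to its current fence and waits on it
 * before touching the protected resource.
 *
 *	static int example_wait_handle(struct drm_file *file_priv, u32 handle)
 *	{
 *		struct dma_fence *fence;
 *		int ret;
 *
 *		ret = drm_syncobj_find_fence(file_priv, handle, 0, &fence);
 *		if (ret)
 *			return ret;
 *
 *		ret = dma_fence_wait(fence, true);
 *		dma_fence_put(fence);
 *		return ret;
 *	}
 */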

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	drm_syncobj_replace_fence(syncobj, 0, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to call when creating a sync object. After
 * creating, drivers probably want to make it available to userspace, either
 * through drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	int ret;
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->lock);

	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
		ret = drm_syncobj_assign_null_handle(syncobj);
		if (ret < 0) {
			drm_syncobj_put(syncobj);
			return ret;
		}
	}

	if (fence)
		drm_syncobj_replace_fence(syncobj, 0, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);
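
/*
 * Example (illustrative sketch): creating a syncobj that starts out
 * signaled, e.g. to back a Vulkan fence created with
 * VK_FENCE_CREATE_SIGNALED_BIT. The stub fence installed by
 * drm_syncobj_assign_null_handle() makes the very first wait succeed
 * immediately.
 *
 *	struct drm_syncobj *syncobj;
 *	int ret;
 *
 *	ret = drm_syncobj_create(&syncobj, DRM_SYNCOBJ_CREATE_SIGNALED, NULL);
 *	if (ret)
 *		return ret;
 */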

/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
					u32 *handle, uint32_t flags)
{
	int ret;
	struct drm_syncobj *syncobj;

	ret = drm_syncobj_create(&syncobj, flags, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};

/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);
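
/*
 * Example (illustrative sketch of the userspace side; drm_fd and handle are
 * assumed to be an open DRM device fd and an existing syncobj handle,
 * includes and error handling abbreviated): exporting a syncobj handle to a
 * file descriptor. The returned fd is opaque and is only useful for passing
 * the syncobj to another process, e.g. over a unix socket with SCM_RIGHTS.
 *
 *	struct drm_syncobj_handle args = {
 *		.handle = handle,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
 *		return -errno;
 *	syncobj_fd = args.fd;
 */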

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;

	if (!syncobj)
		return -EINVAL;

	ret = drm_syncobj_get_fd(syncobj, p_fd);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct file *file;
	int ret;

	file = fget(fd);
	if (!file)
		return -EINVAL;

	if (file->f_op != &drm_syncobj_file_fops) {
		fput(file);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fput(file);
	return ret;
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
					      int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	drm_syncobj_replace_fence(syncobj, 0, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, 0, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* no valid flags yet */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}

struct syncobj_wait_entry {
	struct task_struct *task;
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	struct drm_syncobj_cb syncobj_cb;
};

static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct drm_syncobj_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, syncobj_cb);

	/* This happens inside the syncobj lock */
	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
							      lockdep_is_held(&syncobj->lock)));
	wake_up_process(wait->task);
}

static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries. We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called. So here, if we fail to match signaled_count, we need to
	 * fall through and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i) {
			if (entries[i].fence)
				continue;

			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
							      &entries[i].fence,
							      &entries[i].syncobj_cb,
							      syncobj_wait_syncobj_func);
		}
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		if (entries[i].syncobj_cb.func)
			drm_syncobj_remove_callback(syncobjs[i],
						    &entries[i].syncobj_cb);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return timeout;
}

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: absolute timeout in nanoseconds, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute time given in nanoseconds.
 */
static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* a timeout of 0 means poll - an absolute timeout of 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies64 + 1;
}
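
/*
 * Worked example (added for illustration, HZ = 250 assumed): for an absolute
 * timeout 20 ms after "now", timeout_ns is 20,000,000 and
 * nsecs_to_jiffies64() yields 5 jiffies of 4 ms each. The function returns
 * 5 + 1 = 6; the extra jiffy rounds up so the wait never ends earlier than
 * requested. A timeout that is already in the past returns 0, which the
 * caller treats as a poll.
 */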

static int drm_syncobj_array_wait(struct drm_device *dev,
				  struct drm_file *file_private,
				  struct drm_syncobj_wait *wait,
				  struct drm_syncobj **syncobjs)
{
	signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
	uint32_t first = ~0;

	timeout = drm_syncobj_array_wait_timeout(syncobjs,
						 wait->count_handles,
						 wait->flags,
						 timeout, &first);
	if (timeout < 0)
		return timeout;

	wait->first_signaled = first;
	return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}

int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     args, syncobjs);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
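
/*
 * Example (illustrative sketch of the userspace side; drm_fd and handles[]
 * are assumed to exist, includes and error handling abbreviated): waiting on
 * two syncobj handles until both have signaled, with a 100 ms timeout. Note
 * that timeout_nsec is an absolute CLOCK_MONOTONIC time, since
 * drm_timeout_abs_to_jiffies() above measures it against ktime_get().
 *
 *	struct timespec ts;
 *	struct drm_syncobj_wait wait = {
 *		.handles = (uintptr_t)handles,
 *		.count_handles = 2,
 *		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
 *	};
 *
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	wait.timeout_nsec = ts.tv_sec * 1000000000ll + ts.tv_nsec + 100000000;
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
 *		return -errno;
 */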

int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_replace_fence(syncobjs[i], 0, NULL);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		ret = drm_syncobj_assign_null_handle(syncobjs[i]);
		if (ret < 0)
			break;
	}

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}