/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
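
/*
 * Usage sketch (illustrative, not part of this file): a driver embeds
 * struct sync_timeline as the first member of its own timeline type and
 * passes the enclosing size, which is why sizes smaller than
 * sizeof(struct sync_timeline) are rejected above. struct my_timeline
 * and my_timeline_ops below are hypothetical names.
 *
 *	struct my_timeline {
 *		struct sync_timeline obj;	// must be first
 *		u32 value;			// driver-private state
 *	};
 *
 *	struct my_timeline *tl = (struct my_timeline *)
 *		sync_timeline_create(&my_timeline_ops,
 *				     sizeof(struct my_timeline), "my_tl");
 */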

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;
	/*
	 * Ensure timeline is marked as destroyed before
	 * changing timeline's fences status.
	 */
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);

	kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
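
/*
 * Usage sketch: sync_pts are likewise embedded in a larger driver struct
 * with struct sync_pt first; the timeline's ops (has_signaled, compare,
 * dup, ...) operate on that enclosing type. struct my_pt and its value
 * field are hypothetical.
 *
 *	struct my_pt {
 *		struct sync_pt pt;	// must be first
 *		u32 value;		// point on the timeline
 *	};
 *
 *	struct my_pt *p = (struct my_pt *)
 *		sync_pt_create(&tl->obj, sizeof(struct my_pt));
 */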

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt was signaled before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
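
/*
 * Usage sketch: handing a fence to userspace. A driver wraps a freshly
 * created pt in a fence and installs it on an fd; error handling (a
 * failed fd allocation or fence creation) is elided here, and p is the
 * hypothetical pt from the sketch above.
 *
 *	struct sync_fence *fence = sync_fence_create("my_fence", &p->pt);
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *	sync_fence_install(fence, fd);	// fd now owns the fence's file
 */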

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/*
			 * collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
				    == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	struct list_head *pos;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_pt_activate(pt);
	}

	/*
	 * signal the fence in case one of its pts was signaled before
	 * it was activated
	 */
	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
					      struct sync_pt,
					      pt_list));

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
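
/*
 * Usage sketch: merging two fences. Pts that share a timeline are
 * collapsed to the later of the two (see sync_fence_merge_pts() above),
 * so the merged fence signals only once every source pt has signaled.
 * The inputs are not consumed; the caller owns the new fence.
 *
 *	struct sync_fence *both = sync_fence_merge("a+b", fence_a, fence_b);
 *	if (both == NULL)
 *		return -ENOMEM;
 */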

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
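
/*
 * Usage sketch: asynchronous waiting, assuming the sync_fence_waiter_init()
 * helper declared in sync.h. The callback runs from sync_fence_signal_pt()
 * once the fence signals; if the fence has already signaled,
 * sync_fence_wait_async() returns the fence's non-zero status instead of
 * queueing the waiter.
 *
 *	static void my_cb(struct sync_fence *fence,
 *			  struct sync_fence_waiter *waiter)
 *	{
 *		// called from the signaling context; keep it short
 *	}
 *
 *	sync_fence_waiter_init(&my_waiter, my_cb);
 *	err = sync_fence_wait_async(fence, &my_waiter);
 */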

int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		if (timeout > 0) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
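
/*
 * Usage sketch: the timeout is in milliseconds. A negative timeout waits
 * indefinitely, a positive one waits at most that long, and a timeout of
 * zero merely polls, returning -ETIME immediately if the fence has not
 * signaled. handle_timeout() below is a hypothetical helper.
 *
 *	err = sync_fence_wait(fence, 1000);	// wait up to one second
 *	if (err == -ETIME)
 *		handle_timeout();
 */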

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * Start with its membership in the global fence list.
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * Remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
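
/*
 * Userspace sketch: the fd-based API mirrors the handlers above. The
 * SYNC_IOC_* numbers and struct sync_merge_data come from the uapi sync
 * header; the exact header path varies by tree, so treat the includes as
 * an assumption.
 *
 *	__s32 timeout_ms = 1000;
 *	ioctl(fence_fd, SYNC_IOC_WAIT, &timeout_ms);
 *
 *	struct sync_merge_data m = { .fd2 = other_fd, .name = "merged" };
 *	ioctl(fence_fd, SYNC_IOC_MERGE, &m);	// merged fd returned in m.fence
 */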

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);

		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];

		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
							    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_puts(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_puts(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];

		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_puts(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_puts(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_puts(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_puts(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open = sync_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];

			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif