/*
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"
/*
 * Disable these logs temporarily.
 * TODO: redirect these logs to proc_fs
 */
/* #define SYNC_OBJ_DEBUG */
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);

	kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}
/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};
static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}
/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt had already signaled before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		/* Ørjan Eide <orjan.eide@arm.com> */
		struct sync_pt *new_pt;

		/* Skip already signaled points */
		if (orig_pt->status == 1)
			continue;

		new_pt = sync_pt_dup(orig_pt);
		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
	}

	return 0;
}
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		/* Ørjan Eide <orjan.eide@arm.com> */
		/* Skip already signaled points */
		if (src_pt->status == 1)
			continue;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/*
			 * collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt,
								 src_pt) == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);

					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
		}
	}

	return 0;
}
static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_pt_free(pt);
	}
}
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	struct list_head *pos;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	/* Ørjan Eide <orjan.eide@arm.com> */
	/* Make sure there is at least one point in the fence */
	if (list_empty(&fence->pt_list_head)) {
		struct sync_pt *orig_pt = list_first_entry(&a->pt_list_head,
							   struct sync_pt,
							   pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			goto err;

		new_pt->fence = fence;
		list_add(&new_pt->pt_list, &fence->pt_list_head);
	}

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		sync_pt_activate(pt);
	}

	/*
	 * signal the fence in case one of its pts had already signaled
	 * before it was activated
	 */
	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
					      struct sync_pt, pt_list));

	return fence;

err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	return fence->status != 0;
}
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		if (timeout > 0) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}
static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	/* only dump non-signaled pts */
	if (status > 0)
		return;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);

		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];

		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
							    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}
#ifdef SYNC_OBJ_DEBUG
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];

		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);

		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
#endif
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

#ifdef SYNC_OBJ_DEBUG
	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
#endif

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		/* only dump non-signaled fences */
		if (fence->status > 0)
			continue;

		sync_print_fence(s, fence);
		/* seq_printf(s, "\n"); */
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);
#ifdef SYNC_OBJ_DEBUG
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
#endif

static void sync_dump(void)
{
#ifdef SYNC_OBJ_DEBUG
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];

			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
#endif
}
#else
static void sync_dump(void)
{
}
#endif