import PULS_20160108
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / staging / android / sync.c
1 /*
2 * drivers/base/sync.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17 #include <linux/debugfs.h>
18 #include <linux/export.h>
19 #include <linux/file.h>
20 #include <linux/fs.h>
21 #include <linux/kernel.h>
22 #include <linux/poll.h>
23 #include <linux/sched.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/uaccess.h>
27 #include <linux/anon_inodes.h>
28
29 #include "sync.h"
30
31 #define CREATE_TRACE_POINTS
32 #include "trace/sync.h"
33
34 // [MTK] {{{
35 // disable these logs temporarily
36 // TODO: redirect these logs to proc_fs
37 //#define SYNC_OBJ_DEBUG
38 // [MTK] }}}
39
/* Forward declarations for helpers defined later in this file. */
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

/* Global list of every timeline, for the debugfs dump; guarded by its lock. */
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

/* Global list of every fence, for the debugfs dump; guarded by its lock. */
static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
50
51 struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
52 int size, const char *name)
53 {
54 struct sync_timeline *obj;
55 unsigned long flags;
56
57 if (size < sizeof(struct sync_timeline))
58 return NULL;
59
60 obj = kzalloc(size, GFP_KERNEL);
61 if (obj == NULL)
62 return NULL;
63
64 kref_init(&obj->kref);
65 obj->ops = ops;
66 strlcpy(obj->name, name, sizeof(obj->name));
67
68 INIT_LIST_HEAD(&obj->child_list_head);
69 spin_lock_init(&obj->child_list_lock);
70
71 INIT_LIST_HEAD(&obj->active_list_head);
72 spin_lock_init(&obj->active_list_lock);
73
74 spin_lock_irqsave(&sync_timeline_list_lock, flags);
75 list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
76 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
77
78 return obj;
79 }
80 EXPORT_SYMBOL(sync_timeline_create);
81
82 static void sync_timeline_free(struct kref *kref)
83 {
84 struct sync_timeline *obj =
85 container_of(kref, struct sync_timeline, kref);
86 unsigned long flags;
87
88 spin_lock_irqsave(&sync_timeline_list_lock, flags);
89 list_del(&obj->sync_timeline_list);
90 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
91
92 if (obj->ops->release_obj)
93 obj->ops->release_obj(obj);
94
95 kfree(obj);
96 }
97
void sync_timeline_destroy(struct sync_timeline *obj)
{
	/* mark the timeline dead so pending pts resolve to -ENOENT */
	obj->destroyed = true;
	/* make the destroyed flag visible before signaling children */
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);

	/* drop the creation reference; live pts still hold their own refs */
	kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
111
112 static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
113 {
114 unsigned long flags;
115
116 pt->parent = obj;
117
118 spin_lock_irqsave(&obj->child_list_lock, flags);
119 list_add_tail(&pt->child_list, &obj->child_list_head);
120 spin_unlock_irqrestore(&obj->child_list_lock, flags);
121 }
122
123 static void sync_timeline_remove_pt(struct sync_pt *pt)
124 {
125 struct sync_timeline *obj = pt->parent;
126 unsigned long flags;
127
128 spin_lock_irqsave(&obj->active_list_lock, flags);
129 if (!list_empty(&pt->active_list))
130 list_del_init(&pt->active_list);
131 spin_unlock_irqrestore(&obj->active_list_lock, flags);
132
133 spin_lock_irqsave(&obj->child_list_lock, flags);
134 if (!list_empty(&pt->child_list)) {
135 list_del_init(&pt->child_list);
136 }
137 spin_unlock_irqrestore(&obj->child_list_lock, flags);
138 }
139
/*
 * Re-evaluate every pt on the timeline's active list and dispatch those
 * that have now signaled.  Signaled pts are collected on a private list
 * under the lock, then their fences are notified outside the lock, since
 * waiter callbacks must not run with active_list_lock held.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			/* pin the fence until it is dispatched below */
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
173
174 struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
175 {
176 struct sync_pt *pt;
177
178 if (size < sizeof(struct sync_pt))
179 return NULL;
180
181 pt = kzalloc(size, GFP_KERNEL);
182 if (pt == NULL)
183 return NULL;
184
185 INIT_LIST_HEAD(&pt->active_list);
186 kref_get(&parent->kref);
187 sync_timeline_add_pt(parent, pt);
188
189 return pt;
190 }
191 EXPORT_SYMBOL(sync_pt_create);
192
void sync_pt_free(struct sync_pt *pt)
{
	/* let the driver free its per-pt state first */
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	/* unlink from the timeline's active and child lists */
	sync_timeline_remove_pt(pt);

	/* drop the timeline reference taken in sync_pt_create() */
	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
205
/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	/* poll the driver only while the pt is still pending (status 0) */
	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	/* a destroyed timeline forces all still-pending pts into error */
	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	/* record the moment of the pending -> signaled/error transition */
	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}
222
223 static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
224 {
225 return pt->parent->ops->dup(pt);
226 }
227
228 /* Adds a sync pt to the active queue. Called when added to a fence */
229 static void sync_pt_activate(struct sync_pt *pt)
230 {
231 struct sync_timeline *obj = pt->parent;
232 unsigned long flags;
233 int err;
234
235 spin_lock_irqsave(&obj->active_list_lock, flags);
236
237 err = _sync_pt_has_signaled(pt);
238 if (err != 0)
239 goto out;
240
241 list_add_tail(&pt->active_list, &obj->active_list_head);
242
243 out:
244 spin_unlock_irqrestore(&obj->active_list_lock, flags);
245 }
246
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


/* file_operations backing the anonymous "sync_fence" fd given to userspace */
static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	/* same handler serves 32-bit compat callers */
	.compat_ioctl = sync_fence_ioctl,
};
259
260 static struct sync_fence *sync_fence_alloc(const char *name)
261 {
262 struct sync_fence *fence;
263 unsigned long flags;
264
265 fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
266 if (fence == NULL)
267 return NULL;
268
269 fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
270 fence, 0);
271 if (IS_ERR(fence->file))
272 goto err;
273
274 kref_init(&fence->kref);
275 strlcpy(fence->name, name, sizeof(fence->name));
276
277 INIT_LIST_HEAD(&fence->pt_list_head);
278 INIT_LIST_HEAD(&fence->waiter_list_head);
279 spin_lock_init(&fence->waiter_list_lock);
280
281 init_waitqueue_head(&fence->wq);
282
283 spin_lock_irqsave(&sync_fence_list_lock, flags);
284 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
285 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
286
287 return fence;
288
289 err:
290 kfree(fence);
291 return NULL;
292 }
293
/* TODO: implement a create which takes more that one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	/* a pt can belong to at most one fence */
	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt was activated before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
319
320 static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
321 {
322 struct list_head *pos;
323
324 list_for_each(pos, &src->pt_list_head) {
325 struct sync_pt *orig_pt =
326 container_of(pos, struct sync_pt, pt_list);
327 // [MTK] {{{
328 // rjan Eide <orjan.eide@arm.com>
329 struct sync_pt *new_pt;
330
331 /* Skip already signaled points */
332 if (1 == orig_pt->status)
333 continue;
334 // [MTK] }}}
335
336 new_pt = sync_pt_dup(orig_pt);
337
338 if (new_pt == NULL)
339 return -ENOMEM;
340
341 new_pt->fence = dst;
342 list_add(&new_pt->pt_list, &dst->pt_list_head);
343 }
344
345 return 0;
346 }
347
/*
 * Merge the pts of @src into @dst.  When both fences carry a pt on the
 * same timeline only the later of the two survives, so the merged fence
 * signals once every timeline involved has reached its furthest point.
 * Returns 0 on success or -ENOMEM.
 */
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		// [MTK] {{{
		// Orjan Eide <orjan.eide@arm.com>
		/* Skip already signaled points */
		if (1 == src_pt->status)
			continue;
		// [MTK] }}}

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						 == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					/* swap in the later pt, drop the older */
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		/* dst has no pt on this timeline yet: copy src's over */
		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
		}
	}

	return 0;
}
402
403 static void sync_fence_detach_pts(struct sync_fence *fence)
404 {
405 struct list_head *pos, *n;
406
407 list_for_each_safe(pos, n, &fence->pt_list_head) {
408 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
409 sync_timeline_remove_pt(pt);
410 }
411 }
412
413 static void sync_fence_free_pts(struct sync_fence *fence)
414 {
415 struct list_head *pos, *n;
416
417 list_for_each_safe(pos, n, &fence->pt_list_head) {
418 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
419 sync_pt_free(pt);
420 }
421 }
422
423 struct sync_fence *sync_fence_fdget(int fd)
424 {
425 struct file *file = fget(fd);
426
427 if (file == NULL)
428 return NULL;
429
430 if (file->f_op != &sync_fence_fops)
431 goto err;
432
433 return file->private_data;
434
435 err:
436 fput(file);
437 return NULL;
438 }
439 EXPORT_SYMBOL(sync_fence_fdget);
440
/* Release the file reference taken by sync_fence_fdget(). */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);
446
/* Publish the fence's file in the caller's fd table at @fd. */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
452
453 static int sync_fence_get_status(struct sync_fence *fence)
454 {
455 struct list_head *pos;
456 int status = 1;
457
458 list_for_each(pos, &fence->pt_list_head) {
459 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
460 int pt_status = pt->status;
461
462 if (pt_status < 0) {
463 status = pt_status;
464 break;
465 } else if (status == 1) {
466 status = pt_status;
467 }
468 }
469
470 return status;
471 }
472
473 struct sync_fence *sync_fence_merge(const char *name,
474 struct sync_fence *a, struct sync_fence *b)
475 {
476 struct sync_fence *fence;
477 struct list_head *pos;
478 int err;
479
480 fence = sync_fence_alloc(name);
481 if (fence == NULL)
482 return NULL;
483
484 err = sync_fence_copy_pts(fence, a);
485 if (err < 0)
486 goto err;
487
488 err = sync_fence_merge_pts(fence, b);
489 if (err < 0)
490 goto err;
491
492 // [MTK] {{{
493 // rjan Eide <orjan.eide@arm.com>
494 /* Make sure there is at least one point in the fence */
495 if (list_empty(&fence->pt_list_head)) {
496 struct sync_pt *orig_pt = list_first_entry(&a->pt_list_head,
497 struct sync_pt, pt_list);
498 struct sync_pt *new_pt = sync_pt_dup(orig_pt);
499
500 new_pt->fence = fence;
501 list_add(&new_pt->pt_list, &fence->pt_list_head);
502 }
503 // [MTK] }}}
504
505 list_for_each(pos, &fence->pt_list_head) {
506 struct sync_pt *pt =
507 container_of(pos, struct sync_pt, pt_list);
508 sync_pt_activate(pt);
509 }
510
511 /*
512 * signal the fence in case one of it's pts were activated before
513 * they were activated
514 */
515 sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
516 struct sync_pt,
517 pt_list));
518
519 return fence;
520 err:
521 sync_fence_free_pts(fence);
522 kfree(fence);
523 return NULL;
524 }
525 EXPORT_SYMBOL(sync_fence_merge);
526
/*
 * Called when @pt may have signaled.  Recomputes the fence's aggregate
 * status and, on the single 0 -> signaled/error transition, dispatches
 * all async waiter callbacks and wakes sleepers.  Callbacks run without
 * the waiter list lock held.
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		/* we won the race: claim every waiter for dispatch below */
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		/* already signaled, or still pending: nothing to dispatch */
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}
565
566 int sync_fence_wait_async(struct sync_fence *fence,
567 struct sync_fence_waiter *waiter)
568 {
569 unsigned long flags;
570 int err = 0;
571
572 spin_lock_irqsave(&fence->waiter_list_lock, flags);
573
574 if (fence->status) {
575 err = fence->status;
576 goto out;
577 }
578
579 list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
580 out:
581 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
582
583 return err;
584 }
585 EXPORT_SYMBOL(sync_fence_wait_async);
586
587 int sync_fence_cancel_async(struct sync_fence *fence,
588 struct sync_fence_waiter *waiter)
589 {
590 struct list_head *pos;
591 struct list_head *n;
592 unsigned long flags;
593 int ret = -ENOENT;
594
595 spin_lock_irqsave(&fence->waiter_list_lock, flags);
596 /*
597 * Make sure waiter is still in waiter_list because it is possible for
598 * the waiter to be removed from the list while the callback is still
599 * pending.
600 */
601 list_for_each_safe(pos, n, &fence->waiter_list_head) {
602 struct sync_fence_waiter *list_waiter =
603 container_of(pos, struct sync_fence_waiter,
604 waiter_list);
605 if (list_waiter == waiter) {
606 list_del(pos);
607 ret = 0;
608 break;
609 }
610 }
611 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
612 return ret;
613 }
614 EXPORT_SYMBOL(sync_fence_cancel_async);
615
/* Returns true once the fence has signaled or errored (status != 0). */
static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}
625
/*
 * Wait for the fence to signal.  @timeout is in milliseconds; negative
 * waits forever, zero only polls the current status.  Returns 0 on
 * success, -ETIME on timeout, or a negative error (fence error or
 * interrupted sleep).
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		/* note: timeout is in jiffies from here on */
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	/* interrupted by a signal */
	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		if (timeout > 0) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
667
/* kref release: all references gone, reclaim the pts and the fence. */
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}
676
/* Called on the last fput() of the fence's file. */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before droping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	/* drop the initial reference taken in sync_fence_alloc() */
	kref_put(&fence->kref, sync_fence_free);

	return 0;
}
702
703 static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
704 {
705 struct sync_fence *fence = file->private_data;
706
707 poll_wait(file, &fence->wq, wait);
708
709 /*
710 * Make sure that reads to fence->status are ordered with the
711 * wait queue event triggering
712 */
713 smp_rmb();
714
715 if (fence->status == 1)
716 return POLLIN;
717 else if (fence->status < 0)
718 return POLLERR;
719 else
720 return 0;
721 }
722
723 static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
724 {
725 __s32 value;
726
727 if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
728 return -EFAULT;
729
730 return sync_fence_wait(fence, value);
731 }
732
/*
 * SYNC_IOC_MERGE: merge @fence with the fence behind data.fd2 into a
 * new fence and return its fd in data.fence.  The two input fences are
 * left untouched (the caller still owns their fds).
 */
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	/* defend against an unterminated name from userspace */
	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	/* copy the result back before publishing the fd */
	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
781
/*
 * Serialize one pt into @data as a struct sync_pt_info plus optional
 * driver payload.  @size is the space remaining in the caller's buffer.
 * Returns the number of bytes written, or a negative errno.
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	/* let the driver append its own data after the fixed header */
	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}
809
/*
 * SYNC_IOC_FENCE_INFO: copy a struct sync_fence_info_data describing
 * the fence and each of its pts to userspace.  The user supplies its
 * buffer size up front; the length actually used is written back.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	/* cap the kernel-side scratch buffer at 4 KiB */
	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	/* append one sync_pt_info record per pt while space remains */
	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
860
/* Dispatch the sync-fence ioctls (also used as the compat handler). */
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;
	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
879
880 #ifdef CONFIG_DEBUG_FS
/* Map a pt/fence status to its debugfs label: >0 signaled, 0 active, <0 error. */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";

	return (status == 0) ? "active" : "error";
}
890
/*
 * Print one pt to the debugfs seq file.  @fence selects the label
 * style: true when printing under a fence (prefix with the timeline
 * name), false when printing under a timeline.
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	// [MTK] {{{
	// only dump non-signaled fence
	if (status > 0) return;
	// [MTK] }}}

	seq_printf(s, " %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		/* signaled or errored: show when the transition happened */
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	/* prefer the new value_str hooks; fall back to legacy print_pt */
	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];
		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
							    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}
926
// [MTK] {{{
#ifdef SYNC_OBJ_DEBUG
/* Print one timeline and all of its child pts to the debugfs seq file. */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	/* prefer the new value_str hook; fall back to legacy print_obj */
	if (obj->ops->timeline_value_str) {
		char value[64];
		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
#endif
// [MTK] }}}
957
958 static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
959 {
960 struct list_head *pos;
961 unsigned long flags;
962
963 seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
964 sync_status_str(fence->status));
965
966 list_for_each(pos, &fence->pt_list_head) {
967 struct sync_pt *pt =
968 container_of(pos, struct sync_pt, pt_list);
969 sync_print_pt(s, pt, true);
970 }
971
972 spin_lock_irqsave(&fence->waiter_list_lock, flags);
973 list_for_each(pos, &fence->waiter_list_head) {
974 struct sync_fence_waiter *waiter =
975 container_of(pos, struct sync_fence_waiter,
976 waiter_list);
977
978 seq_printf(s, "waiter %pF\n", waiter->callback);
979 }
980 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
981 }
982
/* Dump all timelines (when SYNC_OBJ_DEBUG) and all non-signaled fences. */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	// [MTK] {{{
#ifdef SYNC_OBJ_DEBUG
	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
#endif
	// [MTK] }}}

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		// [MTK] {{{
		// only dump non-signaled fence
		if (fence->status > 0)
			continue;
		// [MTK] }}}

		sync_print_fence(s, fence);
		// [MTK] {{{
		//seq_printf(s, "\n");
		// [MTK] }}}
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}
1026
/* debugfs open: route reads through sync_debugfs_show() via seq_file. */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}
1031
/* file_operations for the debugfs "sync" entry (read-only seq_file) */
static const struct file_operations sync_debugfs_fops = {
	.open = sync_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1038
/* Create the read-only debugfs entry; runs as a late_initcall at boot. */
static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);
1045
// [MTK] {{{
#ifdef SYNC_OBJ_DEBUG
#define DUMP_CHUNK 256
/* static buffer so the dump needs no allocation at error time */
static char sync_dump_buf[64 * 1024];
#endif
// [MTK] }}}
/* Dump the debugfs sync state to the kernel log on errors/timeouts. */
void sync_dump(void)
{
	// [MTK] {{{
#ifdef SYNC_OBJ_DEBUG
	/* fake a seq_file so sync_debugfs_show() renders into our buffer */
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	/* emit in DUMP_CHUNK pieces to stay within printk line limits */
	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			/* temporarily terminate the chunk in place */
			char c = s.buf[i + DUMP_CHUNK];
			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
#endif
	// [MTK] }}}
}
#else
/* CONFIG_DEBUG_FS disabled: sync_dump is a no-op */
static void sync_dump(void)
{
}
#endif