3 * Android IPC Subsystem
5 * Copyright (C) 2007-2008 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 #include <asm/cacheflush.h>
22 #include <linux/fdtable.h>
23 #include <linux/file.h>
24 #include <linux/freezer.h>
26 #include <linux/list.h>
27 #include <linux/miscdevice.h>
29 #include <linux/module.h>
30 #include <linux/mutex.h>
31 #include <linux/nsproxy.h>
32 #include <linux/poll.h>
33 #include <linux/debugfs.h>
34 #include <linux/rbtree.h>
35 #include <linux/sched.h>
36 #include <linux/seq_file.h>
37 #include <linux/uaccess.h>
38 #include <linux/vmalloc.h>
39 #include <linux/slab.h>
40 #include <linux/pid_namespace.h>
41 #include <linux/security.h>
42 #include <linux/time.h>
43 #include <linux/delay.h>
44 #include <linux/kthread.h>
45 #include <linux/rtc.h>
46 #include <linux/aee.h>
48 #ifdef CONFIG_MT_PRIO_TRACER
49 #include <linux/prio_tracer.h>
53 #include "binder_trace.h"
55 static DEFINE_MUTEX(binder_main_lock
);
56 static DEFINE_MUTEX(binder_deferred_lock
);
57 static DEFINE_MUTEX(binder_mmap_lock
);
59 static HLIST_HEAD(binder_procs
);
60 static HLIST_HEAD(binder_deferred_list
);
61 static HLIST_HEAD(binder_dead_nodes
);
63 static struct dentry
*binder_debugfs_dir_entry_root
;
64 static struct dentry
*binder_debugfs_dir_entry_proc
;
65 static struct binder_node
*binder_context_mgr_node
;
66 static kuid_t binder_context_mgr_uid
= INVALID_UID
;
67 static int binder_last_id
;
68 static struct workqueue_struct
*binder_deferred_workqueue
;
69 static pid_t system_server_pid
;
71 #define RT_PRIO_INHERIT "v1.7"
72 #ifdef RT_PRIO_INHERIT
73 #include <linux/sched/rt.h>
76 #define MTK_BINDER_DEBUG "v0.1" /* defined for mtk internal added debug code */
78 /************************************************************************************************************************/
79 /* MTK Death Notify | */
80 /* Debug Log Prefix | Description */
81 /* --------------------------------------------------------------------- */
82 /* [DN #1] | Some one requests Death Notify from upper layer. */
83 /* [DN #2] | Some one cancels Death Notify from upper layer. */
84 /* [DN #3] | Binder Driver sends Death Notify to all requesters' Binder Thread. */
85 /* [DN #4] | Some requester's binder_thread_read() handles Death Notify works. */
86 /* [DN #5] | Some requester sends confirmation to Binder Driver. (In IPCThreadState.cpp) */
87 /* [DN #6] | Finally receive requester's confirmation from upper layer. */
88 /************************************************************************************************************************/
89 #define MTK_DEATH_NOTIFY_MONITOR "v0.1"
92 * Revision history of binder monitor
94 * v0.1 - enhance debug log
95 * v0.2 - transaction timeout log
96 * v0.2.1 - buffer allocation debug
98 #ifdef CONFIG_MT_ENG_BUILD
99 #define BINDER_MONITOR "v0.2.1" /* BINDER_MONITOR only turn on for eng build */
102 #ifdef BINDER_MONITOR
103 #define MAX_SERVICE_NAME_LEN 32
104 /************************************************************************************************************************/
105 /* Payload layout of addService(): */
106 /* | Parcel header | IServiceManager.descriptor | Parcel header | Service name | ... */
107 /* (Please refer ServiceManagerNative.java:addService()) */
108 /* IServiceManager.descriptor is 'android.os.IServiceManager' interleaved with character '\0'. */
109 /* that is, 'a', '\0', 'n', '\0', 'd', '\0', 'r', '\0', 'o', ... */
110 /* so the offset of Service name = Parcel header x2 + strlen(android.os.IServiceManager) x2 = 8x2 + 26x2 = 68 */
111 /************************************************************************************************************************/
112 #define MAGIC_SERVICE_NAME_OFFSET 68
114 #define MAX_ENG_TRANS_LOG_BUFF_LEN 10240
116 static int binder_check_buf_pid
;
117 static int binder_check_buf_tid
;
118 static unsigned long binder_log_level
= 0;
121 static int bt_folder
= 0;//just for native backtrace
122 #define TRANS_LOG_LEN 210
123 char large_msg
[TRANS_LOG_LEN
];
125 #define BINDER_PERF_EVAL "V0.1"
128 #ifdef BINDER_PERF_EVAL
129 /* binder_perf_evalue bitmap
131 * ||_ 1: send counter enable
132 * |__ 1: timeout counter enable
134 static unsigned int binder_perf_evalue
= 0;
135 #define BINDER_PERF_SEND_COUNTER 0x1
136 #define BINDER_PERF_TIMEOUT_COUNTER 0x2
139 #define BINDER_DEBUG_ENTRY(name) \
140 static int binder_##name##_open(struct inode *inode, struct file *file) \
142 return single_open(file, binder_##name##_show, inode->i_private); \
145 static const struct file_operations binder_##name##_fops = { \
146 .owner = THIS_MODULE, \
147 .open = binder_##name##_open, \
149 .llseek = seq_lseek, \
150 .release = single_release, \
153 #ifdef BINDER_MONITOR
154 #define BINDER_DEBUG_SETTING_ENTRY(name) \
155 static int binder_##name##_open(struct inode *inode, struct file *file) \
157 return single_open(file, binder_##name##_show, inode->i_private); \
160 static const struct file_operations binder_##name##_fops = { \
161 .owner = THIS_MODULE, \
162 .open = binder_##name##_open, \
164 .write = binder_##name##_write, \
165 .llseek = seq_lseek, \
166 .release = single_release, \
170 //LCH add, for binder pages leakage debug
171 #ifdef CONFIG_MT_ENG_BUILD
172 #define MTK_BINDER_PAGE_USED_RECORD
175 #ifdef MTK_BINDER_PAGE_USED_RECORD
176 static unsigned int binder_page_used
= 0;
177 static unsigned int binder_page_used_peak
= 0;
180 static int binder_proc_show(struct seq_file
*m
, void *unused
);
181 BINDER_DEBUG_ENTRY(proc
);
183 /* This is only defined in include/asm-arm/sizes.h */
189 #define SZ_4M 0x400000
192 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
194 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
197 BINDER_DEBUG_USER_ERROR
= 1U << 0,
198 BINDER_DEBUG_FAILED_TRANSACTION
= 1U << 1,
199 BINDER_DEBUG_DEAD_TRANSACTION
= 1U << 2,
200 BINDER_DEBUG_OPEN_CLOSE
= 1U << 3,
201 BINDER_DEBUG_DEAD_BINDER
= 1U << 4,
202 BINDER_DEBUG_DEATH_NOTIFICATION
= 1U << 5,
203 BINDER_DEBUG_READ_WRITE
= 1U << 6,
204 BINDER_DEBUG_USER_REFS
= 1U << 7,
205 BINDER_DEBUG_THREADS
= 1U << 8,
206 BINDER_DEBUG_TRANSACTION
= 1U << 9,
207 BINDER_DEBUG_TRANSACTION_COMPLETE
= 1U << 10,
208 BINDER_DEBUG_FREE_BUFFER
= 1U << 11,
209 BINDER_DEBUG_INTERNAL_REFS
= 1U << 12,
210 BINDER_DEBUG_BUFFER_ALLOC
= 1U << 13,
211 BINDER_DEBUG_PRIORITY_CAP
= 1U << 14,
212 BINDER_DEBUG_BUFFER_ALLOC_ASYNC
= 1U << 15,
214 static uint32_t binder_debug_mask
= BINDER_DEBUG_USER_ERROR
|
215 BINDER_DEBUG_FAILED_TRANSACTION
| BINDER_DEBUG_DEAD_TRANSACTION
;
216 module_param_named(debug_mask
, binder_debug_mask
, uint
, S_IWUSR
| S_IRUGO
);
218 static bool binder_debug_no_lock
;
219 module_param_named(proc_no_lock
, binder_debug_no_lock
, bool, S_IWUSR
| S_IRUGO
);
221 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait
);
222 static int binder_stop_on_user_error
;
224 static int binder_set_stop_on_user_error(const char *val
,
225 struct kernel_param
*kp
)
228 ret
= param_set_int(val
, kp
);
229 if (binder_stop_on_user_error
< 2)
230 wake_up(&binder_user_error_wait
);
233 module_param_call(stop_on_user_error
, binder_set_stop_on_user_error
,
234 param_get_int
, &binder_stop_on_user_error
, S_IWUSR
| S_IRUGO
);
236 #define binder_debug(mask, x...) \
238 if (binder_debug_mask & mask) \
242 #ifdef BINDER_MONITOR
243 #define binder_user_error(x...) \
245 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
247 if (binder_stop_on_user_error) \
248 binder_stop_on_user_error = 2; \
251 #define binder_user_error(x...) \
253 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
255 if (binder_stop_on_user_error) \
256 binder_stop_on_user_error = 2; \
260 enum binder_stat_types
{
266 BINDER_STAT_TRANSACTION
,
267 BINDER_STAT_TRANSACTION_COMPLETE
,
271 struct binder_stats
{
272 int br
[_IOC_NR(BR_FAILED_REPLY
) + 1];
273 int bc
[_IOC_NR(BC_DEAD_BINDER_DONE
) + 1];
274 int obj_created
[BINDER_STAT_COUNT
];
275 int obj_deleted
[BINDER_STAT_COUNT
];
278 static struct binder_stats binder_stats
;
280 static inline void binder_stats_deleted(enum binder_stat_types type
)
282 binder_stats
.obj_deleted
[type
]++;
285 static inline void binder_stats_created(enum binder_stat_types type
)
287 binder_stats
.obj_created
[type
]++;
290 struct binder_transaction_log_entry
{
301 #ifdef BINDER_MONITOR
303 struct timespec timestamp
;
304 char service
[MAX_SERVICE_NAME_LEN
];
307 struct timespec readstamp
;
308 struct timespec endstamp
;
311 struct binder_transaction_log
{
314 #ifdef BINDER_MONITOR
316 struct binder_transaction_log_entry
*entry
;
318 struct binder_transaction_log_entry entry
[32];
321 static struct binder_transaction_log binder_transaction_log
;
322 static struct binder_transaction_log binder_transaction_log_failed
;
324 static struct binder_transaction_log_entry
*binder_transaction_log_add(
325 struct binder_transaction_log
*log
)
327 struct binder_transaction_log_entry
*e
;
328 e
= &log
->entry
[log
->next
];
329 memset(e
, 0, sizeof(*e
));
331 #ifdef BINDER_MONITOR
332 if (log
->next
== log
->size
) {
337 if (log
->next
== ARRAY_SIZE(log
->entry
)) {
345 #ifdef BINDER_MONITOR
346 static struct binder_transaction_log_entry entry_failed
[32];
348 /* log_disable bitmap
350 * | |||||_ 0: log enable / 1: log disable
351 * | ||||__ 1: self resume
352 * | |||____2: manually trigger kernel warning for buffer allocation
353 * | ||____ 3: 1:rt_inherit log enable / 0: rt_inherit log disable
356 static int log_disable
;
357 #define BINDER_LOG_RESUME 0x2
358 #define BINDER_BUF_WARN 0x4
359 #ifdef RT_PRIO_INHERIT
360 #define BINDER_RT_LOG_ENABLE 0x8
362 #ifdef CONFIG_MTK_EXTMEM
363 extern void* extmem_malloc_page_align(size_t bytes
);
365 static struct binder_transaction_log_entry entry_t
[MAX_ENG_TRANS_LOG_BUFF_LEN
];
369 struct list_head entry
;
371 BINDER_WORK_TRANSACTION
= 1,
372 BINDER_WORK_TRANSACTION_COMPLETE
,
374 BINDER_WORK_DEAD_BINDER
,
375 BINDER_WORK_DEAD_BINDER_AND_CLEAR
,
376 BINDER_WORK_CLEAR_DEATH_NOTIFICATION
,
382 struct binder_work work
;
384 struct rb_node rb_node
;
385 struct hlist_node dead_node
;
387 struct binder_proc
*proc
;
388 struct hlist_head refs
;
389 int internal_strong_refs
;
391 int local_strong_refs
;
392 binder_uintptr_t ptr
;
393 binder_uintptr_t cookie
;
394 unsigned has_strong_ref
:1;
395 unsigned pending_strong_ref
:1;
396 unsigned has_weak_ref
:1;
397 unsigned pending_weak_ref
:1;
398 unsigned has_async_transaction
:1;
399 unsigned accept_fds
:1;
400 unsigned min_priority
:8;
401 struct list_head async_todo
;
402 #ifdef BINDER_MONITOR
403 char name
[MAX_SERVICE_NAME_LEN
];
405 #ifdef MTK_BINDER_DEBUG
410 struct binder_ref_death
{
411 struct binder_work work
;
412 binder_uintptr_t cookie
;
416 /* Lookups needed: */
417 /* node + proc => ref (transaction) */
418 /* desc + proc => ref (transaction, inc/dec ref) */
419 /* node => refs + procs (proc exit) */
421 struct rb_node rb_node_desc
;
422 struct rb_node rb_node_node
;
423 struct hlist_node node_entry
;
424 struct binder_proc
*proc
;
425 struct binder_node
*node
;
429 struct binder_ref_death
*death
;
432 struct binder_buffer
{
433 struct list_head entry
; /* free and allocated entries by address */
434 struct rb_node rb_node
; /* free entry by size or allocated entry */
437 unsigned allow_user_free
:1;
438 unsigned async_transaction
:1;
439 unsigned debug_id
:29;
441 struct binder_transaction
*transaction
;
442 #ifdef BINDER_MONITOR
443 struct binder_transaction_log_entry
*log_entry
;
445 struct binder_node
*target_node
;
451 enum binder_deferred_state
{
452 BINDER_DEFERRED_PUT_FILES
= 0x01,
453 BINDER_DEFERRED_FLUSH
= 0x02,
454 BINDER_DEFERRED_RELEASE
= 0x04,
457 #ifdef BINDER_MONITOR
458 enum wait_on_reason
{
462 WAIT_ON_REPLY_READ
= 3U
466 #ifdef BINDER_PERF_EVAL
467 #define BC_CODE_NR 60
468 #define BC_STATS_NR 30
469 struct binder_bc_stats
{
470 char service
[MAX_SERVICE_NAME_LEN
];
471 unsigned int code
[BC_CODE_NR
];
472 unsigned int code_num
[BC_CODE_NR
];
474 struct binder_timeout_stats
{
475 unsigned long bto
[WAIT_ON_REPLY_READ
];
476 struct timespec read_t
[32];
477 struct timespec exec_t
[32];
478 struct timespec rrply_t
[32];
483 struct hlist_node proc_node
;
484 struct rb_root threads
;
485 struct rb_root nodes
;
486 struct rb_root refs_by_desc
;
487 struct rb_root refs_by_node
;
489 struct vm_area_struct
*vma
;
490 struct mm_struct
*vma_vm_mm
;
491 struct task_struct
*tsk
;
492 struct files_struct
*files
;
493 struct hlist_node deferred_work_node
;
496 ptrdiff_t user_buffer_offset
;
498 struct list_head buffers
;
499 struct rb_root free_buffers
;
500 struct rb_root allocated_buffers
;
501 size_t free_async_space
;
505 uint32_t buffer_free
;
506 struct list_head todo
;
507 wait_queue_head_t wait
;
508 struct binder_stats stats
;
509 struct list_head delivered_death
;
511 int requested_threads
;
512 int requested_threads_started
;
514 long default_priority
;
515 struct dentry
*debugfs_entry
;
516 #ifdef RT_PRIO_INHERIT
517 unsigned long default_rt_prio
:16;
518 unsigned long default_policy
:16;
520 #ifdef BINDER_MONITOR
521 struct binder_buffer
*large_buffer
;
523 #ifdef BINDER_PERF_EVAL
525 struct binder_bc_stats
*bc_stats
[BC_STATS_NR
];
526 struct binder_timeout_stats to_stats
;
528 #ifdef MTK_BINDER_PAGE_USED_RECORD
529 unsigned int page_used
;
530 unsigned int page_used_peak
;
535 BINDER_LOOPER_STATE_REGISTERED
= 0x01,
536 BINDER_LOOPER_STATE_ENTERED
= 0x02,
537 BINDER_LOOPER_STATE_EXITED
= 0x04,
538 BINDER_LOOPER_STATE_INVALID
= 0x08,
539 BINDER_LOOPER_STATE_WAITING
= 0x10,
540 BINDER_LOOPER_STATE_NEED_RETURN
= 0x20
543 struct binder_thread
{
544 struct binder_proc
*proc
;
545 struct rb_node rb_node
;
548 struct binder_transaction
*transaction_stack
;
549 struct list_head todo
;
550 uint32_t return_error
; /* Write failed, return error code in read buf */
551 uint32_t return_error2
; /* Write failed, return error code in read */
552 /* buffer. Used when sending a reply to a dead process that */
553 /* we are also waiting on */
554 wait_queue_head_t wait
;
555 struct binder_stats stats
;
556 #ifdef BINDER_PERF_EVAL
557 struct binder_timeout_stats to_stats
;
562 struct binder_transaction
{
564 struct binder_work work
;
565 struct binder_thread
*from
;
566 struct binder_transaction
*from_parent
;
567 struct binder_proc
*to_proc
;
568 struct binder_thread
*to_thread
;
569 struct binder_transaction
*to_parent
;
570 unsigned need_reply
:1;
571 /* unsigned is_dead:1; */ /* not used at the moment */
573 struct binder_buffer
*buffer
;
579 #ifdef RT_PRIO_INHERIT
580 unsigned long rt_prio
:16;
581 unsigned long policy
:16;
582 unsigned long saved_rt_prio
:16;
583 unsigned long saved_policy
:16;
585 #ifdef BINDER_MONITOR
586 struct timespec timestamp
;
588 enum wait_on_reason wait_on
;
589 enum wait_on_reason bark_on
;
590 struct rb_node rb_node
; /* by bark_time */
591 struct timespec bark_time
;
592 struct timespec exe_timestamp
;
594 char service
[MAX_SERVICE_NAME_LEN
];
599 unsigned int log_idx
;
604 binder_defer_work(struct binder_proc
*proc
, enum binder_deferred_state defer
);
605 static inline void binder_lock(const char *tag
);
606 static inline void binder_unlock(const char *tag
);
608 #ifdef BINDER_MONITOR
609 /* work should be done within how many secs */
610 #define WAIT_BUDGET_READ 2
611 #define WAIT_BUDGET_EXEC 4
612 #define WAIT_BUDGET_MIN min(WAIT_BUDGET_READ, WAIT_BUDGET_EXEC)
614 static struct rb_root bwdog_transacts
;
616 static const char *binder_wait_on_str
[] = {
623 struct binder_timeout_log_entry
{
624 enum wait_on_reason r
;
633 char service
[MAX_SERVICE_NAME_LEN
];
637 struct binder_timeout_log
{
640 #ifdef BINDER_PERF_EVAL
641 struct binder_timeout_log_entry entry
[256];
643 struct binder_timeout_log_entry entry
[64];
647 static struct binder_timeout_log binder_timeout_log_t
;
650 * binder_timeout_log_add - Insert a timeout log
652 static struct binder_timeout_log_entry
*binder_timeout_log_add(void)
654 struct binder_timeout_log
*log
= &binder_timeout_log_t
;
655 struct binder_timeout_log_entry
*e
;
657 e
= &log
->entry
[log
->next
];
658 memset(e
, 0, sizeof(*e
));
660 if (log
->next
== ARRAY_SIZE(log
->entry
)) {
668 * binder_print_bwdog - Output info of a timeout transaction
669 * @t: pointer to the timeout transaction
670 * @cur_in: current timespec while going to print
671 * @e: timeout log entry to record
672 * @r: output reason, either while barking or after barked
674 static void binder_print_bwdog(struct binder_transaction
*t
,
675 struct timespec
*cur_in
,
676 struct binder_timeout_log_entry
*e
,
677 enum wait_on_reason r
)
680 struct timespec
*startime
;
681 struct timespec cur
, sub_t
;
684 memcpy(&cur
, cur_in
, sizeof(struct timespec
));
686 do_posix_clock_monotonic_gettime(&cur
);
687 //monotonic_to_bootbased(&cur);
689 startime
= (r
== WAIT_ON_EXEC
) ? &t
->exe_timestamp
: &t
->timestamp
;
690 sub_t
= timespec_sub(cur
, *startime
);
692 #ifdef BINDER_PERF_EVAL
693 if ( !(cur_in
&& e
) && (binder_perf_evalue
& BINDER_PERF_TIMEOUT_COUNTER
))
701 unsigned long proc_t
= t
->to_proc
->to_stats
.bto
[r
- 1] ++;
702 t
->to_proc
->to_stats
.read_t
[(proc_t
% 32)] = sub_t
;
710 unsigned long proc_t
= t
->to_proc
->to_stats
.bto
[r
- 1] ++;
711 t
->to_proc
->to_stats
.exec_t
[(proc_t
% 32)] = sub_t
;
715 unsigned long thread_t
= t
->to_thread
->to_stats
.bto
[r
- 1] ++;
716 t
->to_thread
->to_stats
.exec_t
[(thread_t
% 32)] = sub_t
;
720 case WAIT_ON_REPLY_READ
:
724 unsigned long proc_t
= t
->to_proc
->to_stats
.bto
[r
- 1] ++;
725 t
->to_proc
->to_stats
.rrply_t
[(proc_t
% 32)] = sub_t
;
729 unsigned long thread_t
= t
->to_thread
->to_stats
.bto
[r
- 1] ++;
730 t
->to_thread
->to_stats
.rrply_t
[(thread_t
% 32)] = sub_t
;
742 rtc_time_to_tm(t
->tv
.tv_sec
, &tm
);
743 pr_debug("%d %s %d:%d to %d:%d %s %u.%03ld "
744 "sec (%s) dex_code %u start_at %lu.%03ld android "
745 "%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
746 t
->debug_id
, binder_wait_on_str
[r
],
747 t
->fproc
, t
->fthrd
, t
->tproc
, t
->tthrd
,
748 (cur_in
&& e
) ? "over" : "total",
749 (unsigned)sub_t
.tv_sec
, (sub_t
.tv_nsec
/ NSEC_PER_MSEC
),
752 (unsigned long)startime
->tv_sec
,
753 (startime
->tv_nsec
/ NSEC_PER_MSEC
),
754 (tm
.tm_year
+ 1900), (tm
.tm_mon
+ 1), tm
.tm_mday
,
755 tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
,
756 (unsigned long)(t
->tv
.tv_usec
/ USEC_PER_MSEC
));
759 e
->over_sec
= sub_t
.tv_sec
;
760 memcpy(&e
->ts
, startime
, sizeof(struct timespec
));
765 * binder_bwdog_safe - Check a transaction is monitor-free or not
766 * @t: pointer to the transaction to check
768 * Returns 1 means safe.
770 static inline int binder_bwdog_safe(struct binder_transaction
*t
)
772 return (t
->wait_on
== WAIT_ON_NONE
) ? 1 : 0;
776 * binder_query_bwdog - Check a transaction is queued or not
777 * @t: pointer to the transaction to check
779 * Returns a pointer points to t, or NULL if it's not queued.
781 static struct rb_node
**binder_query_bwdog(struct binder_transaction
*t
)
783 struct rb_node
**p
= &bwdog_transacts
.rb_node
;
784 struct rb_node
*parent
= NULL
;
785 struct binder_transaction
*transact
= NULL
;
790 transact
= rb_entry(parent
, struct binder_transaction
, rb_node
);
792 comp
= timespec_compare(&t
->bark_time
, &transact
->bark_time
);
804 * binder_queue_bwdog - Queue a transaction to keep tracking
805 * @t: pointer to the transaction being tracked
806 * @budget: seconds, which this transaction can afford
808 static void binder_queue_bwdog(struct binder_transaction
*t
, time_t budget
)
810 struct rb_node
**p
= &bwdog_transacts
.rb_node
;
811 struct rb_node
*parent
= NULL
;
812 struct binder_transaction
*transact
= NULL
;
815 do_posix_clock_monotonic_gettime(&t
->bark_time
);
816 //monotonic_to_bootbased(&t->bark_time);
817 t
->bark_time
.tv_sec
+= budget
;
821 transact
= rb_entry(parent
, struct binder_transaction
, rb_node
);
823 ret
= timespec_compare(&t
->bark_time
, &transact
->bark_time
);
829 pr_info("%d found same key\n",
831 t
->bark_time
.tv_nsec
+= 1;
835 rb_link_node(&t
->rb_node
, parent
, p
);
836 rb_insert_color(&t
->rb_node
, &bwdog_transacts
);
840 * binder_cancel_bwdog - Cancel a transaction from tracking list
841 * @t: pointer to the transaction being cancelled
843 static void binder_cancel_bwdog(struct binder_transaction
*t
)
845 struct rb_node
**p
= NULL
;
847 if (binder_bwdog_safe(t
)) {
849 binder_print_bwdog(t
, NULL
, NULL
, t
->bark_on
);
850 t
->bark_on
= WAIT_ON_NONE
;
855 p
= binder_query_bwdog(t
);
857 pr_err("%d waits %s, but not queued...\n",
858 t
->debug_id
, binder_wait_on_str
[t
->wait_on
]);
862 rb_erase(&t
->rb_node
, &bwdog_transacts
);
863 t
->wait_on
= WAIT_ON_NONE
;
867 * binder_bwdog_bark -
868 * Barking funcion while timeout. Record target process or thread, which
869 * cannot handle transaction in time, including todo list. Also add a log
870 * entry for AMS reference.
872 * @t: pointer to the transaction, which triggers watchdog
873 * @cur: current kernel timespec
875 static void binder_bwdog_bark(struct binder_transaction
*t
, struct timespec
*cur
)
877 struct binder_timeout_log_entry
*e
;
880 if (binder_bwdog_safe(t
)) {
881 pr_debug("%d watched, but wait nothing\n",
886 e
= binder_timeout_log_add();
887 binder_print_bwdog(t
, cur
, e
, t
->wait_on
);
890 e
->from_proc
= t
->fproc
;
891 e
->from_thrd
= t
->fthrd
;
892 e
->debug_id
= t
->debug_id
;
893 memcpy(&e
->tv
, &t
->tv
, sizeof(struct timeval
));
895 switch (t
->wait_on
) {
898 pr_err("%d has NULL target\n",
902 e
->to_proc
= t
->tproc
;
903 e
->to_thrd
= t
->tthrd
;
905 strcpy(e
->service
, t
->service
);
911 pr_err("%d has NULL target for "
912 "execution\n", t
->debug_id
);
915 e
->to_proc
= t
->tproc
;
916 e
->to_thrd
= t
->tthrd
;
918 strcpy(e
->service
, t
->service
);
922 case WAIT_ON_REPLY_READ
: {
924 pr_err("%d has NULL target thread\n",
928 e
->to_proc
= t
->tproc
;
929 e
->to_thrd
= t
->tthrd
;
930 strcpy(e
->service
, "");
944 * binder_bwdog_thread - Main thread to check timeout list periodically
946 static int binder_bwdog_thread(void *__unused
)
948 unsigned long sleep_sec
;
949 struct rb_node
*n
= NULL
;
950 struct timespec cur_time
;
951 struct binder_transaction
*t
= NULL
;
954 binder_lock(__func__
);
955 do_posix_clock_monotonic_gettime(&cur_time
);
956 //monotonic_to_bootbased(&cur_time);
958 for (n
= rb_first(&bwdog_transacts
); n
!= NULL
; n
= rb_next(n
)) {
959 t
= rb_entry(n
, struct binder_transaction
, rb_node
);
960 if (timespec_compare(&cur_time
, &t
->bark_time
) < 0)
963 binder_bwdog_bark(t
, &cur_time
);
964 rb_erase(&t
->rb_node
, &bwdog_transacts
);
965 t
->bark_on
= t
->wait_on
;
966 t
->wait_on
= WAIT_ON_NONE
;
970 sleep_sec
= WAIT_BUDGET_MIN
;
972 sleep_sec
= timespec_sub(t
->bark_time
, cur_time
).tv_sec
;
973 binder_unlock(__func__
);
975 msleep(sleep_sec
* MSEC_PER_SEC
);
977 pr_debug("%s exit...\n", __func__
);
982 * binder_usermodehelper - Call shell to do some command
983 * @cmd: string of command
986 static void binder_usermodehelper(char *cmd
, int w
)
989 char *envp
[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL
};
990 char *argv
[] = {"/system/bin/sh", "-c", "", NULL
};
993 pr_debug("%s\n", argv
[2]);
994 if ((ret
= call_usermodehelper(argv
[0], argv
, envp
, w
)) != 0)
995 pr_err("%s: return %d\n", __func__
, ret
);
999 * find_process_by_pid - convert pid to task_struct
1000 * @pid: pid for convert task
1002 static inline struct task_struct
*find_process_by_pid(pid_t pid
)
1004 return pid
? find_task_by_vpid(pid
) : NULL
;
1007 * binder_find_buffer_sender - find the sender task_struct of this buffer
1008 * @buf binder buffer
1009 * @tsk task_struct of buf sender
1011 static struct task_struct
*binder_find_buffer_sender(struct binder_buffer
*buf
)
1013 struct binder_transaction
*t
;
1014 struct binder_transaction_log_entry
*e
;
1015 struct task_struct
*tsk
;
1016 t
= buf
->transaction
;
1018 tsk
= find_process_by_pid(t
->fproc
);
1023 if ((buf
->debug_id
== e
->debug_id
) && e
->from_proc
)
1024 tsk
= find_process_by_pid(e
->from_proc
);
1032 * copy from /kernel/fs/proc/base.c and modified to get task full name
1034 static int binder_proc_pid_cmdline(struct task_struct
*task
, char * buf
)
1038 struct mm_struct
*mm
;
1039 /*============ add begin =============================*/
1047 /*============ add end ===============================*/
1048 mm
= get_task_mm(task
);
1052 goto out_mm
; /* Shh! No looking before we're done */
1053 /*============ add begin =============================*/
1054 buffer
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
1057 /*============ add end ===============================*/
1059 len
= mm
->arg_end
- mm
->arg_start
;
1061 if (len
> PAGE_SIZE
)
1064 res
= access_process_vm(task
, mm
->arg_start
, buffer
, len
, 0);
1066 // If the nul at the end of args has been overwritten, then
1067 // assume application is using setproctitle(3).
1068 if (res
> 0 && buffer
[res
-1] != '\0' && len
< PAGE_SIZE
) {
1069 len
= strnlen(buffer
, res
);
1073 len
= mm
->env_end
- mm
->env_start
;
1074 if (len
> PAGE_SIZE
- res
)
1075 len
= PAGE_SIZE
- res
;
1076 res
+= access_process_vm(task
, mm
->env_start
, buffer
+res
, len
, 0);
1077 res
= strnlen(buffer
, res
);
1080 /*============ add begin =============================*/
1081 str
= strchr(buffer
, c
);
1083 size
= (unsigned int)(str
- buffer
);
1088 snprintf(buf
, size
, buffer
);
1090 /*============ add end ===============================*/
1098 * binder_print_buf - Print buffer info
1100 * @buffer: target buffer
1101 * @dest: dest string pointer
1102 * @success: does this buffer allocate success
1103 * @check: check this log for owner finding
1105 static void binder_print_buf(struct binder_buffer
*buffer
, char *dest
, int success
, int check
)
1108 struct binder_transaction
*t
= buffer
->transaction
;
1109 char str
[TRANS_LOG_LEN
];
1110 struct task_struct
*sender_tsk
;
1111 struct task_struct
*rec_tsk
;
1112 char sender_name
[256], rec_name
[256];
1115 struct binder_transaction_log_entry
*log_entry
= buffer
->log_entry
;
1116 rtc_time_to_tm(log_entry
->tv
.tv_sec
, &tm
);
1117 if ((log_entry
!= NULL
) && (buffer
->debug_id
== log_entry
->debug_id
))
1119 sender_tsk
= find_process_by_pid(log_entry
->from_proc
);
1120 rec_tsk
= find_process_by_pid(log_entry
->to_proc
);
1121 len_s
= binder_proc_pid_cmdline(sender_tsk
, sender_name
);
1122 len_r
= binder_proc_pid_cmdline(rec_tsk
, rec_name
);
1123 snprintf(str
, sizeof(str
),
1124 "binder:check=%d,success=%d,id=%d,call=%s,type=%s,"
1125 "from=%d,tid=%d,name=%s,to=%d,name=%s,tid=%d,name=%s,"
1126 "size=%zd,node=%d,handle=%d,dex=%u,auf=%d,start=%lu.%03ld,"
1127 "android=%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
1128 check
, success
, buffer
->debug_id
,
1129 buffer
->async_transaction
? "async" : "sync",
1130 (2 == log_entry
->call_type
) ? "reply" :
1131 ((1 == log_entry
->call_type
) ? "async" : "call"),
1132 log_entry
->from_proc
, log_entry
->from_thread
,
1133 len_s
? sender_name
: ((sender_tsk
!= NULL
) ? sender_tsk
->comm
: ""),
1135 len_r
? rec_name
: ((rec_tsk
!= NULL
) ? rec_tsk
->comm
: ""),
1136 log_entry
->to_thread
, log_entry
->service
,
1137 (buffer
->data_size
+ buffer
->offsets_size
),
1138 log_entry
->to_node
, log_entry
->target_handle
, log_entry
->code
,
1139 buffer
->allow_user_free
,
1140 (unsigned long)log_entry
->timestamp
.tv_sec
,
1141 (log_entry
->timestamp
.tv_nsec
/ NSEC_PER_MSEC
),
1142 (tm
.tm_year
+ 1900), (tm
.tm_mon
+ 1), tm
.tm_mday
,
1143 tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
,
1144 (unsigned long)(log_entry
->tv
.tv_usec
/ USEC_PER_MSEC
));
1147 snprintf(str
, sizeof(str
), "binder:check=%d,success=%d,id=%d,call=%s, ,"
1148 ",,,,,,,size=%zd,,,,"
1150 check
, success
, buffer
->debug_id
,
1151 buffer
->async_transaction
? "async" : "sync",
1152 (buffer
->data_size
+ buffer
->offsets_size
),
1153 buffer
->allow_user_free
);
1157 rtc_time_to_tm(t
->tv
.tv_sec
, &tm
);
1158 sender_tsk
= find_process_by_pid(t
->fproc
);
1159 rec_tsk
= find_process_by_pid(t
->tproc
);
1160 len_s
= binder_proc_pid_cmdline(sender_tsk
, sender_name
);
1161 len_r
= binder_proc_pid_cmdline(rec_tsk
, rec_name
);
1162 snprintf(str
, sizeof(str
),
1163 "binder:check=%d,success=%d,id=%d,call=%s,type=%s,"
1164 "from=%d,tid=%d,name=%s,to=%d,name=%s,tid=%d,name=%s,"
1165 "size=%zd,,,dex=%u,auf=%d,start=%lu.%03ld,android="
1166 "%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
1167 check
, success
, t
->debug_id
,
1168 buffer
->async_transaction
? "async" : "sync ",
1169 binder_wait_on_str
[t
->wait_on
],
1171 len_s
? sender_name
: ((sender_tsk
!= NULL
) ? sender_tsk
->comm
: ""),
1173 len_r
? rec_name
: ((rec_tsk
!= NULL
) ? rec_tsk
->comm
: ""),
1174 t
->tthrd
, t
->service
,
1175 (buffer
->data_size
+buffer
->offsets_size
), t
->code
,
1176 buffer
->allow_user_free
,
1177 (unsigned long)t
->timestamp
.tv_sec
,
1178 (t
->timestamp
.tv_nsec
/ NSEC_PER_MSEC
),
1179 (tm
.tm_year
+ 1900), (tm
.tm_mon
+ 1), tm
.tm_mday
,
1180 tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
,
1181 (unsigned long)(t
->tv
.tv_usec
/ USEC_PER_MSEC
));
1183 pr_debug("%s", str
);
1185 strncat(dest
, str
, sizeof(str
));
1189 * binder_check_buf_checked -
1190 * Consider buffer related issue usually makes a series of failure.
1191 * Only care about the first problem time to minimize debug overhead.
1193 static int binder_check_buf_checked(void)
1195 return (binder_check_buf_pid
== -1);
1198 static size_t binder_buffer_size(struct binder_proc
*proc
,
1199 struct binder_buffer
*buffer
);
1202 * binder_check_buf - Dump necessary info for buffer usage analysis
1203 * @target_proc: receiver
1204 * @size: requested size
1205 * @is_async: 1 if an async call
1207 static void binder_check_buf(struct binder_proc
*target_proc
,
1208 size_t size
, int is_async
)
1211 struct binder_buffer
*buffer
;
1213 int large_buffer_count
= 0;
1214 size_t tmp_size
, threshold
;
1215 struct task_struct
*sender
;
1216 struct task_struct
*larger
;
1217 char sender_name
[256], rec_name
[256];
1218 struct timespec exp_timestamp
;
1221 int db_flag
= DB_OPT_BINDER_INFO
;
1224 pr_debug("buffer allocation failed on %d:0 "
1225 "%s from %d:%d size %zd\n",
1227 is_async
? "async" : "call ",
1228 binder_check_buf_pid
, binder_check_buf_tid
, size
);
1230 if (binder_check_buf_checked())
1232 /* check blocked service for async call */
1234 pr_debug("buffer allocation failed on %d:0 "
1235 "(%s) async service blocked\n",
1237 target_proc
->tsk
? target_proc
->tsk
->comm
: "");
1240 pr_debug("%d:0 pending transactions:\n", target_proc
->pid
);
1241 threshold
= target_proc
->buffer_size
/16;
1242 for (n
= rb_last(&target_proc
->allocated_buffers
), i
= 0;
1243 n
; n
= rb_prev(n
), i
++)
1245 buffer
= rb_entry(n
, struct binder_buffer
, rb_node
);
1246 tmp_size
= binder_buffer_size(target_proc
, buffer
);
1247 BUG_ON(buffer
->free
);
1249 if (tmp_size
> threshold
)
1251 if ((NULL
== target_proc
->large_buffer
) ||
1252 (target_proc
->large_buffer
&&
1253 (tmp_size
> binder_buffer_size(target_proc
, target_proc
->large_buffer
))))
1254 target_proc
->large_buffer
= buffer
;
1255 large_buffer_count
++;
1256 binder_print_buf(buffer
, NULL
, 1, 0);
1261 binder_print_buf(buffer
, NULL
, 1, 0);
1264 pr_debug("%d:0 total pending trans: %d(%d large isze)\n",
1265 target_proc
->pid
, i
, large_buffer_count
);
1267 do_posix_clock_monotonic_gettime(&exp_timestamp
);
1268 //monotonic_to_bootbased(&exp_timestamp);
1269 do_gettimeofday(&tv
);
1270 /* consider time zone. translate to android time */
1271 tv
.tv_sec
-= (sys_tz
.tz_minuteswest
* 60);
1272 rtc_time_to_tm(tv
.tv_sec
, &tm
);
1274 sender
= find_process_by_pid(binder_check_buf_pid
);
1275 len_s
= binder_proc_pid_cmdline(sender
, sender_name
);
1276 len_r
= binder_proc_pid_cmdline(target_proc
->tsk
, rec_name
);
1277 if (size
> threshold
)
1279 if (target_proc
->large_buffer
)
1281 pr_debug("on %d:0 the largest pending trans is:\n",
1283 binder_print_buf(target_proc
->large_buffer
, large_msg
, 1, 0);
1285 snprintf(aee_word
, sizeof(aee_word
), "check %s: large binder trans fail on %d:0 size %zd",
1286 len_s
? sender_name
: ((sender
!= NULL
) ? sender
->comm
: ""),
1287 target_proc
->pid
, size
);
1288 snprintf(aee_msg
, sizeof(aee_msg
), "BINDER_BUF_DEBUG\n%s"
1289 "binder:check=%d,success=%d,,call=%s,,from=%d,tid=%d,"
1290 "name=%s,to=%d,name=%s,,,size=%zd,,,,"
1291 ",start=%lu.%03ld,android="
1292 "%d-%02d-%02d %02d:%02d:%02d.%03lu\n"
1293 "large data size,check sender %d(%s)!\n"
1294 "check kernel log for more info\n",
1295 large_msg
, 1, 0, is_async
? "async" : "sync",
1296 binder_check_buf_pid
, binder_check_buf_tid
,
1297 len_s
? sender_name
: ((sender
!= NULL
) ? sender
->comm
: ""),
1300 ((target_proc
->tsk
!= NULL
) ? target_proc
->tsk
->comm
: ""),
1302 (unsigned long)exp_timestamp
.tv_sec
,
1303 (exp_timestamp
.tv_nsec
/ NSEC_PER_MSEC
),
1304 (tm
.tm_year
+ 1900), (tm
.tm_mon
+ 1), tm
.tm_mday
,
1305 tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
,
1306 (unsigned long)(tv
.tv_usec
/ USEC_PER_MSEC
),
1307 binder_check_buf_pid
, sender
? sender
->comm
: "");
1311 if (target_proc
->large_buffer
)
1313 pr_debug("on %d:0 the largest pending trans is:\n",
1315 binder_print_buf(target_proc
->large_buffer
, large_msg
, 1, 1);
1316 larger
= binder_find_buffer_sender(target_proc
->large_buffer
);
1317 snprintf(aee_word
, sizeof(aee_word
), "check %s: large binder trans",
1318 (larger
!= NULL
) ? larger
->comm
: "");
1319 snprintf(aee_msg
, sizeof(aee_msg
), "BINDER_BUF_DEBUG:\n%s"
1320 "binder:check=%d,success=%d,,call=%s,,from=%d,tid=%d,name=%s,"
1321 "to=%d,name=%s,,,size=%zd,,,,"
1322 ",start=%lu.%03ld,android="
1323 "%d-%02d-%02d %02d:%02d:%02d.%03lu\n"
1324 "large data size,check sender %d(%s)!\n"
1325 "check kernel log for more info\n",
1326 large_msg
, 0, 0, is_async
? "async" : "sync",
1327 binder_check_buf_pid
, binder_check_buf_tid
,
1328 len_s
? sender_name
: ((sender
!= NULL
) ? sender
->comm
: ""),
1331 ((target_proc
->tsk
!= NULL
) ? target_proc
->tsk
->comm
: ""),
1333 (unsigned long)exp_timestamp
.tv_sec
,
1334 (exp_timestamp
.tv_nsec
/ NSEC_PER_MSEC
),
1335 (tm
.tm_year
+ 1900), (tm
.tm_mon
+ 1), tm
.tm_mday
,
1336 tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
,
1337 (unsigned long)(tv
.tv_usec
/ USEC_PER_MSEC
),
1338 (larger
!= NULL
) ? larger
->pid
: 0,
1339 (larger
!= NULL
) ? larger
->comm
: "");
1344 snprintf(aee_word
, sizeof(aee_word
), "check %s: binder buffer exhaust ",
1346 ((target_proc
->tsk
!= NULL
) ? target_proc
->tsk
->comm
: ""));
1347 snprintf(aee_msg
, sizeof(aee_msg
), "BINDER_BUF_DEBUG\n"
1348 "binder:check=%d,success=%d,,call=%s,,from=%d,tid=%d,name=%s,"
1349 "to=%d,name=%s,,,size=%zd,,,,"
1350 ",start=%lu.%03ld,android="
1351 "%d-%02d-%02d %02d:%02d:%02d.%03lu\n"
1352 "%d small trans pending, check receiver %d(%s)!\n"
1353 "check kernel log for more info\n",
1354 1, 0, is_async
? "async" : "sync",
1355 binder_check_buf_pid
, binder_check_buf_tid
,
1356 len_s
? sender_name
: ((sender
!= NULL
) ? sender
->comm
: ""),
1359 ((target_proc
->tsk
!= NULL
) ? target_proc
->tsk
->comm
: ""),
1361 (unsigned long)exp_timestamp
.tv_sec
,
1362 (exp_timestamp
.tv_nsec
/ NSEC_PER_MSEC
),
1363 (tm
.tm_year
+ 1900), (tm
.tm_mon
+ 1), tm
.tm_mday
,
1364 tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
,
1365 (unsigned long)(tv
.tv_usec
/ USEC_PER_MSEC
),
1366 i
, target_proc
->pid
,
1367 target_proc
->tsk
? target_proc
->tsk
->comm
: "");
1372 binder_check_buf_pid
= -1;
1373 binder_check_buf_tid
= -1;
1374 aee_kernel_warning_api(__FILE__
, __LINE__
, db_flag
, &aee_word
[0],&aee_msg
[0]);
1378 static int task_get_unused_fd_flags(struct binder_proc
*proc
, int flags
)
1380 struct files_struct
*files
= proc
->files
;
1381 unsigned long rlim_cur
;
1387 if (!lock_task_sighand(proc
->tsk
, &irqs
))
1390 rlim_cur
= task_rlimit(proc
->tsk
, RLIMIT_NOFILE
);
1391 unlock_task_sighand(proc
->tsk
, &irqs
);
1393 return __alloc_fd(files
, 0, rlim_cur
, flags
);
1397 * copied from fd_install
1399 static void task_fd_install(
1400 struct binder_proc
*proc
, unsigned int fd
, struct file
*file
)
1403 __fd_install(proc
->files
, fd
, file
);
1407 * copied from sys_close
1409 static long task_close_fd(struct binder_proc
*proc
, unsigned int fd
)
1413 if (proc
->files
== NULL
)
1416 retval
= __close_fd(proc
->files
, fd
);
1417 /* can't restart close syscall because file table entry was cleared */
1418 if (unlikely(retval
== -ERESTARTSYS
||
1419 retval
== -ERESTARTNOINTR
||
1420 retval
== -ERESTARTNOHAND
||
1421 retval
== -ERESTART_RESTARTBLOCK
))
1427 static inline void binder_lock(const char *tag
)
1429 trace_binder_lock(tag
);
1430 mutex_lock(&binder_main_lock
);
1431 trace_binder_locked(tag
);
1434 static inline void binder_unlock(const char *tag
)
1436 trace_binder_unlock(tag
);
1437 mutex_unlock(&binder_main_lock
);
1440 static void binder_set_nice(long nice
)
1443 if (can_nice(current
, nice
)) {
1444 #ifdef CONFIG_MT_PRIO_TRACER
1445 set_user_nice_binder(current
, nice
);
1447 set_user_nice(current
, nice
);
1451 min_nice
= 20 - current
->signal
->rlim
[RLIMIT_NICE
].rlim_cur
;
1452 binder_debug(BINDER_DEBUG_PRIORITY_CAP
,
1453 "%d: nice value %ld not allowed use %ld instead\n",
1454 current
->pid
, nice
, min_nice
);
1455 #ifdef CONFIG_MT_PRIO_TRACER
1456 set_user_nice_binder(current
, min_nice
);
1458 set_user_nice(current
, min_nice
);
1462 binder_user_error("%d RLIMIT_NICE not set\n", current
->pid
);
1465 static size_t binder_buffer_size(struct binder_proc
*proc
,
1466 struct binder_buffer
*buffer
)
1468 if (list_is_last(&buffer
->entry
, &proc
->buffers
))
1469 return proc
->buffer
+ proc
->buffer_size
- (void *)buffer
->data
;
1471 return (size_t)list_entry(buffer
->entry
.next
,
1472 struct binder_buffer
, entry
) - (size_t)buffer
->data
;
1475 static void binder_insert_free_buffer(struct binder_proc
*proc
,
1476 struct binder_buffer
*new_buffer
)
1478 struct rb_node
**p
= &proc
->free_buffers
.rb_node
;
1479 struct rb_node
*parent
= NULL
;
1480 struct binder_buffer
*buffer
;
1482 size_t new_buffer_size
;
1484 BUG_ON(!new_buffer
->free
);
1486 new_buffer_size
= binder_buffer_size(proc
, new_buffer
);
1488 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
1489 "%d: add free buffer, size %zd, at %pK\n",
1490 proc
->pid
, new_buffer_size
, new_buffer
);
1494 buffer
= rb_entry(parent
, struct binder_buffer
, rb_node
);
1495 BUG_ON(!buffer
->free
);
1497 buffer_size
= binder_buffer_size(proc
, buffer
);
1499 if (new_buffer_size
< buffer_size
)
1500 p
= &parent
->rb_left
;
1502 p
= &parent
->rb_right
;
1504 rb_link_node(&new_buffer
->rb_node
, parent
, p
);
1505 rb_insert_color(&new_buffer
->rb_node
, &proc
->free_buffers
);
1508 static void binder_insert_allocated_buffer(struct binder_proc
*proc
,
1509 struct binder_buffer
*new_buffer
)
1511 struct rb_node
**p
= &proc
->allocated_buffers
.rb_node
;
1512 struct rb_node
*parent
= NULL
;
1513 struct binder_buffer
*buffer
;
1515 BUG_ON(new_buffer
->free
);
1519 buffer
= rb_entry(parent
, struct binder_buffer
, rb_node
);
1520 BUG_ON(buffer
->free
);
1522 if (new_buffer
< buffer
)
1523 p
= &parent
->rb_left
;
1524 else if (new_buffer
> buffer
)
1525 p
= &parent
->rb_right
;
1529 rb_link_node(&new_buffer
->rb_node
, parent
, p
);
1530 rb_insert_color(&new_buffer
->rb_node
, &proc
->allocated_buffers
);
1533 static struct binder_buffer
*binder_buffer_lookup(struct binder_proc
*proc
,
1536 struct rb_node
*n
= proc
->allocated_buffers
.rb_node
;
1537 struct binder_buffer
*buffer
;
1538 struct binder_buffer
*kern_ptr
;
1540 kern_ptr
= (struct binder_buffer
*)(user_ptr
- proc
->user_buffer_offset
1541 - offsetof(struct binder_buffer
, data
));
1544 buffer
= rb_entry(n
, struct binder_buffer
, rb_node
);
1545 BUG_ON(buffer
->free
);
1547 if (kern_ptr
< buffer
)
1549 else if (kern_ptr
> buffer
)
1557 static int binder_update_page_range(struct binder_proc
*proc
, int allocate
,
1558 void *start
, void *end
,
1559 struct vm_area_struct
*vma
)
1562 unsigned long user_page_addr
;
1563 struct vm_struct tmp_area
;
1565 struct mm_struct
*mm
;
1567 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
1568 "%d: %s pages %pK-%pK\n", proc
->pid
,
1569 allocate
? "allocate" : "free", start
, end
);
1574 trace_binder_update_page_range(proc
, allocate
, start
, end
);
1579 mm
= get_task_mm(proc
->tsk
);
1582 down_write(&mm
->mmap_sem
);
1584 if (vma
&& mm
!= proc
->vma_vm_mm
) {
1585 pr_err("%d: vma mm and task mm mismatch\n",
1595 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
1600 for (page_addr
= start
; page_addr
< end
; page_addr
+= PAGE_SIZE
) {
1602 struct page
**page_array_ptr
;
1603 page
= &proc
->pages
[(page_addr
- proc
->buffer
) / PAGE_SIZE
];
1606 *page
= alloc_page(GFP_KERNEL
| __GFP_HIGHMEM
| __GFP_ZERO
);
1607 if (*page
== NULL
) {
1608 pr_err("%d: binder_alloc_buf failed for page at %pK\n",
1609 proc
->pid
, page_addr
);
1610 goto err_alloc_page_failed
;
1612 #ifdef MTK_BINDER_PAGE_USED_RECORD
1615 if(binder_page_used
> binder_page_used_peak
)
1616 binder_page_used_peak
= binder_page_used
;
1617 if (proc
->page_used
> proc
->page_used_peak
)
1618 proc
->page_used_peak
= proc
->page_used
;
1620 tmp_area
.addr
= page_addr
;
1621 tmp_area
.size
= PAGE_SIZE
+ PAGE_SIZE
/* guard page? */;
1622 page_array_ptr
= page
;
1623 ret
= map_vm_area(&tmp_area
, PAGE_KERNEL
, &page_array_ptr
);
1625 pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
1626 proc
->pid
, page_addr
);
1627 goto err_map_kernel_failed
;
1630 (uintptr_t)page_addr
+ proc
->user_buffer_offset
;
1631 ret
= vm_insert_page(vma
, user_page_addr
, page
[0]);
1633 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
1634 proc
->pid
, user_page_addr
);
1635 goto err_vm_insert_page_failed
;
1637 /* vm_insert_page does not seem to increment the refcount */
1640 up_write(&mm
->mmap_sem
);
1646 for (page_addr
= end
- PAGE_SIZE
; page_addr
>= start
;
1647 page_addr
-= PAGE_SIZE
) {
1648 page
= &proc
->pages
[(page_addr
- proc
->buffer
) / PAGE_SIZE
];
1650 zap_page_range(vma
, (uintptr_t)page_addr
+
1651 proc
->user_buffer_offset
, PAGE_SIZE
, NULL
);
1652 err_vm_insert_page_failed
:
1653 unmap_kernel_range((unsigned long)page_addr
, PAGE_SIZE
);
1654 err_map_kernel_failed
:
1657 #ifdef MTK_BINDER_PAGE_USED_RECORD
1658 if(binder_page_used
> 0)
1660 if (proc
->page_used
> 0)
1663 err_alloc_page_failed
:
1668 up_write(&mm
->mmap_sem
);
1674 static struct binder_buffer
*binder_alloc_buf(struct binder_proc
*proc
,
1676 size_t offsets_size
, int is_async
)
1678 struct rb_node
*n
= proc
->free_buffers
.rb_node
;
1679 struct binder_buffer
*buffer
;
1681 struct rb_node
*best_fit
= NULL
;
1682 void *has_page_addr
;
1683 void *end_page_addr
;
1685 #ifdef MTK_BINDER_DEBUG
1686 size_t proc_max_size
;
1688 if (proc
->vma
== NULL
) {
1689 pr_err("%d: binder_alloc_buf, no vma\n",
1694 size
= ALIGN(data_size
, sizeof(void *)) +
1695 ALIGN(offsets_size
, sizeof(void *));
1697 if (size
< data_size
|| size
< offsets_size
) {
1698 binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
1699 proc
->pid
, data_size
, offsets_size
);
1703 #ifdef MTK_BINDER_DEBUG
1704 proc_max_size
= (is_async
? (proc
->buffer_size
/2) : proc
->buffer_size
);
1706 if(proc_max_size
< size
+ sizeof(struct binder_buffer
)){
1707 binder_user_error("%d: got transaction with too large size "
1708 "%s alloc size %zd-%zd allowed size %zd\n", proc
->pid
,
1709 is_async
? "async" : "sync", data_size
, offsets_size
,
1710 (proc_max_size
- sizeof(struct binder_buffer
)));
1715 proc
->free_async_space
< size
+ sizeof(struct binder_buffer
)) {
1716 #ifdef MTK_BINDER_DEBUG
1717 pr_err("%d: binder_alloc_buf size %zd "
1718 "failed, no async space left (%zd)\n",
1719 proc
->pid
, size
, proc
->free_async_space
);
1721 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
1722 "%d: binder_alloc_buf size %zd failed, no async space left\n",
1725 #ifdef BINDER_MONITOR
1726 binder_check_buf(proc
, size
, 1);
1732 buffer
= rb_entry(n
, struct binder_buffer
, rb_node
);
1733 BUG_ON(!buffer
->free
);
1734 buffer_size
= binder_buffer_size(proc
, buffer
);
1736 if (size
< buffer_size
) {
1739 } else if (size
> buffer_size
)
1746 #ifdef BINDER_MONITOR
1747 if (log_disable
& BINDER_BUF_WARN
)
1751 pr_err("%d: binder_alloc_buf size %zd failed, UT auto triggerd!\n",
1753 binder_check_buf(proc
, size
, 0);
1757 if (best_fit
== NULL
) {
1758 pr_err("%d: binder_alloc_buf size %zd failed, "
1759 "no address space\n", proc
->pid
, size
);
1760 #ifdef BINDER_MONITOR
1761 binder_check_buf(proc
, size
, 0);
1766 buffer
= rb_entry(best_fit
, struct binder_buffer
, rb_node
);
1767 buffer_size
= binder_buffer_size(proc
, buffer
);
1770 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
1771 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
1772 proc
->pid
, size
, buffer
, buffer_size
);
1775 (void *)(((uintptr_t)buffer
->data
+ buffer_size
) & PAGE_MASK
);
1777 if (size
+ sizeof(struct binder_buffer
) + 4 >= buffer_size
)
1778 buffer_size
= size
; /* no room for other buffers */
1780 buffer_size
= size
+ sizeof(struct binder_buffer
);
1783 (void *)PAGE_ALIGN((uintptr_t)buffer
->data
+ buffer_size
);
1784 if (end_page_addr
> has_page_addr
)
1785 end_page_addr
= has_page_addr
;
1786 if (binder_update_page_range(proc
, 1,
1787 (void *)PAGE_ALIGN((uintptr_t)buffer
->data
), end_page_addr
, NULL
))
1790 rb_erase(best_fit
, &proc
->free_buffers
);
1792 binder_insert_allocated_buffer(proc
, buffer
);
1793 if (buffer_size
!= size
) {
1794 struct binder_buffer
*new_buffer
= (void *)buffer
->data
+ size
;
1795 list_add(&new_buffer
->entry
, &buffer
->entry
);
1796 new_buffer
->free
= 1;
1797 binder_insert_free_buffer(proc
, new_buffer
);
1799 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
1800 "%d: binder_alloc_buf size %zd got %pK\n",
1801 proc
->pid
, size
, buffer
);
1802 buffer
->data_size
= data_size
;
1803 buffer
->offsets_size
= offsets_size
;
1804 buffer
->async_transaction
= is_async
;
1806 proc
->free_async_space
-= size
+ sizeof(struct binder_buffer
);
1807 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC
,
1808 "%d: binder_alloc_buf size %zd async free %zd\n",
1809 proc
->pid
, size
, proc
->free_async_space
);
1815 static void *buffer_start_page(struct binder_buffer
*buffer
)
1817 return (void *)((uintptr_t)buffer
& PAGE_MASK
);
1820 static void *buffer_end_page(struct binder_buffer
*buffer
)
1822 return (void *)(((uintptr_t)(buffer
+ 1) - 1) & PAGE_MASK
);
1825 static void binder_delete_free_buffer(struct binder_proc
*proc
,
1826 struct binder_buffer
*buffer
)
1828 struct binder_buffer
*prev
, *next
= NULL
;
1829 int free_page_end
= 1;
1830 int free_page_start
= 1;
1832 BUG_ON(proc
->buffers
.next
== &buffer
->entry
);
1833 prev
= list_entry(buffer
->entry
.prev
, struct binder_buffer
, entry
);
1834 BUG_ON(!prev
->free
);
1835 if (buffer_end_page(prev
) == buffer_start_page(buffer
)) {
1836 free_page_start
= 0;
1837 if (buffer_end_page(prev
) == buffer_end_page(buffer
))
1839 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
1840 "%d: merge free, buffer %pK share page with %pK\n",
1841 proc
->pid
, buffer
, prev
);
1844 if (!list_is_last(&buffer
->entry
, &proc
->buffers
)) {
1845 next
= list_entry(buffer
->entry
.next
,
1846 struct binder_buffer
, entry
);
1847 if (buffer_start_page(next
) == buffer_end_page(buffer
)) {
1849 if (buffer_start_page(next
) ==
1850 buffer_start_page(buffer
))
1851 free_page_start
= 0;
1852 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
1853 "%d: merge free, buffer %pK share page with %pK\n",
1854 proc
->pid
, buffer
, prev
);
1857 list_del(&buffer
->entry
);
1858 if (free_page_start
|| free_page_end
) {
1859 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
1860 "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
1861 proc
->pid
, buffer
, free_page_start
? "" : " end",
1862 free_page_end
? "" : " start", prev
, next
);
1863 binder_update_page_range(proc
, 0, free_page_start
?
1864 buffer_start_page(buffer
) : buffer_end_page(buffer
),
1865 (free_page_end
? buffer_end_page(buffer
) :
1866 buffer_start_page(buffer
)) + PAGE_SIZE
, NULL
);
1870 static void binder_free_buf(struct binder_proc
*proc
,
1871 struct binder_buffer
*buffer
)
1873 size_t size
, buffer_size
;
1875 buffer_size
= binder_buffer_size(proc
, buffer
);
1877 size
= ALIGN(buffer
->data_size
, sizeof(void *)) +
1878 ALIGN(buffer
->offsets_size
, sizeof(void *));
1880 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
1881 "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
1882 proc
->pid
, buffer
, size
, buffer_size
);
1884 BUG_ON(buffer
->free
);
1885 BUG_ON(size
> buffer_size
);
1886 BUG_ON(buffer
->transaction
!= NULL
);
1887 BUG_ON((void *)buffer
< proc
->buffer
);
1888 BUG_ON((void *)buffer
> proc
->buffer
+ proc
->buffer_size
);
1889 #ifdef BINDER_MONITOR
1890 buffer
->log_entry
= NULL
;
1893 if (buffer
->async_transaction
) {
1894 proc
->free_async_space
+= size
+ sizeof(struct binder_buffer
);
1896 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC
,
1897 "%d: binder_free_buf size %zd async free %zd\n",
1898 proc
->pid
, size
, proc
->free_async_space
);
1901 binder_update_page_range(proc
, 0,
1902 (void *)PAGE_ALIGN((uintptr_t)buffer
->data
),
1903 (void *)(((uintptr_t)buffer
->data
+ buffer_size
) & PAGE_MASK
),
1905 rb_erase(&buffer
->rb_node
, &proc
->allocated_buffers
);
1907 if (!list_is_last(&buffer
->entry
, &proc
->buffers
)) {
1908 struct binder_buffer
*next
= list_entry(buffer
->entry
.next
,
1909 struct binder_buffer
, entry
);
1911 rb_erase(&next
->rb_node
, &proc
->free_buffers
);
1912 binder_delete_free_buffer(proc
, next
);
1915 if (proc
->buffers
.next
!= &buffer
->entry
) {
1916 struct binder_buffer
*prev
= list_entry(buffer
->entry
.prev
,
1917 struct binder_buffer
, entry
);
1919 binder_delete_free_buffer(proc
, buffer
);
1920 rb_erase(&prev
->rb_node
, &proc
->free_buffers
);
1924 binder_insert_free_buffer(proc
, buffer
);
1927 static struct binder_node
*binder_get_node(struct binder_proc
*proc
,
1928 binder_uintptr_t ptr
)
1930 struct rb_node
*n
= proc
->nodes
.rb_node
;
1931 struct binder_node
*node
;
1934 node
= rb_entry(n
, struct binder_node
, rb_node
);
1936 if (ptr
< node
->ptr
)
1938 else if (ptr
> node
->ptr
)
1946 static struct binder_node
*binder_new_node(struct binder_proc
*proc
,
1947 binder_uintptr_t ptr
,
1948 binder_uintptr_t cookie
)
1950 struct rb_node
**p
= &proc
->nodes
.rb_node
;
1951 struct rb_node
*parent
= NULL
;
1952 struct binder_node
*node
;
1956 node
= rb_entry(parent
, struct binder_node
, rb_node
);
1958 if (ptr
< node
->ptr
)
1960 else if (ptr
> node
->ptr
)
1961 p
= &(*p
)->rb_right
;
1966 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
1969 binder_stats_created(BINDER_STAT_NODE
);
1970 rb_link_node(&node
->rb_node
, parent
, p
);
1971 rb_insert_color(&node
->rb_node
, &proc
->nodes
);
1972 node
->debug_id
= ++binder_last_id
;
1975 node
->cookie
= cookie
;
1976 node
->work
.type
= BINDER_WORK_NODE
;
1977 INIT_LIST_HEAD(&node
->work
.entry
);
1978 INIT_LIST_HEAD(&node
->async_todo
);
1979 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1980 "%d:%d node %d u%016llx c%016llx created\n",
1981 proc
->pid
, current
->pid
, node
->debug_id
,
1982 (u64
)node
->ptr
, (u64
)node
->cookie
);
1986 static int binder_inc_node(struct binder_node
*node
, int strong
, int internal
,
1987 struct list_head
*target_list
)
1991 if (target_list
== NULL
&&
1992 node
->internal_strong_refs
== 0 &&
1993 !(node
== binder_context_mgr_node
&&
1994 node
->has_strong_ref
)) {
1995 pr_err("invalid inc strong node for %d\n",
1999 node
->internal_strong_refs
++;
2001 node
->local_strong_refs
++;
2002 if (!node
->has_strong_ref
&& target_list
) {
2003 list_del_init(&node
->work
.entry
);
2004 list_add_tail(&node
->work
.entry
, target_list
);
2008 node
->local_weak_refs
++;
2009 if (!node
->has_weak_ref
&& list_empty(&node
->work
.entry
)) {
2010 if (target_list
== NULL
) {
2011 pr_err("invalid inc weak node for %d\n",
2015 list_add_tail(&node
->work
.entry
, target_list
);
2021 static int binder_dec_node(struct binder_node
*node
, int strong
, int internal
)
2025 node
->internal_strong_refs
--;
2027 node
->local_strong_refs
--;
2028 if (node
->local_strong_refs
|| node
->internal_strong_refs
)
2032 node
->local_weak_refs
--;
2033 if (node
->local_weak_refs
|| !hlist_empty(&node
->refs
))
2036 if (node
->proc
&& (node
->has_strong_ref
|| node
->has_weak_ref
)) {
2037 if (list_empty(&node
->work
.entry
)) {
2038 list_add_tail(&node
->work
.entry
, &node
->proc
->todo
);
2039 wake_up_interruptible(&node
->proc
->wait
);
2042 if (hlist_empty(&node
->refs
) && !node
->local_strong_refs
&&
2043 !node
->local_weak_refs
) {
2044 list_del_init(&node
->work
.entry
);
2046 rb_erase(&node
->rb_node
, &node
->proc
->nodes
);
2047 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
2048 "refless node %d deleted\n",
2051 hlist_del(&node
->dead_node
);
2052 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
2053 "dead node %d deleted\n",
2057 binder_stats_deleted(BINDER_STAT_NODE
);
2065 static struct binder_ref
*binder_get_ref(struct binder_proc
*proc
,
2066 uint32_t desc
, bool need_strong_ref
)
2068 struct rb_node
*n
= proc
->refs_by_desc
.rb_node
;
2069 struct binder_ref
*ref
;
2072 ref
= rb_entry(n
, struct binder_ref
, rb_node_desc
);
2074 if (desc
< ref
->desc
) {
2076 } else if (desc
> ref
->desc
) {
2078 } else if (need_strong_ref
&& !ref
->strong
) {
2079 binder_user_error("tried to use weak ref as strong ref\n");
2088 static struct binder_ref
*binder_get_ref_for_node(struct binder_proc
*proc
,
2089 struct binder_node
*node
)
2092 struct rb_node
**p
= &proc
->refs_by_node
.rb_node
;
2093 struct rb_node
*parent
= NULL
;
2094 struct binder_ref
*ref
, *new_ref
;
2098 ref
= rb_entry(parent
, struct binder_ref
, rb_node_node
);
2100 if (node
< ref
->node
)
2102 else if (node
> ref
->node
)
2103 p
= &(*p
)->rb_right
;
2107 new_ref
= kzalloc(sizeof(*ref
), GFP_KERNEL
);
2108 if (new_ref
== NULL
)
2110 binder_stats_created(BINDER_STAT_REF
);
2111 new_ref
->debug_id
= ++binder_last_id
;
2112 new_ref
->proc
= proc
;
2113 new_ref
->node
= node
;
2114 rb_link_node(&new_ref
->rb_node_node
, parent
, p
);
2115 rb_insert_color(&new_ref
->rb_node_node
, &proc
->refs_by_node
);
2117 new_ref
->desc
= (node
== binder_context_mgr_node
) ? 0 : 1;
2118 for (n
= rb_first(&proc
->refs_by_desc
); n
!= NULL
; n
= rb_next(n
)) {
2119 ref
= rb_entry(n
, struct binder_ref
, rb_node_desc
);
2120 if (ref
->desc
> new_ref
->desc
)
2122 new_ref
->desc
= ref
->desc
+ 1;
2125 p
= &proc
->refs_by_desc
.rb_node
;
2128 ref
= rb_entry(parent
, struct binder_ref
, rb_node_desc
);
2130 if (new_ref
->desc
< ref
->desc
)
2132 else if (new_ref
->desc
> ref
->desc
)
2133 p
= &(*p
)->rb_right
;
2137 rb_link_node(&new_ref
->rb_node_desc
, parent
, p
);
2138 rb_insert_color(&new_ref
->rb_node_desc
, &proc
->refs_by_desc
);
2140 hlist_add_head(&new_ref
->node_entry
, &node
->refs
);
2142 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
2143 "%d new ref %d desc %d for node %d\n",
2144 proc
->pid
, new_ref
->debug_id
, new_ref
->desc
,
2147 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
2148 "%d new ref %d desc %d for dead node\n",
2149 proc
->pid
, new_ref
->debug_id
, new_ref
->desc
);
2154 static void binder_delete_ref(struct binder_ref
*ref
)
2156 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
2157 "%d delete ref %d desc %d for node %d\n",
2158 ref
->proc
->pid
, ref
->debug_id
, ref
->desc
,
2159 ref
->node
->debug_id
);
2161 rb_erase(&ref
->rb_node_desc
, &ref
->proc
->refs_by_desc
);
2162 rb_erase(&ref
->rb_node_node
, &ref
->proc
->refs_by_node
);
2164 binder_dec_node(ref
->node
, 1, 1);
2165 hlist_del(&ref
->node_entry
);
2166 binder_dec_node(ref
->node
, 0, 1);
2168 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
2169 "%d delete ref %d desc %d has death notification\n",
2170 ref
->proc
->pid
, ref
->debug_id
, ref
->desc
);
2171 list_del(&ref
->death
->work
.entry
);
2173 binder_stats_deleted(BINDER_STAT_DEATH
);
2176 binder_stats_deleted(BINDER_STAT_REF
);
2179 static int binder_inc_ref(struct binder_ref
*ref
, int strong
,
2180 struct list_head
*target_list
)
2184 if (ref
->strong
== 0) {
2185 ret
= binder_inc_node(ref
->node
, 1, 1, target_list
);
2191 if (ref
->weak
== 0) {
2192 ret
= binder_inc_node(ref
->node
, 0, 1, target_list
);
2202 static int binder_dec_ref(struct binder_ref
*ref
, int strong
)
2205 if (ref
->strong
== 0) {
2206 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
2207 ref
->proc
->pid
, ref
->debug_id
,
2208 ref
->desc
, ref
->strong
, ref
->weak
);
2212 if (ref
->strong
== 0) {
2214 ret
= binder_dec_node(ref
->node
, strong
, 1);
2219 if (ref
->weak
== 0) {
2220 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
2221 ref
->proc
->pid
, ref
->debug_id
,
2222 ref
->desc
, ref
->strong
, ref
->weak
);
2227 if (ref
->strong
== 0 && ref
->weak
== 0)
2228 binder_delete_ref(ref
);
2232 static void binder_pop_transaction(struct binder_thread
*target_thread
,
2233 struct binder_transaction
*t
)
2235 if (target_thread
) {
2236 BUG_ON(target_thread
->transaction_stack
!= t
);
2237 BUG_ON(target_thread
->transaction_stack
->from
!= target_thread
);
2238 target_thread
->transaction_stack
=
2239 target_thread
->transaction_stack
->from_parent
;
2244 t
->buffer
->transaction
= NULL
;
2245 #ifdef BINDER_MONITOR
2246 binder_cancel_bwdog(t
);
2249 binder_stats_deleted(BINDER_STAT_TRANSACTION
);
2252 static void binder_send_failed_reply(struct binder_transaction
*t
,
2253 uint32_t error_code
)
2255 struct binder_thread
*target_thread
;
2256 BUG_ON(t
->flags
& TF_ONE_WAY
);
2258 target_thread
= t
->from
;
2259 if (target_thread
) {
2260 if (target_thread
->return_error
!= BR_OK
&&
2261 target_thread
->return_error2
== BR_OK
) {
2262 target_thread
->return_error2
=
2263 target_thread
->return_error
;
2264 target_thread
->return_error
= BR_OK
;
2266 if (target_thread
->return_error
== BR_OK
) {
2267 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
2268 "send failed reply for transaction %d to %d:%d\n",
2269 t
->debug_id
, target_thread
->proc
->pid
,
2270 target_thread
->pid
);
2272 binder_pop_transaction(target_thread
, t
);
2273 target_thread
->return_error
= error_code
;
2274 wake_up_interruptible(&target_thread
->wait
);
2276 pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
2277 target_thread
->proc
->pid
,
2279 target_thread
->return_error
);
2283 struct binder_transaction
*next
= t
->from_parent
;
2285 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
2286 "send failed reply for transaction %d, target dead\n",
2289 binder_pop_transaction(target_thread
, t
);
2291 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
2292 "reply failed, no target thread at root\n");
2296 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
2297 "reply failed, no target thread -- retry %d\n",
2303 static void binder_transaction_buffer_release(struct binder_proc
*proc
,
2304 struct binder_buffer
*buffer
,
2305 binder_size_t
*failed_at
)
2307 binder_size_t
*offp
, *off_end
;
2308 int debug_id
= buffer
->debug_id
;
2310 binder_debug(BINDER_DEBUG_TRANSACTION
,
2311 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2312 proc
->pid
, buffer
->debug_id
,
2313 buffer
->data_size
, buffer
->offsets_size
, failed_at
);
2315 if (buffer
->target_node
)
2316 binder_dec_node(buffer
->target_node
, 1, 0);
2318 offp
= (binder_size_t
*)(buffer
->data
+
2319 ALIGN(buffer
->data_size
, sizeof(void *)));
2321 off_end
= failed_at
;
2323 off_end
= (void *)offp
+ buffer
->offsets_size
;
2324 for (; offp
< off_end
; offp
++) {
2325 struct flat_binder_object
*fp
;
2326 if (*offp
> buffer
->data_size
- sizeof(*fp
) ||
2327 buffer
->data_size
< sizeof(*fp
) ||
2328 !IS_ALIGNED(*offp
, sizeof(u32
))) {
2329 pr_err("transaction release %d bad offset %lld, size %zd\n",
2330 debug_id
, (u64
)*offp
, buffer
->data_size
);
2333 fp
= (struct flat_binder_object
*)(buffer
->data
+ *offp
);
2335 case BINDER_TYPE_BINDER
:
2336 case BINDER_TYPE_WEAK_BINDER
: {
2337 struct binder_node
*node
= binder_get_node(proc
, fp
->binder
);
2339 pr_err("transaction release %d bad node %016llx\n",
2340 debug_id
, (u64
)fp
->binder
);
2343 binder_debug(BINDER_DEBUG_TRANSACTION
,
2344 " node %d u%016llx\n",
2345 node
->debug_id
, (u64
)node
->ptr
);
2346 binder_dec_node(node
, fp
->type
== BINDER_TYPE_BINDER
, 0);
2348 case BINDER_TYPE_HANDLE
:
2349 case BINDER_TYPE_WEAK_HANDLE
: {
2350 struct binder_ref
*ref
= binder_get_ref(proc
, fp
->handle
,
2351 fp
->type
== BINDER_TYPE_HANDLE
);
2353 pr_err("transaction release %d bad handle %d\n",
2354 debug_id
, fp
->handle
);
2357 binder_debug(BINDER_DEBUG_TRANSACTION
,
2358 " ref %d desc %d (node %d)\n",
2359 ref
->debug_id
, ref
->desc
, ref
->node
->debug_id
);
2360 binder_dec_ref(ref
, fp
->type
== BINDER_TYPE_HANDLE
);
2363 case BINDER_TYPE_FD
:
2364 binder_debug(BINDER_DEBUG_TRANSACTION
,
2365 " fd %d\n", fp
->handle
);
2367 task_close_fd(proc
, fp
->handle
);
2371 pr_err("transaction release %d bad object type %x\n",
2372 debug_id
, fp
->type
);
#ifdef RT_PRIO_INHERIT
/*
 * mt_sched_setscheduler_nocheck - MTK wrapper around
 * sched_setscheduler_nocheck(); for RT policies it tags the priority
 * with MT_ALLOW_RT_PRIO_BIT so the scheduler accepts the binder boost.
 */
static void mt_sched_setscheduler_nocheck(struct task_struct *p, int policy, struct sched_param *param)
{
	int ret;

	if (policy == SCHED_FIFO || policy == SCHED_RR)
		param->sched_priority |= MT_ALLOW_RT_PRIO_BIT;

#ifdef CONFIG_MT_PRIO_TRACER
	ret = sched_setscheduler_nocheck_binder(p, policy, param);
#else
	ret = sched_setscheduler_nocheck(p, policy, param);
#endif
	if (ret)
		pr_err("set scheduler fail, error code: %d\n", ret);
}
#endif
2395 #ifdef BINDER_MONITOR
2396 /* binder_update_transaction_time - update read/exec done time for transaction
2398 ** 0: start // not used
2402 static void binder_update_transaction_time(struct binder_transaction_log
*t_log
,
2403 struct binder_transaction
*bt
, int step
)
2405 if (step
< 1 || step
> 2) {
2406 pr_err("update trans time fail, wrong step value for id %d\n",
2411 if ((NULL
== bt
) || (bt
->log_idx
== -1) || (bt
->log_idx
> (t_log
->size
- 1)))
2413 if (t_log
->entry
[bt
->log_idx
].debug_id
== bt
->debug_id
) {
2415 do_posix_clock_monotonic_gettime(&t_log
->entry
[bt
->log_idx
].readstamp
);
2417 do_posix_clock_monotonic_gettime(&t_log
->entry
[bt
->log_idx
].endstamp
);
2420 /* binder_update_transaction_tid - update to thread pid transaction
2422 static void binder_update_transaction_ttid(struct binder_transaction_log
*t_log
,
2423 struct binder_transaction
*bt
)
2425 if ((NULL
== bt
) || (NULL
== t_log
))
2427 if ((bt
->log_idx
== -1) || (bt
->log_idx
> (t_log
->size
- 1)))
2431 if ((t_log
->entry
[bt
->log_idx
].debug_id
== bt
->debug_id
) &&
2432 (t_log
->entry
[bt
->log_idx
].to_thread
== 0)){
2433 t_log
->entry
[bt
->log_idx
].to_thread
= bt
->tthrd
;
2439 static void binder_transaction(struct binder_proc
*proc
,
2440 struct binder_thread
*thread
,
2441 struct binder_transaction_data
*tr
, int reply
)
2443 struct binder_transaction
*t
;
2444 struct binder_work
*tcomplete
;
2445 binder_size_t
*offp
, *off_end
;
2446 binder_size_t off_min
;
2447 struct binder_proc
*target_proc
;
2448 struct binder_thread
*target_thread
= NULL
;
2449 struct binder_node
*target_node
= NULL
;
2450 struct list_head
*target_list
;
2451 wait_queue_head_t
*target_wait
;
2452 struct binder_transaction
*in_reply_to
= NULL
;
2453 struct binder_transaction_log_entry
*e
;
2454 uint32_t return_error
;
2456 #ifdef BINDER_MONITOR
2457 struct binder_transaction_log_entry log_entry
;
2458 unsigned int log_idx
= -1;
2460 if ((reply
&& (tr
->data_size
< (proc
->buffer_size
/16))) || log_disable
)
2464 e
= binder_transaction_log_add(&binder_transaction_log
);
2465 if (binder_transaction_log
.next
)
2466 log_idx
= binder_transaction_log
.next
- 1;
2468 log_idx
= binder_transaction_log
.size
- 1;
2471 e
= binder_transaction_log_add(&binder_transaction_log
);
2473 e
->call_type
= reply
? 2 : !!(tr
->flags
& TF_ONE_WAY
);
2474 e
->from_proc
= proc
->pid
;
2475 e
->from_thread
= thread
->pid
;
2476 e
->target_handle
= tr
->target
.handle
;
2477 e
->data_size
= tr
->data_size
;
2478 e
->offsets_size
= tr
->offsets_size
;
2479 #ifdef BINDER_MONITOR
2481 /* fd 0 is also valid... set initial value to -1 */
2483 do_posix_clock_monotonic_gettime(&e
->timestamp
);
2484 //monotonic_to_bootbased(&e->timestamp);
2486 do_gettimeofday(&e
->tv
);
2487 /* consider time zone. translate to android time */
2488 e
->tv
.tv_sec
-= (sys_tz
.tz_minuteswest
* 60);
2492 in_reply_to
= thread
->transaction_stack
;
2493 if (in_reply_to
== NULL
) {
2494 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2495 proc
->pid
, thread
->pid
);
2496 return_error
= BR_FAILED_REPLY
;
2497 goto err_empty_call_stack
;
2499 #ifdef BINDER_MONITOR
2500 binder_cancel_bwdog(in_reply_to
);
2502 binder_set_nice(in_reply_to
->saved_priority
);
2503 #ifdef RT_PRIO_INHERIT
2504 if (rt_task(current
) && (MAX_RT_PRIO
!= in_reply_to
->saved_rt_prio
) &&
2505 !(thread
->looper
& (BINDER_LOOPER_STATE_REGISTERED
|
2506 BINDER_LOOPER_STATE_ENTERED
))) {
2507 struct sched_param param
= {
2508 .sched_priority
= in_reply_to
->saved_rt_prio
,
2510 mt_sched_setscheduler_nocheck(current
,
2511 in_reply_to
->saved_policy
, ¶m
);
2512 #ifdef BINDER_MONITOR
2513 if (log_disable
& BINDER_RT_LOG_ENABLE
)
2515 pr_debug("reply reset %d sched_policy from %d to %d rt_prio from %d to %d\n",
2516 proc
->pid
, in_reply_to
->policy
, in_reply_to
->saved_policy
,
2517 in_reply_to
->rt_prio
, in_reply_to
->saved_rt_prio
);
2522 if (in_reply_to
->to_thread
!= thread
) {
2523 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2524 proc
->pid
, thread
->pid
, in_reply_to
->debug_id
,
2525 in_reply_to
->to_proc
?
2526 in_reply_to
->to_proc
->pid
: 0,
2527 in_reply_to
->to_thread
?
2528 in_reply_to
->to_thread
->pid
: 0);
2529 return_error
= BR_FAILED_REPLY
;
2531 goto err_bad_call_stack
;
2533 thread
->transaction_stack
= in_reply_to
->to_parent
;
2534 target_thread
= in_reply_to
->from
;
2535 if (target_thread
== NULL
) {
2536 #ifdef MTK_BINDER_DEBUG
2537 binder_user_error("%d:%d got reply transaction "
2538 "with bad transaction reply_from, "
2539 "transaction %d has target %d:%d\n",
2540 proc
->pid
, thread
->pid
, in_reply_to
->debug_id
,
2541 in_reply_to
->to_proc
?
2542 in_reply_to
->to_proc
->pid
: 0,
2543 in_reply_to
->to_thread
?
2544 in_reply_to
->to_thread
->pid
: 0);
2546 return_error
= BR_DEAD_REPLY
;
2547 goto err_dead_binder
;
2549 if (target_thread
->transaction_stack
!= in_reply_to
) {
2550 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2551 proc
->pid
, thread
->pid
,
2552 target_thread
->transaction_stack
?
2553 target_thread
->transaction_stack
->debug_id
: 0,
2554 in_reply_to
->debug_id
);
2555 return_error
= BR_FAILED_REPLY
;
2557 target_thread
= NULL
;
2558 goto err_dead_binder
;
2560 target_proc
= target_thread
->proc
;
2561 #ifdef BINDER_MONITOR
2562 e
->service
[0] = '\0';
2565 if (tr
->target
.handle
) {
2566 struct binder_ref
*ref
;
2567 ref
= binder_get_ref(proc
, tr
->target
.handle
, true);
2569 binder_user_error("%d:%d got transaction to invalid handle\n",
2570 proc
->pid
, thread
->pid
);
2571 return_error
= BR_FAILED_REPLY
;
2572 goto err_invalid_target_handle
;
2574 target_node
= ref
->node
;
2576 target_node
= binder_context_mgr_node
;
2577 if (target_node
== NULL
) {
2578 #ifdef MTK_BINDER_DEBUG
2579 binder_user_error("%d:%d "
2580 "binder_context_mgr_node is NULL\n",
2581 proc
->pid
, thread
->pid
);
2583 return_error
= BR_DEAD_REPLY
;
2584 goto err_no_context_mgr_node
;
2587 e
->to_node
= target_node
->debug_id
;
2588 #ifdef BINDER_MONITOR
2589 strcpy(e
->service
, target_node
->name
);
2591 target_proc
= target_node
->proc
;
2592 if (target_proc
== NULL
) {
2593 #ifdef MTK_BINDER_DEBUG
2594 binder_user_error("%d:%d target_proc is NULL\n",
2595 proc
->pid
, thread
->pid
);
2597 return_error
= BR_DEAD_REPLY
;
2598 goto err_dead_binder
;
2600 if (security_binder_transaction(proc
->tsk
, target_proc
->tsk
) < 0) {
2601 return_error
= BR_FAILED_REPLY
;
2602 goto err_invalid_target_handle
;
2604 if (!(tr
->flags
& TF_ONE_WAY
) && thread
->transaction_stack
) {
2605 struct binder_transaction
*tmp
;
2606 tmp
= thread
->transaction_stack
;
2607 if (tmp
->to_thread
!= thread
) {
2608 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2609 proc
->pid
, thread
->pid
, tmp
->debug_id
,
2610 tmp
->to_proc
? tmp
->to_proc
->pid
: 0,
2612 tmp
->to_thread
->pid
: 0);
2613 return_error
= BR_FAILED_REPLY
;
2614 goto err_bad_call_stack
;
2617 if (tmp
->from
&& tmp
->from
->proc
== target_proc
)
2618 target_thread
= tmp
->from
;
2619 tmp
= tmp
->from_parent
;
2623 if (target_thread
) {
2624 e
->to_thread
= target_thread
->pid
;
2625 target_list
= &target_thread
->todo
;
2626 target_wait
= &target_thread
->wait
;
2628 target_list
= &target_proc
->todo
;
2629 target_wait
= &target_proc
->wait
;
2631 e
->to_proc
= target_proc
->pid
;
2633 /* TODO: reuse incoming transaction for reply */
2634 t
= kzalloc(sizeof(*t
), GFP_KERNEL
);
2636 #ifdef MTK_BINDER_DEBUG
2637 binder_user_error("%d:%d transaction allocation failed\n",
2638 proc
->pid
, thread
->pid
);
2640 return_error
= BR_FAILED_REPLY
;
2641 goto err_alloc_t_failed
;
2643 #ifdef BINDER_MONITOR
2644 memcpy(&t
->timestamp
, &e
->timestamp
, sizeof(struct timespec
));
2645 //do_gettimeofday(&t->tv);
2646 /* consider time zone. translate to android time */
2647 //t->tv.tv_sec -= (sys_tz.tz_minuteswest * 60);
2648 memcpy(&t
->tv
, &e
->tv
, sizeof(struct timeval
));
2650 strcpy(t
->service
, target_node
->name
);
2652 binder_stats_created(BINDER_STAT_TRANSACTION
);
2654 tcomplete
= kzalloc(sizeof(*tcomplete
), GFP_KERNEL
);
2655 if (tcomplete
== NULL
) {
2656 #ifdef MTK_BINDER_DEBUG
2657 binder_user_error("%d:%d tcomplete allocation failed\n",
2658 proc
->pid
, thread
->pid
);
2660 return_error
= BR_FAILED_REPLY
;
2661 goto err_alloc_tcomplete_failed
;
2663 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE
);
2665 t
->debug_id
= ++binder_last_id
;
2666 e
->debug_id
= t
->debug_id
;
2668 #ifdef BINDER_PERF_EVAL
2669 if (!reply
&& (binder_perf_evalue
& BINDER_PERF_SEND_COUNTER
))
2674 for (i
= 0; i
< BC_STATS_NR
; i
++)
2676 if (proc
->bc_stats
[i
] == NULL
)
2677 proc
->bc_stats
[i
] = kzalloc(sizeof(struct binder_bc_stats
), GFP_KERNEL
);
2678 if (proc
->bc_stats
[i
] == NULL
) {
2679 pr_err("perf_e kzalloc fail for proc %d bc_stats[%d]\n", proc
->pid
, i
);
2683 if(!strcmp(proc
->bc_stats
[i
]->service
, "") &&
2684 (0 == proc
->bc_stats
[i
]->code
[0]))
2686 strcpy(proc
->bc_stats
[i
]->service
, e
->service
);
2689 else if (!strcmp(proc
->bc_stats
[i
]->service
, e
->service
))
2694 if (BC_STATS_NR
== i
){
2695 pr_err("perf_e bc_Stats array size"
2696 " is not enough\n");
2700 for (j
= 0; j
< BC_CODE_NR
; j
++)
2702 if (0 == proc
->bc_stats
[i
]->code
[j
])
2704 proc
->bc_stats
[i
]->code
[j
] = e
->code
;
2705 proc
->bc_stats
[i
]->code_num
[j
]++;
2708 else if (proc
->bc_stats
[i
]->code
[j
] == e
->code
)
2710 proc
->bc_stats
[i
]->code_num
[j
]++;
2716 if (BC_CODE_NR
== j
) {
2717 pr_err("perf_e bc_code array size"
2718 " is not enough\n");
2722 pr_err("perf_e update proc %d bc_stats error %d\n", proc
->pid
, err_code
);
2726 binder_debug(BINDER_DEBUG_TRANSACTION
,
2727 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
2728 proc
->pid
, thread
->pid
, t
->debug_id
,
2729 target_proc
->pid
, target_thread
->pid
,
2730 (u64
)tr
->data
.ptr
.buffer
,
2731 (u64
)tr
->data
.ptr
.offsets
,
2732 (u64
)tr
->data_size
, (u64
)tr
->offsets_size
);
2734 binder_debug(BINDER_DEBUG_TRANSACTION
,
2735 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
2736 proc
->pid
, thread
->pid
, t
->debug_id
,
2737 target_proc
->pid
, target_node
->debug_id
,
2738 (u64
)tr
->data
.ptr
.buffer
,
2739 (u64
)tr
->data
.ptr
.offsets
,
2740 (u64
)tr
->data_size
, (u64
)tr
->offsets_size
);
2742 #ifdef BINDER_MONITOR
2743 t
->fproc
= proc
->pid
;
2744 t
->fthrd
= thread
->pid
;
2745 t
->tproc
= target_proc
->pid
;
2746 t
->tthrd
= target_thread
? target_thread
->pid
: 0;
2747 t
->log_idx
= log_idx
;
2749 if (!binder_check_buf_checked())
2751 binder_check_buf_pid
= proc
->pid
;
2752 binder_check_buf_tid
= thread
->pid
;
2755 if (!reply
&& !(tr
->flags
& TF_ONE_WAY
))
2759 t
->sender_euid
= proc
->tsk
->cred
->euid
;
2760 t
->to_proc
= target_proc
;
2761 t
->to_thread
= target_thread
;
2763 t
->flags
= tr
->flags
;
2764 t
->priority
= task_nice(current
);
2765 #ifdef RT_PRIO_INHERIT
2766 t
->rt_prio
= current
->rt_priority
;
2767 t
->policy
= current
->policy
;
2768 t
->saved_rt_prio
= MAX_RT_PRIO
;
2771 trace_binder_transaction(reply
, t
, target_node
);
2773 t
->buffer
= binder_alloc_buf(target_proc
, tr
->data_size
,
2774 tr
->offsets_size
, !reply
&& (t
->flags
& TF_ONE_WAY
));
2775 if (t
->buffer
== NULL
) {
2776 #ifdef MTK_BINDER_DEBUG
2777 binder_user_error("%d:%d buffer allocation failed "
2778 "on %d:0\n", proc
->pid
, thread
->pid
, target_proc
->pid
);
2780 return_error
= BR_FAILED_REPLY
;
2781 goto err_binder_alloc_buf_failed
;
2783 t
->buffer
->allow_user_free
= 0;
2784 t
->buffer
->debug_id
= t
->debug_id
;
2785 t
->buffer
->transaction
= t
;
2786 #ifdef BINDER_MONITOR
2787 t
->buffer
->log_entry
= e
;
2789 t
->buffer
->target_node
= target_node
;
2790 trace_binder_transaction_alloc_buf(t
->buffer
);
2792 binder_inc_node(target_node
, 1, 0, NULL
);
2794 offp
= (binder_size_t
*)(t
->buffer
->data
+
2795 ALIGN(tr
->data_size
, sizeof(void *)));
2797 if (copy_from_user(t
->buffer
->data
, (const void __user
*)(uintptr_t)
2798 tr
->data
.ptr
.buffer
, tr
->data_size
)) {
2799 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2800 proc
->pid
, thread
->pid
);
2801 return_error
= BR_FAILED_REPLY
;
2802 goto err_copy_data_failed
;
2804 if (copy_from_user(offp
, (const void __user
*)(uintptr_t)
2805 tr
->data
.ptr
.offsets
, tr
->offsets_size
)) {
2806 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2807 proc
->pid
, thread
->pid
);
2808 return_error
= BR_FAILED_REPLY
;
2809 goto err_copy_data_failed
;
2811 if (!IS_ALIGNED(tr
->offsets_size
, sizeof(binder_size_t
))) {
2812 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2813 proc
->pid
, thread
->pid
, (u64
)tr
->offsets_size
);
2814 return_error
= BR_FAILED_REPLY
;
2815 goto err_bad_offset
;
2817 off_end
= (void *)offp
+ tr
->offsets_size
;
2819 for (; offp
< off_end
; offp
++) {
2820 struct flat_binder_object
*fp
;
2821 if (*offp
> t
->buffer
->data_size
- sizeof(*fp
) ||
2823 t
->buffer
->data_size
< sizeof(*fp
) ||
2824 !IS_ALIGNED(*offp
, sizeof(u32
))) {
2825 binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
2826 proc
->pid
, thread
->pid
, (u64
)*offp
,
2828 (u64
)(t
->buffer
->data_size
-
2830 return_error
= BR_FAILED_REPLY
;
2831 goto err_bad_offset
;
2833 fp
= (struct flat_binder_object
*)(t
->buffer
->data
+ *offp
);
2834 off_min
= *offp
+ sizeof(struct flat_binder_object
);
2836 case BINDER_TYPE_BINDER
:
2837 case BINDER_TYPE_WEAK_BINDER
: {
2838 struct binder_ref
*ref
;
2839 struct binder_node
*node
= binder_get_node(proc
, fp
->binder
);
2841 node
= binder_new_node(proc
, fp
->binder
, fp
->cookie
);
2843 #ifdef MTK_BINDER_DEBUG
2844 binder_user_error("%d:%d create new node failed\n",
2845 proc
->pid
, thread
->pid
);
2847 return_error
= BR_FAILED_REPLY
;
2848 goto err_binder_new_node_failed
;
2850 node
->min_priority
= fp
->flags
& FLAT_BINDER_FLAG_PRIORITY_MASK
;
2851 node
->accept_fds
= !!(fp
->flags
& FLAT_BINDER_FLAG_ACCEPTS_FDS
);
2852 #ifdef BINDER_MONITOR
2854 unsigned int i
, len
= 0;
2856 /* this is an addService() transaction identified by:
2857 * fp->type == BINDER_TYPE_BINDER && tr->target.handle == 0
2859 if (tr
->target
.handle
== 0) {
2860 /* hack into addService() payload:
2861 * service name string is located at MAGIC_SERVICE_NAME_OFFSET,
2862 * and interleaved with character '\0'.
2863 * for example, 'p', '\0', 'h', '\0', 'o', '\0', 'n', '\0', 'e'
2865 for (i
= 0; (2 * i
) < tr
->data_size
; i
++) {
2866 if ((2 * i
) < MAGIC_SERVICE_NAME_OFFSET
)
2868 /* prevent array index overflow */
2869 if (len
>= (MAX_SERVICE_NAME_LEN
- 1))
2871 tmp
= (char *)(uintptr_t)(tr
->data
.ptr
.buffer
+ (2 * i
));
2872 len
+= sprintf((node
->name
) + len
, "%c", *tmp
);
2874 node
->name
[len
] = '\0';
2876 node
->name
[0] = '\0';
2878 /* via addService of activity service, identify
2879 * system_server's process id.
2881 if (!strcmp(node
->name
, "activity")) {
2882 system_server_pid
= proc
->pid
;
2883 pr_debug("system_server %d\n", system_server_pid
);
2888 if (fp
->cookie
!= node
->cookie
) {
2889 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2890 proc
->pid
, thread
->pid
,
2891 (u64
)fp
->binder
, node
->debug_id
,
2892 (u64
)fp
->cookie
, (u64
)node
->cookie
);
2893 goto err_binder_get_ref_for_node_failed
;
2895 if (security_binder_transfer_binder(proc
->tsk
, target_proc
->tsk
)) {
2896 return_error
= BR_FAILED_REPLY
;
2897 goto err_binder_get_ref_for_node_failed
;
2899 ref
= binder_get_ref_for_node(target_proc
, node
);
2901 #ifdef MTK_BINDER_DEBUG
2902 binder_user_error("%d:%d get binder ref failed\n",
2903 proc
->pid
, thread
->pid
);
2905 return_error
= BR_FAILED_REPLY
;
2906 goto err_binder_get_ref_for_node_failed
;
2908 if (fp
->type
== BINDER_TYPE_BINDER
)
2909 fp
->type
= BINDER_TYPE_HANDLE
;
2911 fp
->type
= BINDER_TYPE_WEAK_HANDLE
;
2913 fp
->handle
= ref
->desc
;
2915 binder_inc_ref(ref
, fp
->type
== BINDER_TYPE_HANDLE
,
2918 trace_binder_transaction_node_to_ref(t
, node
, ref
);
2919 binder_debug(BINDER_DEBUG_TRANSACTION
,
2920 " node %d u%016llx -> ref %d desc %d\n",
2921 node
->debug_id
, (u64
)node
->ptr
,
2922 ref
->debug_id
, ref
->desc
);
2924 case BINDER_TYPE_HANDLE
:
2925 case BINDER_TYPE_WEAK_HANDLE
: {
2926 struct binder_ref
*ref
= binder_get_ref(proc
, fp
->handle
,
2927 fp
->type
== BINDER_TYPE_HANDLE
);
2929 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2931 thread
->pid
, fp
->handle
);
2932 return_error
= BR_FAILED_REPLY
;
2933 goto err_binder_get_ref_failed
;
2935 if (security_binder_transfer_binder(proc
->tsk
, target_proc
->tsk
)) {
2936 return_error
= BR_FAILED_REPLY
;
2937 goto err_binder_get_ref_failed
;
2939 if (ref
->node
->proc
== target_proc
) {
2940 if (fp
->type
== BINDER_TYPE_HANDLE
)
2941 fp
->type
= BINDER_TYPE_BINDER
;
2943 fp
->type
= BINDER_TYPE_WEAK_BINDER
;
2944 fp
->binder
= ref
->node
->ptr
;
2945 fp
->cookie
= ref
->node
->cookie
;
2946 binder_inc_node(ref
->node
, fp
->type
== BINDER_TYPE_BINDER
, 0, NULL
);
2947 trace_binder_transaction_ref_to_node(t
, ref
);
2948 binder_debug(BINDER_DEBUG_TRANSACTION
,
2949 " ref %d desc %d -> node %d u%016llx\n",
2950 ref
->debug_id
, ref
->desc
, ref
->node
->debug_id
,
2951 (u64
)ref
->node
->ptr
);
2953 struct binder_ref
*new_ref
;
2954 new_ref
= binder_get_ref_for_node(target_proc
, ref
->node
);
2955 if (new_ref
== NULL
) {
2956 #ifdef MTK_BINDER_DEBUG
2957 binder_user_error("%d:%d get new binder ref failed\n",
2958 proc
->pid
, thread
->pid
);
2960 return_error
= BR_FAILED_REPLY
;
2961 goto err_binder_get_ref_for_node_failed
;
2964 fp
->handle
= new_ref
->desc
;
2966 binder_inc_ref(new_ref
, fp
->type
== BINDER_TYPE_HANDLE
, NULL
);
2967 trace_binder_transaction_ref_to_ref(t
, ref
,
2969 binder_debug(BINDER_DEBUG_TRANSACTION
,
2970 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2971 ref
->debug_id
, ref
->desc
, new_ref
->debug_id
,
2972 new_ref
->desc
, ref
->node
->debug_id
);
2976 case BINDER_TYPE_FD
: {
2981 if (!(in_reply_to
->flags
& TF_ACCEPT_FDS
)) {
2982 binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
2983 proc
->pid
, thread
->pid
, fp
->handle
);
2984 return_error
= BR_FAILED_REPLY
;
2985 goto err_fd_not_allowed
;
2987 } else if (!target_node
->accept_fds
) {
2988 binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
2989 proc
->pid
, thread
->pid
, fp
->handle
);
2990 return_error
= BR_FAILED_REPLY
;
2991 goto err_fd_not_allowed
;
2994 file
= fget(fp
->handle
);
2996 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2997 proc
->pid
, thread
->pid
, fp
->handle
);
2998 return_error
= BR_FAILED_REPLY
;
2999 goto err_fget_failed
;
3001 if (security_binder_transfer_file(proc
->tsk
, target_proc
->tsk
, file
) < 0) {
3003 return_error
= BR_FAILED_REPLY
;
3004 goto err_get_unused_fd_failed
;
3006 target_fd
= task_get_unused_fd_flags(target_proc
, O_CLOEXEC
);
3007 if (target_fd
< 0) {
3009 #ifdef MTK_BINDER_DEBUG
3010 binder_user_error("%d:%d to %d failed due to %d no unused fd available(%d:%s fd leak?), %d\n",
3011 proc
->pid
, thread
->pid
,
3012 target_proc
->pid
, target_proc
->pid
, target_proc
->pid
,
3013 target_proc
->tsk
? target_proc
->tsk
->comm
: "",
3016 return_error
= BR_FAILED_REPLY
;
3017 goto err_get_unused_fd_failed
;
3019 task_fd_install(target_proc
, target_fd
, file
);
3020 trace_binder_transaction_fd(t
, fp
->handle
, target_fd
);
3021 binder_debug(BINDER_DEBUG_TRANSACTION
,
3022 " fd %d -> %d\n", fp
->handle
, target_fd
);
3025 fp
->handle
= target_fd
;
3026 #ifdef BINDER_MONITOR
3032 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3033 proc
->pid
, thread
->pid
, fp
->type
);
3034 return_error
= BR_FAILED_REPLY
;
3035 goto err_bad_object_type
;
3039 BUG_ON(t
->buffer
->async_transaction
!= 0);
3040 #ifdef BINDER_MONITOR
3041 binder_update_transaction_time(&binder_transaction_log
,in_reply_to
, 2);
3043 binder_pop_transaction(target_thread
, in_reply_to
);
3044 } else if (!(t
->flags
& TF_ONE_WAY
)) {
3045 BUG_ON(t
->buffer
->async_transaction
!= 0);
3047 t
->from_parent
= thread
->transaction_stack
;
3048 thread
->transaction_stack
= t
;
3050 BUG_ON(target_node
== NULL
);
3051 BUG_ON(t
->buffer
->async_transaction
!= 1);
3052 if (target_node
->has_async_transaction
) {
3053 target_list
= &target_node
->async_todo
;
3056 target_node
->has_async_transaction
= 1;
3058 t
->work
.type
= BINDER_WORK_TRANSACTION
;
3059 list_add_tail(&t
->work
.entry
, target_list
);
3060 tcomplete
->type
= BINDER_WORK_TRANSACTION_COMPLETE
;
3061 list_add_tail(&tcomplete
->entry
, &thread
->todo
);
3062 #ifdef RT_PRIO_INHERIT
3065 wait_queue_t
*curr
, *next
;
3066 bool is_lock
= false;
3068 spin_lock_irqsave(&target_wait
->lock
, flag
);
3070 list_for_each_entry_safe(curr
, next
, &target_wait
->task_list
, task_list
) {
3071 unsigned flags
= curr
->flags
;
3072 struct task_struct
*tsk
= curr
->private;
3074 spin_unlock_irqrestore(&target_wait
->lock
, flag
);
3076 wake_up_interruptible(target_wait
);
3079 # ifdef MTK_BINDER_DEBUG
3080 if (tsk
->state
== TASK_UNINTERRUPTIBLE
) {
3081 pr_err("from %d:%d to %d:%d target "
3082 "thread state: %ld\n",
3083 proc
->pid
, thread
->pid
,
3084 tsk
->tgid
, tsk
->pid
, tsk
->state
);
3085 show_stack(tsk
, NULL
);
3088 if (!reply
&& (t
->policy
== SCHED_RR
|| t
->policy
== SCHED_FIFO
)&&
3089 t
->rt_prio
> tsk
->rt_priority
&&
3090 !(t
->flags
& TF_ONE_WAY
)) {
3091 struct sched_param param
= {
3092 .sched_priority
= t
->rt_prio
,
3095 t
->saved_rt_prio
= tsk
->rt_priority
;
3096 t
->saved_policy
= tsk
->policy
;
3097 mt_sched_setscheduler_nocheck(tsk
, t
->policy
, ¶m
);
3098 #ifdef BINDER_MONITOR
3099 if (log_disable
& BINDER_RT_LOG_ENABLE
)
3101 pr_debug("write set %d sched_policy from %d to %d rt_prio from %d to %d\n",
3102 tsk
->pid
, t
->saved_policy
, t
->policy
,
3103 t
->saved_rt_prio
, t
->rt_prio
);
3107 if (curr
->func(curr
, TASK_INTERRUPTIBLE
, 0, NULL
) &&
3108 (flags
& WQ_FLAG_EXCLUSIVE
))
3112 spin_unlock_irqrestore(&target_wait
->lock
, flag
);
3116 wake_up_interruptible(target_wait
);
3119 #ifdef BINDER_MONITOR
3120 t
->wait_on
= reply
? WAIT_ON_REPLY_READ
: WAIT_ON_READ
;
3121 binder_queue_bwdog(t
, (time_t)WAIT_BUDGET_READ
);
3125 err_get_unused_fd_failed
:
3128 err_binder_get_ref_for_node_failed
:
3129 err_binder_get_ref_failed
:
3130 err_binder_new_node_failed
:
3131 err_bad_object_type
:
3133 err_copy_data_failed
:
3134 trace_binder_transaction_failed_buffer_release(t
->buffer
);
3135 binder_transaction_buffer_release(target_proc
, t
->buffer
, offp
);
3136 t
->buffer
->transaction
= NULL
;
3137 binder_free_buf(target_proc
, t
->buffer
);
3138 err_binder_alloc_buf_failed
:
3140 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE
);
3141 err_alloc_tcomplete_failed
:
3142 #ifdef BINDER_MONITOR
3143 binder_cancel_bwdog(t
);
3146 binder_stats_deleted(BINDER_STAT_TRANSACTION
);
3149 err_empty_call_stack
:
3151 err_invalid_target_handle
:
3152 err_no_context_mgr_node
:
3153 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
3154 "%d:%d transaction failed %d, size %lld-%lld\n",
3155 proc
->pid
, thread
->pid
, return_error
,
3156 (u64
)tr
->data_size
, (u64
)tr
->offsets_size
);
3159 struct binder_transaction_log_entry
*fe
;
3160 fe
= binder_transaction_log_add(&binder_transaction_log_failed
);
3164 BUG_ON(thread
->return_error
!= BR_OK
);
3166 thread
->return_error
= BR_TRANSACTION_COMPLETE
;
3167 binder_send_failed_reply(in_reply_to
, return_error
);
3169 thread
->return_error
= return_error
;
3172 int binder_thread_write(struct binder_proc
*proc
, struct binder_thread
*thread
,
3173 binder_uintptr_t binder_buffer
, size_t size
,
3174 binder_size_t
*consumed
)
3177 void __user
*buffer
= (void __user
*)(uintptr_t)binder_buffer
;
3178 void __user
*ptr
= buffer
+ *consumed
;
3179 void __user
*end
= buffer
+ size
;
3181 while (ptr
< end
&& thread
->return_error
== BR_OK
) {
3182 if (get_user(cmd
, (uint32_t __user
*)ptr
))
3184 ptr
+= sizeof(uint32_t);
3185 trace_binder_command(cmd
);
3186 if (_IOC_NR(cmd
) < ARRAY_SIZE(binder_stats
.bc
)) {
3187 binder_stats
.bc
[_IOC_NR(cmd
)]++;
3188 proc
->stats
.bc
[_IOC_NR(cmd
)]++;
3189 thread
->stats
.bc
[_IOC_NR(cmd
)]++;
3197 struct binder_ref
*ref
;
3198 const char *debug_string
;
3200 if (get_user(target
, (uint32_t __user
*)ptr
))
3202 ptr
+= sizeof(uint32_t);
3203 if (target
== 0 && binder_context_mgr_node
&&
3204 (cmd
== BC_INCREFS
|| cmd
== BC_ACQUIRE
)) {
3205 ref
= binder_get_ref_for_node(proc
,
3206 binder_context_mgr_node
);
3207 if (ref
->desc
!= target
) {
3208 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
3209 proc
->pid
, thread
->pid
,
3213 ref
= binder_get_ref(proc
, target
,
3214 cmd
== BC_ACQUIRE
||
3217 binder_user_error("%d:%d refcount change on invalid ref %d\n",
3218 proc
->pid
, thread
->pid
, target
);
3223 debug_string
= "IncRefs";
3224 binder_inc_ref(ref
, 0, NULL
);
3227 debug_string
= "Acquire";
3228 binder_inc_ref(ref
, 1, NULL
);
3231 debug_string
= "Release";
3232 binder_dec_ref(ref
, 1);
3236 debug_string
= "DecRefs";
3237 binder_dec_ref(ref
, 0);
3240 binder_debug(BINDER_DEBUG_USER_REFS
,
3241 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
3242 proc
->pid
, thread
->pid
, debug_string
, ref
->debug_id
,
3243 ref
->desc
, ref
->strong
, ref
->weak
, ref
->node
->debug_id
);
3246 case BC_INCREFS_DONE
:
3247 case BC_ACQUIRE_DONE
: {
3248 binder_uintptr_t node_ptr
;
3249 binder_uintptr_t cookie
;
3250 struct binder_node
*node
;
3252 if (get_user(node_ptr
, (binder_uintptr_t __user
*)ptr
))
3254 ptr
+= sizeof(binder_uintptr_t
);
3255 if (get_user(cookie
, (binder_uintptr_t __user
*)ptr
))
3257 ptr
+= sizeof(binder_uintptr_t
);
3258 node
= binder_get_node(proc
, node_ptr
);
3260 binder_user_error("%d:%d %s u%016llx no match\n",
3261 proc
->pid
, thread
->pid
,
3262 cmd
== BC_INCREFS_DONE
?
3268 if (cookie
!= node
->cookie
) {
3269 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3270 proc
->pid
, thread
->pid
,
3271 cmd
== BC_INCREFS_DONE
?
3272 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3273 (u64
)node_ptr
, node
->debug_id
,
3274 (u64
)cookie
, (u64
)node
->cookie
);
3277 if (cmd
== BC_ACQUIRE_DONE
) {
3278 if (node
->pending_strong_ref
== 0) {
3279 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3280 proc
->pid
, thread
->pid
,
3284 node
->pending_strong_ref
= 0;
3286 if (node
->pending_weak_ref
== 0) {
3287 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3288 proc
->pid
, thread
->pid
,
3292 node
->pending_weak_ref
= 0;
3294 binder_dec_node(node
, cmd
== BC_ACQUIRE_DONE
, 0);
3295 binder_debug(BINDER_DEBUG_USER_REFS
,
3296 "%d:%d %s node %d ls %d lw %d\n",
3297 proc
->pid
, thread
->pid
,
3298 cmd
== BC_INCREFS_DONE
? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3299 node
->debug_id
, node
->local_strong_refs
, node
->local_weak_refs
);
3302 case BC_ATTEMPT_ACQUIRE
:
3303 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3305 case BC_ACQUIRE_RESULT
:
3306 pr_err("BC_ACQUIRE_RESULT not supported\n");
3309 case BC_FREE_BUFFER
: {
3310 binder_uintptr_t data_ptr
;
3311 struct binder_buffer
*buffer
;
3313 if (get_user(data_ptr
, (binder_uintptr_t __user
*)ptr
))
3315 ptr
+= sizeof(binder_uintptr_t
);
3317 buffer
= binder_buffer_lookup(proc
, data_ptr
);
3318 if (buffer
== NULL
) {
3319 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3320 proc
->pid
, thread
->pid
, (u64
)data_ptr
);
3323 if (!buffer
->allow_user_free
) {
3324 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3325 proc
->pid
, thread
->pid
, (u64
)data_ptr
);
3328 binder_debug(BINDER_DEBUG_FREE_BUFFER
,
3329 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3330 proc
->pid
, thread
->pid
, (u64
)data_ptr
, buffer
->debug_id
,
3331 buffer
->transaction
? "active" : "finished");
3333 if (buffer
->transaction
) {
3334 buffer
->transaction
->buffer
= NULL
;
3335 buffer
->transaction
= NULL
;
3337 if (buffer
->async_transaction
&& buffer
->target_node
) {
3338 BUG_ON(!buffer
->target_node
->has_async_transaction
);
3339 if (list_empty(&buffer
->target_node
->async_todo
))
3340 #ifdef MTK_BINDER_DEBUG
3343 buffer
->target_node
->has_async_transaction
= 0;
3344 #ifdef MTK_BINDER_DEBUG
3345 buffer
->target_node
->async_pid
= 0;
3349 #ifdef MTK_BINDER_DEBUG
3352 list_move_tail(buffer
->target_node
->async_todo
.next
, &thread
->todo
);
3353 #ifdef MTK_BINDER_DEBUG
3354 buffer
->target_node
->async_pid
= thread
->pid
;
3358 trace_binder_transaction_buffer_release(buffer
);
3359 binder_transaction_buffer_release(proc
, buffer
, NULL
);
3360 binder_free_buf(proc
, buffer
);
3364 case BC_TRANSACTION
:
3366 struct binder_transaction_data tr
;
3368 if (copy_from_user(&tr
, ptr
, sizeof(tr
)))
3371 binder_transaction(proc
, thread
, &tr
, cmd
== BC_REPLY
);
3375 case BC_REGISTER_LOOPER
:
3376 binder_debug(BINDER_DEBUG_THREADS
,
3377 "%d:%d BC_REGISTER_LOOPER\n",
3378 proc
->pid
, thread
->pid
);
3379 if (thread
->looper
& BINDER_LOOPER_STATE_ENTERED
) {
3380 thread
->looper
|= BINDER_LOOPER_STATE_INVALID
;
3381 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3382 proc
->pid
, thread
->pid
);
3383 } else if (proc
->requested_threads
== 0) {
3384 thread
->looper
|= BINDER_LOOPER_STATE_INVALID
;
3385 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3386 proc
->pid
, thread
->pid
);
3388 proc
->requested_threads
--;
3389 proc
->requested_threads_started
++;
3391 thread
->looper
|= BINDER_LOOPER_STATE_REGISTERED
;
3393 case BC_ENTER_LOOPER
:
3394 binder_debug(BINDER_DEBUG_THREADS
,
3395 "%d:%d BC_ENTER_LOOPER\n",
3396 proc
->pid
, thread
->pid
);
3397 if (thread
->looper
& BINDER_LOOPER_STATE_REGISTERED
) {
3398 thread
->looper
|= BINDER_LOOPER_STATE_INVALID
;
3399 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3400 proc
->pid
, thread
->pid
);
3402 thread
->looper
|= BINDER_LOOPER_STATE_ENTERED
;
3404 case BC_EXIT_LOOPER
:
3405 binder_debug(BINDER_DEBUG_THREADS
,
3406 "%d:%d BC_EXIT_LOOPER\n",
3407 proc
->pid
, thread
->pid
);
3408 thread
->looper
|= BINDER_LOOPER_STATE_EXITED
;
3411 case BC_REQUEST_DEATH_NOTIFICATION
:
3412 case BC_CLEAR_DEATH_NOTIFICATION
: {
3414 binder_uintptr_t cookie
;
3415 struct binder_ref
*ref
;
3416 struct binder_ref_death
*death
;
3418 if (get_user(target
, (uint32_t __user
*)ptr
))
3420 ptr
+= sizeof(uint32_t);
3421 if (get_user(cookie
, (binder_uintptr_t __user
*)ptr
))
3423 ptr
+= sizeof(binder_uintptr_t
);
3424 ref
= binder_get_ref(proc
, target
, false);
3426 binder_user_error("%d:%d %s invalid ref %d\n",
3427 proc
->pid
, thread
->pid
,
3428 cmd
== BC_REQUEST_DEATH_NOTIFICATION
?
3429 "BC_REQUEST_DEATH_NOTIFICATION" :
3430 "BC_CLEAR_DEATH_NOTIFICATION",
3434 #ifdef MTK_DEATH_NOTIFY_MONITOR
3435 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION
,
3436 "[DN #%s]binder: %d:%d %s %d(%s) cookie 0x%016llx\n",
3437 cmd
== BC_REQUEST_DEATH_NOTIFICATION
? "1" : "2",
3438 proc
->pid
, thread
->pid
,
3439 cmd
== BC_REQUEST_DEATH_NOTIFICATION
?
3440 "BC_REQUEST_DEATH_NOTIFICATION" :
3441 "BC_CLEAR_DEATH_NOTIFICATION",
3442 ref
->node
->proc
? ref
->node
->proc
->pid
: 0,
3443 #ifdef BINDER_MONITOR
3444 ref
->node
? ref
->node
->name
: "",
3450 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION
,
3451 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3452 proc
->pid
, thread
->pid
,
3453 cmd
== BC_REQUEST_DEATH_NOTIFICATION
?
3454 "BC_REQUEST_DEATH_NOTIFICATION" :
3455 "BC_CLEAR_DEATH_NOTIFICATION",
3456 (u64
)cookie
, ref
->debug_id
, ref
->desc
,
3457 ref
->strong
, ref
->weak
, ref
->node
->debug_id
);
3460 if (cmd
== BC_REQUEST_DEATH_NOTIFICATION
) {
3462 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3463 proc
->pid
, thread
->pid
);
3466 death
= kzalloc(sizeof(*death
), GFP_KERNEL
);
3467 if (death
== NULL
) {
3468 thread
->return_error
= BR_ERROR
;
3469 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
3470 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3471 proc
->pid
, thread
->pid
);
3474 binder_stats_created(BINDER_STAT_DEATH
);
3475 INIT_LIST_HEAD(&death
->work
.entry
);
3476 death
->cookie
= cookie
;
3478 if (ref
->node
->proc
== NULL
) {
3479 ref
->death
->work
.type
= BINDER_WORK_DEAD_BINDER
;
3480 if (thread
->looper
& (BINDER_LOOPER_STATE_REGISTERED
| BINDER_LOOPER_STATE_ENTERED
)) {
3481 list_add_tail(&ref
->death
->work
.entry
, &thread
->todo
);
3483 list_add_tail(&ref
->death
->work
.entry
, &proc
->todo
);
3484 wake_up_interruptible(&proc
->wait
);
3488 if (ref
->death
== NULL
) {
3489 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3490 proc
->pid
, thread
->pid
);
3494 if (death
->cookie
!= cookie
) {
3495 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3496 proc
->pid
, thread
->pid
,
3497 (u64
)death
->cookie
, (u64
)cookie
);
3501 if (list_empty(&death
->work
.entry
)) {
3502 death
->work
.type
= BINDER_WORK_CLEAR_DEATH_NOTIFICATION
;
3503 if (thread
->looper
& (BINDER_LOOPER_STATE_REGISTERED
| BINDER_LOOPER_STATE_ENTERED
)) {
3504 list_add_tail(&death
->work
.entry
, &thread
->todo
);
3506 list_add_tail(&death
->work
.entry
, &proc
->todo
);
3507 wake_up_interruptible(&proc
->wait
);
3510 BUG_ON(death
->work
.type
!= BINDER_WORK_DEAD_BINDER
);
3511 death
->work
.type
= BINDER_WORK_DEAD_BINDER_AND_CLEAR
;
3515 case BC_DEAD_BINDER_DONE
: {
3516 struct binder_work
*w
;
3517 binder_uintptr_t cookie
;
3518 struct binder_ref_death
*death
= NULL
;
3519 if (get_user(cookie
, (binder_uintptr_t __user
*)ptr
))
3522 #ifdef MTK_DEATH_NOTIFY_MONITOR
3523 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION
,
3524 "[DN #6]binder: %d:%d cookie 0x%016llx\n", proc
->pid
, thread
->pid
, (u64
)cookie
);
3527 ptr
+= sizeof(void *);
3528 list_for_each_entry(w
, &proc
->delivered_death
, entry
) {
3529 struct binder_ref_death
*tmp_death
= container_of(w
, struct binder_ref_death
, work
);
3530 if (tmp_death
->cookie
== cookie
) {
3535 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
3536 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3537 proc
->pid
, thread
->pid
, (u64
)cookie
, death
);
3538 if (death
== NULL
) {
3539 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3540 proc
->pid
, thread
->pid
, (u64
)cookie
);
3544 list_del_init(&death
->work
.entry
);
3545 if (death
->work
.type
== BINDER_WORK_DEAD_BINDER_AND_CLEAR
) {
3546 death
->work
.type
= BINDER_WORK_CLEAR_DEATH_NOTIFICATION
;
3547 if (thread
->looper
& (BINDER_LOOPER_STATE_REGISTERED
| BINDER_LOOPER_STATE_ENTERED
)) {
3548 list_add_tail(&death
->work
.entry
, &thread
->todo
);
3550 list_add_tail(&death
->work
.entry
, &proc
->todo
);
3551 wake_up_interruptible(&proc
->wait
);
3557 pr_err("%d:%d unknown command %d\n",
3558 proc
->pid
, thread
->pid
, cmd
);
3561 *consumed
= ptr
- buffer
;
3566 void binder_stat_br(struct binder_proc
*proc
, struct binder_thread
*thread
,
3569 trace_binder_return(cmd
);
3570 if (_IOC_NR(cmd
) < ARRAY_SIZE(binder_stats
.br
)) {
3571 binder_stats
.br
[_IOC_NR(cmd
)]++;
3572 proc
->stats
.br
[_IOC_NR(cmd
)]++;
3573 thread
->stats
.br
[_IOC_NR(cmd
)]++;
3577 static int binder_has_proc_work(struct binder_proc
*proc
,
3578 struct binder_thread
*thread
)
3580 return !list_empty(&proc
->todo
) ||
3581 (thread
->looper
& BINDER_LOOPER_STATE_NEED_RETURN
);
3584 static int binder_has_thread_work(struct binder_thread
*thread
)
3586 return !list_empty(&thread
->todo
) || thread
->return_error
!= BR_OK
||
3587 (thread
->looper
& BINDER_LOOPER_STATE_NEED_RETURN
);
3590 static int binder_thread_read(struct binder_proc
*proc
,
3591 struct binder_thread
*thread
,
3592 binder_uintptr_t binder_buffer
, size_t size
,
3593 binder_size_t
*consumed
, int non_block
)
3595 void __user
*buffer
= (void __user
*)(uintptr_t)binder_buffer
;
3596 void __user
*ptr
= buffer
+ *consumed
;
3597 void __user
*end
= buffer
+ size
;
3600 int wait_for_proc_work
;
3602 if (*consumed
== 0) {
3603 if (put_user(BR_NOOP
, (uint32_t __user
*)ptr
))
3605 ptr
+= sizeof(uint32_t);
3609 wait_for_proc_work
= thread
->transaction_stack
== NULL
&&
3610 list_empty(&thread
->todo
);
3612 if (thread
->return_error
!= BR_OK
&& ptr
< end
) {
3613 if (thread
->return_error2
!= BR_OK
) {
3614 if (put_user(thread
->return_error2
, (uint32_t __user
*)ptr
))
3616 ptr
+= sizeof(uint32_t);
3617 pr_err("read put err2 %u to user %p, thread error %u:%u\n",
3618 thread
->return_error2
, ptr
, thread
->return_error
, thread
->return_error2
);
3619 binder_stat_br(proc
, thread
, thread
->return_error2
);
3622 thread
->return_error2
= BR_OK
;
3624 if (put_user(thread
->return_error
, (uint32_t __user
*)ptr
))
3626 ptr
+= sizeof(uint32_t);
3627 pr_err("read put err %u to user %p, thread error %u:%u\n",
3628 thread
->return_error
, ptr
, thread
->return_error
, thread
->return_error2
);
3629 binder_stat_br(proc
, thread
, thread
->return_error
);
3630 thread
->return_error
= BR_OK
;
3635 thread
->looper
|= BINDER_LOOPER_STATE_WAITING
;
3636 if (wait_for_proc_work
)
3637 proc
->ready_threads
++;
3639 binder_unlock(__func__
);
3641 trace_binder_wait_for_work(wait_for_proc_work
,
3642 !!thread
->transaction_stack
,
3643 !list_empty(&thread
->todo
));
3644 if (wait_for_proc_work
) {
3645 if (!(thread
->looper
& (BINDER_LOOPER_STATE_REGISTERED
|
3646 BINDER_LOOPER_STATE_ENTERED
))) {
3647 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3648 proc
->pid
, thread
->pid
, thread
->looper
);
3649 wait_event_interruptible(binder_user_error_wait
,
3650 binder_stop_on_user_error
< 2);
3652 #ifdef RT_PRIO_INHERIT
3653 /* disable preemption to prevent from schedule-out immediately */
3656 binder_set_nice(proc
->default_priority
);
3657 #ifdef RT_PRIO_INHERIT
3658 if (rt_task(current
) && !binder_has_proc_work(proc
, thread
)) {
3659 /* make sure binder has no work before setting priority back*/
3660 struct sched_param param
= {
3661 .sched_priority
= proc
->default_rt_prio
,
3663 #ifdef BINDER_MONITOR
3664 if (log_disable
& BINDER_RT_LOG_ENABLE
)
3666 pr_debug("enter threadpool reset %d sched_policy from %u to %d rt_prio from %u to %d\n",
3667 current
->pid
, current
->policy
, proc
->default_policy
,
3668 current
->rt_priority
, proc
->default_rt_prio
);
3671 mt_sched_setscheduler_nocheck(current
,
3672 proc
->default_policy
, ¶m
);
3674 preempt_enable_no_resched();
3677 if (!binder_has_proc_work(proc
, thread
))
3680 ret
= wait_event_freezable_exclusive(proc
->wait
, binder_has_proc_work(proc
, thread
));
3683 if (!binder_has_thread_work(thread
))
3686 ret
= wait_event_freezable(thread
->wait
, binder_has_thread_work(thread
));
3689 binder_lock(__func__
);
3691 if (wait_for_proc_work
)
3692 proc
->ready_threads
--;
3693 thread
->looper
&= ~BINDER_LOOPER_STATE_WAITING
;
3700 struct binder_transaction_data tr
;
3701 struct binder_work
*w
;
3702 struct binder_transaction
*t
= NULL
;
3704 if (!list_empty(&thread
->todo
))
3705 w
= list_first_entry(&thread
->todo
, struct binder_work
, entry
);
3706 else if (!list_empty(&proc
->todo
) && wait_for_proc_work
)
3707 w
= list_first_entry(&proc
->todo
, struct binder_work
, entry
);
3709 if (ptr
- buffer
== 4 && !(thread
->looper
& BINDER_LOOPER_STATE_NEED_RETURN
)) /* no data added */
3714 if (end
- ptr
< sizeof(tr
) + 4)
3718 case BINDER_WORK_TRANSACTION
: {
3719 t
= container_of(w
, struct binder_transaction
, work
);
3720 #ifdef BINDER_MONITOR
3721 binder_cancel_bwdog(t
);
3724 case BINDER_WORK_TRANSACTION_COMPLETE
: {
3725 cmd
= BR_TRANSACTION_COMPLETE
;
3726 if (put_user(cmd
, (uint32_t __user
*)ptr
))
3728 ptr
+= sizeof(uint32_t);
3730 binder_stat_br(proc
, thread
, cmd
);
3731 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE
,
3732 "%d:%d BR_TRANSACTION_COMPLETE\n",
3733 proc
->pid
, thread
->pid
);
3735 list_del(&w
->entry
);
3737 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE
);
3739 case BINDER_WORK_NODE
: {
3740 struct binder_node
*node
= container_of(w
, struct binder_node
, work
);
3741 uint32_t cmd
= BR_NOOP
;
3742 const char *cmd_name
;
3743 int strong
= node
->internal_strong_refs
|| node
->local_strong_refs
;
3744 int weak
= !hlist_empty(&node
->refs
) || node
->local_weak_refs
|| strong
;
3745 if (weak
&& !node
->has_weak_ref
) {
3747 cmd_name
= "BR_INCREFS";
3748 node
->has_weak_ref
= 1;
3749 node
->pending_weak_ref
= 1;
3750 node
->local_weak_refs
++;
3751 } else if (strong
&& !node
->has_strong_ref
) {
3753 cmd_name
= "BR_ACQUIRE";
3754 node
->has_strong_ref
= 1;
3755 node
->pending_strong_ref
= 1;
3756 node
->local_strong_refs
++;
3757 } else if (!strong
&& node
->has_strong_ref
) {
3759 cmd_name
= "BR_RELEASE";
3760 node
->has_strong_ref
= 0;
3761 } else if (!weak
&& node
->has_weak_ref
) {
3763 cmd_name
= "BR_DECREFS";
3764 node
->has_weak_ref
= 0;
3766 if (cmd
!= BR_NOOP
) {
3767 if (put_user(cmd
, (uint32_t __user
*)ptr
))
3769 ptr
+= sizeof(uint32_t);
3770 if (put_user(node
->ptr
,
3771 (binder_uintptr_t __user
*)ptr
))
3773 ptr
+= sizeof(binder_uintptr_t
);
3774 if (put_user(node
->cookie
,
3775 (binder_uintptr_t __user
*)ptr
))
3777 ptr
+= sizeof(binder_uintptr_t
);
3779 binder_stat_br(proc
, thread
, cmd
);
3780 binder_debug(BINDER_DEBUG_USER_REFS
,
3781 "%d:%d %s %d u%016llx c%016llx\n",
3782 proc
->pid
, thread
->pid
, cmd_name
,
3784 (u64
)node
->ptr
, (u64
)node
->cookie
);
3786 list_del_init(&w
->entry
);
3787 if (!weak
&& !strong
) {
3788 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
3789 "%d:%d node %d u%016llx c%016llx deleted\n",
3790 proc
->pid
, thread
->pid
, node
->debug_id
,
3791 (u64
)node
->ptr
, (u64
)node
->cookie
);
3792 rb_erase(&node
->rb_node
, &proc
->nodes
);
3794 binder_stats_deleted(BINDER_STAT_NODE
);
3796 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
3797 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3798 proc
->pid
, thread
->pid
, node
->debug_id
,
3799 (u64
)node
->ptr
, (u64
)node
->cookie
);
3803 case BINDER_WORK_DEAD_BINDER
:
3804 case BINDER_WORK_DEAD_BINDER_AND_CLEAR
:
3805 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION
: {
3806 struct binder_ref_death
*death
;
3809 death
= container_of(w
, struct binder_ref_death
, work
);
3811 #ifdef MTK_DEATH_NOTIFY_MONITOR
3813 case BINDER_WORK_DEAD_BINDER
:
3814 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION
,
3815 "[DN #4]binder: %d:%d BINDER_WORK_DEAD_BINDER cookie 0x%016llx\n",
3816 proc
->pid
, thread
->pid
, (u64
)death
->cookie
);
3818 case BINDER_WORK_DEAD_BINDER_AND_CLEAR
:
3819 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION
,
3820 "[DN #4]binder: %d:%d BINDER_WORK_DEAD_BINDER_AND_CLEAR cookie "
3821 "0x%016llx\n", proc
->pid
, thread
->pid
, (u64
)death
->cookie
);
3823 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION
:
3824 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION
,
3825 "[DN #4]binder: %d:%d BINDER_WORK_CLEAR_DEATH_NOTIFICATION cookie "
3826 "0x%016llx\n", proc
->pid
, thread
->pid
, (u64
)death
->cookie
);
3829 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION
,
3830 "[DN #4]binder: %d:%d UNKNOWN-%d cookie 0x%016llx\n",
3831 proc
->pid
, thread
->pid
, w
->type
, (u64
)death
->cookie
);
3836 if (w
->type
== BINDER_WORK_CLEAR_DEATH_NOTIFICATION
)
3837 cmd
= BR_CLEAR_DEATH_NOTIFICATION_DONE
;
3839 cmd
= BR_DEAD_BINDER
;
3840 if (put_user(cmd
, (uint32_t __user
*)ptr
))
3842 ptr
+= sizeof(uint32_t);
3843 if (put_user(death
->cookie
,
3844 (binder_uintptr_t __user
*)ptr
))
3846 ptr
+= sizeof(binder_uintptr_t
);
3847 binder_stat_br(proc
, thread
, cmd
);
3848 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION
,
3849 "%d:%d %s %016llx\n",
3850 proc
->pid
, thread
->pid
,
3851 cmd
== BR_DEAD_BINDER
?
3853 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3854 (u64
)death
->cookie
);
3856 if (w
->type
== BINDER_WORK_CLEAR_DEATH_NOTIFICATION
) {
3857 list_del(&w
->entry
);
3859 binder_stats_deleted(BINDER_STAT_DEATH
);
3861 list_move(&w
->entry
, &proc
->delivered_death
);
3862 if (cmd
== BR_DEAD_BINDER
)
3863 goto done
; /* DEAD_BINDER notifications can cause transactions */
3870 BUG_ON(t
->buffer
== NULL
);
3871 if (t
->buffer
->target_node
) {
3872 struct binder_node
*target_node
= t
->buffer
->target_node
;
3873 tr
.target
.ptr
= target_node
->ptr
;
3874 tr
.cookie
= target_node
->cookie
;
3875 t
->saved_priority
= task_nice(current
);
3876 #ifdef RT_PRIO_INHERIT
3877 /* since we may fail the rt inherit due to target
3878 * wait queue task_list is empty, check again here.
3880 if ((SCHED_RR
== t
->policy
|| SCHED_FIFO
== t
->policy
) && t
->rt_prio
> current
->rt_priority
&&
3881 !(t
->flags
& TF_ONE_WAY
)) {
3882 struct sched_param param
= {
3883 .sched_priority
= t
->rt_prio
,
3886 t
->saved_rt_prio
= current
->rt_priority
;
3887 t
->saved_policy
= current
->policy
;
3888 mt_sched_setscheduler_nocheck(current
, t
->policy
, ¶m
);
3889 #ifdef BINDER_MONITOR
3890 if (log_disable
& BINDER_RT_LOG_ENABLE
)
3892 pr_debug("read set %d sched_policy from %d to %d rt_prio from %d to %d\n",
3893 proc
->pid
, t
->saved_policy
, t
->policy
,
3894 t
->saved_rt_prio
, t
->rt_prio
);
3899 if (t
->priority
< target_node
->min_priority
&&
3900 !(t
->flags
& TF_ONE_WAY
))
3901 binder_set_nice(t
->priority
);
3902 else if (!(t
->flags
& TF_ONE_WAY
) ||
3903 t
->saved_priority
> target_node
->min_priority
)
3904 binder_set_nice(target_node
->min_priority
);
3905 cmd
= BR_TRANSACTION
;
3912 tr
.flags
= t
->flags
;
3913 tr
.sender_euid
= from_kuid(current_user_ns(), t
->sender_euid
);
3916 struct task_struct
*sender
= t
->from
->proc
->tsk
;
3917 tr
.sender_pid
= task_tgid_nr_ns(sender
,
3918 task_active_pid_ns(current
));
3923 tr
.data_size
= t
->buffer
->data_size
;
3924 tr
.offsets_size
= t
->buffer
->offsets_size
;
3925 tr
.data
.ptr
.buffer
= (binder_uintptr_t
)(
3926 (uintptr_t)t
->buffer
->data
+
3927 proc
->user_buffer_offset
);
3928 tr
.data
.ptr
.offsets
= tr
.data
.ptr
.buffer
+
3929 ALIGN(t
->buffer
->data_size
,
3932 if (put_user(cmd
, (uint32_t __user
*)ptr
))
3934 ptr
+= sizeof(uint32_t);
3935 if (copy_to_user(ptr
, &tr
, sizeof(tr
)))
3939 trace_binder_transaction_received(t
);
3940 binder_stat_br(proc
, thread
, cmd
);
3941 binder_debug(BINDER_DEBUG_TRANSACTION
,
3942 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
3943 proc
->pid
, thread
->pid
,
3944 (cmd
== BR_TRANSACTION
) ? "BR_TRANSACTION" :
3946 t
->debug_id
, t
->from
? t
->from
->proc
->pid
: 0,
3947 t
->from
? t
->from
->pid
: 0, cmd
,
3948 t
->buffer
->data_size
, t
->buffer
->offsets_size
,
3949 (u64
)tr
.data
.ptr
.buffer
, (u64
)tr
.data
.ptr
.offsets
);
3951 list_del(&t
->work
.entry
);
3952 t
->buffer
->allow_user_free
= 1;
3953 if (cmd
== BR_TRANSACTION
&& !(t
->flags
& TF_ONE_WAY
)) {
3954 t
->to_parent
= thread
->transaction_stack
;
3955 t
->to_thread
= thread
;
3956 thread
->transaction_stack
= t
;
3957 #ifdef BINDER_MONITOR
3958 do_posix_clock_monotonic_gettime(&t
->exe_timestamp
);
3959 //monotonic_to_bootbased(&t->exe_timestamp);
3960 do_gettimeofday(&t
->tv
);
3961 /* consider time zone. translate to android time */
3962 t
->tv
.tv_sec
-= (sys_tz
.tz_minuteswest
* 60);
3963 t
->wait_on
= WAIT_ON_EXEC
;
3964 t
->tthrd
= thread
->pid
;
3965 binder_queue_bwdog(t
, (time_t)WAIT_BUDGET_EXEC
);
3966 binder_update_transaction_time(&binder_transaction_log
, t
, 1);
3967 binder_update_transaction_ttid(&binder_transaction_log
, t
);
3970 t
->buffer
->transaction
= NULL
;
3971 #ifdef BINDER_MONITOR
3972 binder_cancel_bwdog(t
);
3973 if (cmd
== BR_TRANSACTION
&& (t
->flags
& TF_ONE_WAY
)) {
3974 binder_update_transaction_time(&binder_transaction_log
, t
, 1);
3975 t
->tthrd
= thread
->pid
;
3976 binder_update_transaction_ttid(&binder_transaction_log
, t
);
3980 binder_stats_deleted(BINDER_STAT_TRANSACTION
);
3987 *consumed
= ptr
- buffer
;
3988 if (proc
->requested_threads
+ proc
->ready_threads
== 0 &&
3989 proc
->requested_threads_started
< proc
->max_threads
&&
3990 (thread
->looper
& (BINDER_LOOPER_STATE_REGISTERED
|
3991 BINDER_LOOPER_STATE_ENTERED
)) /* the user-space code fails to */
3992 /*spawn a new thread if we leave this out */) {
3993 proc
->requested_threads
++;
3994 binder_debug(BINDER_DEBUG_THREADS
,
3995 "%d:%d BR_SPAWN_LOOPER\n",
3996 proc
->pid
, thread
->pid
);
3997 if (put_user(BR_SPAWN_LOOPER
, (uint32_t __user
*)buffer
))
3999 binder_stat_br(proc
, thread
, BR_SPAWN_LOOPER
);
4004 static void binder_release_work(struct list_head
*list
)
4006 struct binder_work
*w
;
4007 while (!list_empty(list
)) {
4008 w
= list_first_entry(list
, struct binder_work
, entry
);
4009 list_del_init(&w
->entry
);
4011 case BINDER_WORK_TRANSACTION
: {
4012 struct binder_transaction
*t
;
4014 t
= container_of(w
, struct binder_transaction
, work
);
4015 if (t
->buffer
->target_node
&&
4016 !(t
->flags
& TF_ONE_WAY
)) {
4017 binder_send_failed_reply(t
, BR_DEAD_REPLY
);
4019 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION
,
4020 "undelivered transaction %d\n",
4022 t
->buffer
->transaction
= NULL
;
4023 #ifdef BINDER_MONITOR
4024 binder_cancel_bwdog(t
);
4027 binder_stats_deleted(BINDER_STAT_TRANSACTION
);
4030 case BINDER_WORK_TRANSACTION_COMPLETE
: {
4031 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION
,
4032 "undelivered TRANSACTION_COMPLETE\n");
4034 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE
);
4036 case BINDER_WORK_DEAD_BINDER_AND_CLEAR
:
4037 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION
: {
4038 struct binder_ref_death
*death
;
4040 death
= container_of(w
, struct binder_ref_death
, work
);
4041 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION
,
4042 "undelivered death notification, %016llx\n",
4043 (u64
)death
->cookie
);
4045 binder_stats_deleted(BINDER_STAT_DEATH
);
4048 pr_err("unexpected work type, %d, not freed\n",
4056 static struct binder_thread
*binder_get_thread(struct binder_proc
*proc
)
4058 struct binder_thread
*thread
= NULL
;
4059 struct rb_node
*parent
= NULL
;
4060 struct rb_node
**p
= &proc
->threads
.rb_node
;
4064 thread
= rb_entry(parent
, struct binder_thread
, rb_node
);
4066 if (current
->pid
< thread
->pid
)
4068 else if (current
->pid
> thread
->pid
)
4069 p
= &(*p
)->rb_right
;
4074 thread
= kzalloc(sizeof(*thread
), GFP_KERNEL
);
4077 binder_stats_created(BINDER_STAT_THREAD
);
4078 thread
->proc
= proc
;
4079 thread
->pid
= current
->pid
;
4080 init_waitqueue_head(&thread
->wait
);
4081 INIT_LIST_HEAD(&thread
->todo
);
4082 rb_link_node(&thread
->rb_node
, parent
, p
);
4083 rb_insert_color(&thread
->rb_node
, &proc
->threads
);
4084 thread
->looper
|= BINDER_LOOPER_STATE_NEED_RETURN
;
4085 thread
->return_error
= BR_OK
;
4086 thread
->return_error2
= BR_OK
;
4091 static int binder_free_thread(struct binder_proc
*proc
,
4092 struct binder_thread
*thread
)
4094 struct binder_transaction
*t
;
4095 struct binder_transaction
*send_reply
= NULL
;
4096 int active_transactions
= 0;
4098 rb_erase(&thread
->rb_node
, &proc
->threads
);
4099 t
= thread
->transaction_stack
;
4100 if (t
&& t
->to_thread
== thread
)
4103 active_transactions
++;
4104 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION
,
4105 "release %d:%d transaction %d %s, still active\n",
4106 proc
->pid
, thread
->pid
,
4108 (t
->to_thread
== thread
) ? "in" : "out");
4110 #ifdef MTK_BINDER_DEBUG
4111 pr_err("%d: %p from %d:%d to %d:%d code %x flags %x "
4113 #ifdef BINDER_MONITOR
4118 t
->from
? t
->from
->proc
->pid
: 0,
4119 t
->from
? t
->from
->pid
: 0,
4120 t
->to_proc
? t
->to_proc
->pid
: 0,
4121 t
->to_thread
? t
->to_thread
->pid
: 0,
4122 t
->code
, t
->flags
, t
->priority
, t
->need_reply
4123 #ifdef BINDER_MONITOR
4124 , (unsigned long)t
->timestamp
.tv_sec
,
4125 (t
->timestamp
.tv_nsec
/ NSEC_PER_USEC
)
4129 if (t
->to_thread
== thread
) {
4131 t
->to_thread
= NULL
;
4133 t
->buffer
->transaction
= NULL
;
4137 } else if (t
->from
== thread
) {
4144 binder_send_failed_reply(send_reply
, BR_DEAD_REPLY
);
4145 binder_release_work(&thread
->todo
);
4147 binder_stats_deleted(BINDER_STAT_THREAD
);
4148 return active_transactions
;
4151 static unsigned int binder_poll(struct file
*filp
,
4152 struct poll_table_struct
*wait
)
4154 struct binder_proc
*proc
= filp
->private_data
;
4155 struct binder_thread
*thread
= NULL
;
4156 int wait_for_proc_work
;
4158 binder_lock(__func__
);
4160 thread
= binder_get_thread(proc
);
4162 wait_for_proc_work
= thread
->transaction_stack
== NULL
&&
4163 list_empty(&thread
->todo
) && thread
->return_error
== BR_OK
;
4165 binder_unlock(__func__
);
4167 if (wait_for_proc_work
) {
4168 if (binder_has_proc_work(proc
, thread
))
4170 poll_wait(filp
, &proc
->wait
, wait
);
4171 if (binder_has_proc_work(proc
, thread
))
4174 if (binder_has_thread_work(thread
))
4176 poll_wait(filp
, &thread
->wait
, wait
);
4177 if (binder_has_thread_work(thread
))
4183 static long binder_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
4186 struct binder_proc
*proc
= filp
->private_data
;
4187 struct binder_thread
*thread
;
4188 unsigned int size
= _IOC_SIZE(cmd
);
4189 void __user
*ubuf
= (void __user
*)arg
;
4191 /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
4193 trace_binder_ioctl(cmd
, arg
);
4195 ret
= wait_event_interruptible(binder_user_error_wait
, binder_stop_on_user_error
< 2);
4199 binder_lock(__func__
);
4200 thread
= binder_get_thread(proc
);
4201 if (thread
== NULL
) {
4207 case BINDER_WRITE_READ
: {
4208 struct binder_write_read bwr
;
4209 if (size
!= sizeof(struct binder_write_read
)) {
4213 if (copy_from_user(&bwr
, ubuf
, sizeof(bwr
))) {
4217 binder_debug(BINDER_DEBUG_READ_WRITE
,
4218 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4219 proc
->pid
, thread
->pid
,
4220 (u64
)bwr
.write_size
, (u64
)bwr
.write_buffer
,
4221 (u64
)bwr
.read_size
, (u64
)bwr
.read_buffer
);
4223 if (bwr
.write_size
> 0) {
4224 ret
= binder_thread_write(proc
, thread
, bwr
.write_buffer
, bwr
.write_size
, &bwr
.write_consumed
);
4225 trace_binder_write_done(ret
);
4227 bwr
.read_consumed
= 0;
4228 if (copy_to_user(ubuf
, &bwr
, sizeof(bwr
)))
4233 if (bwr
.read_size
> 0) {
4234 ret
= binder_thread_read(proc
, thread
, bwr
.read_buffer
, bwr
.read_size
, &bwr
.read_consumed
, filp
->f_flags
& O_NONBLOCK
);
4235 trace_binder_read_done(ret
);
4236 if (!list_empty(&proc
->todo
)) {
4237 if (thread
->proc
!= proc
) {
4240 printk(KERN_ERR
"binder: "
4241 "thread->proc != proc\n");
4243 printk(KERN_ERR
"binder: thread %p\n",
4245 p
= (unsigned int *)thread
- 32;
4246 for (i
= -4; i
<= 3; i
++, p
+=8) {
4247 printk(KERN_ERR
"%p %08x %08x "
4248 "%08x %08x %08x %08x "
4250 p
, *(p
), *(p
+1), *(p
+2),
4251 *(p
+3), *(p
+4), *(p
+5),
4255 printk(KERN_ERR
"binder: thread->proc "
4256 "%p\n", thread
->proc
);
4257 p
= (unsigned int *)thread
->proc
- 32;
4258 for (i
= -4; i
<= 5; i
++, p
+=8) {
4259 printk(KERN_ERR
"%p %08x %08x "
4260 "%08x %08x %08x %08x "
4262 p
, *(p
), *(p
+1), *(p
+2),
4263 *(p
+3), *(p
+4), *(p
+5),
4267 printk(KERN_ERR
"binder: proc %p\n",
4269 p
= (unsigned int *)proc
- 32;
4270 for (i
= -4; i
<= 5; i
++, p
+=8) {
4271 printk(KERN_ERR
"%p %08x %08x "
4272 "%08x %08x %08x %08x "
4274 p
, *(p
), *(p
+1), *(p
+2),
4275 *(p
+3), *(p
+4), *(p
+5),
4280 wake_up_interruptible(&proc
->wait
);
4283 if (copy_to_user(ubuf
, &bwr
, sizeof(bwr
)))
4288 binder_debug(BINDER_DEBUG_READ_WRITE
,
4289 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4290 proc
->pid
, thread
->pid
,
4291 (u64
)bwr
.write_consumed
, (u64
)bwr
.write_size
,
4292 (u64
)bwr
.read_consumed
, (u64
)bwr
.read_size
);
4293 if (copy_to_user(ubuf
, &bwr
, sizeof(bwr
))) {
4299 case BINDER_SET_MAX_THREADS
:
4300 if (copy_from_user(&proc
->max_threads
, ubuf
, sizeof(proc
->max_threads
))) {
4305 case BINDER_SET_CONTEXT_MGR
:
4306 if (binder_context_mgr_node
!= NULL
) {
4307 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4311 ret
= security_binder_set_context_mgr(proc
->tsk
);
4314 if (uid_valid(binder_context_mgr_uid
)) {
4315 if (!uid_eq(binder_context_mgr_uid
, current
->cred
->euid
)) {
4316 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4317 from_kuid(&init_user_ns
, current
->cred
->euid
),
4318 from_kuid(&init_user_ns
, binder_context_mgr_uid
));
4323 binder_context_mgr_uid
= current
->cred
->euid
;
4324 binder_context_mgr_node
= binder_new_node(proc
, 0, 0);
4325 if (binder_context_mgr_node
== NULL
) {
4329 #ifdef BINDER_MONITOR
4330 strcpy(binder_context_mgr_node
->name
, "servicemanager");
4331 pr_debug("%d:%d set as servicemanager uid %d\n",
4332 proc
->pid
, thread
->pid
, binder_context_mgr_uid
);
4334 binder_context_mgr_node
->local_weak_refs
++;
4335 binder_context_mgr_node
->local_strong_refs
++;
4336 binder_context_mgr_node
->has_strong_ref
= 1;
4337 binder_context_mgr_node
->has_weak_ref
= 1;
4339 case BINDER_THREAD_EXIT
:
4340 binder_debug(BINDER_DEBUG_THREADS
, "%d:%d exit\n",
4341 proc
->pid
, thread
->pid
);
4342 binder_free_thread(proc
, thread
);
4345 case BINDER_VERSION
:
4346 if (size
!= sizeof(struct binder_version
)) {
4350 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION
, &((struct binder_version
*)ubuf
)->protocol_version
)) {
4362 thread
->looper
&= ~BINDER_LOOPER_STATE_NEED_RETURN
;
4363 binder_unlock(__func__
);
4364 wait_event_interruptible(binder_user_error_wait
, binder_stop_on_user_error
< 2);
4365 if (ret
&& ret
!= -ERESTARTSYS
)
4366 pr_info("%d:%d ioctl %x %lx returned %d\n", proc
->pid
, current
->pid
, cmd
, arg
, ret
);
4368 trace_binder_ioctl_done(ret
);
/*
 * VMA open callback for the binder mmap region.
 *
 * Purely informational: logs the region bounds, flags and page
 * protection at BINDER_DEBUG_OPEN_CLOSE level.  No state is taken or
 * released here; teardown is handled by binder_vma_close().
 */
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}
/*
 * VMA close callback for the binder mmap region.
 *
 * Logs the unmap, drops the cached mm pointer, and defers releasing
 * the proc's files_struct to the deferred workqueue (we may be in a
 * context where taking the binder lock directly is not safe).
 */
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	/*
	 * NOTE(review): the extraction elided original line 4390 here;
	 * upstream binder.c clears proc->vma at this point — verify
	 * against the full source.
	 */
	proc->vma_vm_mm = NULL;
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
/* VMA callbacks installed on the binder mmap area by binder_mmap(). */
static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
};
4400 static int binder_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
4403 struct vm_struct
*area
;
4404 struct binder_proc
*proc
= filp
->private_data
;
4405 const char *failure_string
;
4406 struct binder_buffer
*buffer
;
4408 if (proc
->tsk
!= current
)
4411 if ((vma
->vm_end
- vma
->vm_start
) > SZ_4M
)
4412 vma
->vm_end
= vma
->vm_start
+ SZ_4M
;
4414 binder_debug(BINDER_DEBUG_OPEN_CLOSE
,
4415 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4416 proc
->pid
, vma
->vm_start
, vma
->vm_end
,
4417 (vma
->vm_end
- vma
->vm_start
) / SZ_1K
, vma
->vm_flags
,
4418 (unsigned long)pgprot_val(vma
->vm_page_prot
));
4420 if (vma
->vm_flags
& FORBIDDEN_MMAP_FLAGS
) {
4422 failure_string
= "bad vm_flags";
4425 vma
->vm_flags
= (vma
->vm_flags
| VM_DONTCOPY
) & ~VM_MAYWRITE
;
4427 mutex_lock(&binder_mmap_lock
);
4430 failure_string
= "already mapped";
4431 goto err_already_mapped
;
4434 area
= get_vm_area(vma
->vm_end
- vma
->vm_start
, VM_IOREMAP
);
4437 failure_string
= "get_vm_area";
4438 goto err_get_vm_area_failed
;
4440 proc
->buffer
= area
->addr
;
4441 proc
->user_buffer_offset
= vma
->vm_start
- (uintptr_t)proc
->buffer
;
4442 mutex_unlock(&binder_mmap_lock
);
4444 #ifdef CONFIG_CPU_CACHE_VIPT
4445 if (cache_is_vipt_aliasing()) {
4446 while (CACHE_COLOUR((vma
->vm_start
^ (uint32_t)proc
->buffer
))) {
4447 pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n", proc
->pid
, vma
->vm_start
, vma
->vm_end
, proc
->buffer
);
4448 vma
->vm_start
+= PAGE_SIZE
;
4452 proc
->pages
= kzalloc(sizeof(proc
->pages
[0]) * ((vma
->vm_end
- vma
->vm_start
) / PAGE_SIZE
), GFP_KERNEL
);
4453 if (proc
->pages
== NULL
) {
4455 failure_string
= "alloc page array";
4456 goto err_alloc_pages_failed
;
4458 proc
->buffer_size
= vma
->vm_end
- vma
->vm_start
;
4460 vma
->vm_ops
= &binder_vm_ops
;
4461 vma
->vm_private_data
= proc
;
4463 if (binder_update_page_range(proc
, 1, proc
->buffer
, proc
->buffer
+ PAGE_SIZE
, vma
)) {
4465 failure_string
= "alloc small buf";
4466 goto err_alloc_small_buf_failed
;
4468 buffer
= proc
->buffer
;
4469 INIT_LIST_HEAD(&proc
->buffers
);
4470 list_add(&buffer
->entry
, &proc
->buffers
);
4472 binder_insert_free_buffer(proc
, buffer
);
4473 proc
->free_async_space
= proc
->buffer_size
/ 2;
4475 proc
->files
= get_files_struct(current
);
4477 proc
->vma_vm_mm
= vma
->vm_mm
;
4479 /*pr_info("binder_mmap: %d %lx-%lx maps %pK\n",
4480 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
4483 err_alloc_small_buf_failed
:
4486 err_alloc_pages_failed
:
4487 mutex_lock(&binder_mmap_lock
);
4488 vfree(proc
->buffer
);
4489 proc
->buffer
= NULL
;
4490 err_get_vm_area_failed
:
4492 mutex_unlock(&binder_mmap_lock
);
4494 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
4495 proc
->pid
, vma
->vm_start
, vma
->vm_end
, failure_string
, ret
);
4499 static int binder_open(struct inode
*nodp
, struct file
*filp
)
4501 struct binder_proc
*proc
;
4503 binder_debug(BINDER_DEBUG_OPEN_CLOSE
, "binder_open: %d:%d\n",
4504 current
->group_leader
->pid
, current
->pid
);
4506 proc
= kzalloc(sizeof(*proc
), GFP_KERNEL
);
4509 get_task_struct(current
);
4510 proc
->tsk
= current
;
4511 INIT_LIST_HEAD(&proc
->todo
);
4512 init_waitqueue_head(&proc
->wait
);
4513 proc
->default_priority
= task_nice(current
);
4514 #ifdef RT_PRIO_INHERIT
4515 proc
->default_rt_prio
= current
->rt_priority
;
4516 proc
->default_policy
= current
->policy
;
4519 binder_lock(__func__
);
4521 binder_stats_created(BINDER_STAT_PROC
);
4522 hlist_add_head(&proc
->proc_node
, &binder_procs
);
4523 proc
->pid
= current
->group_leader
->pid
;
4524 INIT_LIST_HEAD(&proc
->delivered_death
);
4525 filp
->private_data
= proc
;
4527 binder_unlock(__func__
);
4529 if (binder_debugfs_dir_entry_proc
) {
4531 snprintf(strbuf
, sizeof(strbuf
), "%u", proc
->pid
);
4532 proc
->debugfs_entry
= debugfs_create_file(strbuf
, S_IRUGO
,
4533 binder_debugfs_dir_entry_proc
, proc
, &binder_proc_fops
);
4539 static int binder_flush(struct file
*filp
, fl_owner_t id
)
4541 struct binder_proc
*proc
= filp
->private_data
;
4543 binder_defer_work(proc
, BINDER_DEFERRED_FLUSH
);
4548 static void binder_deferred_flush(struct binder_proc
*proc
)
4552 for (n
= rb_first(&proc
->threads
); n
!= NULL
; n
= rb_next(n
)) {
4553 struct binder_thread
*thread
= rb_entry(n
, struct binder_thread
, rb_node
);
4554 thread
->looper
|= BINDER_LOOPER_STATE_NEED_RETURN
;
4555 if (thread
->looper
& BINDER_LOOPER_STATE_WAITING
) {
4556 wake_up_interruptible(&thread
->wait
);
4560 wake_up_interruptible_all(&proc
->wait
);
4562 #ifdef MTK_BINDER_DEBUG
4564 pr_debug("binder_flush: %d woke %d threads\n", proc
->pid
,
4567 binder_debug(BINDER_DEBUG_OPEN_CLOSE
,
4568 "binder_flush: %d woke %d threads\n", proc
->pid
,
4573 static int binder_release(struct inode
*nodp
, struct file
*filp
)
4575 struct binder_proc
*proc
= filp
->private_data
;
4576 debugfs_remove(proc
->debugfs_entry
);
4577 binder_defer_work(proc
, BINDER_DEFERRED_RELEASE
);
4582 static int binder_node_release(struct binder_node
*node
, int refs
)
4584 struct binder_ref
*ref
;
4586 #ifdef BINDER_MONITOR
4589 #if defined(MTK_DEATH_NOTIFY_MONITOR) || defined(MTK_BINDER_DEBUG)
4590 int dead_pid
= node
->proc
? node
->proc
->pid
: 0;
4591 char dead_pname
[TASK_COMM_LEN
] = "";
4592 if(node
->proc
&& node
->proc
->tsk
)
4593 strcpy(dead_pname
, node
->proc
->tsk
->comm
);
4596 list_del_init(&node
->work
.entry
);
4597 binder_release_work(&node
->async_todo
);
4599 if (hlist_empty(&node
->refs
)) {
4601 binder_stats_deleted(BINDER_STAT_NODE
);
4607 node
->local_strong_refs
= 0;
4608 node
->local_weak_refs
= 0;
4609 hlist_add_head(&node
->dead_node
, &binder_dead_nodes
);
4611 hlist_for_each_entry(ref
, &node
->refs
, node_entry
) {
4617 #ifdef MTK_DEATH_NOTIFY_MONITOR
4618 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION
,
4619 "[DN #3]binder: %d:(%s) cookie 0x%016llx\n",
4621 #ifdef BINDER_MONITOR
4626 (u64
)ref
->death
->cookie
);
4628 #ifdef BINDER_MONITOR
4630 ref
->proc
->pid
== system_server_pid
)
4635 if (list_empty(&ref
->death
->work
.entry
)) {
4636 ref
->death
->work
.type
= BINDER_WORK_DEAD_BINDER
;
4637 list_add_tail(&ref
->death
->work
.entry
,
4639 wake_up_interruptible(&ref
->proc
->wait
);
4644 #if defined(BINDER_MONITOR) && defined(MTK_BINDER_DEBUG)
4646 pr_debug("%d:%s node %d:%s exits with %d:system_server DeathNotify\n",
4647 dead_pid
, dead_pname
,
4648 node
->debug_id
, node
->name
,
4652 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
4653 "node %d now dead, refs %d, death %d\n",
4654 node
->debug_id
, refs
, death
);
4659 static void binder_deferred_release(struct binder_proc
*proc
)
4661 struct binder_transaction
*t
;
4663 int threads
, nodes
, incoming_refs
, outgoing_refs
, buffers
,
4664 active_transactions
, page_count
;
4667 BUG_ON(proc
->files
);
4669 hlist_del(&proc
->proc_node
);
4671 if (binder_context_mgr_node
&& binder_context_mgr_node
->proc
== proc
) {
4672 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
4673 "%s: %d context_mgr_node gone\n",
4674 __func__
, proc
->pid
);
4675 binder_context_mgr_node
= NULL
;
4679 active_transactions
= 0;
4680 while ((n
= rb_first(&proc
->threads
))) {
4681 struct binder_thread
*thread
;
4683 thread
= rb_entry(n
, struct binder_thread
, rb_node
);
4685 active_transactions
+= binder_free_thread(proc
, thread
);
4690 while ((n
= rb_first(&proc
->nodes
))) {
4691 struct binder_node
*node
;
4693 node
= rb_entry(n
, struct binder_node
, rb_node
);
4695 rb_erase(&node
->rb_node
, &proc
->nodes
);
4696 incoming_refs
= binder_node_release(node
, incoming_refs
);
4700 while ((n
= rb_first(&proc
->refs_by_desc
))) {
4701 struct binder_ref
*ref
;
4703 ref
= rb_entry(n
, struct binder_ref
, rb_node_desc
);
4705 binder_delete_ref(ref
);
4708 binder_release_work(&proc
->todo
);
4709 binder_release_work(&proc
->delivered_death
);
4712 while ((n
= rb_first(&proc
->allocated_buffers
))) {
4713 struct binder_buffer
*buffer
;
4715 buffer
= rb_entry(n
, struct binder_buffer
, rb_node
);
4717 t
= buffer
->transaction
;
4720 buffer
->transaction
= NULL
;
4721 pr_err("release proc %d, transaction %d, not freed\n",
4722 proc
->pid
, t
->debug_id
);
4724 #ifdef MTK_BINDER_DEBUG
4725 pr_err("%d: %p from %d:%d to %d:%d code %x flags %x "
4727 #ifdef BINDER_MONITOR
4732 t
->from
? t
->from
->proc
->pid
: 0,
4733 t
->from
? t
->from
->pid
: 0,
4734 t
->to_proc
? t
->to_proc
->pid
: 0,
4735 t
->to_thread
? t
->to_thread
->pid
: 0,
4736 t
->code
, t
->flags
, t
->priority
, t
->need_reply
4737 #ifdef BINDER_MONITOR
4738 , (unsigned long)t
->timestamp
.tv_sec
,
4739 (t
->timestamp
.tv_nsec
/ NSEC_PER_USEC
)
4745 binder_free_buf(proc
, buffer
);
4749 binder_stats_deleted(BINDER_STAT_PROC
);
4755 for (i
= 0; i
< proc
->buffer_size
/ PAGE_SIZE
; i
++) {
4758 if (!proc
->pages
[i
])
4761 page_addr
= proc
->buffer
+ i
* PAGE_SIZE
;
4762 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
4763 "%s: %d: page %d at %pK not freed\n",
4764 __func__
, proc
->pid
, i
, page_addr
);
4765 unmap_kernel_range((unsigned long)page_addr
, PAGE_SIZE
);
4766 __free_page(proc
->pages
[i
]);
4768 #ifdef MTK_BINDER_PAGE_USED_RECORD
4769 if(binder_page_used
> 0)
4771 if (proc
->page_used
> 0)
4776 vfree(proc
->buffer
);
4779 put_task_struct(proc
->tsk
);
4781 binder_debug(BINDER_DEBUG_OPEN_CLOSE
,
4782 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
4783 __func__
, proc
->pid
, threads
, nodes
, incoming_refs
,
4784 outgoing_refs
, active_transactions
, buffers
, page_count
);
4786 #ifdef BINDER_PERF_EVAL
4789 for (i
= 0; i
< BC_STATS_NR
; i
++) {
4790 if (proc
->bc_stats
[i
] != NULL
) {
4791 kfree(proc
->bc_stats
[i
]);
4792 proc
->bc_stats
[i
] = NULL
;
4793 pr_debug("binder_release: release %d bc_stats[%d]\n", proc
->pid
, i
);
4801 static void binder_deferred_func(struct work_struct
*work
)
4803 struct binder_proc
*proc
;
4804 struct files_struct
*files
;
4808 binder_lock(__func__
);
4809 mutex_lock(&binder_deferred_lock
);
4810 if (!hlist_empty(&binder_deferred_list
)) {
4811 proc
= hlist_entry(binder_deferred_list
.first
,
4812 struct binder_proc
, deferred_work_node
);
4813 hlist_del_init(&proc
->deferred_work_node
);
4814 defer
= proc
->deferred_work
;
4815 proc
->deferred_work
= 0;
4820 mutex_unlock(&binder_deferred_lock
);
4823 if (defer
& BINDER_DEFERRED_PUT_FILES
) {
4824 files
= proc
->files
;
4829 if (defer
& BINDER_DEFERRED_FLUSH
)
4830 binder_deferred_flush(proc
);
4832 if (defer
& BINDER_DEFERRED_RELEASE
)
4833 binder_deferred_release(proc
); /* frees proc */
4835 binder_unlock(__func__
);
4837 put_files_struct(files
);
4841 static DECLARE_WORK(binder_deferred_work
, binder_deferred_func
);
4844 binder_defer_work(struct binder_proc
*proc
, enum binder_deferred_state defer
)
4846 mutex_lock(&binder_deferred_lock
);
4847 proc
->deferred_work
|= defer
;
4848 if (hlist_unhashed(&proc
->deferred_work_node
)) {
4849 hlist_add_head(&proc
->deferred_work_node
,
4850 &binder_deferred_list
);
4851 queue_work(binder_deferred_workqueue
, &binder_deferred_work
);
4853 mutex_unlock(&binder_deferred_lock
);
4856 static void print_binder_transaction(struct seq_file
*m
, const char *prefix
,
4857 struct binder_transaction
*t
)
4859 #ifdef BINDER_MONITOR
4861 rtc_time_to_tm(t
->tv
.tv_sec
, &tm
);
4864 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4865 prefix
, t
->debug_id
, t
,
4866 t
->from
? t
->from
->proc
->pid
: 0,
4867 t
->from
? t
->from
->pid
: 0,
4868 t
->to_proc
? t
->to_proc
->pid
: 0,
4869 t
->to_thread
? t
->to_thread
->pid
: 0,
4870 t
->code
, t
->flags
, t
->priority
, t
->need_reply
);
4871 if (t
->buffer
== NULL
) {
4872 #ifdef BINDER_MONITOR
4873 seq_printf(m
, " start %lu.%06lu android %d-%02d-%02d %02d:%02d:%02d.%03lu"
4875 (unsigned long)t
->timestamp
.tv_sec
,
4876 (t
->timestamp
.tv_nsec
/ NSEC_PER_USEC
),
4877 (tm
.tm_year
+ 1900), (tm
.tm_mon
+ 1), tm
.tm_mday
,
4878 tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
,
4879 (unsigned long)(t
->tv
.tv_usec
/ USEC_PER_MSEC
));
4881 seq_puts(m
, " buffer free\n");
4885 if (t
->buffer
->target_node
)
4886 seq_printf(m
, " node %d",
4887 t
->buffer
->target_node
->debug_id
);
4888 #ifdef BINDER_MONITOR
4889 seq_printf(m
, " size %zd:%zd data %pK auf %d start %lu.%06lu"
4890 " android %d-%02d-%02d %02d:%02d:%02d.%03lu\n",
4891 t
->buffer
->data_size
, t
->buffer
->offsets_size
,
4892 t
->buffer
->data
, t
->buffer
->allow_user_free
,
4893 (unsigned long)t
->timestamp
.tv_sec
,
4894 (t
->timestamp
.tv_nsec
/ NSEC_PER_USEC
),
4895 (tm
.tm_year
+ 1900), (tm
.tm_mon
+ 1), tm
.tm_mday
,
4896 tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
,
4897 (unsigned long)(t
->tv
.tv_usec
/ USEC_PER_MSEC
));
4899 seq_printf(m
, " size %zd:%zd data %p\n",
4900 t
->buffer
->data_size
, t
->buffer
->offsets_size
,
4905 static void print_binder_buffer(struct seq_file
*m
, const char *prefix
,
4906 struct binder_buffer
*buffer
)
4908 seq_printf(m
, "%s %d: %pK size %zd:%zd %s\n",
4909 prefix
, buffer
->debug_id
, buffer
->data
,
4910 buffer
->data_size
, buffer
->offsets_size
,
4911 buffer
->transaction
? "active" : "delivered");
4914 static void print_binder_work(struct seq_file
*m
, const char *prefix
,
4915 const char *transaction_prefix
,
4916 struct binder_work
*w
)
4918 struct binder_node
*node
;
4919 struct binder_transaction
*t
;
4922 case BINDER_WORK_TRANSACTION
:
4923 t
= container_of(w
, struct binder_transaction
, work
);
4924 print_binder_transaction(m
, transaction_prefix
, t
);
4926 case BINDER_WORK_TRANSACTION_COMPLETE
:
4927 seq_printf(m
, "%stransaction complete\n", prefix
);
4929 case BINDER_WORK_NODE
:
4930 node
= container_of(w
, struct binder_node
, work
);
4931 seq_printf(m
, "%snode work %d: u%016llx c%016llx\n",
4932 prefix
, node
->debug_id
,
4933 (u64
)node
->ptr
, (u64
)node
->cookie
);
4935 case BINDER_WORK_DEAD_BINDER
:
4936 seq_printf(m
, "%shas dead binder\n", prefix
);
4938 case BINDER_WORK_DEAD_BINDER_AND_CLEAR
:
4939 seq_printf(m
, "%shas cleared dead binder\n", prefix
);
4941 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION
:
4942 seq_printf(m
, "%shas cleared death notification\n", prefix
);
4945 seq_printf(m
, "%sunknown work: type %d\n", prefix
, w
->type
);
4950 static void print_binder_thread(struct seq_file
*m
,
4951 struct binder_thread
*thread
,
4954 struct binder_transaction
*t
;
4955 struct binder_work
*w
;
4956 size_t start_pos
= m
->count
;
4959 seq_printf(m
, " thread %d: l %02x\n", thread
->pid
, thread
->looper
);
4960 header_pos
= m
->count
;
4961 t
= thread
->transaction_stack
;
4963 if (t
->from
== thread
) {
4964 print_binder_transaction(m
,
4965 " outgoing transaction", t
);
4967 } else if (t
->to_thread
== thread
) {
4968 print_binder_transaction(m
,
4969 " incoming transaction", t
);
4972 print_binder_transaction(m
, " bad transaction", t
);
4976 list_for_each_entry(w
, &thread
->todo
, entry
) {
4977 print_binder_work(m
, " ", " pending transaction", w
);
4979 if (!print_always
&& m
->count
== header_pos
)
4980 m
->count
= start_pos
;
4983 static void print_binder_node(struct seq_file
*m
, struct binder_node
*node
)
4985 struct binder_ref
*ref
;
4986 struct binder_work
*w
;
4990 hlist_for_each_entry(ref
, &node
->refs
, node_entry
)
4993 #ifdef BINDER_MONITOR
4994 seq_printf(m
, " node %d (%s): u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
4995 node
->debug_id
, node
->name
, (u64
)node
->ptr
, (u64
)node
->cookie
,
4996 node
->has_strong_ref
, node
->has_weak_ref
,
4997 node
->local_strong_refs
, node
->local_weak_refs
,
4998 node
->internal_strong_refs
, count
);
5000 seq_printf(m
, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
5001 node
->debug_id
, (u64
)node
->ptr
, (u64
)node
->cookie
,
5002 node
->has_strong_ref
, node
->has_weak_ref
,
5003 node
->local_strong_refs
, node
->local_weak_refs
,
5004 node
->internal_strong_refs
, count
);
5007 seq_puts(m
, " proc");
5008 hlist_for_each_entry(ref
, &node
->refs
, node_entry
)
5009 seq_printf(m
, " %d", ref
->proc
->pid
);
5012 #ifdef MTK_BINDER_DEBUG
5013 if (node
->async_pid
)
5014 seq_printf(m
, " pending async transaction on %d:\n", node
->async_pid
);
5016 list_for_each_entry(w
, &node
->async_todo
, entry
)
5017 print_binder_work(m
, " ",
5018 " pending async transaction", w
);
5021 static void print_binder_ref(struct seq_file
*m
, struct binder_ref
*ref
)
5023 seq_printf(m
, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5024 ref
->debug_id
, ref
->desc
, ref
->node
->proc
? "" : "dead ",
5025 ref
->node
->debug_id
, ref
->strong
, ref
->weak
, ref
->death
);
5028 static void print_binder_proc(struct seq_file
*m
,
5029 struct binder_proc
*proc
, int print_all
)
5031 struct binder_work
*w
;
5033 size_t start_pos
= m
->count
;
5036 seq_printf(m
, "proc %d\n", proc
->pid
);
5037 header_pos
= m
->count
;
5039 for (n
= rb_first(&proc
->threads
); n
!= NULL
; n
= rb_next(n
))
5040 print_binder_thread(m
, rb_entry(n
, struct binder_thread
,
5041 rb_node
), print_all
);
5042 for (n
= rb_first(&proc
->nodes
); n
!= NULL
; n
= rb_next(n
)) {
5043 struct binder_node
*node
= rb_entry(n
, struct binder_node
,
5045 if (print_all
|| node
->has_async_transaction
)
5046 print_binder_node(m
, node
);
5049 for (n
= rb_first(&proc
->refs_by_desc
);
5052 print_binder_ref(m
, rb_entry(n
, struct binder_ref
,
5055 for (n
= rb_first(&proc
->allocated_buffers
); n
!= NULL
; n
= rb_next(n
))
5056 print_binder_buffer(m
, " buffer",
5057 rb_entry(n
, struct binder_buffer
, rb_node
));
5058 list_for_each_entry(w
, &proc
->todo
, entry
)
5059 print_binder_work(m
, " ", " pending transaction", w
);
5060 list_for_each_entry(w
, &proc
->delivered_death
, entry
) {
5061 seq_puts(m
, " has delivered dead binder\n");
5064 if (!print_all
&& m
->count
== header_pos
)
5065 m
->count
= start_pos
;
5068 static const char * const binder_return_strings
[] = {
5073 "BR_ACQUIRE_RESULT",
5075 "BR_TRANSACTION_COMPLETE",
5080 "BR_ATTEMPT_ACQUIRE",
5085 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5089 static const char * const binder_command_strings
[] = {
5092 "BC_ACQUIRE_RESULT",
5100 "BC_ATTEMPT_ACQUIRE",
5101 "BC_REGISTER_LOOPER",
5104 "BC_REQUEST_DEATH_NOTIFICATION",
5105 "BC_CLEAR_DEATH_NOTIFICATION",
5106 "BC_DEAD_BINDER_DONE"
5109 static const char * const binder_objstat_strings
[] = {
5116 "transaction_complete"
5119 static void print_binder_stats(struct seq_file
*m
, const char *prefix
,
5120 struct binder_stats
*stats
)
5124 BUILD_BUG_ON(ARRAY_SIZE(stats
->bc
) !=
5125 ARRAY_SIZE(binder_command_strings
));
5126 for (i
= 0; i
< ARRAY_SIZE(stats
->bc
); i
++) {
5128 seq_printf(m
, "%s%s: %d\n", prefix
,
5129 binder_command_strings
[i
], stats
->bc
[i
]);
5132 BUILD_BUG_ON(ARRAY_SIZE(stats
->br
) !=
5133 ARRAY_SIZE(binder_return_strings
));
5134 for (i
= 0; i
< ARRAY_SIZE(stats
->br
); i
++) {
5136 seq_printf(m
, "%s%s: %d\n", prefix
,
5137 binder_return_strings
[i
], stats
->br
[i
]);
5140 BUILD_BUG_ON(ARRAY_SIZE(stats
->obj_created
) !=
5141 ARRAY_SIZE(binder_objstat_strings
));
5142 BUILD_BUG_ON(ARRAY_SIZE(stats
->obj_created
) !=
5143 ARRAY_SIZE(stats
->obj_deleted
));
5144 for (i
= 0; i
< ARRAY_SIZE(stats
->obj_created
); i
++) {
5145 if (stats
->obj_created
[i
] || stats
->obj_deleted
[i
])
5146 seq_printf(m
, "%s%s: active %d total %d\n", prefix
,
5147 binder_objstat_strings
[i
],
5148 stats
->obj_created
[i
] - stats
->obj_deleted
[i
],
5149 stats
->obj_created
[i
]);
5153 #ifdef BINDER_PERF_EVAL
5154 static void print_binder_timeout_stats(struct seq_file
*m
, const char *prefix
,
5155 struct binder_timeout_stats
*to_stats
)
5159 BUILD_BUG_ON(ARRAY_SIZE(to_stats
->bto
) !=
5160 (ARRAY_SIZE(binder_wait_on_str
) - 1));
5161 for (i
= 0; i
< ARRAY_SIZE(to_stats
->bto
); i
++)
5163 if (to_stats
->bto
[i
])
5164 seq_printf(m
, "%s%s: %lu\n", prefix
,
5165 binder_wait_on_str
[i
+ 1], to_stats
->bto
[i
]);
5167 for (i
= 0, j
= 0; i
< ARRAY_SIZE(to_stats
->read_t
); i
++)
5169 struct timespec sub_t
= to_stats
->read_t
[i
];
5174 seq_printf(m
, "%s%s: timeout total time list:\n",
5175 prefix
, binder_wait_on_str
[WAIT_ON_READ
]);
5177 seq_printf(m
, " %s%u.%03ld", prefix
,
5178 (unsigned)sub_t
.tv_sec
, (sub_t
.tv_nsec
/ NSEC_PER_MSEC
));
5182 seq_printf(m
, "\n");
5183 for (i
= 0, j
= 0; i
< ARRAY_SIZE(to_stats
->exec_t
); i
++)
5185 struct timespec sub_t
= to_stats
->exec_t
[i
];
5190 seq_printf(m
, "%s%s: timeout total time list:\n",
5191 prefix
, binder_wait_on_str
[WAIT_ON_EXEC
]);
5193 seq_printf(m
, " %s%u.%03ld", prefix
,
5194 (unsigned)sub_t
.tv_sec
, (sub_t
.tv_nsec
/ NSEC_PER_MSEC
));
5198 seq_printf(m
, "\n");
5199 for (i
= 0, j
= 0; i
< ARRAY_SIZE(to_stats
->rrply_t
); i
++)
5201 struct timespec sub_t
= to_stats
->rrply_t
[i
];
5206 seq_printf(m
, "%s%s: timeout total time list:\n",
5207 prefix
, binder_wait_on_str
[WAIT_ON_REPLY_READ
]);
5209 seq_printf(m
, " %s%u.%03ld", prefix
,
5210 (unsigned)sub_t
.tv_sec
, (sub_t
.tv_nsec
/ NSEC_PER_MSEC
));
5214 seq_printf(m
, "\n");
5217 static void print_binder_proc_perf_timeout_stats(struct seq_file
*m
,
5218 struct binder_proc
*proc
)
5223 int proc_to_counter
= 0;
5224 for (i
= 0; i
< ARRAY_SIZE(proc
->to_stats
.bto
); i
++)
5226 proc_to_counter
+= proc
->to_stats
.bto
[i
];
5227 if (proc_to_counter
> 0)
5230 if (proc_to_counter
> 0)
5232 seq_printf(m
, "proc %d(%s) timeout stats:\n",
5233 proc
->pid
, (proc
->tsk
!= NULL
) ? proc
->tsk
->comm
: "");
5234 print_binder_timeout_stats(m
, " ", &proc
->to_stats
);
5236 seq_printf(m
, " threads timeout stats:\n");
5237 for (n
= rb_first(&proc
->threads
); n
!= NULL
; n
= rb_next(n
))
5239 int thread_to_counter
= 0;
5240 struct binder_thread
*thread
= rb_entry(n
, struct binder_thread
, rb_node
);
5241 for (i
= 0; i
< ARRAY_SIZE(thread
->to_stats
.bto
); i
++)
5243 thread_to_counter
+= thread
->to_stats
.bto
[i
];
5244 if (thread_to_counter
> 0)
5247 if (thread_to_counter
> 0)
5249 seq_printf(m
, " thread: %d\n", thread
->pid
);
5250 print_binder_timeout_stats(m
, " ", &thread
->to_stats
);
5259 static void print_binder_proc_stats(struct seq_file
*m
,
5260 struct binder_proc
*proc
)
5262 struct binder_work
*w
;
5264 int count
, strong
, weak
;
5266 seq_printf(m
, "proc %d\n", proc
->pid
);
5268 for (n
= rb_first(&proc
->threads
); n
!= NULL
; n
= rb_next(n
))
5270 seq_printf(m
, " threads: %d\n", count
);
5271 seq_printf(m
, " requested threads: %d+%d/%d\n"
5272 " ready threads %d\n"
5273 " free async space %zd\n", proc
->requested_threads
,
5274 proc
->requested_threads_started
, proc
->max_threads
,
5275 proc
->ready_threads
, proc
->free_async_space
);
5277 for (n
= rb_first(&proc
->nodes
); n
!= NULL
; n
= rb_next(n
))
5279 seq_printf(m
, " nodes: %d\n", count
);
5283 for (n
= rb_first(&proc
->refs_by_desc
); n
!= NULL
; n
= rb_next(n
)) {
5284 struct binder_ref
*ref
= rb_entry(n
, struct binder_ref
,
5287 strong
+= ref
->strong
;
5290 seq_printf(m
, " refs: %d s %d w %d\n", count
, strong
, weak
);
5293 for (n
= rb_first(&proc
->allocated_buffers
); n
!= NULL
; n
= rb_next(n
))
5295 seq_printf(m
, " buffers: %d\n", count
);
5298 list_for_each_entry(w
, &proc
->todo
, entry
) {
5300 case BINDER_WORK_TRANSACTION
:
5307 seq_printf(m
, " pending transactions: %d\n", count
);
5309 print_binder_stats(m
, " ", &proc
->stats
);
5313 static int binder_state_show(struct seq_file
*m
, void *unused
)
5315 struct binder_proc
*proc
;
5316 struct binder_node
*node
;
5317 int do_lock
= !binder_debug_no_lock
;
5320 binder_lock(__func__
);
5322 seq_puts(m
, "binder state:\n");
5324 if (!hlist_empty(&binder_dead_nodes
))
5325 seq_puts(m
, "dead nodes:\n");
5326 hlist_for_each_entry(node
, &binder_dead_nodes
, dead_node
)
5327 print_binder_node(m
, node
);
5329 hlist_for_each_entry(proc
, &binder_procs
, proc_node
)
5330 print_binder_proc(m
, proc
, 1);
5332 binder_unlock(__func__
);
5336 static int binder_stats_show(struct seq_file
*m
, void *unused
)
5338 struct binder_proc
*proc
;
5339 int do_lock
= !binder_debug_no_lock
;
5342 binder_lock(__func__
);
5344 seq_puts(m
, "binder stats:\n");
5346 print_binder_stats(m
, "", &binder_stats
);
5348 hlist_for_each_entry(proc
, &binder_procs
, proc_node
)
5349 print_binder_proc_stats(m
, proc
);
5351 binder_unlock(__func__
);
5355 #ifdef BINDER_PERF_EVAL
5356 static int binder_perf_stats_show(struct seq_file
*m
, void *unused
)
5358 struct binder_proc
*proc
;
5359 int do_lock
= !binder_debug_no_lock
;
5362 binder_lock(__func__
);
5364 seq_puts(m
, "binder stats:\n");
5365 //print_binder_stats(m, "", &binder_stats);
5367 if (binder_perf_evalue
& BINDER_PERF_SEND_COUNTER
)
5369 seq_puts(m
, "binder send transaction stats:\n");
5370 hlist_for_each_entry(proc
, &binder_procs
, proc_node
)
5375 seq_printf(m
, "proc %d(%s): %d\n", proc
->pid
,
5376 proc
->tsk
? proc
->tsk
->comm
: "",
5378 for (i
= 0; i
< BC_STATS_NR
; i
++)
5380 if (proc
->bc_stats
[i
] == NULL
)
5382 if (!strcmp(proc
->bc_stats
[i
]->service
, "") &&
5383 (0 == proc
->bc_stats
[i
]->code
[0]))
5385 seq_printf(m
, " service %s\n", proc
->bc_stats
[i
]->service
);
5386 for (j
= 0; j
< BC_CODE_NR
; j
++)
5388 if (0 == proc
->bc_stats
[i
]->code_num
[j
])
5390 seq_printf(m
, " dex %u: %u\n", proc
->bc_stats
[i
]->code
[j
], proc
->bc_stats
[i
]->code_num
[j
]);
5399 if (binder_perf_evalue
& BINDER_PERF_TIMEOUT_COUNTER
)
5401 seq_puts(m
, "binder transaction time out stats:\n");
5402 hlist_for_each_entry(proc
, &binder_procs
, proc_node
)
5403 print_binder_proc_perf_timeout_stats(m
,proc
);
5406 binder_unlock(__func__
);
5410 static int binder_transactions_show(struct seq_file
*m
, void *unused
)
5412 struct binder_proc
*proc
;
5413 int do_lock
= !binder_debug_no_lock
;
5416 binder_lock(__func__
);
5418 seq_puts(m
, "binder transactions:\n");
5419 hlist_for_each_entry(proc
, &binder_procs
, proc_node
)
5420 print_binder_proc(m
, proc
, 0);
5422 binder_unlock(__func__
);
5426 static int binder_proc_show(struct seq_file
*m
, void *unused
)
5428 struct binder_proc
*proc
= m
->private;
5429 int do_lock
= !binder_debug_no_lock
;
5430 #ifdef MTK_BINDER_DEBUG
5431 struct binder_proc
*tmp_proc
;
5436 binder_lock(__func__
);
5437 seq_puts(m
, "binder proc state:\n");
5438 #ifdef MTK_BINDER_DEBUG
5439 hlist_for_each_entry(tmp_proc
, &binder_procs
, proc_node
)
5441 if (proc
== tmp_proc
)
5449 print_binder_proc(m
, proc
, 1);
5450 #ifdef MTK_BINDER_DEBUG
5452 pr_debug("show proc addr 0x%p exit\n", proc
);
5455 binder_unlock(__func__
);
5459 static void print_binder_transaction_log_entry(struct seq_file
*m
,
5460 struct binder_transaction_log_entry
*e
)
5462 #ifdef BINDER_MONITOR
5465 struct timespec sub_read_t
, sub_total_t
;
5466 unsigned long read_ms
=0;
5467 unsigned long total_ms
= 0;
5469 memset(&sub_read_t
, 0, sizeof(sub_read_t
));
5470 memset(&sub_total_t
, 0, sizeof(sub_total_t
));
5473 sprintf(tmp
, " (fd %d)", e
->fd
);
5477 if ((e
->call_type
== 0) && timespec_valid_strict(&e
->endstamp
) &&
5478 (timespec_compare(&e
->endstamp
, &e
->timestamp
) > 0)) {
5479 sub_total_t
= timespec_sub(e
->endstamp
, e
->timestamp
);
5480 total_ms
= ((unsigned long)sub_total_t
.tv_sec
)*MSEC_PER_SEC
+
5481 sub_total_t
.tv_nsec
/ NSEC_PER_MSEC
;
5483 if ((e
->call_type
== 1) && timespec_valid_strict(&e
->readstamp
) &&
5484 (timespec_compare(&e
->readstamp
, &e
->timestamp
) > 0)) {
5485 sub_read_t
= timespec_sub(e
->readstamp
, e
->timestamp
);
5486 read_ms
= ((unsigned long)sub_read_t
.tv_sec
)*MSEC_PER_SEC
+
5487 sub_read_t
.tv_nsec
/ NSEC_PER_MSEC
;
5490 rtc_time_to_tm(e
->tv
.tv_sec
, &tm
);
5492 "%d: %s from %d:%d to %d:%d node %d handle %d (%s) size %d:%d%s "
5493 "dex %u start %lu.%06lu android %d-%02d-%02d %02d:%02d:%02d.%03lu read %lu.%06lu %s %lu.%06lu total %lu.%06lums\n",
5494 e
->debug_id
, (e
->call_type
== 2) ? "reply" :
5495 ((e
->call_type
== 1) ? "async" : "call "),
5496 e
->from_proc
, e
->from_thread
, e
->to_proc
, e
->to_thread
,
5497 e
->to_node
, e
->target_handle
, e
->service
,
5498 e
->data_size
, e
->offsets_size
, tmp
, e
->code
,
5499 (unsigned long)e
->timestamp
.tv_sec
,
5500 (e
->timestamp
.tv_nsec
/ NSEC_PER_USEC
),
5501 (tm
.tm_year
+ 1900), (tm
.tm_mon
+ 1), tm
.tm_mday
,
5502 tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
,
5503 (unsigned long)(e
->tv
.tv_usec
/ USEC_PER_MSEC
),
5504 (unsigned long)e
->readstamp
.tv_sec
,
5505 (e
->readstamp
.tv_nsec
/ NSEC_PER_USEC
),
5506 (e
->call_type
== 0) ? "end" : "",
5507 (e
->call_type
== 0) ? ((unsigned long)e
->endstamp
.tv_sec
) : 0,
5508 (e
->call_type
== 0) ? (e
->endstamp
.tv_nsec
/ NSEC_PER_USEC
) : 0,
5509 (e
->call_type
== 0) ? total_ms
: read_ms
,
5510 (e
->call_type
== 0) ? (sub_total_t
.tv_nsec
% NSEC_PER_MSEC
) :
5511 (sub_read_t
.tv_nsec
% NSEC_PER_MSEC
));
5514 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
5515 e
->debug_id
, (e
->call_type
== 2) ? "reply" :
5516 ((e
->call_type
== 1) ? "async" : "call "), e
->from_proc
,
5517 e
->from_thread
, e
->to_proc
, e
->to_thread
, e
->to_node
,
5518 e
->target_handle
, e
->data_size
, e
->offsets_size
);
5522 #ifdef BINDER_MONITOR
5523 static void log_resume_func(struct work_struct
*w
)
5525 pr_debug("transaction log is self resumed\n");
5529 static DECLARE_DELAYED_WORK(log_resume_work
, log_resume_func
);
5531 static int binder_transaction_log_show(struct seq_file
*m
, void *unused
)
5533 struct binder_transaction_log
*log
= m
->private;
5540 for (i
= log
->next
; i
< log
->size
; i
++)
5541 print_binder_transaction_log_entry(m
, &log
->entry
[i
]);
5543 for (i
= 0; i
< log
->next
; i
++)
5544 print_binder_transaction_log_entry(m
, &log
->entry
[i
]);
5546 if (log_disable
& BINDER_LOG_RESUME
) {
5547 pr_debug("%d (%s) read transaction log and "
5549 task_pid_nr(current
), current
->comm
);
5550 cancel_delayed_work(&log_resume_work
);
5557 static int binder_transaction_log_show(struct seq_file
*m
, void *unused
)
5559 struct binder_transaction_log
*log
= m
->private;
5563 for (i
= log
->next
; i
< ARRAY_SIZE(log
->entry
); i
++)
5564 print_binder_transaction_log_entry(m
, &log
->entry
[i
]);
5566 for (i
= 0; i
< log
->next
; i
++)
5567 print_binder_transaction_log_entry(m
, &log
->entry
[i
]);
5572 static const struct file_operations binder_fops
= {
5573 .owner
= THIS_MODULE
,
5574 .poll
= binder_poll
,
5575 .unlocked_ioctl
= binder_ioctl
,
5576 .compat_ioctl
= binder_ioctl
,
5577 .mmap
= binder_mmap
,
5578 .open
= binder_open
,
5579 .flush
= binder_flush
,
5580 .release
= binder_release
,
5583 static struct miscdevice binder_miscdev
= {
5584 .minor
= MISC_DYNAMIC_MINOR
,
5586 .fops
= &binder_fops
5589 #ifdef BINDER_PERF_EVAL
5590 static void binder_perf_timeout_zero_init(struct binder_timeout_stats
*to_stats
)
5592 memset(&to_stats
->bto
[0], 0, sizeof(to_stats
->bto
));
5593 memset(&to_stats
->read_t
[0], 0, sizeof(to_stats
->read_t
));
5594 memset(&to_stats
->exec_t
[0], 0, sizeof(to_stats
->exec_t
));
5595 memset(&to_stats
->rrply_t
[0], 0, sizeof(to_stats
->rrply_t
));
5599 static void binder_perf_stats_timeout_clean(void)
5601 struct binder_proc
*proc
;
5603 int do_lock
= !binder_debug_no_lock
;
5606 binder_lock(__func__
);
5607 hlist_for_each_entry(proc
, &binder_procs
, proc_node
)
5609 binder_perf_timeout_zero_init(&proc
->to_stats
);
5610 for (n
= rb_first(&proc
->threads
); n
!= NULL
; n
= rb_next(n
))
5612 struct binder_thread
*thread
= rb_entry(n
, struct binder_thread
, rb_node
);
5613 binder_perf_timeout_zero_init(&thread
->to_stats
);
5617 binder_unlock(__func__
);
5620 static void binder_perf_stats_bct_clean(void)
5622 struct binder_proc
*proc
;
5623 int do_lock
= !binder_debug_no_lock
;
5626 binder_lock(__func__
);
5627 hlist_for_each_entry(proc
, &binder_procs
, proc_node
)
5631 for (i
= 0; i
< BC_STATS_NR
; i
++)
5633 if (proc
->bc_stats
[i
] != NULL
) {
5634 kfree(proc
->bc_stats
[i
]);
5635 proc
->bc_stats
[i
] = NULL
;
5640 binder_unlock(__func__
);
5643 static int binder_perf_evalue_show(struct seq_file
*m
, void *unused
)
5645 seq_printf(m
, " Current binder performance evalue is: %u\n", binder_perf_evalue
);
5649 static ssize_t
binder_perf_evalue_write(struct file
*filp
, const char *ubuf
,
5650 size_t cnt
, loff_t
*data
)
5653 size_t copy_size
= cnt
;
5657 if (cnt
>= sizeof(buf
))
5659 buf
[copy_size
] = '\0';
5661 if (copy_from_user(&buf
, ubuf
, copy_size
))
5664 printk("[Binder] Set binder perf evalue:%u -> ", binder_perf_evalue
);
5665 ret
= strict_strtoul(buf
, 10, &val
);
5667 printk("Null\ninvalid string, need number foramt, err:%d \n",ret
);
5668 printk("perf evalue level: 0 ---- 3 \n");
5669 printk(" Less ---- More\n");
5670 return cnt
; //string to unsined long fail
5672 printk("%lu\n", val
);
5674 binder_perf_evalue
= val
;
5675 if (0 == (val
& BINDER_PERF_SEND_COUNTER
))
5676 binder_perf_stats_bct_clean();
5677 if (0 == (val
& BINDER_PERF_TIMEOUT_COUNTER
))
5678 binder_perf_stats_timeout_clean();
5680 printk("invalid value:%lu, should be 0 ~ 3\n", val
);
5682 pr_debug("%d (%s) set performance evaluate type %s %s\n",
5683 task_pid_nr(current
), current
->comm
,
5684 (binder_perf_evalue
& BINDER_PERF_SEND_COUNTER
) ? "sender counter enable" : "",
5685 (binder_perf_evalue
& BINDER_PERF_TIMEOUT_COUNTER
) ?
5686 "time out counter enable" : "");
5691 #ifdef BINDER_MONITOR
5692 static int binder_log_level_show(struct seq_file
*m
, void *unused
)
5694 seq_printf(m
, " Current log level: %lu\n", binder_log_level
);
5698 static ssize_t
binder_log_level_write(struct file
*filp
, const char *ubuf
,
5699 size_t cnt
, loff_t
*data
)
5702 size_t copy_size
= cnt
;
5706 if (cnt
>= sizeof(buf
))
5708 buf
[copy_size
] = '\0';
5710 if (copy_from_user(&buf
, ubuf
, copy_size
))
5713 printk("[Binder] Set binder log level:%lu -> ", binder_log_level
);
5714 ret
= strict_strtoul(buf
, 10, &val
);
5716 printk("Null\ninvalid string, need number foramt, err:%d \n",ret
);
5717 printk("Log Level: 0 ---- 4 \n");
5718 printk(" Less ---- More\n");
5719 return cnt
; //string to unsined long fail
5721 printk("%lu\n", val
);
5724 BINDER_DEBUG_USER_ERROR
| BINDER_DEBUG_FAILED_TRANSACTION
|
5725 BINDER_DEBUG_DEAD_TRANSACTION
;
5726 binder_log_level
= val
;
5727 } else if (val
== 1) {
5729 BINDER_DEBUG_USER_ERROR
| BINDER_DEBUG_FAILED_TRANSACTION
|
5730 BINDER_DEBUG_DEAD_TRANSACTION
| BINDER_DEBUG_DEAD_BINDER
|
5731 BINDER_DEBUG_DEATH_NOTIFICATION
;
5732 binder_log_level
= val
;
5733 } else if (val
== 2) {
5735 BINDER_DEBUG_USER_ERROR
| BINDER_DEBUG_FAILED_TRANSACTION
|
5736 BINDER_DEBUG_DEAD_TRANSACTION
| BINDER_DEBUG_DEAD_BINDER
|
5737 BINDER_DEBUG_DEATH_NOTIFICATION
|BINDER_DEBUG_THREADS
|
5738 BINDER_DEBUG_TRANSACTION
| BINDER_DEBUG_TRANSACTION_COMPLETE
;
5739 binder_log_level
= val
;
5740 } else if (val
== 3) {
5742 BINDER_DEBUG_USER_ERROR
| BINDER_DEBUG_FAILED_TRANSACTION
|
5743 BINDER_DEBUG_DEAD_TRANSACTION
| BINDER_DEBUG_DEAD_BINDER
|
5744 BINDER_DEBUG_DEATH_NOTIFICATION
| BINDER_DEBUG_THREADS
|
5745 BINDER_DEBUG_TRANSACTION
| BINDER_DEBUG_TRANSACTION_COMPLETE
|
5746 BINDER_DEBUG_OPEN_CLOSE
| BINDER_DEBUG_READ_WRITE
;
5747 binder_log_level
= val
;
5748 } else if (val
== 4) {
5750 BINDER_DEBUG_USER_ERROR
| BINDER_DEBUG_FAILED_TRANSACTION
|
5751 BINDER_DEBUG_DEAD_TRANSACTION
| BINDER_DEBUG_DEAD_BINDER
|
5752 BINDER_DEBUG_DEATH_NOTIFICATION
| BINDER_DEBUG_THREADS
|
5753 BINDER_DEBUG_OPEN_CLOSE
| BINDER_DEBUG_READ_WRITE
|
5754 BINDER_DEBUG_TRANSACTION
| BINDER_DEBUG_TRANSACTION_COMPLETE
|
5755 BINDER_DEBUG_USER_REFS
| BINDER_DEBUG_INTERNAL_REFS
|
5756 BINDER_DEBUG_PRIORITY_CAP
|BINDER_DEBUG_FREE_BUFFER
|
5757 BINDER_DEBUG_BUFFER_ALLOC
;
5758 binder_log_level
= val
;
5760 printk("invalid value:%lu, should be 0 ~ 4\n", val
);
5765 static void print_binder_timeout_log_entry(struct seq_file
*m
,
5766 struct binder_timeout_log_entry
*e
)
5770 rtc_time_to_tm(e
->tv
.tv_sec
, &tm
);
5771 seq_printf(m
, "%d:%s %d:%d to %d:%d spends %u000 ms (%s) dex_code %u "
5772 "start_at %lu.%03ld android %d-%02d-%02d %02d:%02d:%02d.%03lu\n",
5773 e
->debug_id
, binder_wait_on_str
[e
->r
],
5774 e
->from_proc
, e
->from_thrd
, e
->to_proc
, e
->to_thrd
,
5775 e
->over_sec
, e
->service
, e
->code
,
5776 (unsigned long)e
->ts
.tv_sec
,
5777 (e
->ts
.tv_nsec
/ NSEC_PER_MSEC
),
5778 (tm
.tm_year
+ 1900), (tm
.tm_mon
+ 1), tm
.tm_mday
,
5779 tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
,
5780 (unsigned long)(e
->tv
.tv_usec
/ USEC_PER_MSEC
));
5783 static int binder_timeout_log_show(struct seq_file
*m
, void *unused
)
5785 struct binder_timeout_log
*log
= m
->private;
5787 int end_idx
= ARRAY_SIZE(log
->entry
) - 1;
5789 binder_lock(__func__
);
5791 latest
= log
->next
? (log
->next
- 1) : end_idx
;
5792 if (log
->next
== 0 && !log
->full
)
5793 goto timeout_log_show_unlock
;
5795 if (latest
>= ARRAY_SIZE(log
->entry
) || latest
< 0) {
5798 pr_alert("timeout log index error, "
5799 "log %p latest %d next %d end_idx %d\n",
5800 log
, latest
, log
->next
, end_idx
);
5801 for (j
= -4; j
<= 3; j
++) {
5802 unsigned int *tmp
= (unsigned int *)log
+ (j
* 8);
5803 pr_alert("0x%p %08x %08x %08x %08x "
5804 "%08x %08x %08x %08x\n",
5806 *tmp
, *(tmp
+ 1), *(tmp
+ 2), *(tmp
+ 3),
5807 *(tmp
+ 4), *(tmp
+ 5), *(tmp
+ 6),
5810 aee_kernel_warning_api(__FILE__
, __LINE__
, DB_OPT_SWT_JBT_TRACES
|DB_OPT_BINDER_INFO
, "binder: timeout log index error",
5811 "detect for memory corruption\n\n"
5812 "check kernel log for more details\n");
5813 goto timeout_log_show_unlock
;
5816 for (i
= latest
; i
>= 0; i
--)
5817 print_binder_timeout_log_entry(m
, &log
->entry
[i
]);
5819 for (i
= end_idx
; i
> latest
; i
--)
5820 print_binder_timeout_log_entry(m
, &log
->entry
[i
]);
5823 timeout_log_show_unlock
:
5824 binder_unlock(__func__
);
5828 BINDER_DEBUG_SETTING_ENTRY(log_level
);
5829 #ifdef BINDER_PERF_EVAL
5830 BINDER_DEBUG_SETTING_ENTRY(perf_evalue
);
5832 BINDER_DEBUG_ENTRY(timeout_log
);
5833 #ifdef BINDER_PERF_EVAL
5834 BINDER_DEBUG_ENTRY(perf_stats
);
5837 static int binder_transaction_log_enable_show(struct seq_file
*m
, void *unused
)
5839 #ifdef BINDER_MONITOR
5840 seq_printf(m
, " Current transaciton log is %s %s %s"
5841 #ifdef RT_PRIO_INHERIT
5845 (log_disable
& 0x1) ? "disabled" : "enabled",
5846 (log_disable
& BINDER_LOG_RESUME
) ? "(self resume)" : "",
5847 (log_disable
& BINDER_BUF_WARN
) ? "(buf warning enabled)" : ""
5848 #ifdef RT_PRIO_INHERIT
5850 ,(log_disable
& BINDER_RT_LOG_ENABLE
) ? "(rt inherit log enabled)" : ""
5854 seq_printf(m
, " Current transaciton log is %s %s\n",
5855 log_disable
? "disabled" : "enabled",
5856 (log_disable
& BINDER_LOG_RESUME
) ? "(self resume)" : "");
5861 static ssize_t
binder_transaction_log_enable_write(struct file
*filp
,
5863 size_t cnt
, loff_t
*data
)
5866 size_t copy_size
= cnt
;
5870 if (cnt
>= sizeof(buf
))
5873 buf
[copy_size
] = '\0';
5875 if (copy_from_user(&buf
, ubuf
, copy_size
))
5878 ret
= strict_strtoul(buf
, 10, &val
);
5880 pr_info("failed to switch logging, "
5881 "need number format\n");
5885 log_disable
= !(val
& 0x1);
5886 if (log_disable
&& (val
& BINDER_LOG_RESUME
)) {
5887 log_disable
|= BINDER_LOG_RESUME
;
5888 queue_delayed_work(binder_deferred_workqueue
,
5889 &log_resume_work
, (120 * HZ
));
5891 #ifdef BINDER_MONITOR
5892 if (val
& BINDER_BUF_WARN
) {
5893 log_disable
|= BINDER_BUF_WARN
;
5895 #ifdef RT_PRIO_INHERIT
5896 if (val
& BINDER_RT_LOG_ENABLE
) {
5897 log_disable
|= BINDER_RT_LOG_ENABLE
;
5900 pr_info("%d (%s) set transaction log %s %s %s"
5901 #ifdef RT_PRIO_INHERIT
5905 task_pid_nr(current
), current
->comm
,
5906 (log_disable
& 0x1) ? "disabled" : "enabled",
5907 (log_disable
& BINDER_LOG_RESUME
) ?
5908 "(self resume)" : "",
5909 (log_disable
& BINDER_BUF_WARN
) ? "(buf warning)" : ""
5910 #ifdef RT_PRIO_INHERIT
5911 ,(log_disable
& BINDER_RT_LOG_ENABLE
) ? "(rt inherit log enabled)" : ""
5915 pr_info("%d (%s) set transaction log %s %s\n",
5916 task_pid_nr(current
), current
->comm
,
5917 log_disable
? "disabled" : "enabled",
5918 (log_disable
& BINDER_LOG_RESUME
) ?
5919 "(self resume)" : "");
5923 BINDER_DEBUG_SETTING_ENTRY(transaction_log_enable
);
#ifdef MTK_BINDER_PAGE_USED_RECORD
/*
 * binder_page_used_show - debugfs seq_file handler that reports binder
 * page usage: current and peak counts globally, then per binder_proc.
 *
 * Counts are in pages; ">> 8" converts a 4 KB-page count to MB.
 * The binder main lock is taken around the binder_procs walk unless
 * binder_debug_no_lock is set (lockless debug mode).
 *
 * Always returns 0 (seq_file convention).
 */
static int binder_page_used_show(struct seq_file *s, void *p)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	seq_printf(s, "page_used:%d[%dMB]\npage_used_peak:%d[%dMB]\n",
		   binder_page_used, binder_page_used >> 8,
		   binder_page_used_peak, binder_page_used_peak >> 8);

	if (do_lock)
		binder_lock(__func__);
	seq_puts(s, "binder page stats by binder_proc:\n");
	hlist_for_each_entry(proc, &binder_procs, proc_node) {
		seq_printf(s, "    proc %d(%s):page_used:%d[%dMB] page_used_peak:%d[%dMB]\n",
			   proc->pid, proc->tsk ? proc->tsk->comm : " ",
			   proc->page_used, proc->page_used >> 8,
			   proc->page_used_peak, proc->page_used_peak >> 8);
	}
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

/* Read-only debugfs wrapper for the page-usage statistics above. */
BINDER_DEBUG_ENTRY(page_used);
#endif
/* Instantiate read-only debugfs seq_file wrappers (open + file_operations)
 * for the corresponding binder_*_show handlers defined earlier. */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
5959 static int __init
binder_init(void)
5962 #ifdef BINDER_MONITOR
5963 struct task_struct
*th
;
5966 th
= kthread_create(binder_bwdog_thread
, NULL
, "binder_watchdog");
5968 pr_err("fail to create watchdog thread "
5969 "(err:%li)\n", PTR_ERR(th
));
5971 wake_up_process(th
);
5974 binder_transaction_log_failed
.entry
= &entry_failed
[0];
5975 binder_transaction_log_failed
.size
= ARRAY_SIZE(entry_failed
);
5977 #ifdef CONFIG_MTK_EXTMEM
5978 binder_transaction_log
.entry
= extmem_malloc_page_align(sizeof(struct binder_transaction_log_entry
)*MAX_ENG_TRANS_LOG_BUFF_LEN
);
5979 binder_transaction_log
.size
= MAX_ENG_TRANS_LOG_BUFF_LEN
;
5981 if(binder_transaction_log
.entry
== NULL
) {
5982 pr_err("%s[%s] ext emory alloc failed!!!\n", __FILE__
, __FUNCTION__
);
5983 binder_transaction_log
.entry
= vmalloc(sizeof(struct binder_transaction_log_entry
)*MAX_ENG_TRANS_LOG_BUFF_LEN
);
5986 binder_transaction_log
.entry
= &entry_t
[0];
5987 binder_transaction_log
.size
= ARRAY_SIZE(entry_t
);
5991 binder_deferred_workqueue
= create_singlethread_workqueue("binder");
5992 if (!binder_deferred_workqueue
)
5995 binder_debugfs_dir_entry_root
= debugfs_create_dir("binder", NULL
);
5996 if (binder_debugfs_dir_entry_root
)
5997 binder_debugfs_dir_entry_proc
= debugfs_create_dir("proc",
5998 binder_debugfs_dir_entry_root
);
5999 ret
= misc_register(&binder_miscdev
);
6000 if (binder_debugfs_dir_entry_root
) {
6001 debugfs_create_file("state",
6003 binder_debugfs_dir_entry_root
,
6005 &binder_state_fops
);
6006 debugfs_create_file("stats",
6008 binder_debugfs_dir_entry_root
,
6010 &binder_stats_fops
);
6011 debugfs_create_file("transactions",
6013 binder_debugfs_dir_entry_root
,
6015 &binder_transactions_fops
);
6016 debugfs_create_file("transaction_log",
6018 binder_debugfs_dir_entry_root
,
6019 &binder_transaction_log
,
6020 &binder_transaction_log_fops
);
6021 debugfs_create_file("failed_transaction_log",
6023 binder_debugfs_dir_entry_root
,
6024 &binder_transaction_log_failed
,
6025 &binder_transaction_log_fops
);
6026 #ifdef BINDER_MONITOR
6027 /* system_server is the main writer, remember to
6028 * change group as "system" for write permission
6029 * via related init.rc */
6030 debugfs_create_file("transaction_log_enable",
6031 (S_IRUGO
| S_IWUSR
| S_IWGRP
),
6032 binder_debugfs_dir_entry_root
,
6034 &binder_transaction_log_enable_fops
);
6035 debugfs_create_file("log_level",
6036 (S_IRUGO
| S_IWUSR
| S_IWGRP
),
6037 binder_debugfs_dir_entry_root
,
6039 &binder_log_level_fops
);
6040 debugfs_create_file("timeout_log",
6042 binder_debugfs_dir_entry_root
,
6043 &binder_timeout_log_t
,
6044 &binder_timeout_log_fops
);
6046 #ifdef BINDER_PERF_EVAL
6047 debugfs_create_file("perf_evalue",
6048 (S_IRUGO
| S_IWUSR
| S_IWGRP
),
6049 binder_debugfs_dir_entry_root
,
6051 &binder_perf_evalue_fops
);
6052 debugfs_create_file("perf_stats",
6054 binder_debugfs_dir_entry_root
,
6056 &binder_perf_stats_fops
);
6059 #ifdef MTK_BINDER_PAGE_USED_RECORD
6060 debugfs_create_file("page_used",
6062 binder_debugfs_dir_entry_root
,
6064 &binder_page_used_fops
);
/* Run binder_init() at the device-initcall stage of kernel boot. */
device_initcall(binder_init);

/* Emit the tracepoint definitions declared in binder_trace.h exactly once
 * by re-including it with CREATE_TRACE_POINTS defined. */
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");